Diffstat (limited to 'net')
-rw-r--r-- net/802/fddi.c | 4
-rw-r--r-- net/802/hippi.c | 5
-rw-r--r-- net/8021q/vlan.c | 6
-rw-r--r-- net/8021q/vlan_core.c | 4
-rw-r--r-- net/8021q/vlan_dev.c | 49
-rw-r--r-- net/8021q/vlanproc.c | 3
-rw-r--r-- net/9p/trans_virtio.c | 6
-rw-r--r-- net/Kconfig | 1
-rw-r--r-- net/Makefile | 1
-rw-r--r-- net/appletalk/ddp.c | 31
-rw-r--r-- net/appletalk/dev.c | 11
-rw-r--r-- net/atm/br2684.c | 28
-rw-r--r-- net/atm/clip.c | 42
-rw-r--r-- net/atm/lec.c | 20
-rw-r--r-- net/bluetooth/Kconfig | 3
-rw-r--r-- net/bluetooth/cmtp/capi.c | 2
-rw-r--r-- net/bluetooth/hci_core.c | 41
-rw-r--r-- net/bluetooth/l2cap.c | 117
-rw-r--r-- net/bluetooth/rfcomm/core.c | 12
-rw-r--r-- net/bridge/br.c | 10
-rw-r--r-- net/bridge/br_fdb.c | 45
-rw-r--r-- net/bridge/br_netfilter.c | 33
-rw-r--r-- net/bridge/br_private.h | 12
-rw-r--r-- net/bridge/br_sysfs_br.c | 3
-rw-r--r-- net/bridge/br_sysfs_if.c | 3
-rw-r--r-- net/bridge/netfilter/ebtables.c | 18
-rw-r--r-- net/can/af_can.c | 2
-rw-r--r-- net/core/datagram.c | 241
-rw-r--r-- net/core/dev.c | 688
-rw-r--r-- net/core/drop_monitor.c | 139
-rw-r--r-- net/core/fib_rules.c | 4
-rw-r--r-- net/core/gen_estimator.c | 4
-rw-r--r-- net/core/iovec.c | 33
-rw-r--r-- net/core/neighbour.c | 57
-rw-r--r-- net/core/net-sysfs.c | 9
-rw-r--r-- net/core/net-traces.c | 7
-rw-r--r-- net/core/net_namespace.c | 54
-rw-r--r-- net/core/netpoll.c | 7
-rw-r--r-- net/core/pktgen.c | 7
-rw-r--r-- net/core/skb_dma_map.c | 13
-rw-r--r-- net/core/skbuff.c | 317
-rw-r--r-- net/core/sock.c | 137
-rw-r--r-- net/core/stream.c | 3
-rw-r--r-- net/core/user_dma.c | 46
-rw-r--r-- net/dccp/ipv4.c | 10
-rw-r--r-- net/dccp/ipv6.c | 8
-rw-r--r-- net/dccp/output.c | 2
-rw-r--r-- net/decnet/af_decnet.c | 25
-rw-r--r-- net/decnet/dn_neigh.c | 8
-rw-r--r-- net/decnet/dn_nsp_in.c | 17
-rw-r--r-- net/decnet/dn_nsp_out.c | 14
-rw-r--r-- net/decnet/dn_route.c | 25
-rw-r--r-- net/decnet/dn_rules.c | 4
-rw-r--r-- net/dsa/slave.c | 10
-rw-r--r-- net/econet/af_econet.c | 18
-rw-r--r-- net/ethernet/eth.c | 5
-rw-r--r-- net/ieee802154/Kconfig | 12
-rw-r--r-- net/ieee802154/Makefile | 5
-rw-r--r-- net/ieee802154/af802154.h | 36
-rw-r--r-- net/ieee802154/af_ieee802154.c | 372
-rw-r--r-- net/ieee802154/dgram.c | 394
-rw-r--r-- net/ieee802154/netlink.c | 523
-rw-r--r-- net/ieee802154/nl_policy.c | 52
-rw-r--r-- net/ieee802154/raw.c | 254
-rw-r--r-- net/ipv4/Kconfig | 35
-rw-r--r-- net/ipv4/af_inet.c | 25
-rw-r--r-- net/ipv4/arp.c | 6
-rw-r--r-- net/ipv4/devinet.c | 3
-rw-r--r-- net/ipv4/fib_frontend.c | 1
-rw-r--r-- net/ipv4/fib_hash.c | 1
-rw-r--r-- net/ipv4/fib_lookup.h | 3
-rw-r--r-- net/ipv4/fib_rules.c | 4
-rw-r--r-- net/ipv4/fib_semantics.c | 3
-rw-r--r-- net/ipv4/fib_trie.c | 50
-rw-r--r-- net/ipv4/icmp.c | 20
-rw-r--r-- net/ipv4/igmp.c | 8
-rw-r--r-- net/ipv4/inet_diag.c | 2
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 26
-rw-r--r-- net/ipv4/ip_forward.c | 6
-rw-r--r-- net/ipv4/ip_fragment.c | 6
-rw-r--r-- net/ipv4/ip_gre.c | 28
-rw-r--r-- net/ipv4/ip_input.c | 21
-rw-r--r-- net/ipv4/ip_options.c | 18
-rw-r--r-- net/ipv4/ip_output.c | 49
-rw-r--r-- net/ipv4/ip_sockglue.c | 86
-rw-r--r-- net/ipv4/ipconfig.c | 41
-rw-r--r-- net/ipv4/ipip.c | 16
-rw-r--r-- net/ipv4/ipmr.c | 48
-rw-r--r-- net/ipv4/netfilter.c | 28
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 109
-rw-r--r-- net/ipv4/netfilter/ip_queue.c | 2
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 172
-rw-r--r-- net/ipv4/netfilter/ipt_MASQUERADE.c | 14
-rw-r--r-- net/ipv4/netfilter/ipt_REJECT.c | 7
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 17
-rw-r--r-- net/ipv4/netfilter/nf_nat_helper.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_proto_sctp.c | 5
-rw-r--r-- net/ipv4/netfilter/nf_nat_standalone.c | 7
-rw-r--r-- net/ipv4/proc.c | 10
-rw-r--r-- net/ipv4/raw.c | 2
-rw-r--r-- net/ipv4/route.c | 54
-rw-r--r-- net/ipv4/syncookies.c | 5
-rw-r--r-- net/ipv4/tcp.c | 47
-rw-r--r-- net/ipv4/tcp_input.c | 100
-rw-r--r-- net/ipv4/tcp_ipv4.c | 12
-rw-r--r-- net/ipv4/tcp_output.c | 4
-rw-r--r-- net/ipv4/udp.c | 4
-rw-r--r-- net/ipv4/xfrm4_input.c | 2
-rw-r--r-- net/ipv4/xfrm4_mode_tunnel.c | 4
-rw-r--r-- net/ipv4/xfrm4_output.c | 6
-rw-r--r-- net/ipv6/Kconfig | 2
-rw-r--r-- net/ipv6/addrconf.c | 89
-rw-r--r-- net/ipv6/af_inet6.c | 35
-rw-r--r-- net/ipv6/exthdrs.c | 40
-rw-r--r-- net/ipv6/fib6_rules.c | 4
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/ipv6/ip6_input.c | 15
-rw-r--r-- net/ipv6/ip6_output.c | 74
-rw-r--r-- net/ipv6/ip6_tunnel.c | 30
-rw-r--r-- net/ipv6/ip6mr.c | 33
-rw-r--r-- net/ipv6/mcast.c | 34
-rw-r--r-- net/ipv6/ndisc.c | 17
-rw-r--r-- net/ipv6/netfilter.c | 16
-rw-r--r-- net/ipv6/netfilter/ip6_queue.c | 2
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 170
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 17
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 4
-rw-r--r-- net/ipv6/proc.c | 10
-rw-r--r-- net/ipv6/raw.c | 4
-rw-r--r-- net/ipv6/reassembly.c | 26
-rw-r--r-- net/ipv6/route.c | 12
-rw-r--r-- net/ipv6/sit.c | 118
-rw-r--r-- net/ipv6/syncookies.c | 4
-rw-r--r-- net/ipv6/tcp_ipv6.c | 17
-rw-r--r-- net/ipv6/udp.c | 7
-rw-r--r-- net/ipv6/xfrm6_mode_tunnel.c | 4
-rw-r--r-- net/ipv6/xfrm6_output.c | 4
-rw-r--r-- net/irda/irlap_frame.c | 18
-rw-r--r-- net/irda/irnetlink.c | 19
-rw-r--r-- net/iucv/af_iucv.c | 555
-rw-r--r-- net/iucv/iucv.c | 311
-rw-r--r-- net/llc/af_llc.c | 2
-rw-r--r-- net/llc/llc_conn.c | 4
-rw-r--r-- net/mac80211/Kconfig | 21
-rw-r--r-- net/mac80211/agg-rx.c | 19
-rw-r--r-- net/mac80211/agg-tx.c | 19
-rw-r--r-- net/mac80211/cfg.c | 213
-rw-r--r-- net/mac80211/debugfs.c | 74
-rw-r--r-- net/mac80211/driver-ops.h | 191
-rw-r--r-- net/mac80211/event.c | 17
-rw-r--r-- net/mac80211/ht.c | 84
-rw-r--r-- net/mac80211/ibss.c | 501
-rw-r--r-- net/mac80211/ieee80211_i.h | 156
-rw-r--r-- net/mac80211/iface.c | 117
-rw-r--r-- net/mac80211/key.c | 29
-rw-r--r-- net/mac80211/key.h | 3
-rw-r--r-- net/mac80211/main.c | 368
-rw-r--r-- net/mac80211/mesh.c | 46
-rw-r--r-- net/mac80211/mesh.h | 16
-rw-r--r-- net/mac80211/mesh_hwmp.c | 8
-rw-r--r-- net/mac80211/mesh_plink.c | 21
-rw-r--r-- net/mac80211/mlme.c | 790
-rw-r--r-- net/mac80211/pm.c | 182
-rw-r--r-- net/mac80211/rc80211_minstrel.c | 10
-rw-r--r-- net/mac80211/rc80211_pid_algo.c | 8
-rw-r--r-- net/mac80211/rx.c | 319
-rw-r--r-- net/mac80211/scan.c | 436
-rw-r--r-- net/mac80211/spectmgmt.c | 103
-rw-r--r-- net/mac80211/sta_info.c | 112
-rw-r--r-- net/mac80211/sta_info.h | 7
-rw-r--r-- net/mac80211/tkip.c | 6
-rw-r--r-- net/mac80211/tx.c | 72
-rw-r--r-- net/mac80211/util.c | 451
-rw-r--r-- net/mac80211/wext.c | 637
-rw-r--r-- net/mac80211/wme.c | 34
-rw-r--r-- net/mac80211/wpa.c | 2
-rw-r--r-- net/netfilter/Kconfig | 17
-rw-r--r-- net/netfilter/Makefile | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 18
-rw-r--r-- net/netfilter/ipvs/ip_vs_xmit.c | 48
-rw-r--r-- net/netfilter/nf_conntrack_acct.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 162
-rw-r--r-- net/netfilter/nf_conntrack_ecache.c | 264
-rw-r--r-- net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_helper.c | 14
-rw-r--r-- net/netfilter/nf_conntrack_netbios_ns.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 329
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 31
-rw-r--r-- net/netfilter/nf_conntrack_proto_gre.c | 5
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 27
-rw-r--r-- net/netfilter/nf_conntrack_proto_tcp.c | 140
-rw-r--r-- net/netfilter/nf_log.c | 6
-rw-r--r-- net/netfilter/nf_queue.c | 4
-rw-r--r-- net/netfilter/nfnetlink.c | 28
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 4
-rw-r--r-- net/netfilter/x_tables.c | 54
-rw-r--r-- net/netfilter/xt_NFQUEUE.c | 93
-rw-r--r-- net/netfilter/xt_TCPMSS.c | 6
-rw-r--r-- net/netfilter/xt_osf.c | 428
-rw-r--r-- net/netfilter/xt_policy.c | 2
-rw-r--r-- net/netfilter/xt_realm.c | 2
-rw-r--r-- net/netfilter/xt_socket.c | 63
-rw-r--r-- net/netlabel/netlabel_cipso_v4.c | 16
-rw-r--r-- net/netlabel/netlabel_mgmt.c | 16
-rw-r--r-- net/netlabel/netlabel_unlabeled.c | 16
-rw-r--r-- net/netlink/genetlink.c | 46
-rw-r--r-- net/packet/af_packet.c | 604
-rw-r--r-- net/phonet/pep-gprs.c | 9
-rw-r--r-- net/phonet/pep.c | 4
-rw-r--r-- net/rds/af_rds.c | 1
-rw-r--r-- net/rds/connection.c | 4
-rw-r--r-- net/rds/ib.c | 4
-rw-r--r-- net/rds/ib.h | 2
-rw-r--r-- net/rds/ib_recv.c | 2
-rw-r--r-- net/rds/ib_ring.c | 2
-rw-r--r-- net/rds/ib_send.c | 10
-rw-r--r-- net/rds/info.c | 5
-rw-r--r-- net/rds/iw.c | 4
-rw-r--r-- net/rds/iw.h | 2
-rw-r--r-- net/rds/iw_recv.c | 2
-rw-r--r-- net/rds/iw_ring.c | 2
-rw-r--r-- net/rds/iw_send.c | 10
-rw-r--r-- net/rds/rdma.c | 7
-rw-r--r-- net/rds/rdma_transport.c | 12
-rw-r--r-- net/rds/rds.h | 2
-rw-r--r-- net/rds/send.c | 10
-rw-r--r-- net/rfkill/Kconfig | 21
-rw-r--r-- net/rfkill/Makefile | 5
-rw-r--r-- net/rfkill/core.c | 1205
-rw-r--r-- net/rfkill/input.c | 342
-rw-r--r-- net/rfkill/rfkill-input.c | 459
-rw-r--r-- net/rfkill/rfkill.c | 882
-rw-r--r-- net/rfkill/rfkill.h (renamed from net/rfkill/rfkill-input.h) | 10
-rw-r--r-- net/rose/rose_dev.c | 2
-rw-r--r-- net/rxrpc/ar-connection.c | 10
-rw-r--r-- net/rxrpc/ar-connevent.c | 7
-rw-r--r-- net/sched/cls_cgroup.c | 6
-rw-r--r-- net/sched/cls_flow.c | 8
-rw-r--r-- net/sched/cls_route.c | 2
-rw-r--r-- net/sched/em_meta.c | 8
-rw-r--r-- net/sched/sch_api.c | 4
-rw-r--r-- net/sched/sch_cbq.c | 4
-rw-r--r-- net/sched/sch_generic.c | 40
-rw-r--r-- net/sched/sch_hfsc.c | 8
-rw-r--r-- net/sched/sch_sfq.c | 2
-rw-r--r-- net/sched/sch_teql.c | 26
-rw-r--r-- net/sctp/associola.c | 64
-rw-r--r-- net/sctp/input.c | 4
-rw-r--r-- net/sctp/output.c | 23
-rw-r--r-- net/sctp/protocol.c | 10
-rw-r--r-- net/sctp/sm_make_chunk.c | 16
-rw-r--r-- net/sctp/sm_sideeffect.c | 8
-rw-r--r-- net/sctp/sm_statefuns.c | 14
-rw-r--r-- net/sctp/sm_statetable.c | 2
-rw-r--r-- net/sctp/socket.c | 48
-rw-r--r-- net/sctp/sysctl.c | 6
-rw-r--r-- net/sctp/ulpevent.c | 7
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 1
-rw-r--r-- net/sunrpc/svc.c | 2
-rw-r--r-- net/sunrpc/xprtsock.c | 2
-rw-r--r-- net/tipc/eth_media.c | 2
-rw-r--r-- net/tipc/netlink.c | 38
-rw-r--r-- net/wimax/Kconfig | 15
-rw-r--r-- net/wimax/Makefile | 1
-rw-r--r-- net/wimax/debug-levels.h | 1
-rw-r--r-- net/wimax/debugfs.c | 1
-rw-r--r-- net/wimax/op-msg.c | 17
-rw-r--r-- net/wimax/op-rfkill.c | 125
-rw-r--r-- net/wimax/op-state-get.c | 86
-rw-r--r-- net/wimax/stack.c | 5
-rw-r--r-- net/wireless/Kconfig | 11
-rw-r--r-- net/wireless/Makefile | 3
-rw-r--r-- net/wireless/core.c | 148
-rw-r--r-- net/wireless/core.h | 42
-rw-r--r-- net/wireless/debugfs.c | 131
-rw-r--r-- net/wireless/debugfs.h | 14
-rw-r--r-- net/wireless/ibss.c | 369
-rw-r--r-- net/wireless/mlme.c | 50
-rw-r--r-- net/wireless/nl80211.c | 923
-rw-r--r-- net/wireless/nl80211.h | 32
-rw-r--r-- net/wireless/reg.c | 274
-rw-r--r-- net/wireless/scan.c | 66
-rw-r--r-- net/wireless/util.c | 370
-rw-r--r-- net/wireless/wext-compat.c | 600
-rw-r--r-- net/wireless/wext.c | 48
-rw-r--r-- net/xfrm/xfrm_algo.c | 41
-rw-r--r-- net/xfrm/xfrm_input.c | 3
-rw-r--r-- net/xfrm/xfrm_output.c | 21
-rw-r--r-- net/xfrm/xfrm_policy.c | 8
290 files changed, 14397 insertions, 6816 deletions
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 539e6064e6d4..3ef0ab0a543a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -185,10 +185,6 @@ static const struct header_ops fddi_header_ops = {
 static void fddi_setup(struct net_device *dev)
 {
 	dev->header_ops		= &fddi_header_ops;
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu		= fddi_change_mtu,
-#endif
-
 	dev->type		= ARPHRD_FDDI;
 	dev->hard_header_len	= FDDI_K_SNAP_HLEN+3;	/* Assume 802.2 SNAP hdr len + 3 pad bytes */
 	dev->mtu		= FDDI_K_SNAP_DLEN;	/* Assume max payload of 802.2 SNAP frame */
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 313b9ebf92ee..cd3e8e929529 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -193,11 +193,6 @@ static const struct header_ops hippi_header_ops = {
 
 static void hippi_setup(struct net_device *dev)
 {
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu		= hippi_change_mtu;
-	dev->set_mac_address	= hippi_mac_addr;
-	dev->neigh_setup	= hippi_neigh_setup_dev;
-#endif
 	dev->header_ops		= &hippi_header_ops;
 
 	/*
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d1e10546eb85..fe649081fbdc 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,13 +378,13 @@ static void vlan_sync_address(struct net_device *dev,
 	 * the new address */
 	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_delete(dev, vlandev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(dev, vlandev->dev_addr);
 
 	/* vlan address was equal to the old address and is different from
 	 * the new address */
 	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_add(dev, vlandev->dev_addr, ETH_ALEN);
+		dev_unicast_add(dev, vlandev->dev_addr);
 
 	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
 }
@@ -758,7 +758,7 @@ static void __exit vlan_cleanup_module(void)
 		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
 	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
-	synchronize_net();
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
 	vlan_gvrp_uninit();
 }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index c67fe6f75653..7f7de1a04de6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,9 +114,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 EXPORT_SYMBOL(vlan_gro_receive);
 
 int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
-		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+		   unsigned int vlan_tci)
 {
-	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
 		return NET_RX_DROP;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b4b9068e55a7..96bad8f233e2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -290,7 +290,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 
 static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct net_device_stats *stats = &dev->stats;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 
 	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
@@ -309,7 +309,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 	skb = __vlan_put_tag(skb, vlan_tci);
 	if (!skb) {
-		stats->tx_dropped++;
+		txq->tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -317,8 +317,8 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
 	}
 
-	stats->tx_packets++;
-	stats->tx_bytes += skb->len;
+	txq->tx_packets++;
+	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	dev_queue_xmit(skb);
@@ -328,15 +328,15 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	struct net_device_stats *stats = &dev->stats;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	u16 vlan_tci;
 
 	vlan_tci = vlan_dev_info(dev)->vlan_id;
 	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 
-	stats->tx_packets++;
-	stats->tx_bytes += skb->len;
+	txq->tx_packets++;
+	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	dev_queue_xmit(skb);
@@ -441,7 +441,7 @@ static int vlan_dev_open(struct net_device *dev)
 		return -ENETDOWN;
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
+		err = dev_unicast_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
 	}
@@ -470,7 +470,7 @@ clear_allmulti:
 		dev_set_allmulti(real_dev, -1);
 del_unicast:
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 out:
 	netif_carrier_off(dev);
 	return err;
@@ -492,7 +492,7 @@ static int vlan_dev_stop(struct net_device *dev)
 		dev_set_promiscuity(real_dev, -1);
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 
 	netif_carrier_off(dev);
 	return 0;
@@ -511,13 +511,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 		goto out;
 
 	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
+		err = dev_unicast_add(real_dev, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 
 out:
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -644,7 +644,6 @@ static int vlan_dev_init(struct net_device *dev)
 		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
 		dev->netdev_ops = &vlan_netdev_ops;
 	}
-	netdev_resync_ops(dev);
 
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
@@ -671,13 +670,7 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
 				     struct ethtool_cmd *cmd)
 {
 	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	struct net_device *real_dev = vlan->real_dev;
-
-	if (!real_dev->ethtool_ops ||
-	    !real_dev->ethtool_ops->get_settings)
-		return -EOPNOTSUPP;
-
-	return real_dev->ethtool_ops->get_settings(real_dev, cmd);
+	return dev_ethtool_get_settings(vlan->real_dev, cmd);
 }
 
 static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -691,24 +684,13 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
 {
 	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	struct net_device *real_dev = vlan->real_dev;
-
-	if (real_dev->ethtool_ops == NULL ||
-	    real_dev->ethtool_ops->get_rx_csum == NULL)
-		return 0;
-	return real_dev->ethtool_ops->get_rx_csum(real_dev);
+	return dev_ethtool_get_rx_csum(vlan->real_dev);
 }
 
 static u32 vlan_ethtool_get_flags(struct net_device *dev)
 {
 	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	struct net_device *real_dev = vlan->real_dev;
-
-	if (!(real_dev->features & NETIF_F_HW_VLAN_RX) ||
-	    real_dev->ethtool_ops == NULL ||
-	    real_dev->ethtool_ops->get_flags == NULL)
-		return 0;
-	return real_dev->ethtool_ops->get_flags(real_dev);
+	return dev_ethtool_get_flags(vlan->real_dev);
 }
 
 static const struct ethtool_ops vlan_ethtool_ops = {
@@ -756,6 +738,7 @@ void vlan_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->priv_flags |= IFF_802_1Q_VLAN;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 	dev->tx_queue_len = 0;
 
 	dev->netdev_ops = &vlan_netdev_ops;
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 3628e0a81b40..b55a091a33df 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -279,13 +279,14 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
 	struct net_device *vlandev = (struct net_device *) seq->private;
 	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
-	struct net_device_stats *stats = &vlandev->stats;
+	const struct net_device_stats *stats;
 	static const char fmt[] = "%30s %12lu\n";
 	int i;
 
 	if (!is_vlan_dev(vlandev))
 		return 0;
 
+	stats = dev_get_stats(vlandev);
 	seq_printf(seq,
 		   "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
 		   vlandev->name, dev_info->vlan_id,
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index bb8579a141a8..a49484e67e1d 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 	chan->vdev = vdev;
 
 	/* We expect one virtqueue, for requests. */
-	chan->vq = vdev->config->find_vq(vdev, 0, req_done);
+	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
 	if (IS_ERR(chan->vq)) {
 		err = PTR_ERR(chan->vq);
 		goto out_free_vq;
@@ -261,7 +261,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 	return 0;
 
 out_free_vq:
-	vdev->config->del_vq(chan->vq);
+	vdev->config->del_vqs(vdev);
fail:
 	mutex_lock(&virtio_9p_lock);
 	chan_index--;
@@ -332,7 +332,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 	BUG_ON(chan->inuse);
 
 	if (chan->initialized) {
-		vdev->config->del_vq(chan->vq);
+		vdev->config->del_vqs(vdev);
 		chan->initialized = false;
 	}
 }
diff --git a/net/Kconfig b/net/Kconfig
index c19f549c8e74..7051b9710675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -179,6 +179,7 @@ source "net/lapb/Kconfig"
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
+source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index 9e00a55a901b..ba324aefda73 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_NET_9P) += 9p/
 ifneq ($(CONFIG_DCB),)
 obj-y += dcb/
 endif
+obj-y += ieee802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d6a9243641af..b603cbacdc58 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -939,6 +939,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 					  int len, unsigned long sum)
 {
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
 	int i, copy;
 
 	/* checksum stuff in header space */
@@ -977,26 +978,22 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
 		WARN_ON(start > offset + len);
 
-		end = start + list->len;
+		end = start + frag_iter->len;
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			sum = atalk_sum_skb(list, offset - start,
+			sum = atalk_sum_skb(frag_iter, offset - start,
 					    copy, sum);
 			if ((len -= copy) == 0)
 				return sum;
 			offset += copy;
-		}
-		start = end;
 		}
+		start = end;
 	}
 
 	BUG_ON(len > 0);
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index 72277d70c980..6c8016f61866 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -9,21 +9,10 @@
 #include <linux/if_arp.h>
 #include <linux/if_ltalk.h>
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-static int ltalk_change_mtu(struct net_device *dev, int mtu)
-{
-	return -EINVAL;
-}
-#endif
-
 static void ltalk_setup(struct net_device *dev)
 {
 	/* Fill in the fields of the device structure with localtalk-generic values. */
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu		= ltalk_change_mtu;
-#endif
-
 	dev->type		= ARPHRD_LOCALTLK;
 	dev->hard_header_len	= LTALK_HLEN;
 	dev->mtu		= LTALK_MTU;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 3100a8940afc..2912665fc58c 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -228,7 +228,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct br2684_dev *brdev = BRPRIV(dev);
 	struct br2684_vcc *brvcc;
 
-	pr_debug("br2684_start_xmit, skb->dst=%p\n", skb->dst);
+	pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb));
 	read_lock(&devs_lock);
 	brvcc = pick_outgoing_vcc(skb, brdev);
 	if (brvcc == NULL) {
@@ -445,9 +445,10 @@ free_skb:
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
+	struct sk_buff_head queue;
 	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *tmp;
 	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
@@ -505,29 +506,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	barrier();
 	atmvcc->push = br2684_push;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(atmvcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	skb_queue_walk_safe(&queue, skb, tmp) {
+		struct net_device *dev = skb->dev;
 
-		skb->next = skb->prev = NULL;
-		br2684_push(atmvcc, skb);
-		skb->dev->stats.rx_bytes -= skb->len;
-		skb->dev->stats.rx_packets--;
+		dev->stats.rx_bytes -= skb->len;
+		dev->stats.rx_packets--;
 
-		skb = next;
+		br2684_push(atmvcc, skb);
 	}
 	__module_get(THIS_MODULE);
 	return 0;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 3dc0a3a42a57..e65a3b1477f8 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -369,16 +369,16 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	pr_debug("clip_start_xmit (skb %p)\n", skb);
-	if (!skb->dst) {
-		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
+	if (!skb_dst(skb)) {
+		printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n");
 		dev_kfree_skb(skb);
 		dev->stats.tx_dropped++;
 		return 0;
 	}
-	if (!skb->dst->neighbour) {
+	if (!skb_dst(skb)->neighbour) {
 #if 0
-		skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
-		if (!skb->dst->neighbour) {
+		skb_dst(skb)->neighbour = clip_find_neighbour(skb_dst(skb), 1);
+		if (!skb_dst(skb)->neighbour) {
 			dev_kfree_skb(skb);	/* lost that one */
 			dev->stats.tx_dropped++;
 			return 0;
@@ -389,7 +389,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev->stats.tx_dropped++;
 		return 0;
 	}
-	entry = NEIGH2ENTRY(skb->dst->neighbour);
+	entry = NEIGH2ENTRY(skb_dst(skb)->neighbour);
 	if (!entry->vccs) {
 		if (time_after(jiffies, entry->expires)) {
 			/* should be resolved */
@@ -406,7 +406,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
 	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
-	pr_debug("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
+	pr_debug("using neighbour %p, vcc %p\n", skb_dst(skb)->neighbour, vcc);
 	if (entry->vccs->encap) {
 		void *here;
 
@@ -445,9 +445,9 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
+	struct sk_buff_head *rq, queue;
 	struct clip_vcc *clip_vcc;
-	struct sk_buff *skb;
-	struct sk_buff_head *rq;
+	struct sk_buff *skb, *tmp;
 	unsigned long flags;
 
 	if (!vcc->push)
@@ -469,39 +469,28 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(vcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* re-process everything received between connection setup and MKIP */
-	while (skb) {
-		struct sk_buff *next = skb->next;
-
-		skb->next = skb->prev = NULL;
+	skb_queue_walk_safe(&queue, skb, tmp) {
 		if (!clip_devs) {
 			atm_return(vcc, skb->truesize);
 			kfree_skb(skb);
 		} else {
+			struct net_device *dev = skb->dev;
 			unsigned int len = skb->len;
 
 			skb_get(skb);
 			clip_push(vcc, skb);
-			skb->dev->stats.rx_packets--;
-			skb->dev->stats.rx_bytes -= len;
+			dev->stats.rx_packets--;
+			dev->stats.rx_bytes -= len;
 			kfree_skb(skb);
 		}
-
-		skb = next;
 	}
 	return 0;
 }
@@ -568,6 +557,7 @@ static void clip_setup(struct net_device *dev)
 	/* without any more elaborate queuing. 100 is a reasonable */
 	/* compromise between decent burst-tolerance and protection */
 	/* against memory hogs. */
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 static int clip_create(int number)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 199b6bb79f42..ff2e594dca9b 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -34,7 +34,6 @@
 
 /* Proxy LEC knows about bridging */
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-#include <linux/if_bridge.h>
 #include "../bridge/br_private.h"
 
 static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
@@ -271,7 +270,8 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk("%s:No lecd attached\n", dev->name);
 		dev->stats.tx_errors++;
 		netif_stop_queue(dev);
-		return -EUNATCH;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
@@ -518,18 +518,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	case l_should_bridge:
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 	{
-		struct net_bridge_fdb_entry *f;
-
 		pr_debug("%s: bridge zeppelin asks about %pM\n",
 			 dev->name, mesg->content.proxy.mac_addr);
 
-		if (br_fdb_get_hook == NULL || dev->br_port == NULL)
+		if (br_fdb_test_addr_hook == NULL)
 			break;
 
-		f = br_fdb_get_hook(dev->br_port->br,
-				    mesg->content.proxy.mac_addr);
-		if (f != NULL && f->dst->dev != dev
-		    && f->dst->state == BR_STATE_FORWARDING) {
+		if (br_fdb_test_addr_hook(dev,
+					  mesg->content.proxy.mac_addr)) {
 			/* hit from bridge table, send LE_ARP_RESPONSE */
 			struct sk_buff *skb2;
 			struct sock *sk;
@@ -540,10 +536,8 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 			skb2 =
 			    alloc_skb(sizeof(struct atmlec_msg),
 				      GFP_ATOMIC);
-			if (skb2 == NULL) {
-				br_fdb_put_hook(f);
+			if (skb2 == NULL)
 				break;
-			}
 			skb2->len = sizeof(struct atmlec_msg);
 			skb_copy_to_linear_data(skb2, mesg,
 						sizeof(*mesg));
@@ -552,8 +546,6 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 			skb_queue_tail(&sk->sk_receive_queue, skb2);
 			sk->sk_data_ready(sk, skb2->len);
 		}
-		if (f != NULL)
-			br_fdb_put_hook(f);
 	}
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
 	break;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 7725da95a767..59fdb1d2e8ed 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -3,8 +3,9 @@
 #
 
 menuconfig BT
-	depends on NET && !S390
 	tristate "Bluetooth subsystem support"
+	depends on NET && !S390
+	depends on RFKILL || !RFKILL
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 78958c0f9a40..97f8d68d574d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -382,7 +382,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
 
 	BT_DBG("ctrl %p", ctrl);
 
-	capi_ctr_reseted(ctrl);
+	capi_ctr_down(ctrl);
 
 	atomic_inc(&session->terminate);
 	cmtp_schedule(session);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cd061510b6bd..406ad07cdea1 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -39,6 +39,7 @@
 #include <linux/skbuff.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
+#include <linux/rfkill.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -476,6 +477,11 @@ int hci_dev_open(__u16 dev)
 
 	hci_req_lock(hdev);
 
+	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+		ret = -ERFKILL;
+		goto done;
+	}
+
 	if (test_bit(HCI_UP, &hdev->flags)) {
 		ret = -EALREADY;
 		goto done;
@@ -813,6 +819,24 @@ int hci_get_dev_info(void __user *arg)
 
 /* ---- Interface to HCI drivers ---- */
 
+static int hci_rfkill_set_block(void *data, bool blocked)
+{
+	struct hci_dev *hdev = data;
+
+	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+
+	if (!blocked)
+		return 0;
+
+	hci_dev_do_close(hdev);
+
+	return 0;
+}
+
+static const struct rfkill_ops hci_rfkill_ops = {
+	.set_block = hci_rfkill_set_block,
+};
+
 /* Alloc HCI device */
 struct hci_dev *hci_alloc_dev(void)
 {
@@ -844,7 +868,8 @@ int hci_register_dev(struct hci_dev *hdev)
 	struct list_head *head = &hci_dev_list, *p;
 	int i, id = 0;
 
-	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
+	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
+	       hdev->type, hdev->owner);
 
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
@@ -900,6 +925,15 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	hci_register_sysfs(hdev);
 
+	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
+				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+	if (hdev->rfkill) {
+		if (rfkill_register(hdev->rfkill) < 0) {
+			rfkill_destroy(hdev->rfkill);
+			hdev->rfkill = NULL;
+		}
+	}
+
 	hci_notify(hdev, HCI_DEV_REG);
 
 	return id;
@@ -924,6 +958,11 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
+	if (hdev->rfkill) {
+		rfkill_unregister(hdev->rfkill);
+		rfkill_destroy(hdev->rfkill);
+	}
+
 	hci_unregister_sysfs(hdev);
 
 	__hci_dev_put(hdev);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index ca4d3b40d5ce..bd0a4c1bced0 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,10 +40,10 @@
 #include <linux/skbuff.h>
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/uaccess.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -52,7 +52,7 @@
 
 #define VERSION "2.13"
 
-static u32 l2cap_feat_mask = 0x0080;
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
 static const struct proto_ops l2cap_sock_ops;
@@ -134,7 +134,8 @@ static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16
 	struct sock *s;
 	read_lock(&l->lock);
 	s = __l2cap_get_chan_by_scid(l, cid);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l->lock);
 	return s;
 }
@@ -154,17 +155,18 @@ static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8
 	struct sock *s;
 	read_lock(&l->lock);
 	s = __l2cap_get_chan_by_ident(l, ident);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l->lock);
 	return s;
 }
 
 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
 {
-	u16 cid = 0x0040;
+	u16 cid = L2CAP_CID_DYN_START;
 
-	for (; cid < 0xffff; cid++) {
-		if(!__l2cap_get_chan_by_scid(l, cid))
+	for (; cid < L2CAP_CID_DYN_END; cid++) {
+		if (!__l2cap_get_chan_by_scid(l, cid))
 			return cid;
 	}
 
@@ -204,7 +206,8 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
 
-	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+	       l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
 
 	conn->disc_reason = 0x13;
 
@@ -215,13 +218,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
 	} else if (sk->sk_type == SOCK_DGRAM) {
 		/* Connectionless socket */
-		l2cap_pi(sk)->scid = 0x0002;
-		l2cap_pi(sk)->dcid = 0x0002;
+		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
+		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
 	} else {
 		/* Raw socket can send/recv signalling messages only */
-		l2cap_pi(sk)->scid = 0x0001;
-		l2cap_pi(sk)->dcid = 0x0001;
+		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
+		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
 	}
 
@@ -272,7 +275,7 @@ static inline int l2cap_check_security(struct sock *sk)
 	if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
 		auth_type = HCI_AT_NO_BONDING_MITM;
 	else
-		auth_type = HCI_AT_NO_BONDING;
+		auth_type = HCI_AT_NO_BONDING;
 
 	if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
 		l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
@@ -588,7 +591,8 @@ static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t
 	struct sock *s;
 	read_lock(&l2cap_sk_list.lock);
 	s = __l2cap_get_sock_by_psm(state, psm, src);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l2cap_sk_list.lock);
 	return s;
 }
@@ -808,7 +812,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 		goto done;
 	}
 
-	if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
+	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
 	    !capable(CAP_NET_BIND_SERVICE)) {
 		err = -EACCES;
 		goto done;
@@ -825,7 +829,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 		l2cap_pi(sk)->sport = la.l2_psm;
 		sk->sk_state = BT_BOUND;
 
-		if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
+		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+		    __le16_to_cpu(la.l2_psm) == 0x0003)
 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
 	}
 
@@ -844,12 +849,13 @@ static int l2cap_do_connect(struct sock *sk)
 	struct hci_conn *hcon;
 	struct hci_dev *hdev;
 	__u8 auth_type;
-	int err = 0;
+	int err;
 
 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
 	       l2cap_pi(sk)->psm);
 
-	if (!(hdev = hci_get_route(dst, src)))
+	hdev = hci_get_route(dst, src);
+	if (!hdev)
 		return -EHOSTUNREACH;
 
 	hci_dev_lock_bh(hdev);
@@ -950,7 +956,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 		goto done;
 	}
 
-	switch(sk->sk_state) {
+	switch (sk->sk_state) {
 	case BT_CONNECT:
 	case BT_CONNECT2:
 	case BT_CONFIG:
@@ -975,7 +981,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
 	l2cap_pi(sk)->psm = la.l2_psm;
 
-	if ((err = l2cap_do_connect(sk)))
+	err = l2cap_do_connect(sk);
+	if (err)
 		goto done;
 
 wait:
@@ -1009,9 +1016,9 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
 	write_lock_bh(&l2cap_sk_list.lock);
 
 	for (psm = 0x1001; psm < 0x1100; psm += 2)
-		if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
-			l2cap_pi(sk)->psm   = htobs(psm);
-			l2cap_pi(sk)->sport = htobs(psm);
+		if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+			l2cap_pi(sk)->psm   = cpu_to_le16(psm);
+			l2cap_pi(sk)->sport = cpu_to_le16(psm);
 			err = 0;
 			break;
 		}
@@ -1100,11 +1107,11 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
 	if (peer) {
 		la->l2_psm = l2cap_pi(sk)->psm;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
-		la->l2_cid = htobs(l2cap_pi(sk)->dcid);
+		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
 	} else {
 		la->l2_psm = l2cap_pi(sk)->sport;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
-		la->l2_cid = htobs(l2cap_pi(sk)->scid);
+		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
 	}
 
 	return 0;
@@ -1114,7 +1121,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
 	struct sk_buff *skb, **frag;
-	int err, hlen, count, sent=0;
+	int err, hlen, count, sent = 0;
 	struct l2cap_hdr *lh;
 
 	BT_DBG("sk %p len %d", sk, len);
@@ -1167,8 +1174,8 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
 
 		frag = &(*frag)->next;
 	}
-
-	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
+	err = hci_send_acl(conn->hcon, skb, 0);
+	if (err < 0)
 		goto fail;
 
 	return sent;
@@ -1556,7 +1563,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
 	struct sk_buff *nskb;
-	struct sock * sk;
+	struct sock *sk;
 
 	BT_DBG("conn %p", conn);
 
@@ -1568,8 +1575,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 		/* Don't send frame to the socket it came from */
 		if (skb->sk == sk)
 			continue;
-
-		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
 			continue;
 
 		if (sock_queue_rcv_skb(sk, nskb))
@@ -1587,7 +1594,8 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 	struct l2cap_hdr *lh;
 	int len, count;
 
-	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
+	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+	       conn, code, ident, dlen);
 
 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
 	count = min_t(unsigned int, conn->mtu, len);
@@ -1598,7 +1606,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
-	lh->cid = cpu_to_le16(0x0001);
+	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
 
 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
 	cmd->code = code;
@@ -1739,8 +1747,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 
-		hint  = type & 0x80;
-		type &= 0x7f;
+		hint  = type & L2CAP_CONF_HINT;
+		type &= L2CAP_CONF_MASK;
 
 		switch (type) {
 		case L2CAP_CONF_MTU:
@@ -1966,10 +1974,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
 
 	if (scid) {
-		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
+		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+		if (!sk)
 			return 0;
 	} else {
-		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
+		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+		if (!sk)
 			return 0;
 	}
 
@@ -2012,7 +2022,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2012 2022
2013 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); 2023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2014 2024
2015 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid))) 2025 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2026 if (!sk)
2016 return -ENOENT; 2027 return -ENOENT;
2017 2028
2018 if (sk->sk_state == BT_DISCONN) 2029 if (sk->sk_state == BT_DISCONN)
@@ -2079,9 +2090,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2079 flags = __le16_to_cpu(rsp->flags); 2090 flags = __le16_to_cpu(rsp->flags);
2080 result = __le16_to_cpu(rsp->result); 2091 result = __le16_to_cpu(rsp->result);
2081 2092
2082 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result); 2093 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2094 scid, flags, result);
2083 2095
2084 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid))) 2096 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2097 if (!sk)
2085 return 0; 2098 return 0;
2086 2099
2087 switch (result) { 2100 switch (result) {
@@ -2142,7 +2155,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2142 2155
2143 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 2156 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2144 2157
2145 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid))) 2158 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2159 if (!sk)
2146 return 0; 2160 return 0;
2147 2161
2148 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 2162 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
@@ -2169,7 +2183,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2169 2183
2170 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 2184 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2171 2185
2172 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid))) 2186 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2187 if (!sk)
2173 return 0; 2188 return 0;
2174 2189
2175 l2cap_chan_del(sk, 0); 2190 l2cap_chan_del(sk, 0);
@@ -2230,7 +2245,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
2230 if (type == L2CAP_IT_FEAT_MASK) { 2245 if (type == L2CAP_IT_FEAT_MASK) {
2231 conn->feat_mask = get_unaligned_le32(rsp->data); 2246 conn->feat_mask = get_unaligned_le32(rsp->data);
2232 2247
2233 if (conn->feat_mask & 0x0080) { 2248 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2234 struct l2cap_info_req req; 2249 struct l2cap_info_req req;
2235 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 2250 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2236 2251
@@ -2403,7 +2418,8 @@ drop:
2403 kfree_skb(skb); 2418 kfree_skb(skb);
2404 2419
2405done: 2420done:
2406 if (sk) bh_unlock_sock(sk); 2421 if (sk)
2422 bh_unlock_sock(sk);
2407 return 0; 2423 return 0;
2408} 2424}
2409 2425
@@ -2420,11 +2436,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2420 BT_DBG("len %d, cid 0x%4.4x", len, cid); 2436 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2421 2437
2422 switch (cid) { 2438 switch (cid) {
2423 case 0x0001: 2439 case L2CAP_CID_SIGNALING:
2424 l2cap_sig_channel(conn, skb); 2440 l2cap_sig_channel(conn, skb);
2425 break; 2441 break;
2426 2442
2427 case 0x0002: 2443 case L2CAP_CID_CONN_LESS:
2428 psm = get_unaligned((__le16 *) skb->data); 2444 psm = get_unaligned((__le16 *) skb->data);
2429 skb_pull(skb, 2); 2445 skb_pull(skb, 2);
2430 l2cap_conless_channel(conn, psm, skb); 2446 l2cap_conless_channel(conn, psm, skb);
@@ -2650,7 +2666,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
2650 } 2666 }
2651 2667
2652 /* Allocate skb for the complete frame (with header) */ 2668 /* Allocate skb for the complete frame (with header) */
2653 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC))) 2669 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
2670 if (!conn->rx_skb)
2654 goto drop; 2671 goto drop;
2655 2672
2656 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 2673 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
@@ -2704,13 +2721,13 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2704 2721
2705 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 2722 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2706 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2723 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2707 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid, 2724 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
2708 pi->imtu, pi->omtu, pi->sec_level); 2725 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
2709 } 2726 }
2710 2727
2711 read_unlock_bh(&l2cap_sk_list.lock); 2728 read_unlock_bh(&l2cap_sk_list.lock);
2712 2729
2713 return (str - buf); 2730 return str - buf;
2714} 2731}
2715 2732
2716static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 2733static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
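The two themes of the l2cap.c hunks above are replacing bare protocol numbers (0x0001, 0x80, 0x0080) with named L2CAP_* constants, and hoisting assignments out of `if` conditions. A minimal userspace sketch of both patterns follows; the structs and lookup function are illustrative stand-ins, not kernel code:

    #include <stdio.h>
    #include <stdlib.h>

    #define L2CAP_CID_SIGNALING 0x0001  /* fixed signalling channel */
    #define L2CAP_CID_CONN_LESS 0x0002  /* connectionless channel */

    struct chan { unsigned short cid; };

    static struct chan *get_chan_by_cid(unsigned short cid)
    {
        static struct chan signalling = { L2CAP_CID_SIGNALING };
        return cid == L2CAP_CID_SIGNALING ? &signalling : NULL;
    }

    int main(void)
    {
        struct chan *ch;

        /* After the cleanup: assign first, then test, instead of
         * the old `if (!(ch = get_chan_by_cid(...)))` form. */
        ch = get_chan_by_cid(L2CAP_CID_SIGNALING);
        if (!ch)
            return EXIT_FAILURE;

        printf("found channel, cid 0x%4.4x\n", ch->cid);
        return 0;
    }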
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 374536e050aa..e50566ebf9f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -679,7 +679,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst

 	bacpy(&addr.l2_bdaddr, dst);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm    = htobs(RFCOMM_PSM);
+	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid    = 0;
 	*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
 	if (*err == 0 || *err == -EINPROGRESS)
@@ -852,9 +852,9 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
 	}

 	if (cr && channel_mtu >= 0)
-		pn->mtu = htobs(channel_mtu);
+		pn->mtu = cpu_to_le16(channel_mtu);
 	else
-		pn->mtu = htobs(d->mtu);
+		pn->mtu = cpu_to_le16(d->mtu);

 	*ptr = __fcs(buf); ptr++;

@@ -1056,7 +1056,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)

 	if (len > 127) {
 		hdr = (void *) skb_push(skb, 4);
-		put_unaligned(htobs(__len16(len)), (__le16 *) &hdr->len);
+		put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len);
 	} else {
 		hdr = (void *) skb_push(skb, 3);
 		hdr->len = __len8(len);
@@ -1289,7 +1289,7 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)

 	d->priority = pn->priority;

-	d->mtu = btohs(pn->mtu);
+	d->mtu = __le16_to_cpu(pn->mtu);

 	if (cr && d->mtu > s->mtu)
 		d->mtu = s->mtu;
@@ -1922,7 +1922,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
 	/* Bind socket */
 	bacpy(&addr.l2_bdaddr, ba);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm    = htobs(RFCOMM_PSM);
+	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid    = 0;
 	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
 	if (err < 0) {
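The rfcomm hunks swap the Bluetooth-private htobs()/btohs() wrappers for the generic cpu_to_le16()/__le16_to_cpu() helpers; both spell out that fields such as the PSM and MTU are little-endian on the wire regardless of host byte order. A hedged userspace sketch of what such helpers must guarantee (the my_* names are hypothetical; the kernel versions additionally carry __le16 type annotations for sparse):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t my_cpu_to_le16(uint16_t v)
    {
        uint8_t b[2] = { v & 0xff, v >> 8 };  /* low byte first on the wire */
        uint16_t out;
        memcpy(&out, b, 2);
        return out;
    }

    static uint16_t my_le16_to_cpu(uint16_t v)
    {
        uint8_t b[2];
        memcpy(b, &v, 2);
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint16_t psm = 3;  /* RFCOMM's PSM value */
        uint16_t wire = my_cpu_to_le16(psm);

        /* Round-trips to 3 on any host endianness. */
        printf("host 0x%04x -> wire 0x%04x -> host 0x%04x\n",
               psm, wire, my_le16_to_cpu(wire));
        return 0;
    }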
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 4d2c1f1cb524..9aac5213105a 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -65,8 +65,9 @@ static int __init br_init(void)
 	brioctl_set(br_ioctl_deviceless_stub);
 	br_handle_frame_hook = br_handle_frame;

-	br_fdb_get_hook = br_fdb_get;
-	br_fdb_put_hook = br_fdb_put;
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+	br_fdb_test_addr_hook = br_fdb_test_addr;
+#endif

 	return 0;
 err_out4:
@@ -95,8 +96,9 @@ static void __exit br_deinit(void)
 	synchronize_net();

 	br_netfilter_fini();
-	br_fdb_get_hook = NULL;
-	br_fdb_put_hook = NULL;
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+	br_fdb_test_addr_hook = NULL;
+#endif

 	br_handle_frame_hook = NULL;
 	br_fdb_fini();
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a48f5efdb6bf..57bf05c353bc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -71,10 +71,17 @@ static inline int br_mac_hash(const unsigned char *mac)
 	return jhash_1word(key, fdb_salt) & (BR_HASH_SIZE - 1);
 }

+static void fdb_rcu_free(struct rcu_head *head)
+{
+	struct net_bridge_fdb_entry *ent
+		= container_of(head, struct net_bridge_fdb_entry, rcu);
+	kmem_cache_free(br_fdb_cache, ent);
+}
+
 static inline void fdb_delete(struct net_bridge_fdb_entry *f)
 {
 	hlist_del_rcu(&f->hlist);
-	br_fdb_put(f);
+	call_rcu(&f->rcu, fdb_rcu_free);
 }

 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
@@ -226,33 +233,26 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 	return NULL;
 }

-/* Interface used by ATM hook that keeps a ref count */
-struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
-					unsigned char *addr)
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* Interface used by ATM LANE hook to test
+ * if an addr is on some other bridge port */
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 {
 	struct net_bridge_fdb_entry *fdb;
+	int ret;
+
+	if (!dev->br_port)
+		return 0;

 	rcu_read_lock();
-	fdb = __br_fdb_get(br, addr);
-	if (fdb && !atomic_inc_not_zero(&fdb->use_count))
-		fdb = NULL;
+	fdb = __br_fdb_get(dev->br_port->br, addr);
+	ret = fdb && fdb->dst->dev != dev &&
+		fdb->dst->state == BR_STATE_FORWARDING;
 	rcu_read_unlock();
-	return fdb;
-}
-
-static void fdb_rcu_free(struct rcu_head *head)
-{
-	struct net_bridge_fdb_entry *ent
-		= container_of(head, struct net_bridge_fdb_entry, rcu);
-	kmem_cache_free(br_fdb_cache, ent);
-}

-/* Set entry up for deletion with RCU */
-void br_fdb_put(struct net_bridge_fdb_entry *ent)
-{
-	if (atomic_dec_and_test(&ent->use_count))
-		call_rcu(&ent->rcu, fdb_rcu_free);
+	return ret;
 }
+#endif /* CONFIG_ATM_LANE */

 /*
  * Fill buffer with forwarding table records in
@@ -326,7 +326,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
-		atomic_set(&fdb->use_count, 1);
 		hlist_add_head_rcu(&fdb->hlist, head);

 		fdb->dst = source;
@@ -398,7 +397,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		if (unlikely(fdb->is_local)) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: received packet with "
-				       " own address as source address\n",
+				       "own address as source address\n",
 				       source->dev->name);
 		} else {
 			/* fastpath: update of existing entry */
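br_fdb_get()/br_fdb_put() handed a refcounted entry out of the RCU read section; br_fdb_test_addr() instead answers the one question ATM LANE ever asks — is this address on another forwarding port? — entirely under the read lock, so the use_count field can go away. A self-contained sketch of that return-a-verdict-not-a-pointer shape, with a pthread rwlock standing in for RCU and invented entry fields:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct fdb_entry {
        unsigned char addr[6];
        int port;           /* port the address was learned on */
        bool forwarding;
    };

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct fdb_entry table[] = {
        { { 0, 1, 2, 3, 4, 5 }, 1, true },
    };

    /* Return a verdict, not a pointer: nothing escapes the lock. */
    static bool fdb_test_addr(const unsigned char *addr, int my_port)
    {
        bool ret = false;
        size_t i;

        pthread_rwlock_rdlock(&lock);
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (!memcmp(table[i].addr, addr, 6)) {
                ret = table[i].port != my_port && table[i].forwarding;
                break;
            }
        pthread_rwlock_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        unsigned char mac[6] = { 0, 1, 2, 3, 4, 5 };

        printf("on other port: %d\n", fdb_test_addr(mac, 2));
        return 0;
    }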
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e4a418fcb35b..d22f611e4004 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -228,6 +228,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
 static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	struct rtable *rt;

 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
 		skb->pkt_type = PACKET_OTHERHOST;
@@ -235,12 +236,13 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 	}
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

-	skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
-	if (!skb->rtable) {
+	rt = bridge_parent_rtable(nf_bridge->physindev);
+	if (!rt) {
 		kfree_skb(skb);
 		return 0;
 	}
-	dst_hold(&skb->rtable->u.dst);
+	dst_hold(&rt->u.dst);
+	skb_dst_set(skb, &rt->u.dst);

 	skb->dev = nf_bridge->physindev;
 	nf_bridge_push_encap_header(skb);
@@ -320,7 +322,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)

 	skb->dev = bridge_parent(skb->dev);
 	if (skb->dev) {
-		struct dst_entry *dst = skb->dst;
+		struct dst_entry *dst = skb_dst(skb);

 		nf_bridge_pull_encap_header(skb);

@@ -338,6 +340,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	struct iphdr *iph = ip_hdr(skb);
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	struct rtable *rt;
 	int err;

 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
@@ -347,7 +350,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 	if (dnat_took_place(skb)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
-			struct rtable *rt;
 			struct flowi fl = {
 				.nl_u = {
 					.ip4_u = {
@@ -373,7 +375,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 			/* - Bridged-and-DNAT'ed traffic doesn't
 			 * require ip_forwarding. */
 			if (((struct dst_entry *)rt)->dev == dev) {
-				skb->dst = (struct dst_entry *)rt;
+				skb_dst_set(skb, (struct dst_entry *)rt);
 				goto bridged_dnat;
 			}
 			/* we are sure that forwarding is disabled, so printing
@@ -387,7 +389,7 @@ free_skb:
 			kfree_skb(skb);
 			return 0;
 		} else {
-			if (skb->dst->dev == dev) {
+			if (skb_dst(skb)->dev == dev) {
 bridged_dnat:
 				/* Tell br_nf_local_out this is a
 				 * bridged frame */
@@ -404,12 +406,13 @@ bridged_dnat:
 			skb->pkt_type = PACKET_HOST;
 		}
 	} else {
-		skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
-		if (!skb->rtable) {
+		rt = bridge_parent_rtable(nf_bridge->physindev);
+		if (!rt) {
 			kfree_skb(skb);
 			return 0;
 		}
-		dst_hold(&skb->rtable->u.dst);
+		dst_hold(&rt->u.dst);
+		skb_dst_set(skb, &rt->u.dst);
 	}

 	skb->dev = nf_bridge->physindev;
@@ -628,10 +631,10 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
-	if (skb->rtable && skb->rtable == bridge_parent_rtable(in)) {
-		dst_release(&skb->rtable->u.dst);
-		skb->rtable = NULL;
-	}
+	struct rtable *rt = skb_rtable(skb);
+
+	if (rt && rt == bridge_parent_rtable(in))
+		skb_dst_drop(skb);

 	return NF_ACCEPT;
 }
@@ -846,7 +849,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 		return NF_ACCEPT;

 #ifdef CONFIG_NETFILTER_DEBUG
-	if (skb->dst == NULL) {
+	if (skb_dst(skb) == NULL) {
 		printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n");
 		goto print_error;
 	}
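These hunks convert direct skb->dst / skb->rtable accesses to the skb_dst()/skb_dst_set()/skb_dst_drop()/skb_rtable() accessors, so the field's representation can later change without touching every caller. A toy model of the accessor pattern — the function names mirror the kernel's, but the struct layout and refcounting here are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct dst_entry { int refcnt; };

    struct sk_buff {
        unsigned long _dst;   /* opaque; always go through accessors */
    };

    static struct dst_entry *skb_dst(const struct sk_buff *skb)
    {
        return (struct dst_entry *)skb->_dst;
    }

    static void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
    {
        skb->_dst = (unsigned long)dst;
    }

    static void skb_dst_drop(struct sk_buff *skb)
    {
        struct dst_entry *dst = skb_dst(skb);

        if (dst && --dst->refcnt == 0)
            free(dst);
        skb->_dst = 0UL;
    }

    int main(void)
    {
        struct sk_buff skb = { 0 };
        struct dst_entry *dst = malloc(sizeof(*dst));

        dst->refcnt = 1;
        skb_dst_set(&skb, dst);
        printf("dst set: %p\n", (void *)skb_dst(&skb));
        skb_dst_drop(&skb);
        printf("dst dropped: %p\n", (void *)skb_dst(&skb));
        return 0;
    }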
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b6c3b71974dc..d5b5537272b4 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -51,7 +51,6 @@ struct net_bridge_fdb_entry
 	struct net_bridge_port		*dst;

 	struct rcu_head			rcu;
-	atomic_t			use_count;
 	unsigned long			ageing_timer;
 	mac_addr			addr;
 	unsigned char			is_local;
@@ -154,9 +153,7 @@ extern void br_fdb_delete_by_port(struct net_bridge *br,
 			       const struct net_bridge_port *p, int do_all);
 extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
						  const unsigned char *addr);
-extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
-					       unsigned char *addr);
-extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
+extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
 extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
			   unsigned long count, unsigned long off);
 extern int br_fdb_insert(struct net_bridge *br,
@@ -242,10 +239,9 @@ extern void br_stp_port_timer_init(struct net_bridge_port *p);
 extern unsigned long br_timer_value(const struct timer_list *timer);

 /* br.c */
-extern struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						       unsigned char *addr);
-extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
-
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
+#endif

 /* br_netlink.c */
 extern int br_netlink_init(void);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 603d89248e71..ee4820aa1843 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -172,7 +172,8 @@ static ssize_t store_stp_state(struct device *d,
 	if (endp == buf)
 		return -EINVAL;

-	rtnl_lock();
+	if (!rtnl_trylock())
+		return restart_syscall();
 	br_stp_set_enabled(br, val);
 	rtnl_unlock();

diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 02b2d50cce4d..4a3cdf8f3813 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -189,7 +189,8 @@ static ssize_t brport_store(struct kobject * kobj,

 	val = simple_strtoul(buf, &endp, 0);
 	if (endp != buf) {
-		rtnl_lock();
+		if (!rtnl_trylock())
+			return restart_syscall();
 		if (p->dev && p->br && brport_attr->store) {
 			spin_lock_bh(&p->br->lock);
 			ret = brport_attr->store(p, val);
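Both sysfs store paths switch from rtnl_lock() to rtnl_trylock() + restart_syscall(): rather than sleeping on rtnl while an rtnl holder may itself be waiting on sysfs teardown, the write returns to userspace and is transparently re-issued. A compressed sketch of the idiom under stated assumptions — the ERESTARTSYS value is only a local stand-in for the kernel's restart machinery:

    #include <pthread.h>
    #include <stdio.h>

    #define ERESTARTSYS 512   /* stand-in for the kernel's restart code */

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    static int store_attr(int val, int *attr)
    {
        if (pthread_mutex_trylock(&rtnl) != 0)
            return -ERESTARTSYS;   /* caller re-issues the syscall */
        *attr = val;
        pthread_mutex_unlock(&rtnl);
        return 0;
    }

    int main(void)
    {
        int attr = 0;
        int ret = store_attr(1, &attr);

        printf("ret=%d attr=%d\n", ret, attr);
        return 0;
    }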
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 820252aee81f..37928d5f2840 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -142,6 +142,12 @@ static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h,
 	return 0;
 }

+static inline __pure
+struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
+{
+	return (void *)entry + entry->next_offset;
+}
+
 /* Do some firewalling */
 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
    const struct net_device *in, const struct net_device *out,
@@ -164,7 +170,7 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
 	mtpar.in  = tgpar.in  = in;
 	mtpar.out = tgpar.out = out;
 	mtpar.hotdrop = &hotdrop;
-	tgpar.hooknum = hook;
+	mtpar.hooknum = tgpar.hooknum = hook;

 	read_lock_bh(&table->lock);
 	private = table->private;
@@ -249,8 +255,7 @@ letsreturn:
 			/* jump to a udc */
 			cs[sp].n = i + 1;
 			cs[sp].chaininfo = chaininfo;
-			cs[sp].e = (struct ebt_entry *)
-				   (((char *)point) + point->next_offset);
+			cs[sp].e = ebt_next_entry(point);
 			i = 0;
 			chaininfo = (struct ebt_entries *) (base + verdict);
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -266,8 +271,7 @@ letsreturn:
 		sp++;
 		continue;
 letscontinue:
-		point = (struct ebt_entry *)
-			(((char *)point) + point->next_offset);
+		point = ebt_next_entry(point);
 		i++;
 	}

@@ -787,7 +791,7 @@ static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s
 			/* this can't be 0, so the loop test is correct */
 			cl_s[i].cs.n = pos + 1;
 			pos = 0;
-			cl_s[i].cs.e = ((void *)e + e->next_offset);
+			cl_s[i].cs.e = ebt_next_entry(e);
 			e = (struct ebt_entry *)(hlp2->data);
 			nentries = hlp2->nentries;
 			cl_s[i].from = chain_nr;
@@ -797,7 +801,7 @@ static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s
 			continue;
 		}
 letscontinue:
-		e = (void *)e + e->next_offset;
+		e = ebt_next_entry(e);
 		pos++;
 	}
 	return 0;
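ebt_next_entry() centralizes the "(void *)e + e->next_offset" stride used in four places to walk ebtables' packed, variable-length entries. The same walk in a self-contained form, with illustrative structs in place of the real ebtables layout:

    #include <stdio.h>

    struct entry {
        unsigned int next_offset;  /* bytes from this entry to the next */
        int payload;
    };

    static const struct entry *next_entry(const struct entry *e)
    {
        return (const struct entry *)((const char *)e + e->next_offset);
    }

    int main(void)
    {
        struct entry table[3] = {
            { sizeof(struct entry), 10 },
            { sizeof(struct entry), 20 },
            { 0, 30 },                 /* next_offset 0: last record */
        };
        const struct entry *e = table;

        for (;;) {
            printf("payload %d\n", e->payload);
            if (!e->next_offset)
                break;
            e = next_entry(e);
        }
        return 0;
    }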
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 10f0528c3bf5..e733725b11d4 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -903,6 +903,8 @@ static __exit void can_exit(void)
 	}
 	spin_unlock(&can_rcvlists_lock);

+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
 	kmem_cache_destroy(rcv_cache);
 }

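The rcu_barrier() matters because receiver entries are freed into rcv_cache from call_rcu() callbacks; destroying the cache while callbacks are still queued would be a use-after-free. A loose userspace analogy, with a joined thread standing in for draining the RCU callback queue:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *deferred_free(void *p)
    {
        free(p);          /* stand-in for an RCU callback */
        return NULL;
    }

    int main(void)
    {
        pthread_t cb;
        void *obj = malloc(64);

        pthread_create(&cb, NULL, deferred_free, obj);

        /* rcu_barrier() analogue: wait for every queued callback... */
        pthread_join(cb, NULL);

        /* ...and only then tear down the allocator (kmem_cache_destroy). */
        printf("safe to destroy cache now\n");
        return 0;
    }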
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b01a76abe1d2..58abee1f1df1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -260,7 +260,9 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	}

-	skb_free_datagram(sk, skb);
+	kfree_skb(skb);
+	sk_mem_reclaim_partial(sk);
+
 	return err;
 }

@@ -280,6 +282,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;

 	/* Copy header. */
 	if (copy > 0) {
@@ -320,28 +323,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 		start = end;
 	}

-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;

-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_iovec(list,
-							    offset - start,
-							    to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_iovec(frag_iter,
+						    offset - start,
+						    to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -351,30 +350,124 @@ fault:
 }

 /**
+ *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying from
+ *	@to: io vector to copy to
+ *	@to_offset: offset in the io vector to start copying to
+ *	@len: amount of data to copy from buffer to iovec
+ *
+ *	Returns 0 or -EFAULT.
+ *	Note: the iovec is not modified during the copy.
+ */
+int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
+				  const struct iovec *to, int to_offset,
+				  int len)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
+
+	/* Copy header. */
+	if (copy > 0) {
+		if (copy > len)
+			copy = len;
+		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
+			goto fault;
+		if ((len -= copy) == 0)
+			return 0;
+		offset += copy;
+		to_offset += copy;
+	}
+
+	/* Copy paged appendix. Hmm... why does this look so complicated? */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end - offset) > 0) {
+			int err;
+			u8  *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap(page);
+			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
+						offset - start, to_offset, copy);
+			kunmap(page);
+			if (err)
+				goto fault;
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+			to_offset += copy;
+		}
+		start = end;
+	}
+
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_const_iovec(frag_iter,
+							  offset - start,
+							  to, to_offset,
+							  copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to_offset += copy;
+		}
+		start = end;
+	}
+	if (!len)
+		return 0;
+
+fault:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
+
+/**
  *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
  *	@skb: buffer to copy
  *	@offset: offset in the buffer to start copying to
  *	@from: io vector to copy to
+ *	@from_offset: offset in the io vector to start copying from
  *	@len: amount of data to copy to buffer from iovec
  *
  *	Returns 0 or -EFAULT.
- *	Note: the iovec is modified during the copy.
+ *	Note: the iovec is not modified during the copy.
  */
 int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
-				 struct iovec *from, int len)
+				 const struct iovec *from, int from_offset,
+				 int len)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;

 	/* Copy header. */
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		if (memcpy_fromiovec(skb->data + offset, from, copy))
+		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
+					copy))
 			goto fault;
 		if ((len -= copy) == 0)
 			return 0;
 		offset += copy;
+		from_offset += copy;
 	}

 	/* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -393,8 +486,9 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_fromiovec(vaddr + frag->page_offset +
-					       offset - start, from, copy);
+			err = memcpy_fromiovecend(vaddr + frag->page_offset +
+						  offset - start,
+						  from, from_offset, copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -402,32 +496,32 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 			if (!(len -= copy))
 				return 0;
 			offset += copy;
+			from_offset += copy;
 		}
 		start = end;
 	}

-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;

-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_from_iovec(list,
-							offset - start,
-							from, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_from_iovec(frag_iter,
+							 offset - start,
+							 from,
+							 from_offset,
+							 copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from_offset += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -442,8 +536,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			       __wsum *csump)
 {
 	int start = skb_headlen(skb);
-	int pos = 0;
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
+	int pos = 0;

 	/* Copy header. */
 	if (copy > 0) {
@@ -494,33 +589,29 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 		start = end;
 	}

-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;

-		for (; list; list=list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2 = 0;
-				if (copy > len)
-					copy = len;
-				if (skb_copy_and_csum_datagram(list,
-							       offset - start,
-							       to, copy,
-							       &csum2))
-					goto fault;
-				*csump = csum_block_add(*csump, csum2, pos);
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2 = 0;
+			if (copy > len)
+				copy = len;
+			if (skb_copy_and_csum_datagram(frag_iter,
+						       offset - start,
+						       to, copy,
+						       &csum2))
+				goto fault;
+			*csump = csum_block_add(*csump, csum2, pos);
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to += copy;
+			pos += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
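Two ideas run through the datagram.c changes: the open-coded frag-list loops collapse into the skb_walk_frags() iterator, and the iovec copy helpers take an explicit offset (memcpy_toiovecend()/memcpy_fromiovecend()) instead of advancing the iovec in place — which is why the kernel-doc now says the iovec is not modified, and why one iovec can back repeated copies. A hypothetical userspace version of the offset-based iovec walk, under the assumption that segments are skipped until the offset is consumed:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    static int copy_to_iovec_end(const struct iovec *iov, int iovcnt,
                                 int offset, const void *from, size_t len)
    {
        const char *src = from;
        int i;

        for (i = 0; i < iovcnt && len; i++) {
            size_t avail = iov[i].iov_len;
            size_t chunk;

            if ((size_t)offset >= avail) {  /* skip whole segment */
                offset -= avail;
                continue;
            }
            chunk = avail - offset;
            if (chunk > len)
                chunk = len;
            memcpy((char *)iov[i].iov_base + offset, src, chunk);
            src += chunk;
            len -= chunk;
            offset = 0;
        }
        return len ? -1 : 0;  /* -EFAULT analogue if we ran out of room */
    }

    int main(void)
    {
        char a[5] = "....", b[9] = "........";
        struct iovec iov[2] = { { a, 4 }, { b, 8 } };

        copy_to_iovec_end(iov, 2, 2, "HELLO", 5);
        printf("a=%s b=%s\n", a, b);  /* "..HE" "LLO....." */
        return 0;
    }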
diff --git a/net/core/dev.c b/net/core/dev.c
index e2e9e4af3ace..576a61574a93 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -126,6 +126,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <trace/events/napi.h>

 #include "net-sysfs.h"

@@ -268,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
 	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+	 ARPHRD_VOID, ARPHRD_NONE};

 static const char *netdev_lock_name[] =
 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -285,7 +287,8 @@ static const char *netdev_lock_name[] =
285 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
286 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
287 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
288 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; 290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
291 "_xmit_VOID", "_xmit_NONE"};
289 292
290static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
291static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 294static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1047,7 +1050,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int ret = 0;
+	int ret;

 	ASSERT_RTNL();

@@ -1064,6 +1067,11 @@ int dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;

+	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
 	/*
 	 *	Call device private open method
 	 */
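NETDEV_PRE_UP gives notifier-chain users a veto over dev_open() before the driver's own open callback runs; notifier_to_errno() turns a vetoing reply into a negative errno. A drastically simplified sketch of the control flow — the real notifier return-value encoding is richer than this, and the hook below is hypothetical:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical chain: a callback may veto by returning nonzero. */
    static int call_pre_up_chain(int veto)
    {
        return veto ? -EPERM : 0;
    }

    static int dev_open_sketch(int veto)
    {
        int ret = call_pre_up_chain(veto);
        if (ret)
            return ret;  /* device never reaches its open callback */
        return 0;
    }

    int main(void)
    {
        printf("open: %d, vetoed open: %d\n",
               dev_open_sketch(0), dev_open_sketch(1));
        return 0;
    }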
@@ -1688,7 +1696,16 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto gso;
 	}

+	/*
+	 * If device doesnt need skb->dst, release it right now while
+	 * its hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
+
 	rc = ops->ndo_start_xmit(skb, dev);
+	if (rc == 0)
+		txq_trans_update(txq);
 	/*
 	 * TODO: if skb_orphan() was called by
 	 * dev->hard_start_xmit() (for example, the unmodified
@@ -1718,6 +1735,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
+		txq_trans_update(txq);
 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -1735,8 +1753,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
 	u32 hash;

-	if (skb_rx_queue_recorded(skb))
-		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely (hash >= dev->real_num_tx_queues))
+			hash -= dev->real_num_tx_queues;
+		return hash;
+	}

 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
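The rewritten skb_tx_hash() clamps a recorded rx queue with a subtract loop instead of "%": the comparison is almost always false (rx queue counts rarely exceed tx queue counts), so the common case skips an integer division in the transmit hot path. Standalone equivalent of the clamp:

    #include <stdio.h>

    static unsigned int clamp_queue(unsigned int hash, unsigned int nqueues)
    {
        while (hash >= nqueues)   /* rarely taken */
            hash -= nqueues;
        return hash;
    }

    int main(void)
    {
        printf("%u %u\n", clamp_queue(3, 8), clamp_queue(19, 8));  /* 3 3 */
        return 0;
    }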
@@ -1800,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;

-	if (skb_shinfo(skb)->frag_list &&
+	if (skb_has_frags(skb) &&
 	    !(dev->features & NETIF_F_FRAGLIST) &&
 	    __skb_linearize(skb))
 		goto out_kfree_skb;
@@ -2049,11 +2071,13 @@ static inline int deliver_skb(struct sk_buff *skb,
 }

 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-/* These hooks defined here for ATM */
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* This hook is defined here for ATM LANE */
+int (*br_fdb_test_addr_hook)(struct net_device *dev,
+			     unsigned char *addr) __read_mostly;
+EXPORT_SYMBOL(br_fdb_test_addr_hook);
+#endif

 /*
  * If bridge module is loaded call bridging hook.
@@ -2061,6 +2085,8 @@ void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
  */
 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					 struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL(br_handle_frame_hook);
+
 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 					    struct packet_type **pt_prev, int *ret,
 					    struct net_device *orig_dev)
@@ -2374,26 +2400,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);

-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-	unsigned int offset = skb_gro_offset(skb);
-
-	hlen += offset;
-	if (hlen <= skb_headlen(skb))
-		return skb->data + offset;
-
-	if (unlikely(!skb_shinfo(skb)->nr_frags ||
-		     skb_shinfo(skb)->frags[0].size <=
-		     hlen - skb_headlen(skb) ||
-		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
-		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-	return page_address(skb_shinfo(skb)->frags[0].page) +
-	       skb_shinfo(skb)->frags[0].page_offset +
-	       offset - skb_headlen(skb);
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -2407,7 +2413,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;

-	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+	if (skb_is_gso(skb) || skb_has_frags(skb))
 		goto normal;

 	rcu_read_lock();
@@ -2456,10 +2462,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	ret = GRO_HELD;

 pull:
-	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-		if (napi->gro_list == skb)
-			napi->gro_list = skb->next;
-		ret = GRO_DROP;
+	if (skb_headlen(skb) < skb_gro_offset(skb)) {
+		int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+		BUG_ON(skb->end - skb->tail < grow);
+
+		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+		skb->tail += grow;
+		skb->data_len -= grow;
+
+		skb_shinfo(skb)->frags[0].page_offset += grow;
+		skb_shinfo(skb)->frags[0].size -= grow;
+
+		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+			put_page(skb_shinfo(skb)->frags[0].page);
+			memmove(skb_shinfo(skb)->frags,
+				skb_shinfo(skb)->frags + 1,
+				--skb_shinfo(skb)->nr_frags);
+		}
 	}

 ok:
@@ -2509,6 +2530,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);

+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb->mac_header == skb->tail &&
+	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+		NAPI_GRO_CB(skb)->frag0 =
+			page_address(skb_shinfo(skb)->frags[0].page) +
+			skb_shinfo(skb)->frags[0].page_offset;
+		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+	}
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
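skb_gro_reset_offset() caches a pointer and length for the first page fragment when the packet's headers start there, letting the GRO header probes (and the pull fix-up in the previous hunk) read headers without pskb_may_pull(). A schematic of that bookkeeping only, with invented miniature structs:

    #include <stdio.h>

    struct frag { char *page; unsigned int off, size; };
    struct gro_cb { char *frag0; unsigned int frag0_len; };

    static void gro_reset_offset(struct gro_cb *cb, const struct frag *f,
                                 int header_in_linear)
    {
        cb->frag0 = NULL;
        cb->frag0_len = 0;
        if (!header_in_linear) {          /* headers start in frags[0] */
            cb->frag0 = f->page + f->off; /* direct pointer, no copy */
            cb->frag0_len = f->size;
        }
    }

    int main(void)
    {
        char page[2048] = "ethernet-header...";
        struct frag f = { page, 0, 1500 };
        struct gro_cb cb;

        gro_reset_offset(&cb, &f, 0);
        printf("frag0=%.15s len=%u\n", cb.frag0, cb.frag0_len);
        return 0;
    }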
@@ -2526,16 +2563,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_reuse_skb);

-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
-				  struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	struct ethhdr *eth;
-	skb_frag_t *frag;
-	int i;
-
-	napi->skb = NULL;

 	if (!skb) {
 		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2543,47 +2574,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 			goto out;

 		skb_reserve(skb, NET_IP_ALIGN);
-	}

-	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-	frag = info->frags;
-
-	for (i = 0; i < info->nr_frags; i++) {
-		skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
-				   frag->size);
-		frag++;
-	}
-	skb_shinfo(skb)->nr_frags = info->nr_frags;
-
-	skb->data_len = info->len;
-	skb->len += info->len;
-	skb->truesize += info->len;
-
-	skb_reset_mac_header(skb);
-	skb_gro_reset_offset(skb);
-
-	eth = skb_gro_header(skb, sizeof(*eth));
-	if (!eth) {
-		napi_reuse_skb(napi, skb);
-		skb = NULL;
-		goto out;
+		napi->skb = skb;
 	}

-	skb_gro_pull(skb, sizeof(*eth));
-
-	/*
-	 * This works because the only protocols we care about don't require
-	 * special handling. We'll fix it up properly at the end.
-	 */
-	skb->protocol = eth->h_proto;
-
-	skb->ip_summed = info->ip_summed;
-	skb->csum = info->csum;
-
 out:
 	return skb;
 }
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);

 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
@@ -2613,9 +2611,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 }
 EXPORT_SYMBOL(napi_frags_finish);

-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
-	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	struct sk_buff *skb = napi->skb;
+	struct ethhdr *eth;
+	unsigned int hlen;
+	unsigned int off;
+
+	napi->skb = NULL;
+
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*eth);
+	eth = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eth = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			skb = NULL;
+			goto out;
+		}
+	}
+
+	skb_gro_pull(skb, sizeof(*eth));
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling. We'll fix it up properly at the end.
+	 */
+	skb->protocol = eth->h_proto;
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
+{
+	struct sk_buff *skb = napi_frags_skb(napi);

 	if (!skb)
 		return NET_RX_DROP;
@@ -2719,7 +2754,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;

 	list_del_init(&napi->dev_list);
-	kfree_skb(napi->skb);
+	napi_free_frags(napi);

 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
@@ -2773,8 +2808,10 @@ static void net_rx_action(struct softirq_action *h)
 		 * accidently calling ->poll() when NAPI is not scheduled.
 		 */
 		work = 0;
-		if (test_bit(NAPI_STATE_SCHED, &n->state))
+		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
 			work = n->poll(n, weight);
+			trace_napi_poll(n);
+		}

 		WARN_ON_ONCE(work > weight);

@@ -3444,6 +3481,319 @@ void dev_set_rx_mode(struct net_device *dev)
 	netif_addr_unlock_bh(dev);
 }

+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct list_head *list, int *delta,
+			 unsigned char *addr, int addr_len,
+			 unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	if (addr_len > MAX_ADDR_LEN)
+		return -EINVAL;
+
+	list_for_each_entry(ha, list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    ha->type == addr_type) {
+			ha->refcount++;
+			return 0;
+		}
+	}
+
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, list);
+	if (delta)
+		(*delta)++;
+	return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+	struct netdev_hw_addr *ha;
+
+	ha = container_of(head, struct netdev_hw_addr, rcu_head);
+	kfree(ha);
+}
+
+static int __hw_addr_del(struct list_head *list, int *delta,
+			 unsigned char *addr, int addr_len,
+			 unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+
+	list_for_each_entry(ha, list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    (ha->type == addr_type || !addr_type)) {
+			if (--ha->refcount)
+				return 0;
+			list_del_rcu(&ha->list);
+			call_rcu(&ha->rcu_head, ha_rcu_free);
+			if (delta)
+				(*delta)--;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
+				  struct list_head *from_list, int addr_len,
+				  unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha, *ha2;
+	unsigned char type;
+
+	list_for_each_entry(ha, from_list, list) {
+		type = addr_type ? addr_type : ha->type;
+		err = __hw_addr_add(to_list, to_delta, ha->addr,
+				    addr_len, type);
+		if (err)
+			goto unroll;
+	}
+	return 0;
+
+unroll:
+	list_for_each_entry(ha2, from_list, list) {
+		if (ha2 == ha)
+			break;
+		type = addr_type ? addr_type : ha2->type;
+		__hw_addr_del(to_list, to_delta, ha2->addr,
+			      addr_len, type);
+	}
+	return err;
+}
+
+static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
+				   struct list_head *from_list, int addr_len,
+				   unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	unsigned char type;
+
+	list_for_each_entry(ha, from_list, list) {
+		type = addr_type ? addr_type : ha->type;
+		__hw_addr_del(to_list, to_delta, ha->addr,
+			      addr_len, addr_type);
+	}
+}
+
+static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
+			  struct list_head *from_list, int *from_delta,
+			  int addr_len)
+{
+	int err = 0;
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, from_list, list) {
+		if (!ha->synced) {
+			err = __hw_addr_add(to_list, to_delta, ha->addr,
+					    addr_len, ha->type);
+			if (err)
+				break;
+			ha->synced = true;
+			ha->refcount++;
+		} else if (ha->refcount == 1) {
+			__hw_addr_del(to_list, to_delta, ha->addr,
+				      addr_len, ha->type);
+			__hw_addr_del(from_list, from_delta, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+	return err;
+}
+
+static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
+			     struct list_head *from_list, int *from_delta,
+			     int addr_len)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, from_list, list) {
+		if (ha->synced) {
+			__hw_addr_del(to_list, to_delta, ha->addr,
+				      addr_len, ha->type);
+			ha->synced = false;
+			__hw_addr_del(from_list, from_delta, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+}
+
+
+static void __hw_addr_flush(struct list_head *list)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, list, list) {
+		list_del_rcu(&ha->list);
+		call_rcu(&ha->rcu_head, ha_rcu_free);
+	}
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->dev_addr_list);
+	dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+	unsigned char addr[MAX_ADDR_LEN];
+	struct netdev_hw_addr *ha;
+	int err;
+
+	/* rtnl_mutex must be held here */
+
+	INIT_LIST_HEAD(&dev->dev_addr_list);
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
+			    NETDEV_HW_ADDR_T_LAN);
+	if (!err) {
+		/*
+		 * Get the first (previously created) address from the list
+		 * and set dev_addr pointer to this location.
+		 */
+		ha = list_first_entry(&dev->dev_addr_list,
+				      struct netdev_hw_addr, list);
+		dev->dev_addr = ha->addr;
+	}
+	return err;
+}
+
+/**
+ *	dev_addr_add	- Add a device address
+ *	@dev: device
+ *	@addr: address to add
+ *	@addr_type: address type
+ *
+ *	Add a device address to the device or increase the reference count if
+ *	it already exists.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ *	dev_addr_del	- Release a device address.
+ *	@dev: device
+ *	@addr: address to delete
+ *	@addr_type: address type
+ *
+ *	Release reference to a device address and remove it from the device
+ *	if the reference count drops to zero.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha;
+
+	ASSERT_RTNL();
+
+	/*
+	 * We can not remove the first address from the list because
+	 * dev->dev_addr points to that.
+	 */
+	ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
+	if (ha->addr == dev->dev_addr && ha->refcount == 1)
+		return -ENOENT;
+
+	err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ *	dev_addr_add_multiple	- Add device addresses from another device
+ *	@to_dev: device to which addresses will be added
+ *	@from_dev: device from which addresses will be added
+ *	@addr_type: address type - 0 means type will be used from from_dev
+ *
+ *	Add device addresses of the one device to another.
+ **
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
+				     &from_dev->dev_addr_list,
+				     to_dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ *	dev_addr_del_multiple	- Delete device addresses by another device
+ *	@to_dev: device where the addresses will be deleted
+ *	@from_dev: device by which addresses the addresses will be deleted
+ *	@addr_type: address type - 0 means type will used from from_dev
+ *
+ *	Deletes addresses in to device by the list of addresses in from device.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	__hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
+			       &from_dev->dev_addr_list,
+			       to_dev->addr_len, addr_type);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* unicast and multicast addresses handling functions */
+
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
 		      void *addr, int alen, int glbl)
 {
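The new __hw_addr_* family keeps one list entry per (address, type) pair with a refcount, so dev_addr_add()/dev_addr_del() and the unicast helpers can share entries safely. A minimal single-threaded model of that add/del contract — a fixed array stands in for the kernel list, and there is no RCU here:

    #include <stdio.h>
    #include <string.h>

    #define MAX_ADDRS 8

    struct hw_addr { unsigned char addr[6]; int refcount; int used; };

    static struct hw_addr list[MAX_ADDRS];

    static int hw_addr_add(const unsigned char *addr)
    {
        int i, free_slot = -1;

        for (i = 0; i < MAX_ADDRS; i++) {
            if (list[i].used && !memcmp(list[i].addr, addr, 6)) {
                list[i].refcount++;   /* existing entry: just take a ref */
                return 0;
            }
            if (!list[i].used && free_slot < 0)
                free_slot = i;
        }
        if (free_slot < 0)
            return -1;                /* -ENOMEM analogue */
        memcpy(list[free_slot].addr, addr, 6);
        list[free_slot].refcount = 1;
        list[free_slot].used = 1;
        return 0;
    }

    static int hw_addr_del(const unsigned char *addr)
    {
        int i;

        for (i = 0; i < MAX_ADDRS; i++)
            if (list[i].used && !memcmp(list[i].addr, addr, 6)) {
                if (--list[i].refcount == 0)
                    list[i].used = 0; /* unlink only at zero */
                return 0;
            }
        return -1;                    /* -ENOENT analogue */
    }

    int main(void)
    {
        unsigned char mac[6] = { 2, 0, 0, 0, 0, 1 };

        hw_addr_add(mac);
        hw_addr_add(mac);   /* same addr: refcount -> 2 */
        hw_addr_del(mac);
        printf("still present: %d (refcount %d)\n",
               list[0].used, list[0].refcount);
        return 0;
    }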
@@ -3506,24 +3856,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
  * dev_unicast_delete	- Release secondary unicast address.
  * @dev: device
  * @addr: address to delete
- * @alen: length of @addr
  *
  * Release reference to a secondary unicast address and remove it
  * from the device if the reference count drops to zero.
  *
  * The caller must hold the rtnl_mutex.
  */
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
 {
 	int err;

 	ASSERT_RTNL();

-	netif_addr_lock_bh(dev);
-	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
+			    dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3532,24 +3880,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
3532 * dev_unicast_add - add a secondary unicast address 3880 * dev_unicast_add - add a secondary unicast address
3533 * @dev: device 3881 * @dev: device
3534 * @addr: address to add 3882 * @addr: address to add
3535 * @alen: length of @addr
3536 * 3883 *
3537 * Add a secondary unicast address to the device or increase 3884 * Add a secondary unicast address to the device or increase
3538 * the reference count if it already exists. 3885 * the reference count if it already exists.
3539 * 3886 *
3540 * The caller must hold the rtnl_mutex. 3887 * The caller must hold the rtnl_mutex.
3541 */ 3888 */
3542int dev_unicast_add(struct net_device *dev, void *addr, int alen) 3889int dev_unicast_add(struct net_device *dev, void *addr)
3543{ 3890{
3544 int err; 3891 int err;
3545 3892
3546 ASSERT_RTNL(); 3893 ASSERT_RTNL();
3547 3894
3548 netif_addr_lock_bh(dev); 3895 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3549 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); 3896 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
3550 if (!err) 3897 if (!err)
3551 __dev_set_rx_mode(dev); 3898 __dev_set_rx_mode(dev);
3552 netif_addr_unlock_bh(dev);
3553 return err; 3899 return err;
3554} 3900}
3555EXPORT_SYMBOL(dev_unicast_add); 3901EXPORT_SYMBOL(dev_unicast_add);
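
With this change dev_unicast_add()/dev_unicast_delete() take the address length from dev->addr_len and rely on the caller holding RTNL instead of taking netif_addr_lock_bh() internally. A hypothetical caller claiming and releasing an extra unicast filter address:

/* Sketch only: "addr" must be dev->addr_len bytes long. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_claim_unicast(struct net_device *dev, unsigned char *addr)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, addr);
	rtnl_unlock();
	return err;
}

static void example_release_unicast(struct net_device *dev,
				    unsigned char *addr)
{
	rtnl_lock();
	dev_unicast_delete(dev, addr);
	rtnl_unlock();
}
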
@@ -3606,8 +3952,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
+ * addresses that have no users left.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices.
@@ -3616,12 +3961,15 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	netif_addr_lock_bh(to);
-	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-			      &from->uc_list, &from->uc_count);
+	ASSERT_RTNL();
+
+	if (to->addr_len != from->addr_len)
+		return -EINVAL;
+
+	err = __hw_addr_sync(&to->uc_list, &to->uc_count,
+			     &from->uc_list, &from->uc_count, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
-	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3637,18 +3985,33 @@ EXPORT_SYMBOL(dev_unicast_sync);
 */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	netif_addr_lock_bh(from);
-	netif_addr_lock(to);
+	ASSERT_RTNL();
 
-	__dev_addr_unsync(&to->uc_list, &to->uc_count,
-			  &from->uc_list, &from->uc_count);
-	__dev_set_rx_mode(to);
+	if (to->addr_len != from->addr_len)
+		return;
 
-	netif_addr_unlock(to);
-	netif_addr_unlock_bh(from);
+	__hw_addr_unsync(&to->uc_list, &to->uc_count,
+			 &from->uc_list, &from->uc_count, to->addr_len);
+	__dev_set_rx_mode(to);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
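
dev_unicast_sync()/dev_unicast_unsync() propagate an upper device's secondary unicast list to the lower device it transmits through. A rough sketch of the pattern a layered (VLAN-style) driver would use; the private structure and real_dev member are illustrative, and note that the new RTNL requirement replaces the old address-lock scheme:

/* Sketch only. */
#include <linux/netdevice.h>

struct example_priv {
	struct net_device *real_dev;
};

static void example_set_rx_mode(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* push additions/removals made on "dev" down to the real device */
	dev_unicast_sync(priv->real_dev, dev);
}

static void example_down(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* drop everything we previously synced */
	dev_unicast_unsync(priv->real_dev, dev);
}
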
 
+static void dev_unicast_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->uc_list);
+	dev->uc_count = 0;
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	INIT_LIST_HEAD(&dev->uc_list);
+}
+
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
 	struct dev_addr_list *tmp;
@@ -3667,9 +4030,6 @@ static void dev_addr_discard(struct net_device *dev)
 {
 	netif_addr_lock_bh(dev);
 
-	__dev_addr_discard(&dev->uc_list);
-	dev->uc_count = 0;
-
 	__dev_addr_discard(&dev->mc_list);
 	dev->mc_count = 0;
 
@@ -3853,7 +4213,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
 
 	switch (cmd) {
 	case SIOCGIFFLAGS:	/* Get interface flags */
-		ifr->ifr_flags = dev_get_flags(dev);
+		ifr->ifr_flags = (short) dev_get_flags(dev);
 		return 0;
 
 	case SIOCGIFMETRIC:	/* Get the metric on the interface
@@ -4262,6 +4622,7 @@ static void rollback_registered(struct net_device *dev)
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	if (dev->netdev_ops->ndo_uninit)
@@ -4333,39 +4694,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar.  If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	dev->init = ops->ndo_init;
-	dev->uninit = ops->ndo_uninit;
-	dev->open = ops->ndo_open;
-	dev->change_rx_flags = ops->ndo_change_rx_flags;
-	dev->set_rx_mode = ops->ndo_set_rx_mode;
-	dev->set_multicast_list = ops->ndo_set_multicast_list;
-	dev->set_mac_address = ops->ndo_set_mac_address;
-	dev->validate_addr = ops->ndo_validate_addr;
-	dev->do_ioctl = ops->ndo_do_ioctl;
-	dev->set_config = ops->ndo_set_config;
-	dev->change_mtu = ops->ndo_change_mtu;
-	dev->neigh_setup = ops->ndo_neigh_setup;
-	dev->tx_timeout = ops->ndo_tx_timeout;
-	dev->get_stats = ops->ndo_get_stats;
-	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
 *	register_netdevice - register a network device
 *	@dev: device to register
@@ -4405,23 +4733,6 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatibility support.
-	 * This is temporary until all network devices are converted.
-	 */
-	if (dev->netdev_ops) {
-		netdev_resync_ops(dev);
-	} else {
-		char drivername[64];
-		pr_info("%s (%s): not using net_device_ops yet\n",
-			dev->name, netdev_drivername(dev, drivername, 64));
-
-		/* This works only because net_device_ops and the
-		   compatibility structure are the same. */
-		dev->netdev_ops = (void *) &(dev->init);
-	}
-#endif
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -4707,13 +5018,30 @@ void netdev_run_todo(void)
 * the internal statistics structure is used.
 */
 const struct net_device_stats *dev_get_stats(struct net_device *dev)
- {
+{
 	const struct net_device_ops *ops = dev->netdev_ops;
 
 	if (ops->ndo_get_stats)
 		return ops->ndo_get_stats(dev);
-	else
-		return &dev->stats;
+	else {
+		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+		struct net_device_stats *stats = &dev->stats;
+		unsigned int i;
+		struct netdev_queue *txq;
+
+		for (i = 0; i < dev->num_tx_queues; i++) {
+			txq = netdev_get_tx_queue(dev, i);
+			tx_bytes += txq->tx_bytes;
+			tx_packets += txq->tx_packets;
+			tx_dropped += txq->tx_dropped;
+		}
+		if (tx_bytes || tx_packets || tx_dropped) {
+			stats->tx_bytes = tx_bytes;
+			stats->tx_packets = tx_packets;
+			stats->tx_dropped = tx_dropped;
+		}
+		return stats;
+	}
 }
 EXPORT_SYMBOL(dev_get_stats);
 
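
dev_get_stats() now folds the per-queue tx counters into dev->stats when a driver supplies no ndo_get_stats, so a multiqueue driver only has to bump txq->tx_packets/tx_bytes/tx_dropped in its transmit path. A hedged sketch of that side of the contract (example_hw_queue_xmit() is hypothetical):

/* Sketch only. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;
	unsigned int len = skb->len;	/* record before the skb may be freed */

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	if (example_hw_queue_xmit(skb) != 0) {
		txq->tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txq->tx_packets++;
	txq->tx_bytes += len;
	return NETDEV_TX_OK;
}
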
@@ -4748,18 +5076,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	struct netdev_queue *tx;
 	struct net_device *dev;
 	size_t alloc_size;
-	void *p;
+	struct net_device *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
-		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
 		alloc_size += sizeof_priv;
 	}
 	/* ensure 32-byte alignment of whole construct */
-	alloc_size += NETDEV_ALIGN_CONST;
+	alloc_size += NETDEV_ALIGN - 1;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
@@ -4771,13 +5099,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
-		kfree(p);
-		return NULL;
+		goto free_p;
 	}
 
-	dev = (struct net_device *)
-		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
+
+	if (dev_addr_init(dev))
+		goto free_tx;
+
+	dev_unicast_init(dev);
+
 	dev_net_set(dev, &init_net);
 
 	dev->_tx = tx;
@@ -4789,9 +5121,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	netdev_init_queues(dev);
 
 	INIT_LIST_HEAD(&dev->napi_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
+
+free_tx:
+	kfree(tx);
+
+free_p:
+	kfree(p);
+	return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mq);
 
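
The open-coded mask arithmetic above is replaced by the generic ALIGN()/PTR_ALIGN() helpers; NETDEV_ALIGN is 32 in this tree. A standalone userspace illustration of the same math and of what dev->padded records:

/* Compiles and runs with any C compiler; macros mirror the kernel's. */
#include <stdio.h>
#include <stdint.h>

#define NETDEV_ALIGN 32
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a) ((char *)ALIGN((uintptr_t)(p), (a)))

int main(void)
{
	char buf[256];
	char *dev = PTR_ALIGN(buf + 1, NETDEV_ALIGN); /* mimic kzalloc result */
	size_t padded = dev - (buf + 1);              /* what dev->padded stores */

	printf("ALIGN(100, 32) = %zu\n", (size_t)ALIGN(100, NETDEV_ALIGN));
	printf("padding consumed: %zu\n", padded);
	return 0;
}
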
@@ -4811,6 +5151,9 @@ void free_netdev(struct net_device *dev)
 
 	kfree(dev->_tx);
 
+	/* Flush device addresses */
+	dev_addr_flush(dev);
+
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 
@@ -4970,6 +5313,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	netdev_unregister_kobject(dev);
@@ -5325,12 +5669,6 @@ EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
 EXPORT_SYMBOL(dev_load);
 
 EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9fd0dc3cca99..9d66fa953ab7 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -22,8 +22,10 @@
 #include <linux/timer.h>
 #include <linux/bitops.h>
 #include <net/genetlink.h>
+#include <net/netevent.h>
 
-#include <trace/skb.h>
+#include <trace/events/skb.h>
+#include <trace/events/napi.h>
 
 #include <asm/unaligned.h>
 
@@ -38,7 +40,8 @@ static void send_dm_alert(struct work_struct *unused);
 * and the work handle that will send up
 * netlink alerts
 */
-struct sock *dm_sock;
+static int trace_state = TRACE_OFF;
+static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED;
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
@@ -47,11 +50,18 @@ struct per_cpu_dm_data {
 	struct timer_list send_timer;
 };
 
+struct dm_hw_stat_delta {
+	struct net_device *dev;
+	struct list_head list;
+	struct rcu_head rcu;
+	unsigned long last_drop_val;
+};
+
 static struct genl_family net_drop_monitor_family = {
 	.id = GENL_ID_GENERATE,
 	.hdrsize = 0,
 	.name = "NET_DM",
-	.version = 1,
+	.version = 2,
 	.maxattr = NET_DM_CMD_MAX,
 };
 
@@ -59,19 +69,24 @@ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
 
 static int dm_hit_limit = 64;
 static int dm_delay = 1;
-
+static unsigned long dm_hw_check_delta = 2*HZ;
+static LIST_HEAD(hw_stats_list);
 
 static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
 	struct net_dm_alert_msg *msg;
+	struct nlattr *nla;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+	al += sizeof(struct nlattr);
+
 	data->skb = genlmsg_new(al, GFP_KERNEL);
 	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
 			0, NET_DM_CMD_ALERT);
-	msg = __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_alert_msg));
+	nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
+	msg = nla_data(nla);
 	memset(msg, 0, al);
 	atomic_set(&data->dm_hit_count, dm_hit_limit);
 }
@@ -111,10 +126,11 @@ static void sched_send_work(unsigned long unused)
 	schedule_work(&data->dm_alert_work);
 }
 
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_drop_common(struct sk_buff *skb, void *location)
 {
 	struct net_dm_alert_msg *msg;
 	struct nlmsghdr *nlh;
+	struct nlattr *nla;
 	int i;
 	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
 
@@ -127,7 +143,8 @@ static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
 	}
 
 	nlh = (struct nlmsghdr *)data->skb->data;
-	msg = genlmsg_data(nlmsg_data(nlh));
+	nla = genlmsg_data(nlmsg_data(nlh));
+	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
@@ -139,6 +156,7 @@ static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
 	 * We need to create a new entry
 	 */
 	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
 	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
 	msg->points[msg->entries].count = 1;
 	msg->entries++;
@@ -152,24 +170,80 @@ out:
 	return;
 }
 
+static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+{
+	trace_drop_common(skb, location);
+}
+
+static void trace_napi_poll_hit(struct napi_struct *napi)
+{
+	struct dm_hw_stat_delta *new_stat;
+
+	/*
+	 * Ratelimit our check time to dm_hw_check_delta jiffies
+	 */
+	if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+		if ((new_stat->dev == napi->dev) &&
+		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
+			trace_drop_common(NULL, NULL);
+			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
+			break;
+		}
+	}
+	rcu_read_unlock();
+}
+
+
+static void free_dm_hw_stat(struct rcu_head *head)
+{
+	struct dm_hw_stat_delta *n;
+	n = container_of(head, struct dm_hw_stat_delta, rcu);
+	kfree(n);
+}
+
 static int set_all_monitor_traces(int state)
 {
 	int rc = 0;
+	struct dm_hw_stat_delta *new_stat = NULL;
+	struct dm_hw_stat_delta *temp;
+
+	spin_lock(&trace_state_lock);
 
 	switch (state) {
 	case TRACE_ON:
 		rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
+		rc |= register_trace_napi_poll(trace_napi_poll_hit);
 		break;
 	case TRACE_OFF:
 		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
+		rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
 
 		tracepoint_synchronize_unregister();
+
+		/*
+		 * Clean the device list
+		 */
+		list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
+			if (new_stat->dev == NULL) {
+				list_del_rcu(&new_stat->list);
+				call_rcu(&new_stat->rcu, free_dm_hw_stat);
+			}
+		}
 		break;
 	default:
 		rc = 1;
 		break;
 	}
 
+	if (!rc)
+		trace_state = state;
+
+	spin_unlock(&trace_state_lock);
+
 	if (rc)
 		return -EINPROGRESS;
 	return rc;
@@ -197,6 +271,44 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
 	return -ENOTSUPP;
 }
 
+static int dropmon_net_event(struct notifier_block *ev_block,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct dm_hw_stat_delta *new_stat = NULL;
+	struct dm_hw_stat_delta *tmp;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);
+
+		if (!new_stat)
+			goto out;
+
+		new_stat->dev = dev;
+		INIT_RCU_HEAD(&new_stat->rcu);
+		spin_lock(&trace_state_lock);
+		list_add_rcu(&new_stat->list, &hw_stats_list);
+		spin_unlock(&trace_state_lock);
+		break;
+	case NETDEV_UNREGISTER:
+		spin_lock(&trace_state_lock);
+		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+			if (new_stat->dev == dev) {
+				new_stat->dev = NULL;
+				if (trace_state == TRACE_OFF) {
+					list_del_rcu(&new_stat->list);
+					call_rcu(&new_stat->rcu, free_dm_hw_stat);
+					break;
+				}
+			}
+		}
+		spin_unlock(&trace_state_lock);
+		break;
+	}
+out:
+	return NOTIFY_DONE;
+}
 
 static struct genl_ops dropmon_ops[] = {
 	{
@@ -213,6 +325,10 @@ static struct genl_ops dropmon_ops[] = {
 	},
 };
 
+static struct notifier_block dropmon_net_notifier = {
+	.notifier_call = dropmon_net_event
+};
+
 static int __init init_net_drop_monitor(void)
 {
 	int cpu;
@@ -236,12 +352,18 @@ static int __init init_net_drop_monitor(void)
 		ret = genl_register_ops(&net_drop_monitor_family,
 					&dropmon_ops[i]);
 		if (ret) {
-			printk(KERN_CRIT "failed to register operation %d\n",
+			printk(KERN_CRIT "Failed to register operation %d\n",
 				dropmon_ops[i].cmd);
 			goto out_unreg;
 		}
 	}
 
+	rc = register_netdevice_notifier(&dropmon_net_notifier);
+	if (rc < 0) {
+		printk(KERN_CRIT "Failed to register netdevice notifier\n");
+		goto out_unreg;
+	}
+
 	rc = 0;
 
 	for_each_present_cpu(cpu) {
@@ -252,6 +374,7 @@ static int __init init_net_drop_monitor(void)
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
 	}
+
 	goto out;
 
 out_unreg:
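
The hw_stats_list handling above is the standard RCU deferred-free pattern: writers unlink entries with list_del_rcu() under trace_state_lock and reclaim them via call_rcu(), which is why trace_napi_poll_hit() can walk the list under rcu_read_lock() alone. The pattern in isolation, with illustrative types:

/* Sketch only: struct example_item and example_lock are illustrative. */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head list;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(example_lock);

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_item, rcu));
}

static void example_remove(struct example_item *it)
{
	spin_lock(&example_lock);
	list_del_rcu(&it->list);	/* readers may still hold it ... */
	spin_unlock(&example_lock);
	call_rcu(&it->rcu, example_free_rcu); /* ... freed after grace period */
}
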
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 98691e1466b8..bd309384f8b8 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -299,7 +299,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	} else if (rule->action == FR_ACT_GOTO)
 		goto errout_free;
 
-	err = ops->configure(rule, skb, nlh, frh, tb);
+	err = ops->configure(rule, skb, frh, tb);
 	if (err < 0)
 		goto errout_free;
 
@@ -500,7 +500,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	if (rule->target)
 		NLA_PUT_U32(skb, FRA_GOTO, rule->target);
 
-	if (ops->fill(rule, skb, nlh, frh) < 0)
+	if (ops->fill(rule, skb, frh) < 0)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6d62d4618cfc..78e5bfc454ae 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -128,12 +128,12 @@ static void est_timer(unsigned long arg)
 		npackets = e->bstats->packets;
 		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
+		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
 		e->last_packets = npackets;
-		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
+		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
 		e->rate_est->pps = (e->avpps+0x1FF)>>10;
 skip:
 		read_unlock(&est_lock);
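
The estimator rewrite replaces avg += (rate - avg) >> ewma_log with avg += (rate >> ewma_log) - (avg >> ewma_log). Both compute the exponentially weighted moving average avg' = avg + (rate - avg)/2^ewma_log, but the split form never materializes the intermediate difference rate - avg, which could overflow the intermediate type once the byte rate became a 64-bit quantity; the two differ only by the rounding of the truncated terms. A standalone check:

/* Compiles and runs with any C compiler. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t avg_a = 0, avg_b = 0, rate = 1000000;
	int ewma_log = 3, i;

	for (i = 0; i < 20; i++) {
		avg_a += (rate - avg_a) >> ewma_log;
		avg_b += (rate >> ewma_log) - (avg_b >> ewma_log);
	}
	printf("combined form: %lld, split form: %lld\n",
	       (long long)avg_a, (long long)avg_b);
	return 0;
}
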
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 4c9c0121c9da..16ad45d4882b 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -98,6 +98,31 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
 }
 
 /*
+ * Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+		      int offset, int len)
+{
+	int copy;
+	for (; len > 0; ++iov) {
+		/* Skip over the finished iovecs */
+		if (unlikely(offset >= iov->iov_len)) {
+			offset -= iov->iov_len;
+			continue;
+		}
+		copy = min_t(unsigned int, iov->iov_len - offset, len);
+		if (copy_to_user(iov->iov_base + offset, kdata, copy))
+			return -EFAULT;
+		offset = 0;
+		kdata += copy;
+		len -= copy;
+	}
+
+	return 0;
+}
+
+/*
 * Copy iovec to kernel. Returns -EFAULT on error.
 *
 * Note: this modifies the original iovec.
@@ -122,10 +147,11 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
 }
 
 /*
- * For use with ip_build_xmit
+ * Copy iovec from kernel. Returns -EFAULT on error.
 */
-int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
-			int len)
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+			int offset, int len)
 {
 	/* Skip over the finished iovecs */
 	while (offset >= iov->iov_len) {
@@ -236,3 +262,4 @@ EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
 EXPORT_SYMBOL(memcpy_fromiovec);
 EXPORT_SYMBOL(memcpy_fromiovecend);
 EXPORT_SYMBOL(memcpy_toiovec);
+EXPORT_SYMBOL(memcpy_toiovecend);
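
memcpy_toiovecend() walks the vector without modifying it, first consuming `offset` bytes of iovec space and then scattering `len` bytes. A runnable userspace analogue of the same walk, with memcpy() standing in for copy_to_user():

/* Compiles and runs on any POSIX system. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static int toiovecend(const struct iovec *iov, const char *kdata,
		      size_t offset, size_t len)
{
	for (; len > 0; ++iov) {
		size_t copy;

		if (offset >= iov->iov_len) {	/* skip finished iovecs */
			offset -= iov->iov_len;
			continue;
		}
		copy = iov->iov_len - offset;
		if (copy > len)
			copy = len;
		memcpy((char *)iov->iov_base + offset, kdata, copy);
		offset = 0;
		kdata += copy;
		len -= copy;
	}
	return 0;
}

int main(void)
{
	char a[4] = "....", b[8] = "........";
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	toiovecend(iov, "HELLO", 2, 5);	/* lands as "..HE" + "LLO....." */
	printf("%.4s|%.8s\n", a, b);
	return 0;
}
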
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a1cbce7fdae5..163b4f5b0365 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -771,6 +771,28 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
 			p->ucast_probes + p->app_probes + p->mcast_probes);
 }
 
+static void neigh_invalidate(struct neighbour *neigh)
+{
+	struct sk_buff *skb;
+
+	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
+	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
+	neigh->updated = jiffies;
+
+	/* It is very thin place. report_unreachable is very complicated
+	   routine. Particularly, it can hit the same neighbour entry!
+
+	   So that, we try to be accurate and avoid dead loop. --ANK
+	 */
+	while (neigh->nud_state == NUD_FAILED &&
+	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+		write_unlock(&neigh->lock);
+		neigh->ops->error_report(neigh, skb);
+		write_lock(&neigh->lock);
+	}
+	skb_queue_purge(&neigh->arp_queue);
+}
+
 /* Called when a timer expires for a neighbour entry. */
 
 static void neigh_timer_handler(unsigned long arg)
@@ -835,26 +857,9 @@ static void neigh_timer_handler(unsigned long arg)
 
 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
-		struct sk_buff *skb;
-
 		neigh->nud_state = NUD_FAILED;
-		neigh->updated = jiffies;
 		notify = 1;
-		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
-		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
-
-		/* It is very thin place. report_unreachable is very complicated
-		   routine. Particularly, it can hit the same neighbour entry!
-
-		   So that, we try to be accurate and avoid dead loop. --ANK
-		 */
-		while (neigh->nud_state == NUD_FAILED &&
-		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
-			write_unlock(&neigh->lock);
-			neigh->ops->error_report(neigh, skb);
-			write_lock(&neigh->lock);
-		}
-		skb_queue_purge(&neigh->arp_queue);
+		neigh_invalidate(neigh);
 	}
 
 	if (neigh->nud_state & NUD_IN_TIMER) {
@@ -1001,6 +1006,11 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		neigh->nud_state = new;
 		err = 0;
 		notify = old & NUD_VALID;
+		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
+		    (new & NUD_FAILED)) {
+			neigh_invalidate(neigh);
+			notify = 1;
+		}
 		goto out;
 	}
 
@@ -1088,8 +1098,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 			struct neighbour *n1 = neigh;
 			write_unlock_bh(&neigh->lock);
 			/* On shaper/eql skb->dst->neighbour != neigh :( */
-			if (skb->dst && skb->dst->neighbour)
-				n1 = skb->dst->neighbour;
+			if (skb_dst(skb) && skb_dst(skb)->neighbour)
+				n1 = skb_dst(skb)->neighbour;
 			n1->output(skb);
 			write_lock_bh(&neigh->lock);
 		}
@@ -1182,7 +1192,7 @@ EXPORT_SYMBOL(neigh_compat_output);
 
 int neigh_resolve_output(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct neighbour *neigh;
 	int rc = 0;
 
@@ -1229,7 +1239,7 @@ EXPORT_SYMBOL(neigh_resolve_output);
 int neigh_connected_output(struct sk_buff *skb)
 {
 	int err;
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 
@@ -1298,8 +1308,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
 		if (time_before(tbl->proxy_timer.expires, sched_next))
 			sched_next = tbl->proxy_timer.expires;
 	}
-	dst_release(skb->dst);
-	skb->dst = NULL;
+	skb_dst_drop(skb);
 	dev_hold(skb->dev);
 	__skb_queue_tail(&tbl->proxy_queue, skb);
 	mod_timer(&tbl->proxy_timer, sched_next);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 2da59a0ac4ac..3994680c08b9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -78,7 +78,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 		goto err;
 
 	if (!rtnl_trylock())
-		return -ERESTARTSYS;
+		return restart_syscall();
 
 	if (dev_isalive(net)) {
 		if ((ret = (*set)(net, new)) == 0)
@@ -225,7 +225,8 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
 	if (len > 0 && buf[len - 1] == '\n')
 		--count;
 
-	rtnl_lock();
+	if (!rtnl_trylock())
+		return restart_syscall();
 	ret = dev_set_alias(netdev, buf, count);
 	rtnl_unlock();
 
@@ -238,7 +239,8 @@ static ssize_t show_ifalias(struct device *dev,
 	const struct net_device *netdev = to_net_dev(dev);
 	ssize_t ret = 0;
 
-	rtnl_lock();
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (netdev->ifalias)
 		ret = sprintf(buf, "%s\n", netdev->ifalias);
 	rtnl_unlock();
@@ -497,7 +499,6 @@ int netdev_register_kobject(struct net_device *net)
 	dev->platform_data = net;
 	dev->groups = groups;
 
-	BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
 	dev_set_name(dev, "%s", net->name);
 
 #ifdef CONFIG_SYSFS
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index c8fb45665e4f..f1e982c508bb 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -19,11 +19,14 @@
 #include <linux/workqueue.h>
 #include <linux/netlink.h>
 #include <linux/net_dropmon.h>
-#include <trace/skb.h>
 
 #include <asm/unaligned.h>
 #include <asm/bitops.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/skb.h>
+#include <trace/events/napi.h>
 
-DEFINE_TRACE(kfree_skb);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
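
This is the trace/events conversion: the event definitions now live in a header, and exactly one translation unit instantiates the tracepoint bodies by defining CREATE_TRACE_POINTS before the include. Usage sketch under that assumption (in this tree the kfree_skb event takes the skb and a caller address):

/* In exactly one .c file per event header (here, net-traces.c): */
#define CREATE_TRACE_POINTS
#include <trace/events/skb.h>

/* Every other user includes the header plainly and either fires ... */
#include <trace/events/skb.h>

static void example_drop(struct sk_buff *skb)
{
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}

/* ... or attaches a probe, as drop_monitor does:
 *	register_trace_kfree_skb(my_probe);
 *	...
 *	unregister_trace_kfree_skb(my_probe);
 *	tracepoint_synchronize_unregister();
 */
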
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e3bebd36f053..b7292a2719dc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -115,41 +115,34 @@ static void net_free(struct net *net)
 	kmem_cache_free(net_cachep, net);
 }
 
-struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+static struct net *net_create(void)
 {
-	struct net *new_net = NULL;
-	int err;
-
-	get_net(old_net);
-
-	if (!(flags & CLONE_NEWNET))
-		return old_net;
-
-	err = -ENOMEM;
-	new_net = net_alloc();
-	if (!new_net)
-		goto out_err;
+	struct net *net;
+	int rv;
 
+	net = net_alloc();
+	if (!net)
+		return ERR_PTR(-ENOMEM);
 	mutex_lock(&net_mutex);
-	err = setup_net(new_net);
-	if (!err) {
+	rv = setup_net(net);
+	if (rv == 0) {
 		rtnl_lock();
-		list_add_tail(&new_net->list, &net_namespace_list);
+		list_add_tail(&net->list, &net_namespace_list);
 		rtnl_unlock();
 	}
 	mutex_unlock(&net_mutex);
+	if (rv < 0) {
+		net_free(net);
+		return ERR_PTR(rv);
+	}
+	return net;
+}
 
-	if (err)
-		goto out_free;
-out:
-	put_net(old_net);
-	return new_net;
-
-out_free:
-	net_free(new_net);
-out_err:
-	new_net = ERR_PTR(err);
-	goto out;
+struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+{
+	if (!(flags & CLONE_NEWNET))
+		return get_net(old_net);
+	return net_create();
 }
 
 static void cleanup_net(struct work_struct *work)
@@ -203,9 +196,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 static int __init net_ns_init(void)
 {
 	struct net_generic *ng;
-	int err;
 
-	printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
 #ifdef CONFIG_NET_NS
 	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
 					SMP_CACHE_BYTES,
@@ -224,15 +215,14 @@ static int __init net_ns_init(void)
 	rcu_assign_pointer(init_net.gen, ng);
 
 	mutex_lock(&net_mutex);
-	err = setup_net(&init_net);
+	if (setup_net(&init_net))
+		panic("Could not setup the initial network namespace");
 
 	rtnl_lock();
 	list_add_tail(&init_net.list, &net_namespace_list);
 	rtnl_unlock();
 
 	mutex_unlock(&net_mutex);
-	if (err)
-		panic("Could not setup the initial network namespace");
 
 	return 0;
 }
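
net_create() now reports failure through ERR_PTR() instead of goto bookkeeping, so callers of copy_net_ns() use the usual IS_ERR()/PTR_ERR() convention. A hypothetical caller:

/* Sketch only. */
#include <linux/err.h>
#include <net/net_namespace.h>

static int example_new_netns(unsigned long flags, struct net *old)
{
	struct net *net = copy_net_ns(flags, old);

	if (IS_ERR(net))
		return PTR_ERR(net);	/* -ENOMEM or a setup_net() error */
	/* ... use "net", then drop the reference ... */
	put_net(net);
	return 0;
}
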
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 64f51eec6576..9675f312830d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -24,6 +24,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
+#include <trace/events/napi.h>
 
 /*
 * We maintain a small pool of fully-sized skbs, to make sure the
@@ -137,6 +138,7 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
+	trace_napi_poll(napi);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
@@ -300,8 +302,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 	     tries > 0; --tries) {
 		if (__netif_tx_trylock(txq)) {
-			if (!netif_tx_queue_stopped(txq))
+			if (!netif_tx_queue_stopped(txq)) {
 				status = ops->ndo_start_xmit(skb, dev);
+				if (status == NETDEV_TX_OK)
+					txq_trans_update(txq);
+			}
 			__netif_tx_unlock(txq);
 
 			if (status == NETDEV_TX_OK)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0666a827bc62..19b8c20e98a4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3438,6 +3438,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 retry_now:
 	ret = (*xmit)(pkt_dev->skb, odev);
 	if (likely(ret == NETDEV_TX_OK)) {
+		txq_trans_update(txq);
 		pkt_dev->last_ok = 1;
 		pkt_dev->sofar++;
 		pkt_dev->seq_num++;
@@ -3690,8 +3691,7 @@ out1:
 #ifdef CONFIG_XFRM
 	free_SAs(pkt_dev);
 #endif
-	if (pkt_dev->flows)
-		vfree(pkt_dev->flows);
+	vfree(pkt_dev->flows);
 	kfree(pkt_dev);
 	return err;
 }
@@ -3790,8 +3790,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
 #ifdef CONFIG_XFRM
 	free_SAs(pkt_dev);
 #endif
-	if (pkt_dev->flows)
-		vfree(pkt_dev->flows);
+	vfree(pkt_dev->flows);
 	kfree(pkt_dev);
 	return 0;
 }
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
index 86234923a3b7..79687dfd6957 100644
--- a/net/core/skb_dma_map.c
+++ b/net/core/skb_dma_map.c
@@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
 	if (dma_mapping_error(dev, map))
 		goto out_err;
 
-	sp->dma_maps[0] = map;
+	sp->dma_head = map;
 	for (i = 0; i < sp->nr_frags; i++) {
 		skb_frag_t *fp = &sp->frags[i];
 
@@ -28,9 +28,8 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
 				   fp->size, dir);
 		if (dma_mapping_error(dev, map))
 			goto unwind;
-		sp->dma_maps[i + 1] = map;
+		sp->dma_maps[i] = map;
 	}
-	sp->num_dma_maps = i + 1;
 
 	return 0;
 
@@ -38,10 +37,10 @@ unwind:
 	while (--i >= 0) {
 		skb_frag_t *fp = &sp->frags[i];
 
-		dma_unmap_page(dev, sp->dma_maps[i + 1],
+		dma_unmap_page(dev, sp->dma_maps[i],
 			       fp->size, dir);
 	}
-	dma_unmap_single(dev, sp->dma_maps[0],
+	dma_unmap_single(dev, sp->dma_head,
 			 skb_headlen(skb), dir);
 out_err:
 	return -ENOMEM;
@@ -54,12 +53,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
 	struct skb_shared_info *sp = skb_shinfo(skb);
 	int i;
 
-	dma_unmap_single(dev, sp->dma_maps[0],
+	dma_unmap_single(dev, sp->dma_head,
 			 skb_headlen(skb), dir);
 	for (i = 0; i < sp->nr_frags; i++) {
 		skb_frag_t *fp = &sp->frags[i];
 
-		dma_unmap_page(dev, sp->dma_maps[i + 1],
+		dma_unmap_page(dev, sp->dma_maps[i],
 			       fp->size, dir);
 	}
 }
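
After this change the linear-area mapping lives in sp->dma_head and the i-th fragment's mapping in sp->dma_maps[i], so descriptor setup indexes fragments directly. A hedged sketch of a transmit path consuming the maps; the descriptor ring is hypothetical:

/* Sketch only: returns the number of descriptors used. */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct example_tx_desc {
	dma_addr_t addr;
	u32 len;
};

static int example_map_for_tx(struct device *dev, struct sk_buff *skb,
			      struct example_tx_desc *ring)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	int i;

	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;

	ring[0].addr = sp->dma_head;		/* linear area */
	ring[0].len = skb_headlen(skb);
	for (i = 0; i < sp->nr_frags; i++) {	/* page fragments */
		ring[i + 1].addr = sp->dma_maps[i];
		ring[i + 1].len = sp->frags[i].size;
	}
	return 1 + sp->nr_frags;
}
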
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e505b5392e1e..5c93435b0347 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/kernel.h> 41#include <linux/kernel.h>
42#include <linux/kmemcheck.h>
42#include <linux/mm.h> 43#include <linux/mm.h>
43#include <linux/interrupt.h> 44#include <linux/interrupt.h>
44#include <linux/in.h> 45#include <linux/in.h>
@@ -65,7 +66,7 @@
65 66
66#include <asm/uaccess.h> 67#include <asm/uaccess.h>
67#include <asm/system.h> 68#include <asm/system.h>
68#include <trace/skb.h> 69#include <trace/events/skb.h>
69 70
70#include "kmap_skb.h" 71#include "kmap_skb.h"
71 72
@@ -201,6 +202,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
201 skb->data = data; 202 skb->data = data;
202 skb_reset_tail_pointer(skb); 203 skb_reset_tail_pointer(skb);
203 skb->end = skb->tail + size; 204 skb->end = skb->tail + size;
205 kmemcheck_annotate_bitfield(skb, flags1);
206 kmemcheck_annotate_bitfield(skb, flags2);
204 /* make sure we initialize shinfo sequentially */ 207 /* make sure we initialize shinfo sequentially */
205 shinfo = skb_shinfo(skb); 208 shinfo = skb_shinfo(skb);
206 atomic_set(&shinfo->dataref, 1); 209 atomic_set(&shinfo->dataref, 1);
@@ -210,13 +213,15 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
210 shinfo->gso_type = 0; 213 shinfo->gso_type = 0;
211 shinfo->ip6_frag_id = 0; 214 shinfo->ip6_frag_id = 0;
212 shinfo->tx_flags.flags = 0; 215 shinfo->tx_flags.flags = 0;
213 shinfo->frag_list = NULL; 216 skb_frag_list_init(skb);
214 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); 217 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
215 218
216 if (fclone) { 219 if (fclone) {
217 struct sk_buff *child = skb + 1; 220 struct sk_buff *child = skb + 1;
218 atomic_t *fclone_ref = (atomic_t *) (child + 1); 221 atomic_t *fclone_ref = (atomic_t *) (child + 1);
219 222
223 kmemcheck_annotate_bitfield(child, flags1);
224 kmemcheck_annotate_bitfield(child, flags2);
220 skb->fclone = SKB_FCLONE_ORIG; 225 skb->fclone = SKB_FCLONE_ORIG;
221 atomic_set(fclone_ref, 1); 226 atomic_set(fclone_ref, 1);
222 227
@@ -323,7 +328,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
323{ 328{
324 struct sk_buff *list; 329 struct sk_buff *list;
325 330
326 for (list = skb_shinfo(skb)->frag_list; list; list = list->next) 331 skb_walk_frags(skb, list)
327 skb_get(list); 332 skb_get(list);
328} 333}
329 334
@@ -338,7 +343,7 @@ static void skb_release_data(struct sk_buff *skb)
338 put_page(skb_shinfo(skb)->frags[i].page); 343 put_page(skb_shinfo(skb)->frags[i].page);
339 } 344 }
340 345
341 if (skb_shinfo(skb)->frag_list) 346 if (skb_has_frags(skb))
342 skb_drop_fraglist(skb); 347 skb_drop_fraglist(skb);
343 348
344 kfree(skb->head); 349 kfree(skb->head);
@@ -381,7 +386,7 @@ static void kfree_skbmem(struct sk_buff *skb)
381 386
382static void skb_release_head_state(struct sk_buff *skb) 387static void skb_release_head_state(struct sk_buff *skb)
383{ 388{
384 dst_release(skb->dst); 389 skb_dst_drop(skb);
385#ifdef CONFIG_XFRM 390#ifdef CONFIG_XFRM
386 secpath_put(skb->sp); 391 secpath_put(skb->sp);
387#endif 392#endif
@@ -503,7 +508,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
503 shinfo->gso_type = 0; 508 shinfo->gso_type = 0;
504 shinfo->ip6_frag_id = 0; 509 shinfo->ip6_frag_id = 0;
505 shinfo->tx_flags.flags = 0; 510 shinfo->tx_flags.flags = 0;
506 shinfo->frag_list = NULL; 511 skb_frag_list_init(skb);
507 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); 512 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
508 513
509 memset(skb, 0, offsetof(struct sk_buff, tail)); 514 memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -521,13 +526,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
521 new->transport_header = old->transport_header; 526 new->transport_header = old->transport_header;
522 new->network_header = old->network_header; 527 new->network_header = old->network_header;
523 new->mac_header = old->mac_header; 528 new->mac_header = old->mac_header;
524 new->dst = dst_clone(old->dst); 529 skb_dst_set(new, dst_clone(skb_dst(old)));
525#ifdef CONFIG_XFRM 530#ifdef CONFIG_XFRM
526 new->sp = secpath_get(old->sp); 531 new->sp = secpath_get(old->sp);
527#endif 532#endif
528 memcpy(new->cb, old->cb, sizeof(old->cb)); 533 memcpy(new->cb, old->cb, sizeof(old->cb));
529 new->csum_start = old->csum_start; 534 new->csum = old->csum;
530 new->csum_offset = old->csum_offset;
531 new->local_df = old->local_df; 535 new->local_df = old->local_df;
532 new->pkt_type = old->pkt_type; 536 new->pkt_type = old->pkt_type;
533 new->ip_summed = old->ip_summed; 537 new->ip_summed = old->ip_summed;
@@ -538,6 +542,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
538#endif 542#endif
539 new->protocol = old->protocol; 543 new->protocol = old->protocol;
540 new->mark = old->mark; 544 new->mark = old->mark;
545 new->iif = old->iif;
541 __nf_copy(new, old); 546 __nf_copy(new, old);
542#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 547#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
543 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 548 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -550,10 +555,17 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
550#endif 555#endif
551#endif 556#endif
552 new->vlan_tci = old->vlan_tci; 557 new->vlan_tci = old->vlan_tci;
558#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
559 new->do_not_encrypt = old->do_not_encrypt;
560#endif
553 561
554 skb_copy_secmark(new, old); 562 skb_copy_secmark(new, old);
555} 563}
556 564
565/*
566 * You should not add any new code to this function. Add it to
567 * __copy_skb_header above instead.
568 */
557static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 569static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
558{ 570{
559#define C(x) n->x = skb->x 571#define C(x) n->x = skb->x
@@ -569,16 +581,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
569 n->cloned = 1; 581 n->cloned = 1;
570 n->nohdr = 0; 582 n->nohdr = 0;
571 n->destructor = NULL; 583 n->destructor = NULL;
572 C(iif);
573 C(tail); 584 C(tail);
574 C(end); 585 C(end);
575 C(head); 586 C(head);
576 C(data); 587 C(data);
577 C(truesize); 588 C(truesize);
578#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
579 C(do_not_encrypt);
580 C(requeue);
581#endif
582 atomic_set(&n->users, 1); 589 atomic_set(&n->users, 1);
583 590
584 atomic_inc(&(skb_shinfo(skb)->dataref)); 591 atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -633,6 +640,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
633 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 640 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
634 if (!n) 641 if (!n)
635 return NULL; 642 return NULL;
643
644 kmemcheck_annotate_bitfield(n, flags1);
645 kmemcheck_annotate_bitfield(n, flags2);
636 n->fclone = SKB_FCLONE_UNAVAILABLE; 646 n->fclone = SKB_FCLONE_UNAVAILABLE;
637 } 647 }
638 648
@@ -755,7 +765,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
755 skb_shinfo(n)->nr_frags = i; 765 skb_shinfo(n)->nr_frags = i;
756 } 766 }
757 767
758 if (skb_shinfo(skb)->frag_list) { 768 if (skb_has_frags(skb)) {
759 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 769 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
760 skb_clone_fraglist(n); 770 skb_clone_fraglist(n);
761 } 771 }
@@ -818,7 +828,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
818 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
819 get_page(skb_shinfo(skb)->frags[i].page); 829 get_page(skb_shinfo(skb)->frags[i].page);
820 830
821 if (skb_shinfo(skb)->frag_list) 831 if (skb_has_frags(skb))
822 skb_clone_fraglist(skb); 832 skb_clone_fraglist(skb);
823 833
824 skb_release_data(skb); 834 skb_release_data(skb);
@@ -1090,7 +1100,7 @@ drop_pages:
1090 for (; i < nfrags; i++) 1100 for (; i < nfrags; i++)
1091 put_page(skb_shinfo(skb)->frags[i].page); 1101 put_page(skb_shinfo(skb)->frags[i].page);
1092 1102
1093 if (skb_shinfo(skb)->frag_list) 1103 if (skb_has_frags(skb))
1094 skb_drop_fraglist(skb); 1104 skb_drop_fraglist(skb);
1095 goto done; 1105 goto done;
1096 } 1106 }
@@ -1185,7 +1195,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1185 /* Optimization: no fragments, no reasons to preestimate 1195 /* Optimization: no fragments, no reasons to preestimate
1186 * size of pulled pages. Superb. 1196 * size of pulled pages. Superb.
1187 */ 1197 */
1188 if (!skb_shinfo(skb)->frag_list) 1198 if (!skb_has_frags(skb))
1189 goto pull_pages; 1199 goto pull_pages;
1190 1200
1191 /* Estimate size of pulled pages. */ 1201 /* Estimate size of pulled pages. */
@@ -1282,8 +1292,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
1282 1292
1283int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1293int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1284{ 1294{
1285 int i, copy;
1286 int start = skb_headlen(skb); 1295 int start = skb_headlen(skb);
1296 struct sk_buff *frag_iter;
1297 int i, copy;
1287 1298
1288 if (offset > (int)skb->len - len) 1299 if (offset > (int)skb->len - len)
1289 goto fault; 1300 goto fault;
@@ -1325,28 +1336,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1325 start = end; 1336 start = end;
1326 } 1337 }
1327 1338
1328 if (skb_shinfo(skb)->frag_list) { 1339 skb_walk_frags(skb, frag_iter) {
1329 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1340 int end;
1330 1341
1331 for (; list; list = list->next) { 1342 WARN_ON(start > offset + len);
1332 int end; 1343
1333 1344 end = start + frag_iter->len;
1334 WARN_ON(start > offset + len); 1345 if ((copy = end - offset) > 0) {
1335 1346 if (copy > len)
1336 end = start + list->len; 1347 copy = len;
1337 if ((copy = end - offset) > 0) { 1348 if (skb_copy_bits(frag_iter, offset - start, to, copy))
1338 if (copy > len) 1349 goto fault;
1339 copy = len; 1350 if ((len -= copy) == 0)
1340 if (skb_copy_bits(list, offset - start, 1351 return 0;
1341 to, copy)) 1352 offset += copy;
1342 goto fault; 1353 to += copy;
1343 if ((len -= copy) == 0)
1344 return 0;
1345 offset += copy;
1346 to += copy;
1347 }
1348 start = end;
1349 } 1354 }
1355 start = end;
1350 } 1356 }
1351 if (!len) 1357 if (!len)
1352 return 0; 1358 return 0;
@@ -1531,6 +1537,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1531 .ops = &sock_pipe_buf_ops, 1537 .ops = &sock_pipe_buf_ops,
1532 .spd_release = sock_spd_release, 1538 .spd_release = sock_spd_release,
1533 }; 1539 };
1540 struct sk_buff *frag_iter;
1534 struct sock *sk = skb->sk; 1541 struct sock *sk = skb->sk;
1535 1542
1536 /* 1543 /*
@@ -1545,13 +1552,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1545 /* 1552 /*
1546 * now see if we have a frag_list to map 1553 * now see if we have a frag_list to map
1547 */ 1554 */
1548 if (skb_shinfo(skb)->frag_list) { 1555 skb_walk_frags(skb, frag_iter) {
1549 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1556 if (!tlen)
1550 1557 break;
1551 for (; list && tlen; list = list->next) { 1558 if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
1552 if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) 1559 break;
1553 break;
1554 }
1555 } 1560 }
1556 1561
1557done: 1562done:
@@ -1590,8 +1595,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1632,28 +1638,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_store_bits(list, offset - start,
-						   from, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				from += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_store_bits(frag_iter, offset - start,
+					   from, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1670,6 +1672,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1709,29 +1712,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2;
-				if (copy > len)
-					copy = len;
-				csum2 = skb_checksum(list, offset - start,
-						     copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				pos += copy;
-			}
-			start = end;
-		}
-	}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2;
+			if (copy > len)
+				copy = len;
+			csum2 = skb_checksum(frag_iter, offset - start,
+					     copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			pos += copy;
+		}
+		start = end;
+	}
 	BUG_ON(len);
 
@@ -1746,6 +1745,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1790,31 +1790,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			__wsum csum2;
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				csum2 = skb_copy_and_csum_bits(list,
-							       offset - start,
-							       to, copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
-		}
-	}
+	skb_walk_frags(skb, frag_iter) {
+		__wsum csum2;
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			csum2 = skb_copy_and_csum_bits(frag_iter,
+						       offset - start,
+						       to, copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			to += copy;
+			pos += copy;
+		}
+		start = end;
+	}
 	BUG_ON(len);
 	return csum;
@@ -2324,8 +2320,7 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->root_skb == st->cur_skb &&
-	    skb_shinfo(st->root_skb)->frag_list) {
+	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
@@ -2636,7 +2631,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		} else
 			skb_get(fskb2);
 
-		BUG_ON(skb_shinfo(nskb)->frag_list);
+		SKB_FRAG_ASSERT(nskb);
 		skb_shinfo(nskb)->frag_list = fskb2;
 	}
 
@@ -2661,30 +2656,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
 	struct sk_buff *p = *head;
 	struct sk_buff *nskb;
+	struct skb_shared_info *skbinfo = skb_shinfo(skb);
+	struct skb_shared_info *pinfo = skb_shinfo(p);
 	unsigned int headroom;
 	unsigned int len = skb_gro_len(skb);
+	unsigned int offset = skb_gro_offset(skb);
+	unsigned int headlen = skb_headlen(skb);
 
 	if (p->len + len >= 65536)
 		return -E2BIG;
 
-	if (skb_shinfo(p)->frag_list)
+	if (pinfo->frag_list)
 		goto merge;
-	else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
-		if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
-		    MAX_SKB_FRAGS)
+	else if (headlen <= offset) {
+		skb_frag_t *frag;
+		skb_frag_t *frag2;
+		int i = skbinfo->nr_frags;
+		int nr_frags = pinfo->nr_frags + i;
+
+		offset -= headlen;
+
+		if (nr_frags > MAX_SKB_FRAGS)
 			return -E2BIG;
 
-		skb_shinfo(skb)->frags[0].page_offset +=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_shinfo(skb)->frags[0].size -=
-			skb_gro_offset(skb) - skb_headlen(skb);
+		pinfo->nr_frags = nr_frags;
+		skbinfo->nr_frags = 0;
 
-		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
-		       skb_shinfo(skb)->frags,
-		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+		frag = pinfo->frags + nr_frags;
+		frag2 = skbinfo->frags + i;
+		do {
+			*--frag = *--frag2;
+		} while (--i);
 
-		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
-		skb_shinfo(skb)->nr_frags = 0;
+		frag->page_offset += offset;
+		frag->size -= offset;
 
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
@@ -2715,7 +2720,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
 	skb_shinfo(nskb)->frag_list = p;
-	skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
 	skb_header_release(p);
 	nskb->prev = p;
 
@@ -2730,16 +2735,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	p = nskb;
 
 merge:
-	if (skb_gro_offset(skb) > skb_headlen(skb)) {
-		skb_shinfo(skb)->frags[0].page_offset +=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_shinfo(skb)->frags[0].size -=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_gro_reset_offset(skb);
-		skb_gro_pull(skb, skb_headlen(skb));
+	if (offset > headlen) {
+		skbinfo->frags[0].page_offset += offset - headlen;
+		skbinfo->frags[0].size -= offset - headlen;
+		offset = headlen;
 	}
 
-	__skb_pull(skb, skb_gro_offset(skb));
+	__skb_pull(skb, offset);
 
 	p->prev->next = skb;
 	p->prev = skb;
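The rewritten fast path above copies the donor skb's page fragments into p's frag array tail-first, then trims the bytes GRO has already consumed (offset - headlen) off the first copied fragment. A small user-space sketch of that arithmetic, using a hypothetical two-field frag in place of skb_frag_t:

#include <stdio.h>

struct frag { unsigned page_offset, size; };  /* hypothetical skb_frag_t */

int main(void)
{
	struct frag p[8]  = { { 0, 1000 } };            /* frags already on p */
	struct frag in[2] = { { 64, 700 }, { 0, 800 } };/* frags arriving on skb */
	int p_n = 1, in_n = 2, i = in_n;
	int nr = p_n + in_n;
	unsigned offset = 128, headlen = 64;            /* gro offset past headlen */
	struct frag *dst = p + nr, *src = in + i;

	offset -= headlen;      /* bytes already consumed inside in[0] */

	do {                    /* tail-first copy, like *--frag = *--frag2; */
		*--dst = *--src;
	} while (--i);

	dst->page_offset += offset;  /* first copied frag loses the */
	dst->size        -= offset;  /* consumed bytes              */

	for (i = 0; i < nr; i++)
		printf("frag %d: off=%u size=%u\n", i, p[i].page_offset, p[i].size);
	return 0;
}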
@@ -2786,6 +2788,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2819,26 +2822,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				elt += __skb_to_sgvec(list, sg+elt, offset - start,
-						      copy);
-				if ((len -= copy) == 0)
-					return elt;
-				offset += copy;
-			}
-			start = end;
-		}
-	}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+					      copy);
+			if ((len -= copy) == 0)
+				return elt;
+			offset += copy;
+		}
+		start = end;
+	}
 	BUG_ON(len);
 	return elt;
@@ -2886,7 +2885,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		return -ENOMEM;
 
 	/* Easy case. Most of packets will go this way. */
-	if (!skb_shinfo(skb)->frag_list) {
+	if (!skb_has_frags(skb)) {
 		/* A little of trouble, not enough of space for trailer.
 		 * This should not happen, when stack is tuned to generate
 		 * good frames. OK, on miss we reallocate and reserve even more
@@ -2921,7 +2920,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
 	if (skb1->next == NULL && tailbits) {
 		if (skb_shinfo(skb1)->nr_frags ||
-		    skb_shinfo(skb1)->frag_list ||
+		    skb_has_frags(skb1) ||
 		    skb_tailroom(skb1) < tailbits)
 			ntail = tailbits + 128;
 	}
@@ -2930,7 +2929,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 	    skb_cloned(skb1) ||
 	    ntail ||
 	    skb_shinfo(skb1)->nr_frags ||
-	    skb_shinfo(skb1)->frag_list) {
+	    skb_has_frags(skb1)) {
 		struct sk_buff *skb2;
 
 		/* Fuck, we are miserable poor guys... */
@@ -3016,12 +3015,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx);
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-	if (unlikely(start > skb->len - 2) ||
-	    unlikely((int)start + off > skb->len - 2)) {
+	if (unlikely(start > skb_headlen(skb)) ||
+	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
 		if (net_ratelimit())
 			printk(KERN_WARNING
 			       "bad partial csum: csum=%u/%u len=%u\n",
-			       start, off, skb->len);
+			       start, off, skb_headlen(skb));
 		return false;
 	}
 	skb->ip_summed = CHECKSUM_PARTIAL;
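skb_partial_csum_set() now validates csum_start/csum_offset against the linear area (skb_headlen()) rather than skb->len, since checksum completion writes the 16-bit sum through the linear buffer. A hedged user-space sketch of the tightened bound:

#include <stdbool.h>
#include <stdio.h>

/* sketch, assuming the check mirrors the hunk above: both csum_start and
 * the 2-byte checksum field at csum_start+csum_offset must fit inside the
 * linear header area (headlen), not merely inside the total packet length.
 */
static bool partial_csum_ok(unsigned headlen, unsigned start, unsigned off)
{
	if (start > headlen || (int)start + off > (int)headlen - 2)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", partial_csum_ok(100, 20, 16) ? 1 : 0); /* 1: fits */
	printf("%d\n", partial_csum_ok(100, 20, 90) ? 1 : 0); /* 0: field past headlen */
	return 0;
}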
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dbf3ffb35cc..b0ba569bc973 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -155,6 +155,7 @@ static const char *af_family_key_strings[AF_MAX+1] = {
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
+  "sk_lock-AF_IEEE802154",
   "sk_lock-AF_MAX"
 };
 static const char *af_family_slock_key_strings[AF_MAX+1] = {
@@ -170,6 +171,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
+  "slock-AF_IEEE802154",
   "slock-AF_MAX"
 };
 static const char *af_family_clock_key_strings[AF_MAX+1] = {
@@ -185,6 +187,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
+  "clock-AF_IEEE802154",
   "clock-AF_MAX"
 };
 
@@ -212,6 +215,7 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 
 /* Maximal space eaten by iovec or ancillary data plus some space */
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+EXPORT_SYMBOL(sysctl_optmem_max);
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
@@ -444,7 +448,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 int sock_setsockopt(struct socket *sock, int level, int optname,
 		    char __user *optval, int optlen)
 {
-	struct sock *sk=sock->sk;
+	struct sock *sk = sock->sk;
 	int val;
 	int valbool;
 	struct linger ling;
@@ -463,15 +467,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 	if (get_user(val, (int __user *)optval))
 		return -EFAULT;
 
-	valbool = val?1:0;
+	valbool = val ? 1 : 0;
 
 	lock_sock(sk);
 
-	switch(optname) {
+	switch (optname) {
 	case SO_DEBUG:
-		if (val && !capable(CAP_NET_ADMIN)) {
+		if (val && !capable(CAP_NET_ADMIN))
 			ret = -EACCES;
-		} else
+		else
 			sock_valbool_flag(sk, SOCK_DBG, valbool);
 		break;
 	case SO_REUSEADDR:
@@ -582,7 +586,7 @@ set_rcvbuf:
 			ret = -EINVAL;	/* 1003.1g */
 			break;
 		}
-		if (copy_from_user(&ling,optval,sizeof(ling))) {
+		if (copy_from_user(&ling, optval, sizeof(ling))) {
 			ret = -EFAULT;
 			break;
 		}
@@ -690,9 +694,8 @@ set_rcvbuf:
 	case SO_MARK:
 		if (!capable(CAP_NET_ADMIN))
 			ret = -EPERM;
-		else {
+		else
 			sk->sk_mark = val;
-		}
 		break;
 
 		/* We implement the SO_SNDLOWAT etc to
@@ -704,6 +707,7 @@ set_rcvbuf:
 	release_sock(sk);
 	return ret;
 }
+EXPORT_SYMBOL(sock_setsockopt);
 
 
 int sock_getsockopt(struct socket *sock, int level, int optname,
@@ -727,7 +731,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
 	memset(&v, 0, sizeof(v));
 
-	switch(optname) {
+	switch (optname) {
 	case SO_DEBUG:
 		v.val = sock_flag(sk, SOCK_DBG);
 		break;
@@ -762,7 +766,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
 	case SO_ERROR:
 		v.val = -sock_error(sk);
-		if (v.val==0)
+		if (v.val == 0)
 			v.val = xchg(&sk->sk_err_soft, 0);
 		break;
 
@@ -816,7 +820,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_RCVTIMEO:
-		lv=sizeof(struct timeval);
+		lv = sizeof(struct timeval);
 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
 			v.tm.tv_sec = 0;
 			v.tm.tv_usec = 0;
@@ -827,7 +831,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_SNDTIMEO:
-		lv=sizeof(struct timeval);
+		lv = sizeof(struct timeval);
 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
 			v.tm.tv_sec = 0;
 			v.tm.tv_usec = 0;
@@ -842,7 +846,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_SNDLOWAT:
-		v.val=1;
+		v.val = 1;
 		break;
 
 	case SO_PASSCRED:
@@ -941,6 +945,8 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		sk = kmalloc(prot->obj_size, priority);
 
 	if (sk != NULL) {
+		kmemcheck_annotate_bitfield(sk, flags);
+
 		if (security_sk_alloc(sk, family, priority))
 			goto out_free;
 
@@ -1002,8 +1008,9 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 
 	return sk;
 }
+EXPORT_SYMBOL(sk_alloc);
 
-void sk_free(struct sock *sk)
+static void __sk_free(struct sock *sk)
 {
 	struct sk_filter *filter;
 
@@ -1027,6 +1034,18 @@ static void __sk_free(struct sock *sk)
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
+void sk_free(struct sock *sk)
+{
+	/*
+	 * We subtract one from sk_wmem_alloc and can know if
+	 * some packets are still in some tx queue.
+	 * If not null, sock_wfree() will call __sk_free(sk) later
+	 */
+	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+		__sk_free(sk);
+}
+EXPORT_SYMBOL(sk_free);
+
 /*
  * Last sock_put should drop reference to sk->sk_net. It has already
  * been dropped in sk_change_net. Taking reference to stopping namespace
@@ -1065,7 +1084,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
-		atomic_set(&newsk->sk_wmem_alloc, 0);
+		/*
+		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
+		 */
+		atomic_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
@@ -1126,7 +1148,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 out:
 	return newsk;
 }
-
 EXPORT_SYMBOL_GPL(sk_clone);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
@@ -1170,13 +1191,20 @@ void __init sk_init(void)
 void sock_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
+	int res;
 
 	/* In case it might be waiting for more memory. */
-	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+	res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc);
 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
 		sk->sk_write_space(sk);
-	sock_put(sk);
+	/*
+	 * if sk_wmem_alloc reached 0, we are last user and should
+	 * free this sock, as sk_free() call could not do it.
+	 */
+	if (res == 0)
+		__sk_free(sk);
 }
+EXPORT_SYMBOL(sock_wfree);
 
 /*
  * Read buffer destructor automatically called from kfree_skb.
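The sock_wfree()/sk_free() pair above is the heart of the change: each socket is born with sk_wmem_alloc = 1 (see sock_init_data() below), sk_free() merely drops that final unit, and whichever path brings the counter to zero performs the real teardown, so no per-packet sock_put() is needed. A user-space miniature of the scheme (mini_sock and friends are hypothetical stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mini_sock { atomic_int wmem_alloc; };

static void real_free(struct mini_sock *sk)      /* models __sk_free() */
{
	printf("freeing socket\n");
	free(sk);
}

static void mini_sock_wfree(struct mini_sock *sk, int truesize)
{
	/* fetch_sub returns the old value; old == truesize means we hit 0 */
	if (atomic_fetch_sub(&sk->wmem_alloc, truesize) == truesize)
		real_free(sk);  /* last in-flight packet, sk_free already ran */
}

static void mini_sk_free(struct mini_sock *sk)
{
	if (atomic_fetch_sub(&sk->wmem_alloc, 1) == 1)
		real_free(sk);  /* no packets in flight */
}

int main(void)
{
	struct mini_sock *sk = malloc(sizeof(*sk));

	atomic_init(&sk->wmem_alloc, 1);         /* mirrors sock_init_data() */
	atomic_fetch_add(&sk->wmem_alloc, 500);  /* one queued skb, truesize 500 */

	mini_sk_free(sk);                        /* close: skb still in a qdisc */
	mini_sock_wfree(sk, 500);                /* tx completes: last user frees */
	return 0;
}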
@@ -1188,6 +1216,7 @@ void sock_rfree(struct sk_buff *skb)
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 	sk_mem_uncharge(skb->sk, skb->truesize);
 }
+EXPORT_SYMBOL(sock_rfree);
 
 
 int sock_i_uid(struct sock *sk)
@@ -1199,6 +1228,7 @@ int sock_i_uid(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 	return uid;
 }
+EXPORT_SYMBOL(sock_i_uid);
 
 unsigned long sock_i_ino(struct sock *sk)
 {
@@ -1209,6 +1239,7 @@ unsigned long sock_i_ino(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 	return ino;
 }
+EXPORT_SYMBOL(sock_i_ino);
 
 /*
  * Allocate a skb from the socket's send buffer.
@@ -1217,7 +1248,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
-		struct sk_buff * skb = alloc_skb(size, priority);
+		struct sk_buff *skb = alloc_skb(size, priority);
 		if (skb) {
 			skb_set_owner_w(skb, sk);
 			return skb;
@@ -1225,6 +1256,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(sock_wmalloc);
 
 /*
  * Allocate a skb from the socket's receive buffer.
@@ -1261,6 +1293,7 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(sock_kmalloc);
 
 /*
  * Free an option memory block.
@@ -1270,11 +1303,12 @@ void sock_kfree_s(struct sock *sk, void *mem, int size)
 	kfree(mem);
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
+EXPORT_SYMBOL(sock_kfree_s);
 
 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
    I think, these locks should be removed for datagram sockets.
  */
-static long sock_wait_for_wmem(struct sock * sk, long timeo)
+static long sock_wait_for_wmem(struct sock *sk, long timeo)
 {
 	DEFINE_WAIT(wait);
 
@@ -1392,6 +1426,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 {
 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
 }
+EXPORT_SYMBOL(sock_alloc_send_skb);
 
 static void __lock_sock(struct sock *sk)
 {
@@ -1460,7 +1495,6 @@ int sk_wait_data(struct sock *sk, long *timeo)
 	finish_wait(sk->sk_sleep, &wait);
 	return rc;
 }
-
 EXPORT_SYMBOL(sk_wait_data);
 
 /**
@@ -1541,7 +1575,6 @@ suppress_allocation:
 	atomic_sub(amt, prot->memory_allocated);
 	return 0;
 }
-
 EXPORT_SYMBOL(__sk_mem_schedule);
 
 /**
@@ -1560,7 +1593,6 @@ void __sk_mem_reclaim(struct sock *sk)
 	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
-
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
 
@@ -1575,78 +1607,92 @@ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_bind);
 
 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
 		    int len, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_connect);
 
 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_socketpair);
 
 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_accept);
 
 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
 		    int *len, int peer)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_getname);
 
-unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
+unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
 {
 	return 0;
 }
+EXPORT_SYMBOL(sock_no_poll);
 
 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_ioctl);
 
 int sock_no_listen(struct socket *sock, int backlog)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_listen);
 
 int sock_no_shutdown(struct socket *sock, int how)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_shutdown);
 
 int sock_no_setsockopt(struct socket *sock, int level, int optname,
 		       char __user *optval, int optlen)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_setsockopt);
 
 int sock_no_getsockopt(struct socket *sock, int level, int optname,
 		       char __user *optval, int __user *optlen)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_getsockopt);
 
 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 		    size_t len)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_sendmsg);
 
 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 		    size_t len, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_recvmsg);
 
 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
 {
 	/* Mirror missing mmap method error code */
 	return -ENODEV;
 }
+EXPORT_SYMBOL(sock_no_mmap);
 
 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
 {
@@ -1660,6 +1706,7 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz
 	kunmap(page);
 	return res;
 }
+EXPORT_SYMBOL(sock_no_sendpage);
 
 /*
  *	Default Socket Callbacks
@@ -1723,6 +1770,7 @@ void sk_send_sigurg(struct sock *sk)
 	if (send_sigurg(&sk->sk_socket->file->f_owner))
 		sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
 }
+EXPORT_SYMBOL(sk_send_sigurg);
 
 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 		    unsigned long expires)
@@ -1730,7 +1778,6 @@ void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 	if (!mod_timer(timer, expires))
 		sock_hold(sk);
 }
-
 EXPORT_SYMBOL(sk_reset_timer);
 
 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
@@ -1738,7 +1785,6 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
 	if (timer_pending(timer) && del_timer(timer))
 		__sock_put(sk);
 }
-
 EXPORT_SYMBOL(sk_stop_timer);
 
 void sock_init_data(struct socket *sock, struct sock *sk)
@@ -1795,8 +1841,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_stamp = ktime_set(-1L, 0);
 
 	atomic_set(&sk->sk_refcnt, 1);
+	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
+EXPORT_SYMBOL(sock_init_data);
 
 void lock_sock_nested(struct sock *sk, int subclass)
 {
@@ -1812,7 +1860,6 @@ void lock_sock_nested(struct sock *sk, int subclass)
 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
 	local_bh_enable();
 }
-
 EXPORT_SYMBOL(lock_sock_nested);
 
 void release_sock(struct sock *sk)
@@ -1895,7 +1942,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 
 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(sock_common_getsockopt);
 
 #ifdef CONFIG_COMPAT
@@ -1925,7 +1971,6 @@ int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
 	msg->msg_namelen = addr_len;
 	return err;
 }
-
 EXPORT_SYMBOL(sock_common_recvmsg);
 
 /*
@@ -1938,7 +1983,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
 
 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(sock_common_setsockopt);
 
 #ifdef CONFIG_COMPAT
@@ -1989,7 +2033,6 @@ void sk_common_release(struct sock *sk)
 	sk_refcnt_debug_release(sk);
 	sock_put(sk);
 }
-
 EXPORT_SYMBOL(sk_common_release);
 
 static DEFINE_RWLOCK(proto_list_lock);
@@ -2171,7 +2214,6 @@ out_free_sock_slab:
 out:
 	return -ENOBUFS;
 }
-
 EXPORT_SYMBOL(proto_register);
 
 void proto_unregister(struct proto *prot)
@@ -2198,7 +2240,6 @@ void proto_unregister(struct proto *prot)
 		prot->twsk_prot->twsk_slab = NULL;
 	}
 }
-
 EXPORT_SYMBOL(proto_unregister);
 
 #ifdef CONFIG_PROC_FS
@@ -2324,33 +2365,3 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
-
-EXPORT_SYMBOL(sk_alloc);
-EXPORT_SYMBOL(sk_free);
-EXPORT_SYMBOL(sk_send_sigurg);
-EXPORT_SYMBOL(sock_alloc_send_skb);
-EXPORT_SYMBOL(sock_init_data);
-EXPORT_SYMBOL(sock_kfree_s);
-EXPORT_SYMBOL(sock_kmalloc);
-EXPORT_SYMBOL(sock_no_accept);
-EXPORT_SYMBOL(sock_no_bind);
-EXPORT_SYMBOL(sock_no_connect);
-EXPORT_SYMBOL(sock_no_getname);
-EXPORT_SYMBOL(sock_no_getsockopt);
-EXPORT_SYMBOL(sock_no_ioctl);
-EXPORT_SYMBOL(sock_no_listen);
-EXPORT_SYMBOL(sock_no_mmap);
-EXPORT_SYMBOL(sock_no_poll);
-EXPORT_SYMBOL(sock_no_recvmsg);
-EXPORT_SYMBOL(sock_no_sendmsg);
-EXPORT_SYMBOL(sock_no_sendpage);
-EXPORT_SYMBOL(sock_no_setsockopt);
-EXPORT_SYMBOL(sock_no_shutdown);
-EXPORT_SYMBOL(sock_no_socketpair);
-EXPORT_SYMBOL(sock_rfree);
-EXPORT_SYMBOL(sock_setsockopt);
-EXPORT_SYMBOL(sock_wfree);
-EXPORT_SYMBOL(sock_wmalloc);
-EXPORT_SYMBOL(sock_i_uid);
-EXPORT_SYMBOL(sock_i_ino);
-EXPORT_SYMBOL(sysctl_optmem_max);
diff --git a/net/core/stream.c b/net/core/stream.c
index 8727cead64ad..a37debfeb1b2 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -33,7 +33,8 @@ void sk_stream_write_space(struct sock *sk)
 		clear_bit(SOCK_NOSPACE, &sock->flags);
 
 		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
+						POLLWRNORM | POLLWRBAND);
 		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
 			sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
 	}
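wake_up_interruptible_poll() lets the wakeup carry an event mask, so waiters keyed to a different poll event can skip wakeups that only report write space instead of every sleeper on sk_sleep being scheduled. From memory (hedged; not part of this patch), the <linux/wait.h> wrapper is believed to reduce to:

/* believed definition in <linux/wait.h>; quoted from memory */
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *)(m))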
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 164b090d5ac3..25d717ebc92e 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -51,6 +51,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -94,31 +95,28 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			copy = end - offset;
-			if (copy > 0) {
-				if (copy > len)
-					copy = len;
-				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						offset - start, to, copy,
-						pinned_list);
-				if (cookie < 0)
-					goto fault;
-				len -= copy;
-				if (len == 0)
-					goto end;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		copy = end - offset;
+		if (copy > 0) {
+			if (copy > len)
+				copy = len;
+			cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
+							     offset - start,
+							     to, copy,
+							     pinned_list);
+			if (cookie < 0)
+				goto fault;
+			len -= copy;
+			if (len == 0)
+				goto end;
+			offset += copy;
+		}
+		start = end;
 	}
 
 end:
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d1dd95289b89..a0a36c9e6cce 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -452,7 +452,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 					   struct sk_buff *skb)
 {
 	struct rtable *rt;
-	struct flowi fl = { .oif = skb->rtable->rt_iif,
+	struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
 			    .nl_u = { .ip4_u =
 					{ .daddr = ip_hdr(skb)->saddr,
 					  .saddr = ip_hdr(skb)->daddr,
@@ -507,14 +507,14 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	const struct iphdr *rxiph;
 	struct sk_buff *skb;
 	struct dst_entry *dst;
-	struct net *net = dev_net(rxskb->dst->dev);
+	struct net *net = dev_net(skb_dst(rxskb)->dev);
 	struct sock *ctl_sk = net->dccp.v4_ctl_sk;
 
 	/* Never send a reset in response to a reset. */
 	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
 		return;
 
-	if (rxskb->rtable->rt_type != RTN_LOCAL)
+	if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
 		return;
 
 	dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
@@ -528,7 +528,7 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	rxiph = ip_hdr(rxskb);
 	dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
 							    rxiph->daddr);
-	skb->dst = dst_clone(dst);
+	skb_dst_set(skb, dst_clone(dst));
 
 	bh_lock_sock(ctl_sk);
 	err = ip_build_and_send_pkt(skb, ctl_sk,
@@ -567,7 +567,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 
 	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
-	if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
 		return 0;	/* discard, don't send a reset here */
 
 	if (dccp_bad_service_code(sk, service)) {
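All the raw skb->dst dereferences in this and the following files move behind accessors. These are presumably thin wrappers; a self-contained sketch under that assumption (sketch_skb is a hypothetical stand-in, skb_rtable() is believed to simply cast the result, and skb_dst_drop() — used in dn_route.c below — is believed to combine release and clear, replacing the old dst_release(xchg(&skb->dst, NULL)) idiom):

#include <assert.h>

struct dst_entry { int refcnt; };
struct sketch_skb { unsigned long _skb_dst; };  /* assumed storage */

static struct dst_entry *skb_dst(const struct sketch_skb *skb)
{
	return (struct dst_entry *)skb->_skb_dst;
}

static void skb_dst_set(struct sketch_skb *skb, struct dst_entry *dst)
{
	skb->_skb_dst = (unsigned long)dst;
}

int main(void)
{
	struct dst_entry d = { 1 };
	struct sketch_skb skb = { 0 };

	skb_dst_set(&skb, &d);
	assert(skb_dst(&skb) == &d);
	return 0;
}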
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b963f35c65f6..05ea7440d9e5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -314,8 +314,9 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	struct ipv6hdr *rxip6h;
 	struct sk_buff *skb;
 	struct flowi fl;
-	struct net *net = dev_net(rxskb->dst->dev);
+	struct net *net = dev_net(skb_dst(rxskb)->dev);
 	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
+	struct dst_entry *dst;
 
 	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
 		return;
@@ -342,8 +343,9 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	security_skb_classify_flow(rxskb, &fl);
 
 	/* sk = NULL, but it is safe for now. RST socket required. */
-	if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) {
-		if (xfrm_lookup(net, &skb->dst, &fl, NULL, 0) >= 0) {
+	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
+		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
+			skb_dst_set(skb, dst);
 			ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
 			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 36bcc00654d3..c0e88c16d088 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -350,7 +350,7 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 	/* Reserve space for headers. */
 	skb_reserve(skb, sk->sk_prot->max_header);
 
-	skb->dst = dst_clone(dst);
+	skb_dst_set(skb, dst_clone(dst));
 
 	dreq = dccp_rsk(req);
 	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9647d911f916..a5e3a593e472 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1075,6 +1075,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
 	int err = 0;
 	unsigned char type;
 	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+	struct dst_entry *dst;
 
 	lock_sock(sk);
 
@@ -1102,8 +1103,9 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
 	}
 	release_sock(sk);
 
-	dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
-	skb->dst = NULL;
+	dst = skb_dst(skb);
+	dst_release(xchg(&newsk->sk_dst_cache, dst));
+	skb_dst_set(skb, NULL);
 
 	DN_SK(newsk)->state = DN_CR;
 	DN_SK(newsk)->addrrem = cb->src_port;
@@ -1250,14 +1252,8 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		if (skb) {
 			amount = skb->len;
 		} else {
-			skb = sk->sk_receive_queue.next;
-			for (;;) {
-				if (skb ==
-				    (struct sk_buff *)&sk->sk_receive_queue)
-					break;
+			skb_queue_walk(&sk->sk_receive_queue, skb)
 				amount += skb->len;
-				skb = skb->next;
-			}
 		}
 		release_sock(sk);
 		err = put_user(amount, (int __user *)arg);
@@ -1644,13 +1640,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 
 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
 {
-	struct sk_buff *skb = q->next;
+	struct sk_buff *skb;
 	int len = 0;
 
 	if (flags & MSG_OOB)
 		return !skb_queue_empty(q) ? 1 : 0;
 
-	while(skb != (struct sk_buff *)q) {
+	skb_queue_walk(q, skb) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
 		len += skb->len;
 
@@ -1666,8 +1662,6 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 		/* minimum data length for read exceeded */
 		if (len >= target)
 			return 1;
-
-		skb = skb->next;
 	}
 
 	return 0;
@@ -1683,7 +1677,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 	size_t target = size > 1 ? 1 : 0;
 	size_t copied = 0;
 	int rv = 0;
-	struct sk_buff *skb, *nskb;
+	struct sk_buff *skb, *n;
 	struct dn_skb_cb *cb = NULL;
 	unsigned char eor = 0;
 	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1758,7 +1752,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		finish_wait(sk->sk_sleep, &wait);
 	}
 
-	for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
+	skb_queue_walk_safe(queue, skb, n) {
 		unsigned int chunk = skb->len;
 		cb = DN_SKB_CB(skb);
 
@@ -1775,7 +1769,6 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 			skb_pull(skb, chunk);
 
 		eor = cb->nsp_flags & 0x40;
-		nskb = skb->next;
 
 		if (skb->len == 0) {
 			skb_unlink(skb, queue);
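The DECnet changes above swap hand-rolled traversals of the circular receive queue for skb_queue_walk()/skb_queue_walk_safe(). The sentinel trick — the sk_buff_head itself terminates the ring — is easy to see in a miniature (node and the macros below are hypothetical stand-ins for the real list helpers):

#include <stdio.h>

struct node { struct node *next; int len; };

#define queue_walk(q, n) \
	for (n = (q)->next; n != (struct node *)(q); n = (n)->next)

#define queue_walk_safe(q, n, tmp) \
	for (n = (q)->next, tmp = (n)->next; n != (struct node *)(q); \
	     n = tmp, tmp = (n)->next)

int main(void)
{
	struct node head;              /* head doubles as the sentinel... */
	struct node b = { &head, 20 }; /* ...so the walk stops on wrap    */
	struct node a = { &b,    10 };
	struct node *n, *tmp;
	int total = 0;

	head.next = &a;
	head.len = 0;

	queue_walk(&head, n)           /* replaces while (skb != (...)q)  */
		total += n->len;
	printf("total = %d\n", total); /* 30 */

	queue_walk_safe(&head, n, tmp) /* safe form: n may be unlinked    */
		;
	return 0;
}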
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 05b5aa05e50e..923786bd6d01 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -204,7 +204,7 @@ static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
 
 static int dn_neigh_output_packet(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
 	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
@@ -224,7 +224,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 
 static int dn_long_output(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
@@ -270,7 +270,7 @@ static int dn_long_output(struct sk_buff *skb)
 
 static int dn_short_output(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -313,7 +313,7 @@ static int dn_short_output(struct sk_buff *skb)
  */
 static int dn_phase3_output(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 5d8a2a56fd39..932408dca86d 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -578,6 +578,7 @@ out:
 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
 {
 	int err;
+	int skb_len;
 
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
@@ -592,22 +593,12 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
 	if (err)
 		goto out;
 
+	skb_len = skb->len;
 	skb_set_owner_r(skb, sk);
 	skb_queue_tail(queue, skb);
 
-	/* This code only runs from BH or BH protected context.
-	 * Therefore the plain read_lock is ok here. -DaveM
-	 */
-	read_lock(&sk->sk_callback_lock);
-	if (!sock_flag(sk, SOCK_DEAD)) {
-		struct socket *sock = sk->sk_socket;
-		wake_up_interruptible(sk->sk_sleep);
-		if (sock && sock->fasync_list &&
-		    !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
-			__kill_fasync(sock->fasync_list, sig,
-				      (sig == SIGURG) ? POLL_PRI : POLL_IN);
-	}
-	read_unlock(&sk->sk_callback_lock);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb_len);
out:
 	return err;
 }
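Beyond replacing the open-coded fasync/wakeup logic with sk_data_ready(), the hunk snapshots skb->len before the skb is queued: once it sits on the receive queue a concurrent reader may consume and free it, so the callback must not touch the skb afterwards. A compact sketch of that ordering (pkt, publish() and data_ready() are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

static void data_ready(int bytes) { printf("%d bytes ready\n", bytes); }

/* models skb_queue_tail(): ownership passes to the queue (here, freed),
 * so the caller must not dereference p afterwards. */
static void publish(struct pkt *p) { free(p); }

static void queue_and_notify(struct pkt *p)
{
	int len = p->len;   /* snapshot, mirroring skb_len = skb->len; */

	publish(p);         /* after this, p->len would be use-after-free */
	data_ready(len);    /* safe: uses the saved length */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->len = 128;
	queue_and_notify(p);
	return 0;
}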
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 2013c25b7f5a..a65e929ce76c 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -85,7 +85,7 @@ static void dn_nsp_send(struct sk_buff *skb)
 	dst = sk_dst_check(sk, 0);
 	if (dst) {
 try_again:
-		skb->dst = dst;
+		skb_dst_set(skb, dst);
 		dst_output(skb);
 		return;
 	}
@@ -382,7 +382,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 	struct dn_scp *scp = DN_SK(sk);
-	struct sk_buff *skb2, *list, *ack = NULL;
+	struct sk_buff *skb2, *n, *ack = NULL;
 	int wakeup = 0;
 	int try_retrans = 0;
 	unsigned long reftime = cb->stamp;
@@ -390,9 +390,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 	unsigned short xmit_count;
 	unsigned short segnum;
 
-	skb2 = q->next;
-	list = (struct sk_buff *)q;
-	while(list != skb2) {
+	skb_queue_walk_safe(q, skb2, n) {
 		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);
 
 		if (dn_before_or_equal(cb2->segnum, acknum))
@@ -400,8 +398,6 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 
 		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */
 
-		skb2 = skb2->next;
-
 		if (ack == NULL)
 			continue;
 
@@ -586,7 +582,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
 	 * to be able to send disc packets out which have no socket
 	 * associations.
 	 */
-	skb->dst = dst_clone(dst);
+	skb_dst_set(skb, dst_clone(dst));
 	dst_output(skb);
 }
 
@@ -615,7 +611,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
 	int ddl = 0;
 	gfp_t gfp = GFP_ATOMIC;
 
-	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl,
+	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
 		       NULL, cb->src_port, cb->dst_port);
 }
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 0cc4394117df..1d6ca8a98dc6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -678,7 +678,7 @@ out:
678 678
679static int dn_output(struct sk_buff *skb) 679static int dn_output(struct sk_buff *skb)
680{ 680{
681 struct dst_entry *dst = skb->dst; 681 struct dst_entry *dst = skb_dst(skb);
682 struct dn_route *rt = (struct dn_route *)dst; 682 struct dn_route *rt = (struct dn_route *)dst;
683 struct net_device *dev = dst->dev; 683 struct net_device *dev = dst->dev;
684 struct dn_skb_cb *cb = DN_SKB_CB(skb); 684 struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -717,7 +717,7 @@ error:
717static int dn_forward(struct sk_buff *skb) 717static int dn_forward(struct sk_buff *skb)
718{ 718{
719 struct dn_skb_cb *cb = DN_SKB_CB(skb); 719 struct dn_skb_cb *cb = DN_SKB_CB(skb);
720 struct dst_entry *dst = skb->dst; 720 struct dst_entry *dst = skb_dst(skb);
721 struct dn_dev *dn_db = dst->dev->dn_ptr; 721 struct dn_dev *dn_db = dst->dev->dn_ptr;
722 struct dn_route *rt; 722 struct dn_route *rt;
723 struct neighbour *neigh = dst->neighbour; 723 struct neighbour *neigh = dst->neighbour;
@@ -730,7 +730,7 @@ static int dn_forward(struct sk_buff *skb)
730 goto drop; 730 goto drop;
731 731
732 /* Ensure that we have enough space for headers */ 732 /* Ensure that we have enough space for headers */
733 rt = (struct dn_route *)skb->dst; 733 rt = (struct dn_route *)skb_dst(skb);
734 header_len = dn_db->use_long ? 21 : 6; 734 header_len = dn_db->use_long ? 21 : 6;
735 if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) 735 if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
736 goto drop; 736 goto drop;
@@ -1392,7 +1392,8 @@ make_route:
1392 goto e_neighbour; 1392 goto e_neighbour;
1393 1393
1394 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); 1394 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1395 dn_insert_route(rt, hash, (struct dn_route **)&skb->dst); 1395 dn_insert_route(rt, hash, &rt);
1396 skb_dst_set(skb, &rt->u.dst);
1396 1397
1397done: 1398done:
1398 if (neigh) 1399 if (neigh)
@@ -1424,7 +1425,7 @@ static int dn_route_input(struct sk_buff *skb)
1424 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1425 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1425 unsigned hash = dn_hash(cb->src, cb->dst); 1426 unsigned hash = dn_hash(cb->src, cb->dst);
1426 1427
1427 if (skb->dst) 1428 if (skb_dst(skb))
1428 return 0; 1429 return 0;
1429 1430
1430 rcu_read_lock(); 1431 rcu_read_lock();
@@ -1437,7 +1438,7 @@ static int dn_route_input(struct sk_buff *skb)
1437 (rt->fl.iif == cb->iif)) { 1438 (rt->fl.iif == cb->iif)) {
1438 dst_use(&rt->u.dst, jiffies); 1439 dst_use(&rt->u.dst, jiffies);
1439 rcu_read_unlock(); 1440 rcu_read_unlock();
1440 skb->dst = (struct dst_entry *)rt; 1441 skb_dst_set(skb, (struct dst_entry *)rt);
1441 return 0; 1442 return 0;
1442 } 1443 }
1443 } 1444 }
@@ -1449,7 +1450,7 @@ static int dn_route_input(struct sk_buff *skb)
1449static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 1450static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1450 int event, int nowait, unsigned int flags) 1451 int event, int nowait, unsigned int flags)
1451{ 1452{
1452 struct dn_route *rt = (struct dn_route *)skb->dst; 1453 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
1453 struct rtmsg *r; 1454 struct rtmsg *r;
1454 struct nlmsghdr *nlh; 1455 struct nlmsghdr *nlh;
1455 unsigned char *b = skb_tail_pointer(skb); 1456 unsigned char *b = skb_tail_pointer(skb);
@@ -1554,7 +1555,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1554 err = dn_route_input(skb); 1555 err = dn_route_input(skb);
1555 local_bh_enable(); 1556 local_bh_enable();
1556 memset(cb, 0, sizeof(struct dn_skb_cb)); 1557 memset(cb, 0, sizeof(struct dn_skb_cb));
1557 rt = (struct dn_route *)skb->dst; 1558 rt = (struct dn_route *)skb_dst(skb);
1558 if (!err && -rt->u.dst.error) 1559 if (!err && -rt->u.dst.error)
1559 err = rt->u.dst.error; 1560 err = rt->u.dst.error;
1560 } else { 1561 } else {
@@ -1570,7 +1571,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1570 skb->dev = NULL; 1571 skb->dev = NULL;
1571 if (err) 1572 if (err)
1572 goto out_free; 1573 goto out_free;
1573 skb->dst = &rt->u.dst; 1574 skb_dst_set(skb, &rt->u.dst);
1574 if (rtm->rtm_flags & RTM_F_NOTIFY) 1575 if (rtm->rtm_flags & RTM_F_NOTIFY)
1575 rt->rt_flags |= RTCF_NOTIFY; 1576 rt->rt_flags |= RTCF_NOTIFY;
1576 1577
@@ -1622,15 +1623,15 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1622 rt = rcu_dereference(rt->u.dst.dn_next), idx++) { 1623 rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
1623 if (idx < s_idx) 1624 if (idx < s_idx)
1624 continue; 1625 continue;
1625 skb->dst = dst_clone(&rt->u.dst); 1626 skb_dst_set(skb, dst_clone(&rt->u.dst));
1626 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 1627 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1627 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1628 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1628 1, NLM_F_MULTI) <= 0) { 1629 1, NLM_F_MULTI) <= 0) {
1629 dst_release(xchg(&skb->dst, NULL)); 1630 skb_dst_drop(skb);
1630 rcu_read_unlock_bh(); 1631 rcu_read_unlock_bh();
1631 goto done; 1632 goto done;
1632 } 1633 }
1633 dst_release(xchg(&skb->dst, NULL)); 1634 skb_dst_drop(skb);
1634 } 1635 }
1635 rcu_read_unlock_bh(); 1636 rcu_read_unlock_bh();
1636 } 1637 }
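
The dn_route.c hunks above are mechanical conversions from direct skb->dst pokes to accessor calls. As a rough sketch of the accessor pattern this series converts to (the private field name and its storage type are assumptions here, not a quote of the real skbuff.h):

	/* Hedged sketch of the dst accessors the conversions rely on. */
	static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
	{
		return (struct dst_entry *)skb->_skb_dst;	/* assumed member */
	}

	static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
	{
		skb->_skb_dst = (unsigned long)dst;
	}

	static inline void skb_dst_drop(struct sk_buff *skb)
	{
		if (skb_dst(skb))
			dst_release(skb_dst(skb));
		skb->_skb_dst = 0UL;
	}

Note how skb_dst_drop() replaces the open-coded dst_release(xchg(&skb->dst, NULL)) idiom in dn_cache_dump() above, dropping the reference and clearing the pointer in one call.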
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 14fbca55e908..72495f25269f 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -115,7 +115,7 @@ static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
115} 115}
116 116
117static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 117static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
118 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 118 struct fib_rule_hdr *frh,
119 struct nlattr **tb) 119 struct nlattr **tb)
120{ 120{
121 int err = -EINVAL; 121 int err = -EINVAL;
@@ -192,7 +192,7 @@ unsigned dnet_addr_type(__le16 addr)
192} 192}
193 193
194static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 194static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
195 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 195 struct fib_rule_hdr *frh)
196{ 196{
197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule; 197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
198 198
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ed131181215d..2175e6d5cc8d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
67 return -ENETDOWN; 67 return -ENETDOWN;
68 68
69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { 69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
70 err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN); 70 err = dev_unicast_add(master, dev->dev_addr);
71 if (err < 0) 71 if (err < 0)
72 goto out; 72 goto out;
73 } 73 }
@@ -90,7 +90,7 @@ clear_allmulti:
90 dev_set_allmulti(master, -1); 90 dev_set_allmulti(master, -1);
91del_unicast: 91del_unicast:
92 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 92 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
93 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 93 dev_unicast_delete(master, dev->dev_addr);
94out: 94out:
95 return err; 95 return err;
96} 96}
@@ -108,7 +108,7 @@ static int dsa_slave_close(struct net_device *dev)
108 dev_set_promiscuity(master, -1); 108 dev_set_promiscuity(master, -1);
109 109
110 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 110 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
111 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 111 dev_unicast_delete(master, dev->dev_addr);
112 112
113 return 0; 113 return 0;
114} 114}
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
147 goto out; 147 goto out;
148 148
149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) { 149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
150 err = dev_unicast_add(master, addr->sa_data, ETH_ALEN); 150 err = dev_unicast_add(master, addr->sa_data);
151 if (err < 0) 151 if (err < 0)
152 return err; 152 return err;
153 } 153 }
154 154
155 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 155 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
156 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 156 dev_unicast_delete(master, dev->dev_addr);
157 157
158out: 158out:
159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
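
The net/dsa hunks track an API change in the unicast address helpers: the explicit address-length argument is gone and the device's own dev->addr_len is used internally. As used above, the prototypes reduce to the following (a sketch of the new signatures, not a quote of netdevice.h):

	int dev_unicast_add(struct net_device *dev, void *addr);
	int dev_unicast_delete(struct net_device *dev, void *addr);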
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 6f479fa522c3..8121bf0029e3 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -901,15 +901,10 @@ static void aun_tx_ack(unsigned long seq, int result)
901 struct ec_cb *eb; 901 struct ec_cb *eb;
902 902
903 spin_lock_irqsave(&aun_queue_lock, flags); 903 spin_lock_irqsave(&aun_queue_lock, flags);
904 skb = skb_peek(&aun_queue); 904 skb_queue_walk(&aun_queue, skb) {
905 while (skb && skb != (struct sk_buff *)&aun_queue)
906 {
907 struct sk_buff *newskb = skb->next;
908 eb = (struct ec_cb *)&skb->cb; 905 eb = (struct ec_cb *)&skb->cb;
909 if (eb->seq == seq) 906 if (eb->seq == seq)
910 goto foundit; 907 goto foundit;
911
912 skb = newskb;
913 } 908 }
914 spin_unlock_irqrestore(&aun_queue_lock, flags); 909 spin_unlock_irqrestore(&aun_queue_lock, flags);
915 printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); 910 printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
@@ -982,23 +977,18 @@ static void aun_data_available(struct sock *sk, int slen)
982 977
983static void ab_cleanup(unsigned long h) 978static void ab_cleanup(unsigned long h)
984{ 979{
985 struct sk_buff *skb; 980 struct sk_buff *skb, *n;
986 unsigned long flags; 981 unsigned long flags;
987 982
988 spin_lock_irqsave(&aun_queue_lock, flags); 983 spin_lock_irqsave(&aun_queue_lock, flags);
989 skb = skb_peek(&aun_queue); 984 skb_queue_walk_safe(&aun_queue, skb, n) {
990 while (skb && skb != (struct sk_buff *)&aun_queue)
991 {
992 struct sk_buff *newskb = skb->next;
993 struct ec_cb *eb = (struct ec_cb *)&skb->cb; 985 struct ec_cb *eb = (struct ec_cb *)&skb->cb;
994 if ((jiffies - eb->start) > eb->timeout) 986 if ((jiffies - eb->start) > eb->timeout) {
995 {
996 tx_result(skb->sk, eb->cookie, 987 tx_result(skb->sk, eb->cookie,
997 ECTYPE_TRANSMIT_NOT_PRESENT); 988 ECTYPE_TRANSMIT_NOT_PRESENT);
998 skb_unlink(skb, &aun_queue); 989 skb_unlink(skb, &aun_queue);
999 kfree_skb(skb); 990 kfree_skb(skb);
1000 } 991 }
1001 skb = newskb;
1002 } 992 }
1003 spin_unlock_irqrestore(&aun_queue_lock, flags); 993 spin_unlock_irqrestore(&aun_queue_lock, flags);
1004 994
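
Both econet conversions replace a hand-rolled walk of the queue's circular list with the standard iterators. ab_cleanup() needs the _safe variant because skb_unlink() removes the node being visited, so the iterator must cache the next pointer first. A sketch of how the safe iterator expands, following the usual skbuff.h definition:

	#define skb_queue_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

aun_tx_ack() only reads the list, so the plain skb_queue_walk() (which has no tmp cursor) is enough there.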
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 280352aba403..5a883affecd3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -337,11 +337,6 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
337void ether_setup(struct net_device *dev) 337void ether_setup(struct net_device *dev)
338{ 338{
339 dev->header_ops = &eth_header_ops; 339 dev->header_ops = &eth_header_ops;
340#ifdef CONFIG_COMPAT_NET_DEV_OPS
341 dev->change_mtu = eth_change_mtu;
342 dev->set_mac_address = eth_mac_addr;
343 dev->validate_addr = eth_validate_addr;
344#endif
345 dev->type = ARPHRD_ETHER; 340 dev->type = ARPHRD_ETHER;
346 dev->hard_header_len = ETH_HLEN; 341 dev->hard_header_len = ETH_HLEN;
347 dev->mtu = ETH_DATA_LEN; 342 dev->mtu = ETH_DATA_LEN;
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
new file mode 100644
index 000000000000..1c1de97d264a
--- /dev/null
+++ b/net/ieee802154/Kconfig
@@ -0,0 +1,12 @@
1config IEEE802154
2 tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support (EXPERIMENTAL)"
3 depends on EXPERIMENTAL
4 ---help---
  5	  IEEE Std 802.15.4 defines a low data rate, low power and low
  6	  complexity short range wireless personal area network. It was
  7	  designed to organise networks of sensors, switches and other
  8	  automation devices. The maximum allowed data rate is 250 kb/s
  9	  and the typical personal operating space is around 10m.
10
11 Say Y here to compile LR-WPAN support into the kernel or say M to
 12	  compile it as a module.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
new file mode 100644
index 000000000000..f99338a26100
--- /dev/null
+++ b/net/ieee802154/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o
2nl802154-y := netlink.o nl_policy.o
3af_802154-y := af_ieee802154.o raw.o dgram.o
4
5ccflags-y += -Wall -DDEBUG
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h
new file mode 100644
index 000000000000..b1ec52537522
--- /dev/null
+++ b/net/ieee802154/af802154.h
@@ -0,0 +1,36 @@
1/*
  2 * Internal interfaces for the IEEE 802.15.4 address family.
3 *
4 * Copyright 2007, 2008, 2009 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#ifndef AF802154_H
25#define AF802154_H
26
27struct sk_buff;
 28struct net_device;
29extern struct proto ieee802154_raw_prot;
30extern struct proto ieee802154_dgram_prot;
31void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb);
32int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb);
33struct net_device *ieee802154_get_dev(struct net *net,
34 struct ieee802154_addr *addr);
35
36#endif
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
new file mode 100644
index 000000000000..882a927cefae
--- /dev/null
+++ b/net/ieee802154/af_ieee802154.c
@@ -0,0 +1,372 @@
1/*
  2 * IEEE 802.15.4 socket interface
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
22 */
23
24#include <linux/net.h>
25#include <linux/capability.h>
26#include <linux/module.h>
27#include <linux/if_arp.h>
28#include <linux/if.h>
29#include <linux/termios.h> /* For TIOCOUTQ/INQ */
30#include <linux/list.h>
31#include <net/datalink.h>
32#include <net/psnap.h>
33#include <net/sock.h>
34#include <net/tcp_states.h>
35#include <net/route.h>
36
37#include <net/ieee802154/af_ieee802154.h>
38#include <net/ieee802154/netdevice.h>
39
40#include "af802154.h"
41
 42#define DBG_DUMP(data, len) do { \
43 int i; \
44 pr_debug("function: %s: data: len %d:\n", __func__, len); \
45 for (i = 0; i < len; i++) {\
46 pr_debug("%02x: %02x\n", i, (data)[i]); \
47 } \
 48} while (0)
49
50/*
51 * Utility function for families
52 */
53struct net_device *ieee802154_get_dev(struct net *net,
54 struct ieee802154_addr *addr)
55{
56 struct net_device *dev = NULL;
57 struct net_device *tmp;
58 u16 pan_id, short_addr;
59
60 switch (addr->addr_type) {
61 case IEEE802154_ADDR_LONG:
62 rtnl_lock();
63 dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr);
64 if (dev)
65 dev_hold(dev);
66 rtnl_unlock();
67 break;
68 case IEEE802154_ADDR_SHORT:
69 if (addr->pan_id == 0xffff ||
70 addr->short_addr == IEEE802154_ADDR_UNDEF ||
71 addr->short_addr == 0xffff)
72 break;
73
74 rtnl_lock();
75
76 for_each_netdev(net, tmp) {
77 if (tmp->type != ARPHRD_IEEE802154)
78 continue;
79
80 pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
81 short_addr =
82 ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
83
84 if (pan_id == addr->pan_id &&
85 short_addr == addr->short_addr) {
86 dev = tmp;
87 dev_hold(dev);
88 break;
89 }
90 }
91
92 rtnl_unlock();
93 break;
94 default:
95 pr_warning("Unsupported ieee802154 address type: %d\n",
96 addr->addr_type);
97 break;
98 }
99
100 return dev;
101}
102
103static int ieee802154_sock_release(struct socket *sock)
104{
105 struct sock *sk = sock->sk;
106
107 if (sk) {
108 sock->sk = NULL;
109 sk->sk_prot->close(sk, 0);
110 }
111 return 0;
112}
113static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
114 struct msghdr *msg, size_t len)
115{
116 struct sock *sk = sock->sk;
117
118 return sk->sk_prot->sendmsg(iocb, sk, msg, len);
119}
120
121static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
122 int addr_len)
123{
124 struct sock *sk = sock->sk;
125
126 if (sk->sk_prot->bind)
127 return sk->sk_prot->bind(sk, uaddr, addr_len);
128
129 return sock_no_bind(sock, uaddr, addr_len);
130}
131
132static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
133 int addr_len, int flags)
134{
135 struct sock *sk = sock->sk;
136
137 if (uaddr->sa_family == AF_UNSPEC)
138 return sk->sk_prot->disconnect(sk, flags);
139
140 return sk->sk_prot->connect(sk, uaddr, addr_len);
141}
142
143static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
144 unsigned int cmd)
145{
146 struct ifreq ifr;
147 int ret = -EINVAL;
148 struct net_device *dev;
149
150 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
151 return -EFAULT;
152
153 ifr.ifr_name[IFNAMSIZ-1] = 0;
154
155 dev_load(sock_net(sk), ifr.ifr_name);
 156	dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
 157	if (!dev)
 158		return -ENODEV;
 159	if (dev->type == ARPHRD_IEEE802154 ||
 160	    dev->type == ARPHRD_IEEE802154_PHY)
 161		ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
 162	if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 163		ret = -EFAULT;
 164	dev_put(dev);
165 return ret;
166}
167
168static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
169 unsigned long arg)
170{
171 struct sock *sk = sock->sk;
172
173 switch (cmd) {
174 case SIOCGSTAMP:
175 return sock_get_timestamp(sk, (struct timeval __user *)arg);
176 case SIOCGSTAMPNS:
177 return sock_get_timestampns(sk, (struct timespec __user *)arg);
178 case SIOCGIFADDR:
179 case SIOCSIFADDR:
180 return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
181 cmd);
182 default:
183 if (!sk->sk_prot->ioctl)
184 return -ENOIOCTLCMD;
185 return sk->sk_prot->ioctl(sk, cmd, arg);
186 }
187}
188
189static const struct proto_ops ieee802154_raw_ops = {
190 .family = PF_IEEE802154,
191 .owner = THIS_MODULE,
192 .release = ieee802154_sock_release,
193 .bind = ieee802154_sock_bind,
194 .connect = ieee802154_sock_connect,
195 .socketpair = sock_no_socketpair,
196 .accept = sock_no_accept,
197 .getname = sock_no_getname,
198 .poll = datagram_poll,
199 .ioctl = ieee802154_sock_ioctl,
200 .listen = sock_no_listen,
201 .shutdown = sock_no_shutdown,
202 .setsockopt = sock_common_setsockopt,
203 .getsockopt = sock_common_getsockopt,
204 .sendmsg = ieee802154_sock_sendmsg,
205 .recvmsg = sock_common_recvmsg,
206 .mmap = sock_no_mmap,
207 .sendpage = sock_no_sendpage,
208#ifdef CONFIG_COMPAT
209 .compat_setsockopt = compat_sock_common_setsockopt,
210 .compat_getsockopt = compat_sock_common_getsockopt,
211#endif
212};
213
214static const struct proto_ops ieee802154_dgram_ops = {
215 .family = PF_IEEE802154,
216 .owner = THIS_MODULE,
217 .release = ieee802154_sock_release,
218 .bind = ieee802154_sock_bind,
219 .connect = ieee802154_sock_connect,
220 .socketpair = sock_no_socketpair,
221 .accept = sock_no_accept,
222 .getname = sock_no_getname,
223 .poll = datagram_poll,
224 .ioctl = ieee802154_sock_ioctl,
225 .listen = sock_no_listen,
226 .shutdown = sock_no_shutdown,
227 .setsockopt = sock_common_setsockopt,
228 .getsockopt = sock_common_getsockopt,
229 .sendmsg = ieee802154_sock_sendmsg,
230 .recvmsg = sock_common_recvmsg,
231 .mmap = sock_no_mmap,
232 .sendpage = sock_no_sendpage,
233#ifdef CONFIG_COMPAT
234 .compat_setsockopt = compat_sock_common_setsockopt,
235 .compat_getsockopt = compat_sock_common_getsockopt,
236#endif
237};
238
239
240/*
241 * Create a socket. Initialise the socket, blank the addresses
 242 * and set the state.
243 */
244static int ieee802154_create(struct net *net, struct socket *sock,
245 int protocol)
246{
247 struct sock *sk;
248 int rc;
249 struct proto *proto;
250 const struct proto_ops *ops;
251
252 if (net != &init_net)
253 return -EAFNOSUPPORT;
254
255 switch (sock->type) {
256 case SOCK_RAW:
257 proto = &ieee802154_raw_prot;
258 ops = &ieee802154_raw_ops;
259 break;
260 case SOCK_DGRAM:
261 proto = &ieee802154_dgram_prot;
262 ops = &ieee802154_dgram_ops;
263 break;
264 default:
265 rc = -ESOCKTNOSUPPORT;
266 goto out;
267 }
268
269 rc = -ENOMEM;
270 sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
271 if (!sk)
272 goto out;
273 rc = 0;
274
275 sock->ops = ops;
276
277 sock_init_data(sock, sk);
278 /* FIXME: sk->sk_destruct */
279 sk->sk_family = PF_IEEE802154;
280
 281	/* Start out unbound (SOCK_ZAPPED) */
282 sock_set_flag(sk, SOCK_ZAPPED);
283
284 if (sk->sk_prot->hash)
285 sk->sk_prot->hash(sk);
286
287 if (sk->sk_prot->init) {
288 rc = sk->sk_prot->init(sk);
289 if (rc)
290 sk_common_release(sk);
291 }
292out:
293 return rc;
294}
295
296static struct net_proto_family ieee802154_family_ops = {
297 .family = PF_IEEE802154,
298 .create = ieee802154_create,
299 .owner = THIS_MODULE,
300};
301
302static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
303 struct packet_type *pt, struct net_device *orig_dev)
304{
305 DBG_DUMP(skb->data, skb->len);
306 if (!netif_running(dev))
307 return -ENODEV;
308 pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
309
310 if (!net_eq(dev_net(dev), &init_net))
311 goto drop;
312
313 ieee802154_raw_deliver(dev, skb);
314
315 if (dev->type != ARPHRD_IEEE802154)
316 goto drop;
317
318 if (skb->pkt_type != PACKET_OTHERHOST)
319 return ieee802154_dgram_deliver(dev, skb);
320
321drop:
322 kfree_skb(skb);
323 return NET_RX_DROP;
324}
325
326
327static struct packet_type ieee802154_packet_type = {
328 .type = __constant_htons(ETH_P_IEEE802154),
329 .func = ieee802154_rcv,
330};
331
332static int __init af_ieee802154_init(void)
333{
334 int rc = -EINVAL;
335
336 rc = proto_register(&ieee802154_raw_prot, 1);
337 if (rc)
338 goto out;
339
340 rc = proto_register(&ieee802154_dgram_prot, 1);
341 if (rc)
342 goto err_dgram;
343
344 /* Tell SOCKET that we are alive */
345 rc = sock_register(&ieee802154_family_ops);
346 if (rc)
347 goto err_sock;
348 dev_add_pack(&ieee802154_packet_type);
349
350 rc = 0;
351 goto out;
352
353err_sock:
354 proto_unregister(&ieee802154_dgram_prot);
355err_dgram:
356 proto_unregister(&ieee802154_raw_prot);
357out:
358 return rc;
359}
360static void __exit af_ieee802154_remove(void)
361{
362 dev_remove_pack(&ieee802154_packet_type);
363 sock_unregister(PF_IEEE802154);
364 proto_unregister(&ieee802154_dgram_prot);
365 proto_unregister(&ieee802154_raw_prot);
366}
367
368module_init(af_ieee802154_init);
369module_exit(af_ieee802154_remove);
370
371MODULE_LICENSE("GPL");
372MODULE_ALIAS_NETPROTO(PF_IEEE802154);
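
For context, ieee802154_rcv() above is reached through the dev_add_pack() registration at init time; a MAC driver only has to tag incoming frames with ETH_P_IEEE802154. A hedged driver-side sketch (example_rx and its calling convention are illustrative, not part of this patch):

	/* Illustrative only: hand a received 802.15.4 frame to the stack. */
	static void example_rx(struct net_device *dev, struct sk_buff *skb)
	{
		skb->dev = dev;
		skb->protocol = htons(ETH_P_IEEE802154);
		skb_reset_mac_header(skb);
		netif_rx(skb);	/* eventually dispatches to ieee802154_rcv() */
	}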
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
new file mode 100644
index 000000000000..1779677aed46
--- /dev/null
+++ b/net/ieee802154/dgram.c
@@ -0,0 +1,394 @@
1/*
2 * ZigBee socket interface
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/net.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27#include <linux/list.h>
28#include <net/sock.h>
29#include <net/ieee802154/af_ieee802154.h>
30#include <net/ieee802154/mac_def.h>
31#include <net/ieee802154/netdevice.h>
32
33#include <asm/ioctls.h>
34
35#include "af802154.h"
36
37static HLIST_HEAD(dgram_head);
38static DEFINE_RWLOCK(dgram_lock);
39
40struct dgram_sock {
41 struct sock sk;
42
43 int bound;
44 struct ieee802154_addr src_addr;
45 struct ieee802154_addr dst_addr;
46};
47
48static inline struct dgram_sock *dgram_sk(const struct sock *sk)
49{
50 return container_of(sk, struct dgram_sock, sk);
51}
52
53
54static void dgram_hash(struct sock *sk)
55{
56 write_lock_bh(&dgram_lock);
57 sk_add_node(sk, &dgram_head);
58 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
59 write_unlock_bh(&dgram_lock);
60}
61
62static void dgram_unhash(struct sock *sk)
63{
64 write_lock_bh(&dgram_lock);
65 if (sk_del_node_init(sk))
66 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
67 write_unlock_bh(&dgram_lock);
68}
69
70static int dgram_init(struct sock *sk)
71{
72 struct dgram_sock *ro = dgram_sk(sk);
73
74 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
75 ro->dst_addr.pan_id = 0xffff;
76 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
77 return 0;
78}
79
80static void dgram_close(struct sock *sk, long timeout)
81{
82 sk_common_release(sk);
83}
84
85static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
86{
87 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
88 struct dgram_sock *ro = dgram_sk(sk);
89 int err = 0;
90 struct net_device *dev;
91
92 ro->bound = 0;
93
94 if (len < sizeof(*addr))
95 return -EINVAL;
96
97 if (addr->family != AF_IEEE802154)
98 return -EINVAL;
99
100 lock_sock(sk);
101
102 dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
103 if (!dev) {
104 err = -ENODEV;
105 goto out;
106 }
107
108 if (dev->type != ARPHRD_IEEE802154) {
109 err = -ENODEV;
110 goto out_put;
111 }
112
113 memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr));
114
115 ro->bound = 1;
116out_put:
117 dev_put(dev);
118out:
119 release_sock(sk);
120
121 return err;
122}
123
124static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
125{
126 switch (cmd) {
127 case SIOCOUTQ:
128 {
129 int amount = atomic_read(&sk->sk_wmem_alloc);
130 return put_user(amount, (int __user *)arg);
131 }
132
133 case SIOCINQ:
134 {
135 struct sk_buff *skb;
136 unsigned long amount;
137
138 amount = 0;
139 spin_lock_bh(&sk->sk_receive_queue.lock);
140 skb = skb_peek(&sk->sk_receive_queue);
141 if (skb != NULL) {
142 /*
143 * We will only return the amount
144 * of this packet since that is all
145 * that will be read.
146 */
147 /* FIXME: parse the header for more correct value */
148 amount = skb->len - (3+8+8);
149 }
150 spin_unlock_bh(&sk->sk_receive_queue.lock);
151 return put_user(amount, (int __user *)arg);
152 }
153
154 }
155 return -ENOIOCTLCMD;
156}
157
158/* FIXME: autobind */
159static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
160 int len)
161{
162 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
163 struct dgram_sock *ro = dgram_sk(sk);
164 int err = 0;
165
166 if (len < sizeof(*addr))
167 return -EINVAL;
168
169 if (addr->family != AF_IEEE802154)
170 return -EINVAL;
171
172 lock_sock(sk);
173
174 if (!ro->bound) {
175 err = -ENETUNREACH;
176 goto out;
177 }
178
179 memcpy(&ro->dst_addr, &addr->addr, sizeof(struct ieee802154_addr));
180
181out:
182 release_sock(sk);
183 return err;
184}
185
186static int dgram_disconnect(struct sock *sk, int flags)
187{
188 struct dgram_sock *ro = dgram_sk(sk);
189
190 lock_sock(sk);
191
192 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
193 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
194
195 release_sock(sk);
196
197 return 0;
198}
199
200static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
201 struct msghdr *msg, size_t size)
202{
203 struct net_device *dev;
204 unsigned mtu;
205 struct sk_buff *skb;
206 struct dgram_sock *ro = dgram_sk(sk);
207 int err;
208
209 if (msg->msg_flags & MSG_OOB) {
210 pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
211 return -EOPNOTSUPP;
212 }
213
214 if (!ro->bound)
215 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
216 else
217 dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
218
219 if (!dev) {
220 pr_debug("no dev\n");
221 err = -ENXIO;
222 goto out;
223 }
224 mtu = dev->mtu;
225 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
226
227 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
228 msg->msg_flags & MSG_DONTWAIT,
229 &err);
230 if (!skb)
231 goto out_dev;
232
233 skb_reserve(skb, LL_RESERVED_SPACE(dev));
234
235 skb_reset_network_header(skb);
236
237 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ;
238 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
239 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
240 ro->bound ? &ro->src_addr : NULL, size);
241 if (err < 0)
242 goto out_skb;
243
244 skb_reset_mac_header(skb);
245
246 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
247 if (err < 0)
248 goto out_skb;
249
250 if (size > mtu) {
251 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
252 err = -EINVAL;
253 goto out_skb;
254 }
255
256 skb->dev = dev;
257 skb->sk = sk;
258 skb->protocol = htons(ETH_P_IEEE802154);
259
260 dev_put(dev);
261
262 err = dev_queue_xmit(skb);
263 if (err > 0)
264 err = net_xmit_errno(err);
265
266 return err ?: size;
267
268out_skb:
269 kfree_skb(skb);
270out_dev:
271 dev_put(dev);
272out:
273 return err;
274}
275
276static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
277 struct msghdr *msg, size_t len, int noblock, int flags,
278 int *addr_len)
279{
280 size_t copied = 0;
281 int err = -EOPNOTSUPP;
282 struct sk_buff *skb;
283
284 skb = skb_recv_datagram(sk, flags, noblock, &err);
285 if (!skb)
286 goto out;
287
288 copied = skb->len;
289 if (len < copied) {
290 msg->msg_flags |= MSG_TRUNC;
291 copied = len;
292 }
293
294 /* FIXME: skip headers if necessary ?! */
295 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
296 if (err)
297 goto done;
298
299 sock_recv_timestamp(msg, sk, skb);
300
301 if (flags & MSG_TRUNC)
302 copied = skb->len;
303done:
304 skb_free_datagram(sk, skb);
305out:
306 if (err)
307 return err;
308 return copied;
309}
310
311static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
312{
313 if (sock_queue_rcv_skb(sk, skb) < 0) {
314 atomic_inc(&sk->sk_drops);
315 kfree_skb(skb);
316 return NET_RX_DROP;
317 }
318
319 return NET_RX_SUCCESS;
320}
321
322static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
323 u16 short_addr, struct dgram_sock *ro)
324{
325 if (!ro->bound)
326 return 1;
327
328 if (ro->src_addr.addr_type == IEEE802154_ADDR_LONG &&
329 !memcmp(ro->src_addr.hwaddr, hw_addr, IEEE802154_ADDR_LEN))
330 return 1;
331
332 if (ro->src_addr.addr_type == IEEE802154_ADDR_SHORT &&
333 pan_id == ro->src_addr.pan_id &&
334 short_addr == ro->src_addr.short_addr)
335 return 1;
336
337 return 0;
338}
339
340int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
341{
342 struct sock *sk, *prev = NULL;
343 struct hlist_node *node;
344 int ret = NET_RX_SUCCESS;
345 u16 pan_id, short_addr;
346
347 /* Data frame processing */
348 BUG_ON(dev->type != ARPHRD_IEEE802154);
349
350 pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
351 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
352
353 read_lock(&dgram_lock);
354 sk_for_each(sk, node, &dgram_head) {
355 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
356 dgram_sk(sk))) {
357 if (prev) {
358 struct sk_buff *clone;
359 clone = skb_clone(skb, GFP_ATOMIC);
360 if (clone)
361 dgram_rcv_skb(prev, clone);
362 }
363
364 prev = sk;
365 }
366 }
367
368 if (prev)
369 dgram_rcv_skb(prev, skb);
370 else {
371 kfree_skb(skb);
372 ret = NET_RX_DROP;
373 }
374 read_unlock(&dgram_lock);
375
376 return ret;
377}
378
379struct proto ieee802154_dgram_prot = {
380 .name = "IEEE-802.15.4-MAC",
381 .owner = THIS_MODULE,
382 .obj_size = sizeof(struct dgram_sock),
383 .init = dgram_init,
384 .close = dgram_close,
385 .bind = dgram_bind,
386 .sendmsg = dgram_sendmsg,
387 .recvmsg = dgram_recvmsg,
388 .hash = dgram_hash,
389 .unhash = dgram_unhash,
390 .connect = dgram_connect,
391 .disconnect = dgram_disconnect,
392 .ioctl = dgram_ioctl,
393};
394
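
A minimal userspace sketch of exercising the dgram code above, assuming the sockaddr_ieee802154 / ieee802154_addr layouts mirror the kernel structures in this patch (PF_IEEE802154 and IEEE802154_ADDR_SHORT come from the exported af_ieee802154.h header; treat the exact field names as assumptions):

	#include <string.h>
	#include <sys/socket.h>
	/* plus the exported af_ieee802154.h header for the types below */

	int ieee802154_dgram_example(void)
	{
		struct sockaddr_ieee802154 addr;
		int fd;

		fd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.family = AF_IEEE802154;
		addr.addr.addr_type = IEEE802154_ADDR_SHORT;
		addr.addr.pan_id = 0x0001;	/* assumed example values */
		addr.addr.short_addr = 0x0002;

		/* dgram_bind() above only accepts an address owned by an
		 * ARPHRD_IEEE802154 device, found via ieee802154_get_dev(). */
		return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	}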
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
new file mode 100644
index 000000000000..105ad10876af
--- /dev/null
+++ b/net/ieee802154/netlink.c
@@ -0,0 +1,523 @@
1/*
  2 * Netlink interface for the IEEE 802.15.4 stack
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/kernel.h>
25#include <linux/if_arp.h>
26#include <linux/netdevice.h>
27#include <net/netlink.h>
28#include <net/genetlink.h>
29#include <linux/nl802154.h>
30#include <net/ieee802154/af_ieee802154.h>
31#include <net/ieee802154/nl802154.h>
32#include <net/ieee802154/netdevice.h>
33
34static unsigned int ieee802154_seq_num;
35
36static struct genl_family ieee802154_coordinator_family = {
37 .id = GENL_ID_GENERATE,
38 .hdrsize = 0,
39 .name = IEEE802154_NL_NAME,
40 .version = 1,
41 .maxattr = IEEE802154_ATTR_MAX,
42};
43
44static struct genl_multicast_group ieee802154_coord_mcgrp = {
45 .name = IEEE802154_MCAST_COORD_NAME,
46};
47
48static struct genl_multicast_group ieee802154_beacon_mcgrp = {
49 .name = IEEE802154_MCAST_BEACON_NAME,
50};
51
52/* Requests to userspace */
53static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
54{
55 void *hdr;
56 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
57
58 if (!msg)
59 return NULL;
60
61 hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
62 &ieee802154_coordinator_family, flags, req);
63 if (!hdr) {
64 nlmsg_free(msg);
65 return NULL;
66 }
67
68 return msg;
69}
70
71static int ieee802154_nl_finish(struct sk_buff *msg)
72{
73 /* XXX: nlh is right at the start of msg */
74 void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
75
76 if (!genlmsg_end(msg, hdr))
77 goto out;
78
79 return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id,
80 GFP_ATOMIC);
81out:
82 nlmsg_free(msg);
83 return -ENOBUFS;
84}
85
86int ieee802154_nl_assoc_indic(struct net_device *dev,
87 struct ieee802154_addr *addr, u8 cap)
88{
89 struct sk_buff *msg;
90
91 pr_debug("%s\n", __func__);
92
93 if (addr->addr_type != IEEE802154_ADDR_LONG) {
94 pr_err("%s: received non-long source address!\n", __func__);
95 return -EINVAL;
96 }
97
98 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
99 if (!msg)
100 return -ENOBUFS;
101
102 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
103 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
104 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
105 dev->dev_addr);
106
107 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
108 addr->hwaddr);
109
110 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
111
112 return ieee802154_nl_finish(msg);
113
114nla_put_failure:
115 nlmsg_free(msg);
116 return -ENOBUFS;
117}
118EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
119
120int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
121 u8 status)
122{
123 struct sk_buff *msg;
124
125 pr_debug("%s\n", __func__);
126
127 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
128 if (!msg)
129 return -ENOBUFS;
130
131 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
132 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
133 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
134 dev->dev_addr);
135
136 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
137 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
138
139 return ieee802154_nl_finish(msg);
140
141nla_put_failure:
142 nlmsg_free(msg);
143 return -ENOBUFS;
144}
145EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
146
147int ieee802154_nl_disassoc_indic(struct net_device *dev,
148 struct ieee802154_addr *addr, u8 reason)
149{
150 struct sk_buff *msg;
151
152 pr_debug("%s\n", __func__);
153
154 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
155 if (!msg)
156 return -ENOBUFS;
157
158 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
159 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
160 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
161 dev->dev_addr);
162
163 if (addr->addr_type == IEEE802154_ADDR_LONG)
164 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
165 addr->hwaddr);
166 else
167 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
168 addr->short_addr);
169
170 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
171
172 return ieee802154_nl_finish(msg);
173
174nla_put_failure:
175 nlmsg_free(msg);
176 return -ENOBUFS;
177}
178EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
179
180int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
181{
182 struct sk_buff *msg;
183
184 pr_debug("%s\n", __func__);
185
186 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
187 if (!msg)
188 return -ENOBUFS;
189
190 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
191 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
192 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
193 dev->dev_addr);
194
195 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
196
197 return ieee802154_nl_finish(msg);
198
199nla_put_failure:
200 nlmsg_free(msg);
201 return -ENOBUFS;
202}
203EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
204
205int ieee802154_nl_beacon_indic(struct net_device *dev,
206 u16 panid, u16 coord_addr)
207{
208 struct sk_buff *msg;
209
210 pr_debug("%s\n", __func__);
211
212 msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
213 if (!msg)
214 return -ENOBUFS;
215
216 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
217 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
218 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
219 dev->dev_addr);
220 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
221 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
222
223 return ieee802154_nl_finish(msg);
224
225nla_put_failure:
226 nlmsg_free(msg);
227 return -ENOBUFS;
228}
229EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
230
231int ieee802154_nl_scan_confirm(struct net_device *dev,
232 u8 status, u8 scan_type, u32 unscanned,
233 u8 *edl/* , struct list_head *pan_desc_list */)
234{
235 struct sk_buff *msg;
236
237 pr_debug("%s\n", __func__);
238
239 msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
240 if (!msg)
241 return -ENOBUFS;
242
243 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
244 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
245 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
246 dev->dev_addr);
247
248 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
249 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
250 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
251
252 if (edl)
253 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
254
255 return ieee802154_nl_finish(msg);
256
257nla_put_failure:
258 nlmsg_free(msg);
259 return -ENOBUFS;
260}
261EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
262
263/* Requests from userspace */
264static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
265{
266 struct net_device *dev;
267
268 if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
269 char name[IFNAMSIZ + 1];
270 nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
271 sizeof(name));
272 dev = dev_get_by_name(&init_net, name);
273 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
274 dev = dev_get_by_index(&init_net,
275 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
276 else
277 return NULL;
278
 279	if (dev && dev->type != ARPHRD_IEEE802154) {
 280		dev_put(dev);
 281		dev = NULL;
 282	}
283
284 return dev;
285}
286
287static int ieee802154_associate_req(struct sk_buff *skb,
288 struct genl_info *info)
289{
290 struct net_device *dev;
291 struct ieee802154_addr addr;
292 int ret = -EINVAL;
293
294 if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
295 !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
296 (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
297 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
298 !info->attrs[IEEE802154_ATTR_CAPABILITY])
299 return -EINVAL;
300
301 dev = ieee802154_nl_get_dev(info);
302 if (!dev)
303 return -ENODEV;
304
305 if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
306 addr.addr_type = IEEE802154_ADDR_LONG;
307 nla_memcpy(addr.hwaddr,
308 info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
309 IEEE802154_ADDR_LEN);
310 } else {
311 addr.addr_type = IEEE802154_ADDR_SHORT;
312 addr.short_addr = nla_get_u16(
313 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
314 }
315 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
316
317 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
318 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
319 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
320
321 dev_put(dev);
322 return ret;
323}
324
325static int ieee802154_associate_resp(struct sk_buff *skb,
326 struct genl_info *info)
327{
328 struct net_device *dev;
329 struct ieee802154_addr addr;
330 int ret = -EINVAL;
331
332 if (!info->attrs[IEEE802154_ATTR_STATUS] ||
333 !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
334 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
335 return -EINVAL;
336
337 dev = ieee802154_nl_get_dev(info);
338 if (!dev)
339 return -ENODEV;
340
341 addr.addr_type = IEEE802154_ADDR_LONG;
342 nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
343 IEEE802154_ADDR_LEN);
344 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
345
346
347 ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
348 nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
349 nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
350
351 dev_put(dev);
352 return ret;
353}
354
355static int ieee802154_disassociate_req(struct sk_buff *skb,
356 struct genl_info *info)
357{
358 struct net_device *dev;
359 struct ieee802154_addr addr;
360 int ret = -EINVAL;
361
362 if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
363 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
364 !info->attrs[IEEE802154_ATTR_REASON])
365 return -EINVAL;
366
367 dev = ieee802154_nl_get_dev(info);
368 if (!dev)
369 return -ENODEV;
370
371 if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
372 addr.addr_type = IEEE802154_ADDR_LONG;
373 nla_memcpy(addr.hwaddr,
374 info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
375 IEEE802154_ADDR_LEN);
376 } else {
377 addr.addr_type = IEEE802154_ADDR_SHORT;
378 addr.short_addr = nla_get_u16(
379 info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
380 }
381 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
382
383 ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
384 nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
385
386 dev_put(dev);
387 return ret;
388}
389
390/*
391 * PANid, channel, beacon_order = 15, superframe_order = 15,
392 * PAN_coordinator, battery_life_extension = 0,
393 * coord_realignment = 0, security_enable = 0
394*/
395static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
396{
397 struct net_device *dev;
398 struct ieee802154_addr addr;
399
400 u8 channel, bcn_ord, sf_ord;
401 int pan_coord, blx, coord_realign;
402 int ret;
403
404 if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
405 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
406 !info->attrs[IEEE802154_ATTR_CHANNEL] ||
407 !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
408 !info->attrs[IEEE802154_ATTR_SF_ORD] ||
409 !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
410 !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
411 !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
412 )
413 return -EINVAL;
414
415 dev = ieee802154_nl_get_dev(info);
416 if (!dev)
417 return -ENODEV;
418
419 addr.addr_type = IEEE802154_ADDR_SHORT;
420 addr.short_addr = nla_get_u16(
421 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
422 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
423
424 channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
425 bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
426 sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
427 pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
428 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
429 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
430
431 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel,
432 bcn_ord, sf_ord, pan_coord, blx, coord_realign);
433
434 dev_put(dev);
435 return ret;
436}
437
438static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
439{
440 struct net_device *dev;
441 int ret;
442 u8 type;
443 u32 channels;
444 u8 duration;
445
446 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
447 !info->attrs[IEEE802154_ATTR_CHANNELS] ||
448 !info->attrs[IEEE802154_ATTR_DURATION])
449 return -EINVAL;
450
451 dev = ieee802154_nl_get_dev(info);
452 if (!dev)
453 return -ENODEV;
454
455 type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
456 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
457 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
458
459 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
460 duration);
461
462 dev_put(dev);
463 return ret;
464}
465
466#define IEEE802154_OP(_cmd, _func) \
467 { \
468 .cmd = _cmd, \
469 .policy = ieee802154_policy, \
470 .doit = _func, \
471 .dumpit = NULL, \
472 .flags = GENL_ADMIN_PERM, \
473 }
474
475static struct genl_ops ieee802154_coordinator_ops[] = {
476 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
477 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
478 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
479 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
480 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
481};
482
483static int __init ieee802154_nl_init(void)
484{
485 int rc;
486 int i;
487
488 rc = genl_register_family(&ieee802154_coordinator_family);
489 if (rc)
490 goto fail;
491
492 rc = genl_register_mc_group(&ieee802154_coordinator_family,
493 &ieee802154_coord_mcgrp);
494 if (rc)
495 goto fail;
496
497 rc = genl_register_mc_group(&ieee802154_coordinator_family,
498 &ieee802154_beacon_mcgrp);
499 if (rc)
500 goto fail;
501
502
503 for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
504 rc = genl_register_ops(&ieee802154_coordinator_family,
505 &ieee802154_coordinator_ops[i]);
506 if (rc)
507 goto fail;
508 }
509
510 return 0;
511
512fail:
513 genl_unregister_family(&ieee802154_coordinator_family);
514 return rc;
515}
516module_init(ieee802154_nl_init);
517
518static void __exit ieee802154_nl_exit(void)
519{
520 genl_unregister_family(&ieee802154_coordinator_family);
521}
522module_exit(ieee802154_nl_exit);
523
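
Each function in netlink.c ends with an nla_put_failure label because the NLA_PUT* macros jump there on error rather than returning one. The underlying pattern follows the standard netlink.h convention, roughly:

	#define NLA_PUT(skb, attrtype, attrlen, data)			\
		do {							\
			if (nla_put(skb, attrtype, attrlen, data) < 0)	\
				goto nla_put_failure;			\
		} while (0)

The typed variants (NLA_PUT_U8, NLA_PUT_U16, NLA_PUT_STRING, ...) wrap the same jump around their respective nla_put_*() helpers.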
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
new file mode 100644
index 000000000000..c7d71d1adcac
--- /dev/null
+++ b/net/ieee802154/nl_policy.c
@@ -0,0 +1,52 @@
1/*
  2 * nl_policy.c
3 *
4 * Copyright (C) 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <net/netlink.h>
23#include <linux/nl802154.h>
24
25#define NLA_HW_ADDR NLA_U64
26
27struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
30
31 [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, },
32 [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
33 [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
34 [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
35 [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
36 [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
37 [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
38 [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
39 [IEEE802154_ATTR_SRC_SHORT_ADDR] = { .type = NLA_U16, },
40 [IEEE802154_ATTR_SRC_HW_ADDR] = { .type = NLA_HW_ADDR, },
41 [IEEE802154_ATTR_SRC_PAN_ID] = { .type = NLA_U16, },
42 [IEEE802154_ATTR_DEST_SHORT_ADDR] = { .type = NLA_U16, },
43 [IEEE802154_ATTR_DEST_HW_ADDR] = { .type = NLA_HW_ADDR, },
44 [IEEE802154_ATTR_DEST_PAN_ID] = { .type = NLA_U16, },
45
46 [IEEE802154_ATTR_CAPABILITY] = { .type = NLA_U8, },
47 [IEEE802154_ATTR_REASON] = { .type = NLA_U8, },
48 [IEEE802154_ATTR_SCAN_TYPE] = { .type = NLA_U8, },
49 [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, },
50 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
51 [IEEE802154_ATTR_ED_LIST] = { .len = 27 },
52};
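
This policy table is enforced by the generic netlink core before any of the .doit handlers in netlink.c run, which is why those handlers only test attribute presence, never types or lengths. Roughly, and only as a sketch of what the genetlink core does on dispatch (not a quote of it):

	/* inside genl_rcv_msg(), before calling ops->doit() */
	err = nlmsg_parse(nlh, GENL_HDRLEN + family->hdrsize,
			  family->attrbuf, family->maxattr, ops->policy);
	if (err < 0)
		return err;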
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
new file mode 100644
index 000000000000..fca44d59f97e
--- /dev/null
+++ b/net/ieee802154/raw.c
@@ -0,0 +1,254 @@
1/*
2 * Raw IEEE 802.15.4 sockets
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/net.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27#include <linux/list.h>
28#include <net/sock.h>
29#include <net/ieee802154/af_ieee802154.h>
30
31#include "af802154.h"
32
33static HLIST_HEAD(raw_head);
34static DEFINE_RWLOCK(raw_lock);
35
36static void raw_hash(struct sock *sk)
37{
38 write_lock_bh(&raw_lock);
39 sk_add_node(sk, &raw_head);
40 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
41 write_unlock_bh(&raw_lock);
42}
43
44static void raw_unhash(struct sock *sk)
45{
46 write_lock_bh(&raw_lock);
47 if (sk_del_node_init(sk))
48 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
49 write_unlock_bh(&raw_lock);
50}
51
52static void raw_close(struct sock *sk, long timeout)
53{
54 sk_common_release(sk);
55}
56
57static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len)
58{
59 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
60 int err = 0;
61 struct net_device *dev = NULL;
62
63 if (len < sizeof(*addr))
64 return -EINVAL;
65
66 if (addr->family != AF_IEEE802154)
67 return -EINVAL;
68
69 lock_sock(sk);
70
71 dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
72 if (!dev) {
73 err = -ENODEV;
74 goto out;
75 }
76
77 if (dev->type != ARPHRD_IEEE802154_PHY &&
78 dev->type != ARPHRD_IEEE802154) {
79 err = -ENODEV;
80 goto out_put;
81 }
82
83 sk->sk_bound_dev_if = dev->ifindex;
84 sk_dst_reset(sk);
85
86out_put:
87 dev_put(dev);
88out:
89 release_sock(sk);
90
91 return err;
92}
93
94static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
95 int addr_len)
96{
 97	return -EOPNOTSUPP;
98}
99
100static int raw_disconnect(struct sock *sk, int flags)
101{
102 return 0;
103}
104
105static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
106 size_t size)
107{
108 struct net_device *dev;
109 unsigned mtu;
110 struct sk_buff *skb;
111 int err;
112
113 if (msg->msg_flags & MSG_OOB) {
114 pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
115 return -EOPNOTSUPP;
116 }
117
118 lock_sock(sk);
119 if (!sk->sk_bound_dev_if)
120 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
121 else
122 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
123 release_sock(sk);
124
125 if (!dev) {
126 pr_debug("no dev\n");
127 err = -ENXIO;
128 goto out;
129 }
130
131 mtu = dev->mtu;
132 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
133
134 if (size > mtu) {
135 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
136 err = -EINVAL;
137 goto out_dev;
138 }
139
140 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
141 msg->msg_flags & MSG_DONTWAIT, &err);
142 if (!skb)
143 goto out_dev;
144
145 skb_reserve(skb, LL_RESERVED_SPACE(dev));
146
147 skb_reset_mac_header(skb);
148 skb_reset_network_header(skb);
149
150 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
151 if (err < 0)
152 goto out_skb;
153
154 skb->dev = dev;
155 skb->sk = sk;
156 skb->protocol = htons(ETH_P_IEEE802154);
157
158 dev_put(dev);
159
160 err = dev_queue_xmit(skb);
161 if (err > 0)
162 err = net_xmit_errno(err);
163
164 return err ?: size;
165
166out_skb:
167 kfree_skb(skb);
168out_dev:
169 dev_put(dev);
170out:
171 return err;
172}
173
174static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
175 size_t len, int noblock, int flags, int *addr_len)
176{
177 size_t copied = 0;
178 int err = -EOPNOTSUPP;
179 struct sk_buff *skb;
180
181 skb = skb_recv_datagram(sk, flags, noblock, &err);
182 if (!skb)
183 goto out;
184
185 copied = skb->len;
186 if (len < copied) {
187 msg->msg_flags |= MSG_TRUNC;
188 copied = len;
189 }
190
191 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
192 if (err)
193 goto done;
194
195 sock_recv_timestamp(msg, sk, skb);
196
197 if (flags & MSG_TRUNC)
198 copied = skb->len;
199done:
200 skb_free_datagram(sk, skb);
201out:
202 if (err)
203 return err;
204 return copied;
205}
206
207static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
208{
209 if (sock_queue_rcv_skb(sk, skb) < 0) {
210 atomic_inc(&sk->sk_drops);
211 kfree_skb(skb);
212 return NET_RX_DROP;
213 }
214
215 return NET_RX_SUCCESS;
216}
217
218
219void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
220{
221 struct sock *sk;
222 struct hlist_node *node;
223
224 read_lock(&raw_lock);
225 sk_for_each(sk, node, &raw_head) {
226 bh_lock_sock(sk);
227 if (!sk->sk_bound_dev_if ||
228 sk->sk_bound_dev_if == dev->ifindex) {
229
230 struct sk_buff *clone;
231
232 clone = skb_clone(skb, GFP_ATOMIC);
233 if (clone)
234 raw_rcv_skb(sk, clone);
235 }
236 bh_unlock_sock(sk);
237 }
238 read_unlock(&raw_lock);
239}
240
241struct proto ieee802154_raw_prot = {
242 .name = "IEEE-802.15.4-RAW",
243 .owner = THIS_MODULE,
244 .obj_size = sizeof(struct sock),
245 .close = raw_close,
246 .bind = raw_bind,
247 .sendmsg = raw_sendmsg,
248 .recvmsg = raw_recvmsg,
249 .hash = raw_hash,
250 .unhash = raw_unhash,
251 .connect = raw_connect,
252 .disconnect = raw_disconnect,
253};
254
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 5b919f7b45db..70491d9035eb 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -273,29 +273,20 @@ config IP_PIMSM_V2
273 you want to play with it. 273 you want to play with it.
274 274
275config ARPD 275config ARPD
276 bool "IP: ARP daemon support (EXPERIMENTAL)" 276 bool "IP: ARP daemon support"
277 depends on EXPERIMENTAL
278 ---help--- 277 ---help---
279 Normally, the kernel maintains an internal cache which maps IP 278 The kernel maintains an internal cache which maps IP addresses to
280 addresses to hardware addresses on the local network, so that 279 hardware addresses on the local network, so that Ethernet/Token Ring/
281 Ethernet/Token Ring/ etc. frames are sent to the proper address on 280 etc. frames are sent to the proper address on the physical networking
282	  Ethernet/Token Ring/ etc. frames are sent to the proper address on 281	  layer. Normally, the kernel uses the ARP protocol to resolve these
283 hundred directly connected hosts or less, keeping this address 282 mappings.
284 resolution (ARP) cache inside the kernel works well. However, 283
285	  maintaining an internal ARP cache does not work well for very large 284	  Saying Y here adds support for a user space daemon to do this
286 switched networks, and will use a lot of kernel memory if TCP/IP 285 resolution instead. This is useful for implementing an alternate
287 connections are made to many machines on the network. 286 address resolution protocol (e.g. NHRP on mGRE tunnels) and also for
288 287 testing purposes.
289 If you say Y here, the kernel's internal ARP cache will never grow 288
290 to more than 256 entries (the oldest entries are expired in a LIFO 289 If unsure, say N.
291 manner) and communication will be attempted with the user space ARP
292 daemon arpd. Arpd then answers the address resolution request either
293 from its own cache or by asking the net.
294
295 This code is experimental and also obsolete. If you want to use it,
296 you need to find a version of the daemon arpd on the net somewhere,
297 and you should also say Y to "Kernel/User network link driver",
298 below. If unsure, say N.
299 290
300config SYN_COOKIES 291config SYN_COOKIES
301 bool "IP: TCP syncookie support (disabled per default)" 292 bool "IP: TCP syncookie support (disabled per default)"
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7f03373b8c07..566ea6c4321d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -116,7 +116,6 @@
116#include <linux/mroute.h> 116#include <linux/mroute.h>
117#endif 117#endif
118 118
119extern void ip_mc_drop_socket(struct sock *sk);
120 119
121/* The inetsw table contains everything that inet_create needs to 120/* The inetsw table contains everything that inet_create needs to
122 * build a new socket. 121 * build a new socket.
@@ -375,6 +374,7 @@ lookup_protocol:
375 inet->uc_ttl = -1; 374 inet->uc_ttl = -1;
376 inet->mc_loop = 1; 375 inet->mc_loop = 1;
377 inet->mc_ttl = 1; 376 inet->mc_ttl = 1;
377 inet->mc_all = 1;
378 inet->mc_index = 0; 378 inet->mc_index = 0;
379 inet->mc_list = NULL; 379 inet->mc_list = NULL;
380 380
@@ -1003,8 +1003,6 @@ void inet_register_protosw(struct inet_protosw *p)
1003out: 1003out:
1004 spin_unlock_bh(&inetsw_lock); 1004 spin_unlock_bh(&inetsw_lock);
1005 1005
1006 synchronize_net();
1007
1008 return; 1006 return;
1009 1007
1010out_permanent: 1008out_permanent:
@@ -1248,13 +1246,20 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	struct sk_buff **pp = NULL;
 	struct sk_buff *p;
 	struct iphdr *iph;
+	unsigned int hlen;
+	unsigned int off;
+	unsigned int id;
 	int flush = 1;
 	int proto;
-	int id;
 
-	iph = skb_gro_header(skb, sizeof(*iph));
-	if (unlikely(!iph))
-		goto out;
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*iph);
+	iph = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		iph = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!iph))
+			goto out;
+	}
 
 	proto = iph->protocol & (MAX_INET_PROTOS - 1);
 
@@ -1269,9 +1274,9 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
 		goto out_unlock;
 
-	flush = ntohs(iph->tot_len) != skb_gro_len(skb) ||
-		iph->frag_off != htons(IP_DF);
-	id = ntohs(iph->id);
+	id = ntohl(*(u32 *)&iph->id);
+	flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+	id >>= 16;
 
 	for (p = *head; p; p = p->next) {
 		struct iphdr *iph2;
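The two hunks above rework inet_gro_receive() in two ways: header access splits into a fast path (header already in the linear area) with a slow fallback, and the tot_len/frag_off tests collapse into a single xor-based flush word. A condensed sketch of the fast/slow access pattern using the helpers as they appear in the hunk; the wrapper function itself is illustrative, not part of the patch:

static inline struct iphdr *gro_ipv4_header(struct sk_buff *skb)
{
	unsigned int off  = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct iphdr);
	struct iphdr *iph = skb_gro_header_fast(skb, off);	/* no bounds check */

	if (skb_gro_header_hard(skb, hlen))		/* not in linear data? */
		iph = skb_gro_header_slow(skb, hlen, off);	/* may pull */

	return iph;	/* NULL if the slow-path pull failed */
}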
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f11931c18381..8a3881e28aca 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -468,13 +468,13 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
468 __be32 paddr; 468 __be32 paddr;
469 struct neighbour *n; 469 struct neighbour *n;
470 470
471 if (!skb->dst) { 471 if (!skb_dst(skb)) {
472 printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); 472 printk(KERN_DEBUG "arp_find is called with dst==NULL\n");
473 kfree_skb(skb); 473 kfree_skb(skb);
474 return 1; 474 return 1;
475 } 475 }
476 476
477 paddr = skb->rtable->rt_gateway; 477 paddr = skb_rtable(skb)->rt_gateway;
478 478
479 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) 479 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev))
480 return 0; 480 return 0;
@@ -817,7 +817,7 @@ static int arp_process(struct sk_buff *skb)
817 if (arp->ar_op == htons(ARPOP_REQUEST) && 817 if (arp->ar_op == htons(ARPOP_REQUEST) &&
818 ip_route_input(skb, tip, sip, 0, dev) == 0) { 818 ip_route_input(skb, tip, sip, 0, dev) == 0) {
819 819
820 rt = skb->rtable; 820 rt = skb_rtable(skb);
821 addr_type = rt->rt_type; 821 addr_type = rt->rt_type;
822 822
823 if (addr_type == RTN_LOCAL) { 823 if (addr_type == RTN_LOCAL) {
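These arp.c changes are the first of many identical conversions in this section: direct skb->dst and skb->rtable loads become skb_dst() and skb_rtable() calls. A sketch of what the accessors look like, reconstructed from their use here; the series keeps the dst as a single word in the skb, and these definitions are not part of the hunks themselves:

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->_skb_dst;	/* stored as unsigned long */
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	/* an IPv4 route embeds its dst_entry at offset 0 of struct rtable */
	return (struct rtable *)skb_dst(skb);
}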
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 126bb911880f..3863c3a4223f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1347,7 +1347,8 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1347 struct net *net = ctl->extra2; 1347 struct net *net = ctl->extra2;
1348 1348
1349 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1349 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1350 rtnl_lock(); 1350 if (!rtnl_trylock())
1351 return restart_syscall();
1351 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1352 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1352 inet_forward_change(net); 1353 inet_forward_change(net);
1353 } else if (*valp) { 1354 } else if (*valp) {
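The devinet change trades an unconditional rtnl_lock() for a trylock: a sysctl writer that cannot take RTNL immediately backs all the way out and lets the kernel restart the syscall, rather than sleeping with other locks held and risking a deadlock. The pattern in isolation, as a skeleton with a hypothetical name:

static int forwarding_sysctl_write(struct net *net)
{
	if (!rtnl_trylock())
		return restart_syscall();	/* retried from the top, transparently */

	/* ... mutate forwarding state under RTNL, as the hunk does ... */

	rtnl_unlock();
	return 0;
}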
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cafcc49d0993..e2f950592566 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -40,7 +40,6 @@
40#include <net/route.h> 40#include <net/route.h>
41#include <net/tcp.h> 41#include <net/tcp.h>
42#include <net/sock.h> 42#include <net/sock.h>
43#include <net/icmp.h>
44#include <net/arp.h> 43#include <net/arp.h>
45#include <net/ip_fib.h> 44#include <net/ip_fib.h>
46#include <net/rtnetlink.h> 45#include <net/rtnetlink.h>
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index ded8c44fb848..ecd39454235c 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -263,7 +263,6 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
263 263
264 err = fib_semantic_match(&f->fn_alias, 264 err = fib_semantic_match(&f->fn_alias,
265 flp, res, 265 flp, res,
266 f->fn_key, fz->fz_mask,
267 fz->fz_order); 266 fz->fz_order);
268 if (err <= 0) 267 if (err <= 0)
269 goto out; 268 goto out;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 2c1623d2768b..637b133973bd 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -22,8 +22,7 @@ struct fib_alias {
22/* Exported by fib_semantics.c */ 22/* Exported by fib_semantics.c */
23extern int fib_semantic_match(struct list_head *head, 23extern int fib_semantic_match(struct list_head *head,
24 const struct flowi *flp, 24 const struct flowi *flp,
25 struct fib_result *res, __be32 zone, __be32 mask, 25 struct fib_result *res, int prefixlen);
26 int prefixlen);
27extern void fib_release_info(struct fib_info *); 26extern void fib_release_info(struct fib_info *);
28extern struct fib_info *fib_create_info(struct fib_config *cfg); 27extern struct fib_info *fib_create_info(struct fib_config *cfg);
29extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); 28extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6080d7120821..92d9d97ec5e3 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -134,7 +134,7 @@ static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
134}; 134};
135 135
136static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 136static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
137 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 137 struct fib_rule_hdr *frh,
138 struct nlattr **tb) 138 struct nlattr **tb)
139{ 139{
140 struct net *net = sock_net(skb->sk); 140 struct net *net = sock_net(skb->sk);
@@ -209,7 +209,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
209} 209}
210 210
211static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 211static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
212 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 212 struct fib_rule_hdr *frh)
213{ 213{
214 struct fib4_rule *rule4 = (struct fib4_rule *) rule; 214 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
215 215
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f831df500907..9b096d6ff3f2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -866,8 +866,7 @@ failure:
866 866
867/* Note! fib_semantic_match intentionally uses RCU list functions. */ 867/* Note! fib_semantic_match intentionally uses RCU list functions. */
868int fib_semantic_match(struct list_head *head, const struct flowi *flp, 868int fib_semantic_match(struct list_head *head, const struct flowi *flp,
869 struct fib_result *res, __be32 zone, __be32 mask, 869 struct fib_result *res, int prefixlen)
870 int prefixlen)
871{ 870{
872 struct fib_alias *fa; 871 struct fib_alias *fa;
873 int nh_sel = 0; 872 int nh_sel = 0;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 33c7c85dfe40..d1a39b1277d6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -123,6 +123,7 @@ struct tnode {
123 union { 123 union {
124 struct rcu_head rcu; 124 struct rcu_head rcu;
125 struct work_struct work; 125 struct work_struct work;
126 struct tnode *tnode_free;
126 }; 127 };
127 struct node *child[0]; 128 struct node *child[0];
128}; 129};
@@ -161,6 +162,8 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
161static struct node *resize(struct trie *t, struct tnode *tn); 162static struct node *resize(struct trie *t, struct tnode *tn);
162static struct tnode *inflate(struct trie *t, struct tnode *tn); 163static struct tnode *inflate(struct trie *t, struct tnode *tn);
163static struct tnode *halve(struct trie *t, struct tnode *tn); 164static struct tnode *halve(struct trie *t, struct tnode *tn);
165/* tnodes to free after resize(); protected by RTNL */
166static struct tnode *tnode_free_head;
164 167
165static struct kmem_cache *fn_alias_kmem __read_mostly; 168static struct kmem_cache *fn_alias_kmem __read_mostly;
166static struct kmem_cache *trie_leaf_kmem __read_mostly; 169static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -385,6 +388,29 @@ static inline void tnode_free(struct tnode *tn)
385 call_rcu(&tn->rcu, __tnode_free_rcu); 388 call_rcu(&tn->rcu, __tnode_free_rcu);
386} 389}
387 390
391static void tnode_free_safe(struct tnode *tn)
392{
393 BUG_ON(IS_LEAF(tn));
394
395 if (node_parent((struct node *) tn)) {
396 tn->tnode_free = tnode_free_head;
397 tnode_free_head = tn;
398 } else {
399 tnode_free(tn);
400 }
401}
402
403static void tnode_free_flush(void)
404{
405 struct tnode *tn;
406
407 while ((tn = tnode_free_head)) {
408 tnode_free_head = tn->tnode_free;
409 tn->tnode_free = NULL;
410 tnode_free(tn);
411 }
412}
413
388static struct leaf *leaf_new(void) 414static struct leaf *leaf_new(void)
389{ 415{
390 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); 416 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
@@ -495,7 +521,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
495 521
496 /* No children */ 522 /* No children */
497 if (tn->empty_children == tnode_child_length(tn)) { 523 if (tn->empty_children == tnode_child_length(tn)) {
498 tnode_free(tn); 524 tnode_free_safe(tn);
499 return NULL; 525 return NULL;
500 } 526 }
501 /* One child */ 527 /* One child */
@@ -509,7 +535,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
509 535
510 /* compress one level */ 536 /* compress one level */
511 node_set_parent(n, NULL); 537 node_set_parent(n, NULL);
512 tnode_free(tn); 538 tnode_free_safe(tn);
513 return n; 539 return n;
514 } 540 }
515 /* 541 /*
@@ -670,7 +696,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
670 /* compress one level */ 696 /* compress one level */
671 697
672 node_set_parent(n, NULL); 698 node_set_parent(n, NULL);
673 tnode_free(tn); 699 tnode_free_safe(tn);
674 return n; 700 return n;
675 } 701 }
676 702
@@ -756,7 +782,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
756 put_child(t, tn, 2*i, inode->child[0]); 782 put_child(t, tn, 2*i, inode->child[0]);
757 put_child(t, tn, 2*i+1, inode->child[1]); 783 put_child(t, tn, 2*i+1, inode->child[1]);
758 784
759 tnode_free(inode); 785 tnode_free_safe(inode);
760 continue; 786 continue;
761 } 787 }
762 788
@@ -801,9 +827,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
801 put_child(t, tn, 2*i, resize(t, left)); 827 put_child(t, tn, 2*i, resize(t, left));
802 put_child(t, tn, 2*i+1, resize(t, right)); 828 put_child(t, tn, 2*i+1, resize(t, right));
803 829
804 tnode_free(inode); 830 tnode_free_safe(inode);
805 } 831 }
806 tnode_free(oldtnode); 832 tnode_free_safe(oldtnode);
807 return tn; 833 return tn;
808nomem: 834nomem:
809 { 835 {
@@ -885,7 +911,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
885 put_child(t, newBinNode, 1, right); 911 put_child(t, newBinNode, 1, right);
886 put_child(t, tn, i/2, resize(t, newBinNode)); 912 put_child(t, tn, i/2, resize(t, newBinNode));
887 } 913 }
888 tnode_free(oldtnode); 914 tnode_free_safe(oldtnode);
889 return tn; 915 return tn;
890nomem: 916nomem:
891 { 917 {
@@ -989,7 +1015,6 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
989 t_key cindex, key; 1015 t_key cindex, key;
990 struct tnode *tp; 1016 struct tnode *tp;
991 1017
992 preempt_disable();
993 key = tn->key; 1018 key = tn->key;
994 1019
995 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) { 1020 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
@@ -1001,16 +1026,18 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
1001 (struct node *)tn, wasfull); 1026 (struct node *)tn, wasfull);
1002 1027
1003 tp = node_parent((struct node *) tn); 1028 tp = node_parent((struct node *) tn);
1029 tnode_free_flush();
1004 if (!tp) 1030 if (!tp)
1005 break; 1031 break;
1006 tn = tp; 1032 tn = tp;
1007 } 1033 }
1008 1034
1009 /* Handle last (top) tnode */ 1035 /* Handle last (top) tnode */
1010 if (IS_TNODE(tn)) 1036 if (IS_TNODE(tn)) {
1011 tn = (struct tnode *)resize(t, (struct tnode *)tn); 1037 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1038 tnode_free_flush();
1039 }
1012 1040
1013 preempt_enable();
1014 return (struct node *)tn; 1041 return (struct node *)tn;
1015} 1042}
1016 1043
@@ -1351,8 +1378,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
1351 if (l->key != (key & ntohl(mask))) 1378 if (l->key != (key & ntohl(mask)))
1352 continue; 1379 continue;
1353 1380
1354 err = fib_semantic_match(&li->falh, flp, res, 1381 err = fib_semantic_match(&li->falh, flp, res, plen);
1355 htonl(l->key), mask, plen);
1356 1382
1357#ifdef CONFIG_IP_FIB_TRIE_STATS 1383#ifdef CONFIG_IP_FIB_TRIE_STATS
1358 if (err <= 0) 1384 if (err <= 0)
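tnode_free_safe()/tnode_free_flush() above defer freeing of tnodes that are still linked into the trie, queueing them on an RTNL-protected singly linked list and releasing the whole batch only between rebalance steps (each release still goes through call_rcu() via tnode_free()). The same shape in a generic, self-contained form, names hypothetical:

struct deferred_node {
	struct deferred_node *next;
};

static struct deferred_node *free_head;	/* protected by one big lock (RTNL here) */

static void defer_free(struct deferred_node *n)
{
	n->next = free_head;		/* LIFO push, no extra locking needed */
	free_head = n;
}

static void flush_deferred(void (*free_fn)(struct deferred_node *))
{
	struct deferred_node *n;

	while ((n = free_head)) {
		free_head = n->next;
		n->next = NULL;
		free_fn(n);		/* e.g. hands the node to call_rcu() */
	}
}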
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 3f50807237e0..97c410e84388 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
356static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) 356static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
357{ 357{
358 struct ipcm_cookie ipc; 358 struct ipcm_cookie ipc;
359 struct rtable *rt = skb->rtable; 359 struct rtable *rt = skb_rtable(skb);
360 struct net *net = dev_net(rt->u.dst.dev); 360 struct net *net = dev_net(rt->u.dst.dev);
361 struct sock *sk; 361 struct sock *sk;
362 struct inet_sock *inet; 362 struct inet_sock *inet;
@@ -416,7 +416,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
416 struct iphdr *iph; 416 struct iphdr *iph;
417 int room; 417 int room;
418 struct icmp_bxm icmp_param; 418 struct icmp_bxm icmp_param;
419 struct rtable *rt = skb_in->rtable; 419 struct rtable *rt = skb_rtable(skb_in);
420 struct ipcm_cookie ipc; 420 struct ipcm_cookie ipc;
421 __be32 saddr; 421 __be32 saddr;
422 u8 tos; 422 u8 tos;
@@ -591,13 +591,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
591 goto relookup_failed; 591 goto relookup_failed;
592 592
593 /* Ugh! */ 593 /* Ugh! */
594 odst = skb_in->dst; 594 odst = skb_dst(skb_in);
595 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, 595 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
596 RT_TOS(tos), rt2->u.dst.dev); 596 RT_TOS(tos), rt2->u.dst.dev);
597 597
598 dst_release(&rt2->u.dst); 598 dst_release(&rt2->u.dst);
599 rt2 = skb_in->rtable; 599 rt2 = skb_rtable(skb_in);
600 skb_in->dst = odst; 600 skb_dst_set(skb_in, odst);
601 } 601 }
602 602
603 if (err) 603 if (err)
@@ -659,7 +659,7 @@ static void icmp_unreach(struct sk_buff *skb)
659 u32 info = 0; 659 u32 info = 0;
660 struct net *net; 660 struct net *net;
661 661
662 net = dev_net(skb->dst->dev); 662 net = dev_net(skb_dst(skb)->dev);
663 663
664 /* 664 /*
665 * Incomplete header ? 665 * Incomplete header ?
@@ -822,7 +822,7 @@ static void icmp_echo(struct sk_buff *skb)
822{ 822{
823 struct net *net; 823 struct net *net;
824 824
825 net = dev_net(skb->dst->dev); 825 net = dev_net(skb_dst(skb)->dev);
826 if (!net->ipv4.sysctl_icmp_echo_ignore_all) { 826 if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
827 struct icmp_bxm icmp_param; 827 struct icmp_bxm icmp_param;
828 828
@@ -873,7 +873,7 @@ static void icmp_timestamp(struct sk_buff *skb)
873out: 873out:
874 return; 874 return;
875out_err: 875out_err:
876 ICMP_INC_STATS_BH(dev_net(skb->dst->dev), ICMP_MIB_INERRORS); 876 ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
877 goto out; 877 goto out;
878} 878}
879 879
@@ -926,7 +926,7 @@ static void icmp_address(struct sk_buff *skb)
926 926
927static void icmp_address_reply(struct sk_buff *skb) 927static void icmp_address_reply(struct sk_buff *skb)
928{ 928{
929 struct rtable *rt = skb->rtable; 929 struct rtable *rt = skb_rtable(skb);
930 struct net_device *dev = skb->dev; 930 struct net_device *dev = skb->dev;
931 struct in_device *in_dev; 931 struct in_device *in_dev;
932 struct in_ifaddr *ifa; 932 struct in_ifaddr *ifa;
@@ -970,7 +970,7 @@ static void icmp_discard(struct sk_buff *skb)
970int icmp_rcv(struct sk_buff *skb) 970int icmp_rcv(struct sk_buff *skb)
971{ 971{
972 struct icmphdr *icmph; 972 struct icmphdr *icmph;
973 struct rtable *rt = skb->rtable; 973 struct rtable *rt = skb_rtable(skb);
974 struct net *net = dev_net(rt->u.dst.dev); 974 struct net *net = dev_net(rt->u.dst.dev);
975 975
976 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 976 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9eb6219af615..01b4284ed694 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -311,7 +311,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
311 return NULL; 311 return NULL;
312 } 312 }
313 313
314 skb->dst = &rt->u.dst; 314 skb_dst_set(skb, &rt->u.dst);
315 skb->dev = dev; 315 skb->dev = dev;
316 316
317 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 317 skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -659,7 +659,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
659 return -1; 659 return -1;
660 } 660 }
661 661
662 skb->dst = &rt->u.dst; 662 skb_dst_set(skb, &rt->u.dst);
663 663
664 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 664 skb_reserve(skb, LL_RESERVED_SPACE(dev));
665 665
@@ -948,7 +948,7 @@ int igmp_rcv(struct sk_buff *skb)
948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 948 case IGMPV2_HOST_MEMBERSHIP_REPORT:
949 case IGMPV3_HOST_MEMBERSHIP_REPORT: 949 case IGMPV3_HOST_MEMBERSHIP_REPORT:
950 /* Is it our report looped back? */ 950 /* Is it our report looped back? */
951 if (skb->rtable->fl.iif == 0) 951 if (skb_rtable(skb)->fl.iif == 0)
952 break; 952 break;
953 /* don't rely on MC router hearing unicast reports */ 953 /* don't rely on MC router hearing unicast reports */
954 if (skb->pkt_type == PACKET_MULTICAST || 954 if (skb->pkt_type == PACKET_MULTICAST ||
@@ -2196,7 +2196,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2196 break; 2196 break;
2197 } 2197 }
2198 if (!pmc) 2198 if (!pmc)
2199 return 1; 2199 return inet->mc_all;
2200 psl = pmc->sflist; 2200 psl = pmc->sflist;
2201 if (!psl) 2201 if (!psl)
2202 return pmc->sfmode == MCAST_EXCLUDE; 2202 return pmc->sfmode == MCAST_EXCLUDE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 588a7796e3e3..b0b273503e2a 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -198,8 +198,6 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
198 tmo = 0; 198 tmo = 0;
199 199
200 r->idiag_family = tw->tw_family; 200 r->idiag_family = tw->tw_family;
201 r->idiag_state = tw->tw_state;
202 r->idiag_timer = 0;
203 r->idiag_retrans = 0; 201 r->idiag_retrans = 0;
204 r->id.idiag_if = tw->tw_bound_dev_if; 202 r->id.idiag_if = tw->tw_bound_dev_if;
205 r->id.idiag_cookie[0] = (u32)(unsigned long)tw; 203 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 8554d0ea1719..61283f928825 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemcheck.h>
12#include <net/inet_hashtables.h> 13#include <net/inet_hashtables.h>
13#include <net/inet_timewait_sock.h> 14#include <net/inet_timewait_sock.h>
14#include <net/ip.h> 15#include <net/ip.h>
@@ -49,19 +50,22 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
 	inet_twsk_put(tw);
 }
 
-void inet_twsk_put(struct inet_timewait_sock *tw)
+static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
 {
-	if (atomic_dec_and_test(&tw->tw_refcnt)) {
-		struct module *owner = tw->tw_prot->owner;
-		twsk_destructor((struct sock *)tw);
+	struct module *owner = tw->tw_prot->owner;
+	twsk_destructor((struct sock *)tw);
 #ifdef SOCK_REFCNT_DEBUG
-		printk(KERN_DEBUG "%s timewait_sock %p released\n",
-		       tw->tw_prot->name, tw);
+	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
 #endif
-		release_net(twsk_net(tw));
-		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
-		module_put(owner);
-	}
+	release_net(twsk_net(tw));
+	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
+	module_put(owner);
+}
+
+void inet_twsk_put(struct inet_timewait_sock *tw)
+{
+	if (atomic_dec_and_test(&tw->tw_refcnt))
+		inet_twsk_free(tw);
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
@@ -117,6 +121,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
117 if (tw != NULL) { 121 if (tw != NULL) {
118 const struct inet_sock *inet = inet_sk(sk); 122 const struct inet_sock *inet = inet_sk(sk);
119 123
124 kmemcheck_annotate_bitfield(tw, flags);
125
120 /* Give us an identity. */ 126 /* Give us an identity. */
121 tw->tw_daddr = inet->daddr; 127 tw->tw_daddr = inet->daddr;
122 tw->tw_rcv_saddr = inet->rcv_saddr; 128 tw->tw_rcv_saddr = inet->rcv_saddr;
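The inet_twsk_put() rework above is a classic hot/cold split: the common case stays a single atomic_dec_and_test(), and the rarely taken free path moves into a noinline helper so it is kept out of the caller's instruction stream. Generic shape of the pattern, names hypothetical; the real free path also drops the slab object, the net reference and the module reference, as the hunk shows:

struct obj {
	atomic_t refcnt;
};

static noinline void obj_free(struct obj *o)	/* cold path, kept out of line */
{
	kfree(o);
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcnt))	/* hot path: one atomic op */
		obj_free(o);
}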
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index df3fe50bbf0d..a2991bc8e32e 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,7 +42,7 @@ static int ip_forward_finish(struct sk_buff *skb)
42{ 42{
43 struct ip_options * opt = &(IPCB(skb)->opt); 43 struct ip_options * opt = &(IPCB(skb)->opt);
44 44
45 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 45 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
46 46
47 if (unlikely(opt->optlen)) 47 if (unlikely(opt->optlen))
48 ip_forward_options(skb); 48 ip_forward_options(skb);
@@ -81,7 +81,7 @@ int ip_forward(struct sk_buff *skb)
81 if (!xfrm4_route_forward(skb)) 81 if (!xfrm4_route_forward(skb))
82 goto drop; 82 goto drop;
83 83
84 rt = skb->rtable; 84 rt = skb_rtable(skb);
85 85
86 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 86 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
87 goto sr_failed; 87 goto sr_failed;
@@ -123,7 +123,7 @@ sr_failed:
123 123
124too_many_hops: 124too_many_hops:
125 /* Tell the sender its packet died... */ 125 /* Tell the sender its packet died... */
126 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_INHDRERRORS); 126 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
127 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); 127 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
128drop: 128drop:
129 kfree_skb(skb); 129 kfree_skb(skb);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7985346653bd..575f9bd51ccd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -507,7 +507,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
507 /* If the first fragment is fragmented itself, we split 507 /* If the first fragment is fragmented itself, we split
508 * it to two chunks: the first with data and paged part 508 * it to two chunks: the first with data and paged part
509 * and the second, holding only fragments. */ 509 * and the second, holding only fragments. */
510 if (skb_shinfo(head)->frag_list) { 510 if (skb_has_frags(head)) {
511 struct sk_buff *clone; 511 struct sk_buff *clone;
512 int i, plen = 0; 512 int i, plen = 0;
513 513
@@ -516,7 +516,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
516 clone->next = head->next; 516 clone->next = head->next;
517 head->next = clone; 517 head->next = clone;
518 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 518 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
519 skb_shinfo(head)->frag_list = NULL; 519 skb_frag_list_init(head);
520 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 520 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
521 plen += skb_shinfo(head)->frags[i].size; 521 plen += skb_shinfo(head)->frags[i].size;
522 clone->len = clone->data_len = head->data_len - plen; 522 clone->len = clone->data_len = head->data_len - plen;
@@ -573,7 +573,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
573 struct ipq *qp; 573 struct ipq *qp;
574 struct net *net; 574 struct net *net;
575 575
576 net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); 576 net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
577 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); 577 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
578 578
579 /* Start by cleaning up the memory. */ 579 /* Start by cleaning up the memory. */
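ip_fragment.c is one of several files in this section converted to the new frag-list helpers. A sketch of those helpers as used here, matching the skbuff.h additions this series relies on (shown for reference; the definitions are not part of this hunk):

static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* iterate the chained fragments without touching frag_list directly */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)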
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e62510d5ea5a..44e2a3d2359a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -602,7 +602,7 @@ static int ipgre_rcv(struct sk_buff *skb)
602#ifdef CONFIG_NET_IPGRE_BROADCAST 602#ifdef CONFIG_NET_IPGRE_BROADCAST
603 if (ipv4_is_multicast(iph->daddr)) { 603 if (ipv4_is_multicast(iph->daddr)) {
604 /* Looped back packet, drop it! */ 604 /* Looped back packet, drop it! */
605 if (skb->rtable->fl.iif == 0) 605 if (skb_rtable(skb)->fl.iif == 0)
606 goto drop; 606 goto drop;
607 stats->multicast++; 607 stats->multicast++;
608 skb->pkt_type = PACKET_BROADCAST; 608 skb->pkt_type = PACKET_BROADCAST;
@@ -643,8 +643,7 @@ static int ipgre_rcv(struct sk_buff *skb)
643 stats->rx_packets++; 643 stats->rx_packets++;
644 stats->rx_bytes += len; 644 stats->rx_bytes += len;
645 skb->dev = tunnel->dev; 645 skb->dev = tunnel->dev;
646 dst_release(skb->dst); 646 skb_dst_drop(skb);
647 skb->dst = NULL;
648 nf_reset(skb); 647 nf_reset(skb);
649 648
650 skb_reset_network_header(skb); 649 skb_reset_network_header(skb);
@@ -698,13 +697,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
698 if ((dst = tiph->daddr) == 0) { 697 if ((dst = tiph->daddr) == 0) {
699 /* NBMA tunnel */ 698 /* NBMA tunnel */
700 699
701 if (skb->dst == NULL) { 700 if (skb_dst(skb) == NULL) {
702 stats->tx_fifo_errors++; 701 stats->tx_fifo_errors++;
703 goto tx_error; 702 goto tx_error;
704 } 703 }
705 704
706 if (skb->protocol == htons(ETH_P_IP)) { 705 if (skb->protocol == htons(ETH_P_IP)) {
707 rt = skb->rtable; 706 rt = skb_rtable(skb);
708 if ((dst = rt->rt_gateway) == 0) 707 if ((dst = rt->rt_gateway) == 0)
709 goto tx_error_icmp; 708 goto tx_error_icmp;
710 } 709 }
@@ -712,7 +711,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
712 else if (skb->protocol == htons(ETH_P_IPV6)) { 711 else if (skb->protocol == htons(ETH_P_IPV6)) {
713 struct in6_addr *addr6; 712 struct in6_addr *addr6;
714 int addr_type; 713 int addr_type;
715 struct neighbour *neigh = skb->dst->neighbour; 714 struct neighbour *neigh = skb_dst(skb)->neighbour;
716 715
717 if (neigh == NULL) 716 if (neigh == NULL)
718 goto tx_error; 717 goto tx_error;
@@ -766,10 +765,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
766 if (df) 765 if (df)
767 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; 766 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
768 else 767 else
769 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 768 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
770 769
771 if (skb->dst) 770 if (skb_dst(skb))
772 skb->dst->ops->update_pmtu(skb->dst, mtu); 771 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
773 772
774 if (skb->protocol == htons(ETH_P_IP)) { 773 if (skb->protocol == htons(ETH_P_IP)) {
775 df |= (old_iph->frag_off&htons(IP_DF)); 774 df |= (old_iph->frag_off&htons(IP_DF));
@@ -783,14 +782,14 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
783 } 782 }
784#ifdef CONFIG_IPV6 783#ifdef CONFIG_IPV6
785 else if (skb->protocol == htons(ETH_P_IPV6)) { 784 else if (skb->protocol == htons(ETH_P_IPV6)) {
786 struct rt6_info *rt6 = (struct rt6_info *)skb->dst; 785 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
787 786
788 if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { 787 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
789 if ((tunnel->parms.iph.daddr && 788 if ((tunnel->parms.iph.daddr &&
790 !ipv4_is_multicast(tunnel->parms.iph.daddr)) || 789 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
791 rt6->rt6i_dst.plen == 128) { 790 rt6->rt6i_dst.plen == 128) {
792 rt6->rt6i_flags |= RTF_MODIFIED; 791 rt6->rt6i_flags |= RTF_MODIFIED;
793 skb->dst->metrics[RTAX_MTU-1] = mtu; 792 skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
794 } 793 }
795 } 794 }
796 795
@@ -837,8 +836,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
837 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 836 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
838 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 837 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
839 IPSKB_REROUTED); 838 IPSKB_REROUTED);
840 dst_release(skb->dst); 839 skb_dst_drop(skb);
841 skb->dst = &rt->u.dst; 840 skb_dst_set(skb, &rt->u.dst);
842 841
843 /* 842 /*
844 * Push down and install the IPIP header. 843 * Push down and install the IPIP header.
@@ -1238,6 +1237,7 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1238 dev->iflink = 0; 1237 dev->iflink = 0;
1239 dev->addr_len = 4; 1238 dev->addr_len = 4;
1240 dev->features |= NETIF_F_NETNS_LOCAL; 1239 dev->features |= NETIF_F_NETNS_LOCAL;
1240 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1241} 1241}
1242 1242
1243static int ipgre_tunnel_init(struct net_device *dev) 1243static int ipgre_tunnel_init(struct net_device *dev)
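The GRE conversions also lean on the drop/set pair that replaces the open-coded "dst_release(skb->dst); skb->dst = NULL;" sequence visible on the removed side. Hedged reconstruction from the usage above:

static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb_dst(skb))
		dst_release(skb_dst(skb));	/* drop the old route reference */
	skb->_skb_dst = 0UL;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = (unsigned long)dst;	/* caller supplies the reference */
}

The ipgre_tunnel_setup() hunk clears IFF_XMIT_DST_RELEASE for a related reason: with the flag cleared, the core does not strip the dst before calling the device's xmit routine, and the NBMA branch of ipgre_tunnel_xmit() still finds the route it needs on the skb.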
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 1a58a6fa1dc0..490ce20faf38 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -329,7 +329,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
329 * Initialise the virtual path cache for the packet. It describes 329 * Initialise the virtual path cache for the packet. It describes
330 * how the packet travels inside Linux networking. 330 * how the packet travels inside Linux networking.
331 */ 331 */
332 if (skb->dst == NULL) { 332 if (skb_dst(skb) == NULL) {
333 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, 333 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
334 skb->dev); 334 skb->dev);
335 if (unlikely(err)) { 335 if (unlikely(err)) {
@@ -344,9 +344,9 @@ static int ip_rcv_finish(struct sk_buff *skb)
344 } 344 }
345 345
346#ifdef CONFIG_NET_CLS_ROUTE 346#ifdef CONFIG_NET_CLS_ROUTE
347 if (unlikely(skb->dst->tclassid)) { 347 if (unlikely(skb_dst(skb)->tclassid)) {
348 struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); 348 struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id());
349 u32 idx = skb->dst->tclassid; 349 u32 idx = skb_dst(skb)->tclassid;
350 st[idx&0xFF].o_packets++; 350 st[idx&0xFF].o_packets++;
351 st[idx&0xFF].o_bytes += skb->len; 351 st[idx&0xFF].o_bytes += skb->len;
352 st[(idx>>16)&0xFF].i_packets++; 352 st[(idx>>16)&0xFF].i_packets++;
@@ -357,11 +357,13 @@ static int ip_rcv_finish(struct sk_buff *skb)
 	if (iph->ihl > 5 && ip_rcv_options(skb))
 		goto drop;
 
-	rt = skb->rtable;
-	if (rt->rt_type == RTN_MULTICAST)
-		IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCASTPKTS);
-	else if (rt->rt_type == RTN_BROADCAST)
-		IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCASTPKTS);
+	rt = skb_rtable(skb);
+	if (rt->rt_type == RTN_MULTICAST) {
+		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST,
+				   skb->len);
+	} else if (rt->rt_type == RTN_BROADCAST)
+		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST,
+				   skb->len);
 
 	return dst_input(skb);
@@ -384,7 +386,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	if (skb->pkt_type == PACKET_OTHERHOST)
 		goto drop;
 
-	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INRECEIVES);
+
+	IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
 
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
 		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
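IPSTATS_MIB_INRECEIVES and friends give way to IP_UPD_PO_STATS_BH(), which bumps a packet counter and the matching octet counter in one call. An approximate shape of the macro, reconstructed from the call sites here; the real definition lives in the SNMP mib headers and operates on per-CPU counters:

#define IP_UPD_PO_STATS_BH(net, base, bytes)				  \
	do {								  \
		/* base##PKTS / base##OCTETS: e.g. IPSTATS_MIB_INPKTS */  \
		SNMP_INC_STATS_BH((net)->mib.ip_statistics, base##PKTS);  \
		SNMP_ADD_STATS_BH((net)->mib.ip_statistics,		  \
				  base##OCTETS, (bytes));		  \
	} while (0)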
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 2c88da6e7862..94bf105ef3c9 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -102,7 +102,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
102 sptr = skb_network_header(skb); 102 sptr = skb_network_header(skb);
103 dptr = dopt->__data; 103 dptr = dopt->__data;
104 104
105 daddr = skb->rtable->rt_spec_dst; 105 daddr = skb_rtable(skb)->rt_spec_dst;
106 106
107 if (sopt->rr) { 107 if (sopt->rr) {
108 optlen = sptr[sopt->rr+1]; 108 optlen = sptr[sopt->rr+1];
@@ -143,7 +143,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
143 __be32 addr; 143 __be32 addr;
144 144
145 memcpy(&addr, sptr+soffset-1, 4); 145 memcpy(&addr, sptr+soffset-1, 4);
146 if (inet_addr_type(dev_net(skb->dst->dev), addr) != RTN_LOCAL) { 146 if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) {
147 dopt->ts_needtime = 1; 147 dopt->ts_needtime = 1;
148 soffset += 8; 148 soffset += 8;
149 } 149 }
@@ -257,7 +257,7 @@ int ip_options_compile(struct net *net,
257 struct rtable *rt = NULL; 257 struct rtable *rt = NULL;
258 258
259 if (skb != NULL) { 259 if (skb != NULL) {
260 rt = skb->rtable; 260 rt = skb_rtable(skb);
261 optptr = (unsigned char *)&(ip_hdr(skb)[1]); 261 optptr = (unsigned char *)&(ip_hdr(skb)[1]);
262 } else 262 } else
263 optptr = opt->__data; 263 optptr = opt->__data;
@@ -550,7 +550,7 @@ void ip_forward_options(struct sk_buff *skb)
550{ 550{
551 struct ip_options * opt = &(IPCB(skb)->opt); 551 struct ip_options * opt = &(IPCB(skb)->opt);
552 unsigned char * optptr; 552 unsigned char * optptr;
553 struct rtable *rt = skb->rtable; 553 struct rtable *rt = skb_rtable(skb);
554 unsigned char *raw = skb_network_header(skb); 554 unsigned char *raw = skb_network_header(skb);
555 555
556 if (opt->rr_needaddr) { 556 if (opt->rr_needaddr) {
@@ -598,7 +598,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
598 __be32 nexthop; 598 __be32 nexthop;
599 struct iphdr *iph = ip_hdr(skb); 599 struct iphdr *iph = ip_hdr(skb);
600 unsigned char *optptr = skb_network_header(skb) + opt->srr; 600 unsigned char *optptr = skb_network_header(skb) + opt->srr;
601 struct rtable *rt = skb->rtable; 601 struct rtable *rt = skb_rtable(skb);
602 struct rtable *rt2; 602 struct rtable *rt2;
603 int err; 603 int err;
604 604
@@ -623,13 +623,13 @@ int ip_options_rcv_srr(struct sk_buff *skb)
623 } 623 }
624 memcpy(&nexthop, &optptr[srrptr-1], 4); 624 memcpy(&nexthop, &optptr[srrptr-1], 4);
625 625
626 rt = skb->rtable; 626 rt = skb_rtable(skb);
627 skb->rtable = NULL; 627 skb_dst_set(skb, NULL);
628 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); 628 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
629 rt2 = skb->rtable; 629 rt2 = skb_rtable(skb);
630 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { 630 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
631 ip_rt_put(rt2); 631 ip_rt_put(rt2);
632 skb->rtable = rt; 632 skb_dst_set(skb, &rt->u.dst);
633 return -EINVAL; 633 return -EINVAL;
634 } 634 }
635 ip_rt_put(rt); 635 ip_rt_put(rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3e7e910c7c0f..247026282669 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -95,7 +95,7 @@ int __ip_local_out(struct sk_buff *skb)
95 95
96 iph->tot_len = htons(skb->len); 96 iph->tot_len = htons(skb->len);
97 ip_send_check(iph); 97 ip_send_check(iph);
98 return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, 98 return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
99 dst_output); 99 dst_output);
100} 100}
101 101
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
118 __skb_pull(newskb, skb_network_offset(newskb)); 118 __skb_pull(newskb, skb_network_offset(newskb));
119 newskb->pkt_type = PACKET_LOOPBACK; 119 newskb->pkt_type = PACKET_LOOPBACK;
120 newskb->ip_summed = CHECKSUM_UNNECESSARY; 120 newskb->ip_summed = CHECKSUM_UNNECESSARY;
121 WARN_ON(!newskb->dst); 121 WARN_ON(!skb_dst(newskb));
122 netif_rx(newskb); 122 netif_rx(newskb);
123 return 0; 123 return 0;
124} 124}
@@ -140,7 +140,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
140 __be32 saddr, __be32 daddr, struct ip_options *opt) 140 __be32 saddr, __be32 daddr, struct ip_options *opt)
141{ 141{
142 struct inet_sock *inet = inet_sk(sk); 142 struct inet_sock *inet = inet_sk(sk);
143 struct rtable *rt = skb->rtable; 143 struct rtable *rt = skb_rtable(skb);
144 struct iphdr *iph; 144 struct iphdr *iph;
145 145
146 /* Build the IP header. */ 146 /* Build the IP header. */
@@ -176,15 +176,15 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
176 176
177static inline int ip_finish_output2(struct sk_buff *skb) 177static inline int ip_finish_output2(struct sk_buff *skb)
178{ 178{
179 struct dst_entry *dst = skb->dst; 179 struct dst_entry *dst = skb_dst(skb);
180 struct rtable *rt = (struct rtable *)dst; 180 struct rtable *rt = (struct rtable *)dst;
181 struct net_device *dev = dst->dev; 181 struct net_device *dev = dst->dev;
182 unsigned int hh_len = LL_RESERVED_SPACE(dev); 182 unsigned int hh_len = LL_RESERVED_SPACE(dev);
183 183
184 if (rt->rt_type == RTN_MULTICAST) 184 if (rt->rt_type == RTN_MULTICAST) {
185 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS); 185 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
186 else if (rt->rt_type == RTN_BROADCAST) 186 } else if (rt->rt_type == RTN_BROADCAST)
187 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS); 187 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
188 188
189 /* Be paranoid, rather than too clever. */ 189 /* Be paranoid, rather than too clever. */
190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -217,14 +217,14 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb)
217 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; 217 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
218 218
219 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? 219 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
220 skb->dst->dev->mtu : dst_mtu(skb->dst); 220 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
221} 221}
222 222
223static int ip_finish_output(struct sk_buff *skb) 223static int ip_finish_output(struct sk_buff *skb)
224{ 224{
225#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 225#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
226 /* Policy lookup after SNAT yielded a new policy */ 226 /* Policy lookup after SNAT yielded a new policy */
227 if (skb->dst->xfrm != NULL) { 227 if (skb_dst(skb)->xfrm != NULL) {
228 IPCB(skb)->flags |= IPSKB_REROUTED; 228 IPCB(skb)->flags |= IPSKB_REROUTED;
229 return dst_output(skb); 229 return dst_output(skb);
230 } 230 }
@@ -238,13 +238,13 @@ static int ip_finish_output(struct sk_buff *skb)
238int ip_mc_output(struct sk_buff *skb) 238int ip_mc_output(struct sk_buff *skb)
239{ 239{
240 struct sock *sk = skb->sk; 240 struct sock *sk = skb->sk;
241 struct rtable *rt = skb->rtable; 241 struct rtable *rt = skb_rtable(skb);
242 struct net_device *dev = rt->u.dst.dev; 242 struct net_device *dev = rt->u.dst.dev;
243 243
244 /* 244 /*
245 * If the indicated interface is up and running, send the packet. 245 * If the indicated interface is up and running, send the packet.
246 */ 246 */
247 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); 247 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
248 248
249 skb->dev = dev; 249 skb->dev = dev;
250 skb->protocol = htons(ETH_P_IP); 250 skb->protocol = htons(ETH_P_IP);
@@ -296,9 +296,9 @@ int ip_mc_output(struct sk_buff *skb)
296 296
297int ip_output(struct sk_buff *skb) 297int ip_output(struct sk_buff *skb)
298{ 298{
299 struct net_device *dev = skb->dst->dev; 299 struct net_device *dev = skb_dst(skb)->dev;
300 300
301 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); 301 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
302 302
303 skb->dev = dev; 303 skb->dev = dev;
304 skb->protocol = htons(ETH_P_IP); 304 skb->protocol = htons(ETH_P_IP);
@@ -319,7 +319,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
319 /* Skip all of this if the packet is already routed, 319 /* Skip all of this if the packet is already routed,
320 * f.e. by something like SCTP. 320 * f.e. by something like SCTP.
321 */ 321 */
322 rt = skb->rtable; 322 rt = skb_rtable(skb);
323 if (rt != NULL) 323 if (rt != NULL)
324 goto packet_routed; 324 goto packet_routed;
325 325
@@ -355,7 +355,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
355 } 355 }
356 sk_setup_caps(sk, &rt->u.dst); 356 sk_setup_caps(sk, &rt->u.dst);
357 } 357 }
358 skb->dst = dst_clone(&rt->u.dst); 358 skb_dst_set(skb, dst_clone(&rt->u.dst));
359 359
360packet_routed: 360packet_routed:
361 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 361 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -401,8 +401,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
401 to->pkt_type = from->pkt_type; 401 to->pkt_type = from->pkt_type;
402 to->priority = from->priority; 402 to->priority = from->priority;
403 to->protocol = from->protocol; 403 to->protocol = from->protocol;
404 dst_release(to->dst); 404 skb_dst_drop(to);
405 to->dst = dst_clone(from->dst); 405 skb_dst_set(to, dst_clone(skb_dst(from)));
406 to->dev = from->dev; 406 to->dev = from->dev;
407 to->mark = from->mark; 407 to->mark = from->mark;
408 408
@@ -440,7 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
440 unsigned int mtu, hlen, left, len, ll_rs, pad; 440 unsigned int mtu, hlen, left, len, ll_rs, pad;
441 int offset; 441 int offset;
442 __be16 not_last_frag; 442 __be16 not_last_frag;
443 struct rtable *rt = skb->rtable; 443 struct rtable *rt = skb_rtable(skb);
444 int err = 0; 444 int err = 0;
445 445
446 dev = rt->u.dst.dev; 446 dev = rt->u.dst.dev;
@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
474 * LATER: this step can be merged to real generation of fragments, 474 * LATER: this step can be merged to real generation of fragments,
475 * we can switch to copy when see the first bad fragment. 475 * we can switch to copy when see the first bad fragment.
476 */ 476 */
477 if (skb_shinfo(skb)->frag_list) { 477 if (skb_has_frags(skb)) {
478 struct sk_buff *frag; 478 struct sk_buff *frag;
479 int first_len = skb_pagelen(skb); 479 int first_len = skb_pagelen(skb);
480 int truesizes = 0; 480 int truesizes = 0;
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
485 skb_cloned(skb)) 485 skb_cloned(skb))
486 goto slow_path; 486 goto slow_path;
487 487
488 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 488 skb_walk_frags(skb, frag) {
489 /* Correct geometry. */ 489 /* Correct geometry. */
490 if (frag->len > mtu || 490 if (frag->len > mtu ||
491 ((frag->len & 7) && frag->next) || 491 ((frag->len & 7) && frag->next) ||
@@ -498,7 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
498 498
499 BUG_ON(frag->sk); 499 BUG_ON(frag->sk);
500 if (skb->sk) { 500 if (skb->sk) {
501 sock_hold(skb->sk);
502 frag->sk = skb->sk; 501 frag->sk = skb->sk;
503 frag->destructor = sock_wfree; 502 frag->destructor = sock_wfree;
504 truesizes += frag->truesize; 503 truesizes += frag->truesize;
@@ -510,7 +509,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
510 err = 0; 509 err = 0;
511 offset = 0; 510 offset = 0;
512 frag = skb_shinfo(skb)->frag_list; 511 frag = skb_shinfo(skb)->frag_list;
513 skb_shinfo(skb)->frag_list = NULL; 512 skb_frag_list_init(skb);
514 skb->data_len = first_len - skb_headlen(skb); 513 skb->data_len = first_len - skb_headlen(skb);
515 skb->truesize -= truesizes; 514 skb->truesize -= truesizes;
516 skb->len = first_len; 515 skb->len = first_len;
@@ -1294,7 +1293,7 @@ int ip_push_pending_frames(struct sock *sk)
1294 * on dst refcount 1293 * on dst refcount
1295 */ 1294 */
1296 inet->cork.dst = NULL; 1295 inet->cork.dst = NULL;
1297 skb->dst = &rt->u.dst; 1296 skb_dst_set(skb, &rt->u.dst);
1298 1297
1299 if (iph->protocol == IPPROTO_ICMP) 1298 if (iph->protocol == IPPROTO_ICMP)
1300 icmp_out_count(net, ((struct icmphdr *) 1299 icmp_out_count(net, ((struct icmphdr *)
@@ -1362,7 +1361,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1362 } replyopts; 1361 } replyopts;
1363 struct ipcm_cookie ipc; 1362 struct ipcm_cookie ipc;
1364 __be32 daddr; 1363 __be32 daddr;
1365 struct rtable *rt = skb->rtable; 1364 struct rtable *rt = skb_rtable(skb);
1366 1365
1367 if (ip_options_echo(&replyopts.opt, skb)) 1366 if (ip_options_echo(&replyopts.opt, skb))
1368 return; 1367 return;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 43c05854d752..fc7993e9061f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -57,7 +57,7 @@
57static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) 57static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
58{ 58{
59 struct in_pktinfo info; 59 struct in_pktinfo info;
60 struct rtable *rt = skb->rtable; 60 struct rtable *rt = skb_rtable(skb);
61 61
62 info.ipi_addr.s_addr = ip_hdr(skb)->daddr; 62 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
63 if (rt) { 63 if (rt) {
@@ -157,38 +157,39 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
157 /* Ordered by supposed usage frequency */ 157 /* Ordered by supposed usage frequency */
158 if (flags & 1) 158 if (flags & 1)
159 ip_cmsg_recv_pktinfo(msg, skb); 159 ip_cmsg_recv_pktinfo(msg, skb);
160 if ((flags>>=1) == 0) 160 if ((flags >>= 1) == 0)
161 return; 161 return;
162 162
163 if (flags & 1) 163 if (flags & 1)
164 ip_cmsg_recv_ttl(msg, skb); 164 ip_cmsg_recv_ttl(msg, skb);
165 if ((flags>>=1) == 0) 165 if ((flags >>= 1) == 0)
166 return; 166 return;
167 167
168 if (flags & 1) 168 if (flags & 1)
169 ip_cmsg_recv_tos(msg, skb); 169 ip_cmsg_recv_tos(msg, skb);
170 if ((flags>>=1) == 0) 170 if ((flags >>= 1) == 0)
171 return; 171 return;
172 172
173 if (flags & 1) 173 if (flags & 1)
174 ip_cmsg_recv_opts(msg, skb); 174 ip_cmsg_recv_opts(msg, skb);
175 if ((flags>>=1) == 0) 175 if ((flags >>= 1) == 0)
176 return; 176 return;
177 177
178 if (flags & 1) 178 if (flags & 1)
179 ip_cmsg_recv_retopts(msg, skb); 179 ip_cmsg_recv_retopts(msg, skb);
180 if ((flags>>=1) == 0) 180 if ((flags >>= 1) == 0)
181 return; 181 return;
182 182
183 if (flags & 1) 183 if (flags & 1)
184 ip_cmsg_recv_security(msg, skb); 184 ip_cmsg_recv_security(msg, skb);
185 185
186 if ((flags>>=1) == 0) 186 if ((flags >>= 1) == 0)
187 return; 187 return;
188 if (flags & 1) 188 if (flags & 1)
189 ip_cmsg_recv_dstaddr(msg, skb); 189 ip_cmsg_recv_dstaddr(msg, skb);
190 190
191} 191}
192EXPORT_SYMBOL(ip_cmsg_recv);
192 193
193int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) 194int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
194{ 195{
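The whitespace cleanup in ip_cmsg_recv() also makes the dispatch idiom easier to see: each cmsg handler claims bit 0 of flags, the word shifts right, and the function returns as soon as no higher bits remain set. In miniature, with hypothetical handler names:

static void cmsg_dispatch(struct msghdr *msg, struct sk_buff *skb, unsigned flags)
{
	if (flags & 1)
		handle_pktinfo(msg, skb);	/* cheapest/most common item first */
	if ((flags >>= 1) == 0)			/* nothing further requested */
		return;

	if (flags & 1)
		handle_ttl(msg, skb);
	if ((flags >>= 1) == 0)
		return;
	/* ... one bit per ancillary item, in supposed usage-frequency order ... */
}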
@@ -203,7 +204,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
203 switch (cmsg->cmsg_type) { 204 switch (cmsg->cmsg_type) {
204 case IP_RETOPTS: 205 case IP_RETOPTS:
205 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 206 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
206 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); 207 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
208 err < 40 ? err : 40);
207 if (err) 209 if (err)
208 return err; 210 return err;
209 break; 211 break;
@@ -238,7 +240,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
238struct ip_ra_chain *ip_ra_chain; 240struct ip_ra_chain *ip_ra_chain;
239DEFINE_RWLOCK(ip_ra_lock); 241DEFINE_RWLOCK(ip_ra_lock);
240 242
241int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) 243int ip_ra_control(struct sock *sk, unsigned char on,
244 void (*destructor)(struct sock *))
242{ 245{
243 struct ip_ra_chain *ra, *new_ra, **rap; 246 struct ip_ra_chain *ra, *new_ra, **rap;
244 247
@@ -248,7 +251,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
248 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; 251 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
249 252
250 write_lock_bh(&ip_ra_lock); 253 write_lock_bh(&ip_ra_lock);
251 for (rap = &ip_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { 254 for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
252 if (ra->sk == sk) { 255 if (ra->sk == sk) {
253 if (on) { 256 if (on) {
254 write_unlock_bh(&ip_ra_lock); 257 write_unlock_bh(&ip_ra_lock);
@@ -416,7 +419,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
416 /* Reset and regenerate socket error */ 419 /* Reset and regenerate socket error */
417 spin_lock_bh(&sk->sk_error_queue.lock); 420 spin_lock_bh(&sk->sk_error_queue.lock);
418 sk->sk_err = 0; 421 sk->sk_err = 0;
419 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { 422 skb2 = skb_peek(&sk->sk_error_queue);
423 if (skb2 != NULL) {
420 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; 424 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
421 spin_unlock_bh(&sk->sk_error_queue.lock); 425 spin_unlock_bh(&sk->sk_error_queue.lock);
422 sk->sk_error_report(sk); 426 sk->sk_error_report(sk);
@@ -431,8 +435,8 @@ out:
431 435
432 436
433/* 437/*
434 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on 438 * Socket option code for IP. This is the end of the line after any
435 * an IP socket. 439 * TCP,UDP etc options on an IP socket.
436 */ 440 */
437 441
438static int do_ip_setsockopt(struct sock *sk, int level, 442static int do_ip_setsockopt(struct sock *sk, int level,
@@ -449,6 +453,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
449 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
450 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
451 optname == IP_MULTICAST_TTL || 455 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL ||
452 optname == IP_MULTICAST_LOOP || 457 optname == IP_MULTICAST_LOOP ||
453 optname == IP_RECVORIGDSTADDR) { 458 optname == IP_RECVORIGDSTADDR) {
454 if (optlen >= sizeof(int)) { 459 if (optlen >= sizeof(int)) {
@@ -474,7 +479,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
474 switch (optname) { 479 switch (optname) {
475 case IP_OPTIONS: 480 case IP_OPTIONS:
476 { 481 {
477 struct ip_options * opt = NULL; 482 struct ip_options *opt = NULL;
478 if (optlen > 40 || optlen < 0) 483 if (optlen > 40 || optlen < 0)
479 goto e_inval; 484 goto e_inval;
480 err = ip_options_get_from_user(sock_net(sk), &opt, 485 err = ip_options_get_from_user(sock_net(sk), &opt,
@@ -556,9 +561,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
556 } 561 }
557 break; 562 break;
558 case IP_TTL: 563 case IP_TTL:
559 if (optlen<1) 564 if (optlen < 1)
560 goto e_inval; 565 goto e_inval;
561 if (val != -1 && (val < 1 || val>255)) 566 if (val != -1 && (val < 0 || val > 255))
562 goto e_inval; 567 goto e_inval;
563 inet->uc_ttl = val; 568 inet->uc_ttl = val;
564 break; 569 break;
@@ -570,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
570 inet->hdrincl = val ? 1 : 0; 575 inet->hdrincl = val ? 1 : 0;
571 break; 576 break;
572 case IP_MTU_DISCOVER: 577 case IP_MTU_DISCOVER:
573 if (val<0 || val>3) 578 if (val < 0 || val > 3)
574 goto e_inval; 579 goto e_inval;
575 inet->pmtudisc = val; 580 inet->pmtudisc = val;
576 break; 581 break;
@@ -582,7 +587,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
582 case IP_MULTICAST_TTL: 587 case IP_MULTICAST_TTL:
583 if (sk->sk_type == SOCK_STREAM) 588 if (sk->sk_type == SOCK_STREAM)
584 goto e_inval; 589 goto e_inval;
585 if (optlen<1) 590 if (optlen < 1)
586 goto e_inval; 591 goto e_inval;
587 if (val == -1) 592 if (val == -1)
588 val = 1; 593 val = 1;
@@ -591,7 +596,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
591 inet->mc_ttl = val; 596 inet->mc_ttl = val;
592 break; 597 break;
593 case IP_MULTICAST_LOOP: 598 case IP_MULTICAST_LOOP:
594 if (optlen<1) 599 if (optlen < 1)
595 goto e_inval; 600 goto e_inval;
596 inet->mc_loop = !!val; 601 inet->mc_loop = !!val;
597 break; 602 break;
@@ -613,7 +618,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
613 } else { 618 } else {
614 memset(&mreq, 0, sizeof(mreq)); 619 memset(&mreq, 0, sizeof(mreq));
615 if (optlen >= sizeof(struct in_addr) && 620 if (optlen >= sizeof(struct in_addr) &&
616 copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) 621 copy_from_user(&mreq.imr_address, optval,
622 sizeof(struct in_addr)))
617 break; 623 break;
618 } 624 }
619 625
@@ -677,7 +683,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
677 } 683 }
678 case IP_MSFILTER: 684 case IP_MSFILTER:
679 { 685 {
680 extern int sysctl_igmp_max_msf;
681 struct ip_msfilter *msf; 686 struct ip_msfilter *msf;
682 687
683 if (optlen < IP_MSFILTER_SIZE(0)) 688 if (optlen < IP_MSFILTER_SIZE(0))
@@ -831,7 +836,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
831 } 836 }
832 case MCAST_MSFILTER: 837 case MCAST_MSFILTER:
833 { 838 {
834 extern int sysctl_igmp_max_msf;
835 struct sockaddr_in *psin; 839 struct sockaddr_in *psin;
836 struct ip_msfilter *msf = NULL; 840 struct ip_msfilter *msf = NULL;
837 struct group_filter *gsf = NULL; 841 struct group_filter *gsf = NULL;
@@ -849,9 +853,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
849 break; 853 break;
850 } 854 }
851 err = -EFAULT; 855 err = -EFAULT;
852 if (copy_from_user(gsf, optval, optlen)) { 856 if (copy_from_user(gsf, optval, optlen))
853 goto mc_msf_out; 857 goto mc_msf_out;
854 } 858
855 /* numsrc >= (4G-140)/128 overflow in 32 bits */ 859 /* numsrc >= (4G-140)/128 overflow in 32 bits */
856 if (gsf->gf_numsrc >= 0x1ffffff || 860 if (gsf->gf_numsrc >= 0x1ffffff ||
857 gsf->gf_numsrc > sysctl_igmp_max_msf) { 861 gsf->gf_numsrc > sysctl_igmp_max_msf) {
@@ -879,7 +883,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
879 msf->imsf_fmode = gsf->gf_fmode; 883 msf->imsf_fmode = gsf->gf_fmode;
880 msf->imsf_numsrc = gsf->gf_numsrc; 884 msf->imsf_numsrc = gsf->gf_numsrc;
881 err = -EADDRNOTAVAIL; 885 err = -EADDRNOTAVAIL;
882 for (i=0; i<gsf->gf_numsrc; ++i) { 886 for (i = 0; i < gsf->gf_numsrc; ++i) {
883 psin = (struct sockaddr_in *)&gsf->gf_slist[i]; 887 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
884 888
885 if (psin->sin_family != AF_INET) 889 if (psin->sin_family != AF_INET)
@@ -890,17 +894,24 @@ static int do_ip_setsockopt(struct sock *sk, int level,
890 gsf = NULL; 894 gsf = NULL;
891 895
892 err = ip_mc_msfilter(sk, msf, ifindex); 896 err = ip_mc_msfilter(sk, msf, ifindex);
893 mc_msf_out: 897mc_msf_out:
894 kfree(msf); 898 kfree(msf);
895 kfree(gsf); 899 kfree(gsf);
896 break; 900 break;
897 } 901 }
902 case IP_MULTICAST_ALL:
903 if (optlen < 1)
904 goto e_inval;
905 if (val != 0 && val != 1)
906 goto e_inval;
907 inet->mc_all = val;
908 break;
898 case IP_ROUTER_ALERT: 909 case IP_ROUTER_ALERT:
899 err = ip_ra_control(sk, val ? 1 : 0, NULL); 910 err = ip_ra_control(sk, val ? 1 : 0, NULL);
900 break; 911 break;
901 912
902 case IP_FREEBIND: 913 case IP_FREEBIND:
903 if (optlen<1) 914 if (optlen < 1)
904 goto e_inval; 915 goto e_inval;
905 inet->freebind = !!val; 916 inet->freebind = !!val;
906 break; 917 break;
@@ -957,6 +968,7 @@ int ip_setsockopt(struct sock *sk, int level,
957#endif 968#endif
958 return err; 969 return err;
959} 970}
971EXPORT_SYMBOL(ip_setsockopt);
960 972
961#ifdef CONFIG_COMPAT 973#ifdef CONFIG_COMPAT
962int compat_ip_setsockopt(struct sock *sk, int level, int optname, 974int compat_ip_setsockopt(struct sock *sk, int level, int optname,
@@ -986,13 +998,12 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
986#endif 998#endif
987 return err; 999 return err;
988} 1000}
989
990EXPORT_SYMBOL(compat_ip_setsockopt); 1001EXPORT_SYMBOL(compat_ip_setsockopt);
991#endif 1002#endif
992 1003
993/* 1004/*
994 * Get the options. Note for future reference. The GET of IP options gets the 1005 * Get the options. Note for future reference. The GET of IP options gets
995 * _received_ ones. The set sets the _sent_ ones. 1006 * the _received_ ones. The set sets the _sent_ ones.
996 */ 1007 */
997 1008
998static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1009static int do_ip_getsockopt(struct sock *sk, int level, int optname,
@@ -1143,10 +1154,14 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1143 return -EFAULT; 1154 return -EFAULT;
1144 } 1155 }
1145 err = ip_mc_gsfget(sk, &gsf, 1156 err = ip_mc_gsfget(sk, &gsf,
-				   (struct group_filter __user *)optval, optlen);
+				   (struct group_filter __user *)optval,
+				   optlen);
1147 release_sock(sk); 1159 release_sock(sk);
1148 return err; 1160 return err;
1149 } 1161 }
1162 case IP_MULTICAST_ALL:
1163 val = inet->mc_all;
1164 break;
1150 case IP_PKTOPTIONS: 1165 case IP_PKTOPTIONS:
1151 { 1166 {
1152 struct msghdr msg; 1167 struct msghdr msg;
@@ -1187,7 +1202,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1187 } 1202 }
1188 release_sock(sk); 1203 release_sock(sk);
1189 1204
1190 if (len < sizeof(int) && len > 0 && val>=0 && val<=255) { 1205 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1191 unsigned char ucval = (unsigned char)val; 1206 unsigned char ucval = (unsigned char)val;
1192 len = 1; 1207 len = 1;
1193 if (put_user(len, optlen)) 1208 if (put_user(len, optlen))
@@ -1230,6 +1245,7 @@ int ip_getsockopt(struct sock *sk, int level,
1230#endif 1245#endif
1231 return err; 1246 return err;
1232} 1247}
1248EXPORT_SYMBOL(ip_getsockopt);
1233 1249
1234#ifdef CONFIG_COMPAT 1250#ifdef CONFIG_COMPAT
1235int compat_ip_getsockopt(struct sock *sk, int level, int optname, 1251int compat_ip_getsockopt(struct sock *sk, int level, int optname,
@@ -1262,11 +1278,5 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1262#endif 1278#endif
1263 return err; 1279 return err;
1264} 1280}
1265
1266EXPORT_SYMBOL(compat_ip_getsockopt); 1281EXPORT_SYMBOL(compat_ip_getsockopt);
1267#endif 1282#endif
1268
1269EXPORT_SYMBOL(ip_cmsg_recv);
1270
1271EXPORT_SYMBOL(ip_getsockopt);
1272EXPORT_SYMBOL(ip_setsockopt);
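The IP_MULTICAST_ALL option added above controls whether a socket sees datagrams for every group joined on the host or only for groups it joined itself; the setsockopt path accepts only 0 or 1. A minimal userspace sketch of opting out; the numeric option value 49 is an assumption about the matching <linux/in.h> change in this series, hence the fallback define:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IP_MULTICAST_ALL
    #define IP_MULTICAST_ALL 49 /* assumed uapi value from this series */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int off = 0; /* only deliver groups this socket joined itself */

        if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_MULTICAST_ALL,
                                 &off, sizeof(off)) < 0)
            perror("IP_MULTICAST_ALL");
        return 0;
    }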
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 88bf051d0cbb..f8d04c256454 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -160,6 +160,9 @@ static char user_dev_name[IFNAMSIZ] __initdata = { 0, };
160/* Protocols supported by available interfaces */ 160/* Protocols supported by available interfaces */
161static int ic_proto_have_if __initdata = 0; 161static int ic_proto_have_if __initdata = 0;
162 162
163/* MTU for boot device */
164static int ic_dev_mtu __initdata = 0;
165
163#ifdef IPCONFIG_DYNAMIC 166#ifdef IPCONFIG_DYNAMIC
164static DEFINE_SPINLOCK(ic_recv_lock); 167static DEFINE_SPINLOCK(ic_recv_lock);
165static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */ 168static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */
@@ -286,7 +289,7 @@ set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
286 sin->sin_port = port; 289 sin->sin_port = port;
287} 290}
288 291
289static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) 292static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
290{ 293{
291 int res; 294 int res;
292 295
@@ -297,6 +300,17 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
297 return res; 300 return res;
298} 301}
299 302
303static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
304{
305 int res;
306
307 mm_segment_t oldfs = get_fs();
308 set_fs(get_ds());
309 res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
310 set_fs(oldfs);
311 return res;
312}
313
300static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) 314static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
301{ 315{
302 int res; 316 int res;
@@ -321,20 +335,31 @@ static int __init ic_setup_if(void)
321 memset(&ir, 0, sizeof(ir)); 335 memset(&ir, 0, sizeof(ir));
322 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); 336 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
323 set_sockaddr(sin, ic_myaddr, 0); 337 set_sockaddr(sin, ic_myaddr, 0);
324 if ((err = ic_dev_ioctl(SIOCSIFADDR, &ir)) < 0) { 338 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
325 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err); 339 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err);
326 return -1; 340 return -1;
327 } 341 }
328 set_sockaddr(sin, ic_netmask, 0); 342 set_sockaddr(sin, ic_netmask, 0);
329 if ((err = ic_dev_ioctl(SIOCSIFNETMASK, &ir)) < 0) { 343 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
330 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err); 344 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err);
331 return -1; 345 return -1;
332 } 346 }
333 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0); 347 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
334 if ((err = ic_dev_ioctl(SIOCSIFBRDADDR, &ir)) < 0) { 348 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
335 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err); 349 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err);
336 return -1; 350 return -1;
337 } 351 }
352 /* Handle the case where we need non-standard MTU on the boot link (a network
353 * using jumbo frames, for instance). If we can't set the mtu, don't error
354 * out, we'll try to muddle along.
355 */
356 if (ic_dev_mtu != 0) {
357 strcpy(ir.ifr_name, ic_dev->name);
358 ir.ifr_mtu = ic_dev_mtu;
359 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
360 printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n",
361 ic_dev_mtu, err);
362 }
338 return 0; 363 return 0;
339} 364}
340 365
@@ -623,6 +648,7 @@ ic_dhcp_init_options(u8 *options)
623 12, /* Host name */ 648 12, /* Host name */
624 15, /* Domain name */ 649 15, /* Domain name */
625 17, /* Boot path */ 650 17, /* Boot path */
651 26, /* MTU */
626 40, /* NIS domain name */ 652 40, /* NIS domain name */
627 }; 653 };
628 654
@@ -798,6 +824,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
798{ 824{
799 u8 servers; 825 u8 servers;
800 int i; 826 int i;
827 u16 mtu;
801 828
802#ifdef IPCONFIG_DEBUG 829#ifdef IPCONFIG_DEBUG
803 u8 *c; 830 u8 *c;
@@ -837,6 +864,10 @@ static void __init ic_do_bootp_ext(u8 *ext)
837 if (!root_server_path[0]) 864 if (!root_server_path[0])
838 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path)); 865 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path));
839 break; 866 break;
867 case 26: /* Interface MTU */
868 memcpy(&mtu, ext+1, sizeof(mtu));
869 ic_dev_mtu = ntohs(mtu);
870 break;
840 case 40: /* NIS Domain name (_not_ DNS) */ 871 case 40: /* NIS Domain name (_not_ DNS) */
841 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN); 872 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN);
842 break; 873 break;
@@ -1403,6 +1434,8 @@ static int __init ip_auto_config(void)
1403 printk(",\n bootserver=%pI4", &ic_servaddr); 1434 printk(",\n bootserver=%pI4", &ic_servaddr);
1404 printk(", rootserver=%pI4", &root_server_addr); 1435 printk(", rootserver=%pI4", &root_server_addr);
1405 printk(", rootpath=%s", root_server_path); 1436 printk(", rootpath=%s", root_server_path);
1437 if (ic_dev_mtu)
1438 printk(", mtu=%d", ic_dev_mtu);
1406 printk("\n"); 1439 printk("\n");
1407#endif /* !SILENT */ 1440#endif /* !SILENT */
1408 1441
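The ipconfig change above requests DHCP/BOOTP option 26 and applies it via SIOCSIFMTU. Options arrive as tag/length/value triples with the MTU as a 16-bit big-endian quantity, which is why ic_do_bootp_ext() does a memcpy() into a u16 followed by ntohs(). A standalone sketch of that decode (the sample bytes encode an MTU of 9000):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* tag 26, length 2, value htons(9000) = 0x23 0x28 */
        const uint8_t opt[] = { 26, 2, 0x23, 0x28 };
        uint16_t mtu;

        if (opt[0] == 26 && opt[1] == sizeof(mtu)) {
            memcpy(&mtu, opt + 2, sizeof(mtu)); /* value may be unaligned */
            printf("boot device mtu = %u\n", ntohs(mtu));
        }
        return 0;
    }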
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9054139795af..93e2b787da20 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -370,8 +370,7 @@ static int ipip_rcv(struct sk_buff *skb)
370 tunnel->dev->stats.rx_packets++; 370 tunnel->dev->stats.rx_packets++;
371 tunnel->dev->stats.rx_bytes += skb->len; 371 tunnel->dev->stats.rx_bytes += skb->len;
372 skb->dev = tunnel->dev; 372 skb->dev = tunnel->dev;
373 dst_release(skb->dst); 373 skb_dst_drop(skb);
374 skb->dst = NULL;
375 nf_reset(skb); 374 nf_reset(skb);
376 ipip_ecn_decapsulate(iph, skb); 375 ipip_ecn_decapsulate(iph, skb);
377 netif_rx(skb); 376 netif_rx(skb);
@@ -416,7 +415,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
416 415
417 if (!dst) { 416 if (!dst) {
418 /* NBMA tunnel */ 417 /* NBMA tunnel */
419 if ((rt = skb->rtable) == NULL) { 418 if ((rt = skb_rtable(skb)) == NULL) {
420 stats->tx_fifo_errors++; 419 stats->tx_fifo_errors++;
421 goto tx_error; 420 goto tx_error;
422 } 421 }
@@ -447,15 +446,15 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
447 if (tiph->frag_off) 446 if (tiph->frag_off)
448 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 447 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
449 else 448 else
450 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 449 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
451 450
452 if (mtu < 68) { 451 if (mtu < 68) {
453 stats->collisions++; 452 stats->collisions++;
454 ip_rt_put(rt); 453 ip_rt_put(rt);
455 goto tx_error; 454 goto tx_error;
456 } 455 }
457 if (skb->dst) 456 if (skb_dst(skb))
458 skb->dst->ops->update_pmtu(skb->dst, mtu); 457 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
459 458
460 df |= (old_iph->frag_off&htons(IP_DF)); 459 df |= (old_iph->frag_off&htons(IP_DF));
461 460
@@ -502,8 +501,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
502 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 501 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
503 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 502 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
504 IPSKB_REROUTED); 503 IPSKB_REROUTED);
505 dst_release(skb->dst); 504 skb_dst_drop(skb);
506 skb->dst = &rt->u.dst; 505 skb_dst_set(skb, &rt->u.dst);
507 506
508 /* 507 /*
509 * Push down and install the IPIP header. 508 * Push down and install the IPIP header.
@@ -713,6 +712,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
713 dev->iflink = 0; 712 dev->iflink = 0;
714 dev->addr_len = 4; 713 dev->addr_len = 4;
715 dev->features |= NETIF_F_NETNS_LOCAL; 714 dev->features |= NETIF_F_NETNS_LOCAL;
715 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
716} 716}
717 717
718static void ipip_tunnel_init(struct net_device *dev) 718static void ipip_tunnel_init(struct net_device *dev)
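The ipip hunks are part of a tree-wide conversion from poking skb->dst directly to going through accessors, which is why the release-and-clear pairs collapse into skb_dst_drop(). A self-contained model of the accessor trio, with simplified stand-in types rather than the real struct dst_entry refcounting:

    #include <stddef.h>

    struct dst_entry { int refcnt; };
    struct sk_buff { struct dst_entry *dst; }; /* stand-in for the real layout */

    static void dst_release(struct dst_entry *dst)
    {
        if (dst)
            dst->refcnt--;
    }

    static struct dst_entry *skb_dst(const struct sk_buff *skb)
    {
        return skb->dst;
    }

    static void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
    {
        skb->dst = dst;
    }

    /* Replaces the old open-coded pair: dst_release(skb->dst); skb->dst = NULL; */
    static void skb_dst_drop(struct sk_buff *skb)
    {
        dst_release(skb->dst);
        skb->dst = NULL;
    }

    int main(void)
    {
        struct dst_entry route = { .refcnt = 1 };
        struct sk_buff skb = { 0 };

        skb_dst_set(&skb, &route);
        skb_dst_drop(&skb);
        return skb_dst(&skb) == NULL && route.refcnt == 0 ? 0 : 1;
    }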
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 13e9dd3012b3..9a8da5ed92b7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -226,9 +226,10 @@ static void reg_vif_setup(struct net_device *dev)
226 dev->flags = IFF_NOARP; 226 dev->flags = IFF_NOARP;
227 dev->netdev_ops = &reg_vif_netdev_ops, 227 dev->netdev_ops = &reg_vif_netdev_ops,
228 dev->destructor = free_netdev; 228 dev->destructor = free_netdev;
229 dev->features |= NETIF_F_NETNS_LOCAL;
229} 230}
230 231
231static struct net_device *ipmr_reg_vif(void) 232static struct net_device *ipmr_reg_vif(struct net *net)
232{ 233{
233 struct net_device *dev; 234 struct net_device *dev;
234 struct in_device *in_dev; 235 struct in_device *in_dev;
@@ -238,6 +239,8 @@ static struct net_device *ipmr_reg_vif(void)
238 if (dev == NULL) 239 if (dev == NULL)
239 return NULL; 240 return NULL;
240 241
242 dev_net_set(dev, net);
243
241 if (register_netdevice(dev)) { 244 if (register_netdevice(dev)) {
242 free_netdev(dev); 245 free_netdev(dev);
243 return NULL; 246 return NULL;
@@ -448,7 +451,7 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
448 */ 451 */
449 if (net->ipv4.mroute_reg_vif_num >= 0) 452 if (net->ipv4.mroute_reg_vif_num >= 0)
450 return -EADDRINUSE; 453 return -EADDRINUSE;
451 dev = ipmr_reg_vif(); 454 dev = ipmr_reg_vif(net);
452 if (!dev) 455 if (!dev)
453 return -ENOBUFS; 456 return -ENOBUFS;
454 err = dev_set_allmulti(dev, 1); 457 err = dev_set_allmulti(dev, 1);
@@ -651,7 +654,7 @@ static int ipmr_cache_report(struct net *net,
651 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ 654 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
652 msg = (struct igmpmsg *)skb_network_header(skb); 655 msg = (struct igmpmsg *)skb_network_header(skb);
653 msg->im_vif = vifi; 656 msg->im_vif = vifi;
654 skb->dst = dst_clone(pkt->dst); 657 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
655 658
656 /* 659 /*
657 * Add our header 660 * Add our header
@@ -1031,16 +1034,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1031 if (v != net->ipv4.mroute_do_pim) { 1034 if (v != net->ipv4.mroute_do_pim) {
1032 net->ipv4.mroute_do_pim = v; 1035 net->ipv4.mroute_do_pim = v;
1033 net->ipv4.mroute_do_assert = v; 1036 net->ipv4.mroute_do_assert = v;
1034#ifdef CONFIG_IP_PIMSM_V2
1035 if (net->ipv4.mroute_do_pim)
1036 ret = inet_add_protocol(&pim_protocol,
1037 IPPROTO_PIM);
1038 else
1039 ret = inet_del_protocol(&pim_protocol,
1040 IPPROTO_PIM);
1041 if (ret < 0)
1042 ret = -EAGAIN;
1043#endif
1044 } 1037 }
1045 rtnl_unlock(); 1038 rtnl_unlock();
1046 return ret; 1039 return ret;
@@ -1201,7 +1194,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1201 iph->protocol = IPPROTO_IPIP; 1194 iph->protocol = IPPROTO_IPIP;
1202 iph->ihl = 5; 1195 iph->ihl = 5;
1203 iph->tot_len = htons(skb->len); 1196 iph->tot_len = htons(skb->len);
1204 ip_select_ident(iph, skb->dst, NULL); 1197 ip_select_ident(iph, skb_dst(skb), NULL);
1205 ip_send_check(iph); 1198 ip_send_check(iph);
1206 1199
1207 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1200 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1212,7 +1205,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1212{ 1205{
1213 struct ip_options * opt = &(IPCB(skb)->opt); 1206 struct ip_options * opt = &(IPCB(skb)->opt);
1214 1207
1215 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1208 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
1216 1209
1217 if (unlikely(opt->optlen)) 1210 if (unlikely(opt->optlen))
1218 ip_forward_options(skb); 1211 ip_forward_options(skb);
@@ -1290,8 +1283,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1290 vif->pkt_out++; 1283 vif->pkt_out++;
1291 vif->bytes_out += skb->len; 1284 vif->bytes_out += skb->len;
1292 1285
1293 dst_release(skb->dst); 1286 skb_dst_drop(skb);
1294 skb->dst = &rt->u.dst; 1287 skb_dst_set(skb, &rt->u.dst);
1295 ip_decrease_ttl(ip_hdr(skb)); 1288 ip_decrease_ttl(ip_hdr(skb));
1296 1289
1297 /* FIXME: forward and output firewalls used to be called here. 1290 /* FIXME: forward and output firewalls used to be called here.
@@ -1354,7 +1347,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1354 if (net->ipv4.vif_table[vif].dev != skb->dev) { 1347 if (net->ipv4.vif_table[vif].dev != skb->dev) {
1355 int true_vifi; 1348 int true_vifi;
1356 1349
1357 if (skb->rtable->fl.iif == 0) { 1350 if (skb_rtable(skb)->fl.iif == 0) {
1358 /* It is our own packet, looped back. 1351 /* It is our own packet, looped back.
1359 Very complicated situation... 1352 Very complicated situation...
1360 1353
@@ -1430,7 +1423,7 @@ int ip_mr_input(struct sk_buff *skb)
1430{ 1423{
1431 struct mfc_cache *cache; 1424 struct mfc_cache *cache;
1432 struct net *net = dev_net(skb->dev); 1425 struct net *net = dev_net(skb->dev);
1433 int local = skb->rtable->rt_flags&RTCF_LOCAL; 1426 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1434 1427
1435 /* Packet is looped back after forward, it should not be 1428 /* Packet is looped back after forward, it should not be
1436 forwarded second time, but still can be delivered locally. 1429 forwarded second time, but still can be delivered locally.
@@ -1543,8 +1536,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
1543 skb->protocol = htons(ETH_P_IP); 1536 skb->protocol = htons(ETH_P_IP);
1544 skb->ip_summed = 0; 1537 skb->ip_summed = 0;
1545 skb->pkt_type = PACKET_HOST; 1538 skb->pkt_type = PACKET_HOST;
1546 dst_release(skb->dst); 1539 skb_dst_drop(skb);
1547 skb->dst = NULL;
1548 reg_dev->stats.rx_bytes += skb->len; 1540 reg_dev->stats.rx_bytes += skb->len;
1549 reg_dev->stats.rx_packets++; 1541 reg_dev->stats.rx_packets++;
1550 nf_reset(skb); 1542 nf_reset(skb);
@@ -1646,7 +1638,7 @@ int ipmr_get_route(struct net *net,
1646{ 1638{
1647 int err; 1639 int err;
1648 struct mfc_cache *cache; 1640 struct mfc_cache *cache;
1649 struct rtable *rt = skb->rtable; 1641 struct rtable *rt = skb_rtable(skb);
1650 1642
1651 read_lock(&mrt_lock); 1643 read_lock(&mrt_lock);
1652 cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); 1644 cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
@@ -1955,6 +1947,7 @@ static const struct file_operations ipmr_mfc_fops = {
1955#ifdef CONFIG_IP_PIMSM_V2 1947#ifdef CONFIG_IP_PIMSM_V2
1956static struct net_protocol pim_protocol = { 1948static struct net_protocol pim_protocol = {
1957 .handler = pim_rcv, 1949 .handler = pim_rcv,
1950 .netns_ok = 1,
1958}; 1951};
1959#endif 1952#endif
1960 1953
@@ -2041,8 +2034,19 @@ int __init ip_mr_init(void)
2041 err = register_netdevice_notifier(&ip_mr_notifier); 2034 err = register_netdevice_notifier(&ip_mr_notifier);
2042 if (err) 2035 if (err)
2043 goto reg_notif_fail; 2036 goto reg_notif_fail;
2037#ifdef CONFIG_IP_PIMSM_V2
2038 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2039 printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
2040 err = -EAGAIN;
2041 goto add_proto_fail;
2042 }
2043#endif
2044 return 0; 2044 return 0;
2045 2045
2046#ifdef CONFIG_IP_PIMSM_V2
2047add_proto_fail:
2048 unregister_netdevice_notifier(&ip_mr_notifier);
2049#endif
2046reg_notif_fail: 2050reg_notif_fail:
2047 del_timer(&ipmr_expire_timer); 2051 del_timer(&ipmr_expire_timer);
2048 unregister_pernet_subsys(&ipmr_net_ops); 2052 unregister_pernet_subsys(&ipmr_net_ops);
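With PIM now registered once at ip_mr_init() time instead of being toggled from setsockopt, the init path gains the usual kernel-style unwind ladder: each failure label releases exactly what succeeded before it, in reverse order. A tiny standalone illustration of the idiom (the step names here are made up):

    #include <stdio.h>

    static int register_notifier(void) { return 0; }
    static void unregister_notifier(void) { puts("notifier unregistered"); }
    static int add_protocol(void) { return -1; } /* pretend this fails */

    static int init(void)
    {
        int err;

        err = register_notifier();
        if (err)
            goto out;
        err = add_protocol();
        if (err)
            goto fail_proto; /* unwind only the completed steps */
        return 0;

    fail_proto:
        unregister_notifier();
    out:
        return err;
    }

    int main(void)
    {
        return init() ? 1 : 0;
    }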
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index fdf6811c31a2..1725dc0ef688 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
12/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 12/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
13int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) 13int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
14{ 14{
15 struct net *net = dev_net(skb->dst->dev); 15 struct net *net = dev_net(skb_dst(skb)->dev);
16 const struct iphdr *iph = ip_hdr(skb); 16 const struct iphdr *iph = ip_hdr(skb);
17 struct rtable *rt; 17 struct rtable *rt;
18 struct flowi fl = {}; 18 struct flowi fl = {};
@@ -41,8 +41,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
41 return -1; 41 return -1;
42 42
43 /* Drop old route. */ 43 /* Drop old route. */
44 dst_release(skb->dst); 44 skb_dst_drop(skb);
45 skb->dst = &rt->u.dst; 45 skb_dst_set(skb, &rt->u.dst);
46 } else { 46 } else {
47 /* non-local src, find valid iif to satisfy 47 /* non-local src, find valid iif to satisfy
48 * rp-filter when calling ip_route_input. */ 48 * rp-filter when calling ip_route_input. */
@@ -50,7 +50,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
50 if (ip_route_output_key(net, &rt, &fl) != 0) 50 if (ip_route_output_key(net, &rt, &fl) != 0)
51 return -1; 51 return -1;
52 52
53 odst = skb->dst; 53 odst = skb_dst(skb);
54 if (ip_route_input(skb, iph->daddr, iph->saddr, 54 if (ip_route_input(skb, iph->daddr, iph->saddr,
55 RT_TOS(iph->tos), rt->u.dst.dev) != 0) { 55 RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
56 dst_release(&rt->u.dst); 56 dst_release(&rt->u.dst);
@@ -60,18 +60,22 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
60 dst_release(odst); 60 dst_release(odst);
61 } 61 }
62 62
63 if (skb->dst->error) 63 if (skb_dst(skb)->error)
64 return -1; 64 return -1;
65 65
66#ifdef CONFIG_XFRM 66#ifdef CONFIG_XFRM
67 if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && 67 if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
68 xfrm_decode_session(skb, &fl, AF_INET) == 0) 68 xfrm_decode_session(skb, &fl, AF_INET) == 0) {
69 if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0)) 69 struct dst_entry *dst = skb_dst(skb);
70 skb_dst_set(skb, NULL);
71 if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
70 return -1; 72 return -1;
73 skb_dst_set(skb, dst);
74 }
71#endif 75#endif
72 76
73 /* Change in oif may mean change in hh_len. */ 77 /* Change in oif may mean change in hh_len. */
74 hh_len = skb->dst->dev->hard_header_len; 78 hh_len = skb_dst(skb)->dev->hard_header_len;
75 if (skb_headroom(skb) < hh_len && 79 if (skb_headroom(skb) < hh_len &&
76 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) 80 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
77 return -1; 81 return -1;
@@ -92,7 +96,7 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
92 if (xfrm_decode_session(skb, &fl, AF_INET) < 0) 96 if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
93 return -1; 97 return -1;
94 98
95 dst = skb->dst; 99 dst = skb_dst(skb);
96 if (dst->xfrm) 100 if (dst->xfrm)
97 dst = ((struct xfrm_dst *)dst)->route; 101 dst = ((struct xfrm_dst *)dst)->route;
98 dst_hold(dst); 102 dst_hold(dst);
@@ -100,11 +104,11 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
100 if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) 104 if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0)
101 return -1; 105 return -1;
102 106
103 dst_release(skb->dst); 107 skb_dst_drop(skb);
104 skb->dst = dst; 108 skb_dst_set(skb, dst);
105 109
106 /* Change in oif may mean change in hh_len. */ 110 /* Change in oif may mean change in hh_len. */
107 hh_len = skb->dst->dev->hard_header_len; 111 hh_len = skb_dst(skb)->dev->hard_header_len;
108 if (skb_headroom(skb) < hh_len && 112 if (skb_headroom(skb) < hh_len &&
109 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) 113 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
110 return -1; 114 return -1;
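The rewritten XFRM block in ip_route_me_harder() exists because xfrm_lookup() takes the dst by reference and may consume it and hand back a different one; with the accessor API the entry must be detached into a local, looked up, then reattached. A standalone model of that ownership dance, where lookup() is a made-up stand-in for xfrm_lookup():

    #include <stdlib.h>

    struct dst_entry { int id; };
    struct sk_buff { struct dst_entry *dst; };

    /* Stand-in for xfrm_lookup(): consumes *dst, may store a replacement. */
    static int lookup(struct dst_entry **dst)
    {
        free(*dst);
        *dst = malloc(sizeof(**dst));
        return *dst ? 0 : -1;
    }

    int main(void)
    {
        struct sk_buff skb = { .dst = malloc(sizeof(struct dst_entry)) };
        struct dst_entry *dst = skb.dst;

        skb.dst = NULL;      /* detach before the call... */
        if (lookup(&dst) != 0)
            return 1;
        skb.dst = dst;       /* ...reattach whatever came back */
        free(skb.dst);
        return 0;
    }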
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 831fe1879dc0..7505dff4ffdf 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -231,6 +231,12 @@ static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
231 return (struct arpt_entry *)(base + offset); 231 return (struct arpt_entry *)(base + offset);
232} 232}
233 233
234static inline __pure
235struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
236{
237 return (void *)entry + entry->next_offset;
238}
239
234unsigned int arpt_do_table(struct sk_buff *skb, 240unsigned int arpt_do_table(struct sk_buff *skb,
235 unsigned int hook, 241 unsigned int hook,
236 const struct net_device *in, 242 const struct net_device *in,
@@ -267,67 +273,64 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
     arp = arp_hdr(skb);
     do {
-        if (arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
-            struct arpt_entry_target *t;
-            int hdr_len;
-
-            hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
-                (2 * skb->dev->addr_len);
-
-            ADD_COUNTER(e->counters, hdr_len, 1);
-
-            t = arpt_get_target(e);
-
-            /* Standard target? */
-            if (!t->u.kernel.target->target) {
-                int v;
-
-                v = ((struct arpt_standard_target *)t)->verdict;
-                if (v < 0) {
-                    /* Pop from stack? */
-                    if (v != ARPT_RETURN) {
-                        verdict = (unsigned)(-v) - 1;
-                        break;
-                    }
-                    e = back;
-                    back = get_entry(table_base,
-                             back->comefrom);
-                    continue;
-                }
-                if (table_base + v
-                    != (void *)e + e->next_offset) {
-                    /* Save old back ptr in next entry */
-                    struct arpt_entry *next
-                        = (void *)e + e->next_offset;
-                    next->comefrom =
-                        (void *)back - table_base;
-
-                    /* set back pointer to next entry */
-                    back = next;
-                }
-
-                e = get_entry(table_base, v);
-            } else {
-                /* Targets which reenter must return
-                 * abs. verdicts
-                 */
-                tgpar.target = t->u.kernel.target;
-                tgpar.targinfo = t->data;
-                verdict = t->u.kernel.target->target(skb,
-                                     &tgpar);
-
-                /* Target might have changed stuff. */
-                arp = arp_hdr(skb);
-
-                if (verdict == ARPT_CONTINUE)
-                    e = (void *)e + e->next_offset;
-                else
-                    /* Verdict */
-                    break;
-            }
-        } else {
-            e = (void *)e + e->next_offset;
-        }
+        struct arpt_entry_target *t;
+        int hdr_len;
+
+        if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
+            e = arpt_next_entry(e);
+            continue;
+        }
+
+        hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
+            (2 * skb->dev->addr_len);
+        ADD_COUNTER(e->counters, hdr_len, 1);
+
+        t = arpt_get_target(e);
+
+        /* Standard target? */
+        if (!t->u.kernel.target->target) {
+            int v;
+
+            v = ((struct arpt_standard_target *)t)->verdict;
+            if (v < 0) {
+                /* Pop from stack? */
+                if (v != ARPT_RETURN) {
+                    verdict = (unsigned)(-v) - 1;
+                    break;
+                }
+                e = back;
+                back = get_entry(table_base, back->comefrom);
+                continue;
+            }
+            if (table_base + v
+                != arpt_next_entry(e)) {
+                /* Save old back ptr in next entry */
+                struct arpt_entry *next = arpt_next_entry(e);
+                next->comefrom = (void *)back - table_base;
+
+                /* set back pointer to next entry */
+                back = next;
+            }
+
+            e = get_entry(table_base, v);
+            continue;
+        }
+
+        /* Targets which reenter must return
+         * abs. verdicts
+         */
+        tgpar.target = t->u.kernel.target;
+        tgpar.targinfo = t->data;
+        verdict = t->u.kernel.target->target(skb, &tgpar);
+
+        /* Target might have changed stuff. */
+        arp = arp_hdr(skb);
+
+        if (verdict == ARPT_CONTINUE)
+            e = arpt_next_entry(e);
+        else
+            /* Verdict */
+            break;
     } while (!hotdrop);
     xt_info_rdunlock_bh();
 
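arpt_next_entry() gives a name to pointer arithmetic the old loop body spelled out repeatedly: rules are variable-length records, each storing the byte distance to its successor in next_offset. A standalone model of walking such a table:

    #include <stdio.h>

    struct entry {
        unsigned short next_offset; /* bytes from this record to the next */
        /* variable-size match/target data follows in the real tables */
    };

    static const struct entry *next_entry(const struct entry *e)
    {
        return (const struct entry *)((const char *)e + e->next_offset);
    }

    int main(void)
    {
        union { unsigned char bytes[96]; struct entry e; } table = { { 0 } };
        struct entry *first = (struct entry *)table.bytes;

        first->next_offset = 40; /* pretend rule 1 occupies 40 bytes */

        printf("second entry at offset %td\n",
               (const char *)next_entry(first) - (char *)table.bytes);
        return 0;
    }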
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5f22c91c6e15..c156db215987 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -596,7 +596,7 @@ static int __init ip_queue_init(void)
596#ifdef CONFIG_SYSCTL 596#ifdef CONFIG_SYSCTL
597 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table); 597 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
598#endif 598#endif
599 status = nf_register_queue_handler(PF_INET, &nfqh); 599 status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
600 if (status < 0) { 600 if (status < 0) {
601 printk(KERN_ERR "ip_queue: failed to register queue handler\n"); 601 printk(KERN_ERR "ip_queue: failed to register queue handler\n");
602 goto cleanup_sysctl; 602 goto cleanup_sysctl;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2ec8d7290c40..fdefae6b5dfc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -238,8 +238,8 @@ static struct nf_loginfo trace_loginfo = {
238/* Mildly perf critical (only if packet tracing is on) */ 238/* Mildly perf critical (only if packet tracing is on) */
239static inline int 239static inline int
240get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, 240get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
241 char *hookname, char **chainname, 241 const char *hookname, const char **chainname,
242 char **comment, unsigned int *rulenum) 242 const char **comment, unsigned int *rulenum)
243{ 243{
244 struct ipt_standard_target *t = (void *)ipt_get_target(s); 244 struct ipt_standard_target *t = (void *)ipt_get_target(s);
245 245
@@ -257,8 +257,8 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
257 && unconditional(&s->ip)) { 257 && unconditional(&s->ip)) {
258 /* Tail of chains: STANDARD target (return/policy) */ 258 /* Tail of chains: STANDARD target (return/policy) */
259 *comment = *chainname == hookname 259 *comment = *chainname == hookname
260 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY] 260 ? comments[NF_IP_TRACE_COMMENT_POLICY]
261 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN]; 261 : comments[NF_IP_TRACE_COMMENT_RETURN];
262 } 262 }
263 return 1; 263 return 1;
264 } else 264 } else
@@ -277,14 +277,14 @@ static void trace_packet(struct sk_buff *skb,
277{ 277{
278 void *table_base; 278 void *table_base;
279 const struct ipt_entry *root; 279 const struct ipt_entry *root;
280 char *hookname, *chainname, *comment; 280 const char *hookname, *chainname, *comment;
281 unsigned int rulenum = 0; 281 unsigned int rulenum = 0;
282 282
283 table_base = (void *)private->entries[smp_processor_id()]; 283 table_base = private->entries[smp_processor_id()];
284 root = get_entry(table_base, private->hook_entry[hook]); 284 root = get_entry(table_base, private->hook_entry[hook]);
285 285
286 hookname = chainname = (char *)hooknames[hook]; 286 hookname = chainname = hooknames[hook];
287 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE]; 287 comment = comments[NF_IP_TRACE_COMMENT_RULE];
288 288
289 IPT_ENTRY_ITERATE(root, 289 IPT_ENTRY_ITERATE(root,
290 private->size - private->hook_entry[hook], 290 private->size - private->hook_entry[hook],
@@ -297,6 +297,12 @@ static void trace_packet(struct sk_buff *skb,
297} 297}
298#endif 298#endif
299 299
300static inline __pure
301struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
302{
303 return (void *)entry + entry->next_offset;
304}
305
300/* Returns one of the generic firewall policies, like NF_ACCEPT. */ 306/* Returns one of the generic firewall policies, like NF_ACCEPT. */
301unsigned int 307unsigned int
302ipt_do_table(struct sk_buff *skb, 308ipt_do_table(struct sk_buff *skb,
@@ -305,6 +311,8 @@ ipt_do_table(struct sk_buff *skb,
305 const struct net_device *out, 311 const struct net_device *out,
306 struct xt_table *table) 312 struct xt_table *table)
307{ 313{
314#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom
315
308 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 316 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
309 const struct iphdr *ip; 317 const struct iphdr *ip;
310 u_int16_t datalen; 318 u_int16_t datalen;
@@ -335,7 +343,7 @@ ipt_do_table(struct sk_buff *skb,
335 mtpar.in = tgpar.in = in; 343 mtpar.in = tgpar.in = in;
336 mtpar.out = tgpar.out = out; 344 mtpar.out = tgpar.out = out;
337 mtpar.family = tgpar.family = NFPROTO_IPV4; 345 mtpar.family = tgpar.family = NFPROTO_IPV4;
338 tgpar.hooknum = hook; 346 mtpar.hooknum = tgpar.hooknum = hook;
339 347
340 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 348 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
341 xt_info_rdlock_bh(); 349 xt_info_rdlock_bh();
@@ -348,92 +356,84 @@ ipt_do_table(struct sk_buff *skb,
     back = get_entry(table_base, private->underflow[hook]);
 
     do {
-        IP_NF_ASSERT(e);
-        IP_NF_ASSERT(back);
-        if (ip_packet_match(ip, indev, outdev,
-            &e->ip, mtpar.fragoff)) {
-            struct ipt_entry_target *t;
-
-            if (IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
-                goto no_match;
-
-            ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
-
-            t = ipt_get_target(e);
-            IP_NF_ASSERT(t->u.kernel.target);
-
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
-            /* The packet is traced: log it */
-            if (unlikely(skb->nf_trace))
-                trace_packet(skb, hook, in, out,
-                         table->name, private, e);
-#endif
-            /* Standard target? */
-            if (!t->u.kernel.target->target) {
-                int v;
-
-                v = ((struct ipt_standard_target *)t)->verdict;
-                if (v < 0) {
-                    /* Pop from stack? */
-                    if (v != IPT_RETURN) {
-                        verdict = (unsigned)(-v) - 1;
-                        break;
-                    }
-                    e = back;
-                    back = get_entry(table_base,
-                             back->comefrom);
-                    continue;
-                }
-                if (table_base + v != (void *)e + e->next_offset
-                    && !(e->ip.flags & IPT_F_GOTO)) {
-                    /* Save old back ptr in next entry */
-                    struct ipt_entry *next
-                        = (void *)e + e->next_offset;
-                    next->comefrom
-                        = (void *)back - table_base;
-                    /* set back pointer to next entry */
-                    back = next;
-                }
-
-                e = get_entry(table_base, v);
-            } else {
-                /* Targets which reenter must return
-                   abs. verdicts */
-                tgpar.target = t->u.kernel.target;
-                tgpar.targinfo = t->data;
-#ifdef CONFIG_NETFILTER_DEBUG
-                ((struct ipt_entry *)table_base)->comefrom
-                    = 0xeeeeeeec;
-#endif
-                verdict = t->u.kernel.target->target(skb,
-                                     &tgpar);
-#ifdef CONFIG_NETFILTER_DEBUG
-                if (((struct ipt_entry *)table_base)->comefrom
-                    != 0xeeeeeeec
-                    && verdict == IPT_CONTINUE) {
-                    printk("Target %s reentered!\n",
-                           t->u.kernel.target->name);
-                    verdict = NF_DROP;
-                }
-                ((struct ipt_entry *)table_base)->comefrom
-                    = 0x57acc001;
-#endif
-                /* Target might have changed stuff. */
-                ip = ip_hdr(skb);
-                datalen = skb->len - ip->ihl * 4;
-
-                if (verdict == IPT_CONTINUE)
-                    e = (void *)e + e->next_offset;
-                else
-                    /* Verdict */
-                    break;
-            }
-        } else {
-
- no_match:
-            e = (void *)e + e->next_offset;
-        }
+        struct ipt_entry_target *t;
+
+        IP_NF_ASSERT(e);
+        IP_NF_ASSERT(back);
+        if (!ip_packet_match(ip, indev, outdev,
+            &e->ip, mtpar.fragoff) ||
+            IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
+            e = ipt_next_entry(e);
+            continue;
+        }
+
+        ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
+
+        t = ipt_get_target(e);
+        IP_NF_ASSERT(t->u.kernel.target);
+
+#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
+    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+        /* The packet is traced: log it */
+        if (unlikely(skb->nf_trace))
+            trace_packet(skb, hook, in, out,
+                     table->name, private, e);
+#endif
+        /* Standard target? */
+        if (!t->u.kernel.target->target) {
+            int v;
+
+            v = ((struct ipt_standard_target *)t)->verdict;
+            if (v < 0) {
+                /* Pop from stack? */
+                if (v != IPT_RETURN) {
+                    verdict = (unsigned)(-v) - 1;
+                    break;
+                }
+                e = back;
+                back = get_entry(table_base, back->comefrom);
+                continue;
+            }
+            if (table_base + v != ipt_next_entry(e)
+                && !(e->ip.flags & IPT_F_GOTO)) {
+                /* Save old back ptr in next entry */
+                struct ipt_entry *next = ipt_next_entry(e);
+                next->comefrom = (void *)back - table_base;
+                /* set back pointer to next entry */
+                back = next;
+            }
+
+            e = get_entry(table_base, v);
+            continue;
+        }
+
+        /* Targets which reenter must return
+           abs. verdicts */
+        tgpar.target = t->u.kernel.target;
+        tgpar.targinfo = t->data;
+
+#ifdef CONFIG_NETFILTER_DEBUG
+        tb_comefrom = 0xeeeeeeec;
+#endif
+        verdict = t->u.kernel.target->target(skb, &tgpar);
+#ifdef CONFIG_NETFILTER_DEBUG
+        if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
+            printk("Target %s reentered!\n",
+                   t->u.kernel.target->name);
+            verdict = NF_DROP;
+        }
+        tb_comefrom = 0x57acc001;
+#endif
+        /* Target might have changed stuff. */
+        ip = ip_hdr(skb);
+        datalen = skb->len - ip->ihl * 4;
+
+        if (verdict == IPT_CONTINUE)
+            e = ipt_next_entry(e);
+        else
+            /* Verdict */
+            break;
     } while (!hotdrop);
     xt_info_rdunlock_bh();
439 439
@@ -444,6 +444,8 @@ ipt_do_table(struct sk_buff *skb,
444 return NF_DROP; 444 return NF_DROP;
445 else return verdict; 445 else return verdict;
446#endif 446#endif
447
448#undef tb_comefrom
447} 449}
448 450
449/* Figures out from what hook each rule can be called: returns 0 if 451/* Figures out from what hook each rule can be called: returns 0 if
@@ -2158,7 +2160,7 @@ static bool icmp_checkentry(const struct xt_mtchk_param *par)
2158static struct xt_target ipt_standard_target __read_mostly = { 2160static struct xt_target ipt_standard_target __read_mostly = {
2159 .name = IPT_STANDARD_TARGET, 2161 .name = IPT_STANDARD_TARGET,
2160 .targetsize = sizeof(int), 2162 .targetsize = sizeof(int),
2161 .family = AF_INET, 2163 .family = NFPROTO_IPV4,
2162#ifdef CONFIG_COMPAT 2164#ifdef CONFIG_COMPAT
2163 .compatsize = sizeof(compat_int_t), 2165 .compatsize = sizeof(compat_int_t),
2164 .compat_from_user = compat_standard_from_user, 2166 .compat_from_user = compat_standard_from_user,
@@ -2170,7 +2172,7 @@ static struct xt_target ipt_error_target __read_mostly = {
2170 .name = IPT_ERROR_TARGET, 2172 .name = IPT_ERROR_TARGET,
2171 .target = ipt_error, 2173 .target = ipt_error,
2172 .targetsize = IPT_FUNCTION_MAXNAMELEN, 2174 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2173 .family = AF_INET, 2175 .family = NFPROTO_IPV4,
2174}; 2176};
2175 2177
2176static struct nf_sockopt_ops ipt_sockopts = { 2178static struct nf_sockopt_ops ipt_sockopts = {
@@ -2196,17 +2198,17 @@ static struct xt_match icmp_matchstruct __read_mostly = {
2196 .matchsize = sizeof(struct ipt_icmp), 2198 .matchsize = sizeof(struct ipt_icmp),
2197 .checkentry = icmp_checkentry, 2199 .checkentry = icmp_checkentry,
2198 .proto = IPPROTO_ICMP, 2200 .proto = IPPROTO_ICMP,
2199 .family = AF_INET, 2201 .family = NFPROTO_IPV4,
2200}; 2202};
2201 2203
2202static int __net_init ip_tables_net_init(struct net *net) 2204static int __net_init ip_tables_net_init(struct net *net)
2203{ 2205{
2204 return xt_proto_init(net, AF_INET); 2206 return xt_proto_init(net, NFPROTO_IPV4);
2205} 2207}
2206 2208
2207static void __net_exit ip_tables_net_exit(struct net *net) 2209static void __net_exit ip_tables_net_exit(struct net *net)
2208{ 2210{
2209 xt_proto_fini(net, AF_INET); 2211 xt_proto_fini(net, NFPROTO_IPV4);
2210} 2212}
2211 2213
2212static struct pernet_operations ip_tables_net_ops = { 2214static struct pernet_operations ip_tables_net_ops = {
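The tb_comefrom macro only abbreviates the noisy cast; the debug logic it serves is a sentinel check: write 0xeeeeeeec into the table's scratch word before dispatching a target, and if the word changed while the verdict still says "continue", the target must have re-entered the evaluator. A toy standalone version of that pattern:

    #include <stdio.h>

    #define SENTINEL 0xeeeeeeecu

    static unsigned int comefrom; /* models the per-table scratch word */

    static unsigned int well_behaved_target(void)
    {
        return 0; /* 0 plays the role of IPT_CONTINUE here */
    }

    int main(void)
    {
        unsigned int verdict;

        comefrom = SENTINEL;      /* plant the sentinel */
        verdict = well_behaved_target();
        if (comefrom != SENTINEL && verdict == 0)
            puts("target reentered!"); /* never triggers here */
        comefrom = 0x57acc001;    /* restore the "static" marker */
        return 0;
    }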
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index f389f60cb105..dada0863946d 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -27,9 +27,6 @@ MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
28MODULE_DESCRIPTION("Xtables: automatic-address SNAT"); 28MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
29 29
30/* Lock protects masq region inside conntrack */
31static DEFINE_RWLOCK(masq_lock);
32
33/* FIXME: Multiple targets. --RR */ 30/* FIXME: Multiple targets. --RR */
34static bool masquerade_tg_check(const struct xt_tgchk_param *par) 31static bool masquerade_tg_check(const struct xt_tgchk_param *par)
35{ 32{
@@ -72,16 +69,14 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
72 return NF_ACCEPT; 69 return NF_ACCEPT;
73 70
74 mr = par->targinfo; 71 mr = par->targinfo;
75 rt = skb->rtable; 72 rt = skb_rtable(skb);
76 newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); 73 newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
77 if (!newsrc) { 74 if (!newsrc) {
78 printk("MASQUERADE: %s ate my IP address\n", par->out->name); 75 printk("MASQUERADE: %s ate my IP address\n", par->out->name);
79 return NF_DROP; 76 return NF_DROP;
80 } 77 }
81 78
82 write_lock_bh(&masq_lock);
83 nat->masq_index = par->out->ifindex; 79 nat->masq_index = par->out->ifindex;
84 write_unlock_bh(&masq_lock);
85 80
86 /* Transfer from original range. */ 81 /* Transfer from original range. */
87 newrange = ((struct nf_nat_range) 82 newrange = ((struct nf_nat_range)
@@ -97,16 +92,11 @@ static int
97device_cmp(struct nf_conn *i, void *ifindex) 92device_cmp(struct nf_conn *i, void *ifindex)
98{ 93{
99 const struct nf_conn_nat *nat = nfct_nat(i); 94 const struct nf_conn_nat *nat = nfct_nat(i);
100 int ret;
101 95
102 if (!nat) 96 if (!nat)
103 return 0; 97 return 0;
104 98
105 read_lock_bh(&masq_lock); 99 return nat->masq_index == (int)(long)ifindex;
106 ret = (nat->masq_index == (int)(long)ifindex);
107 read_unlock_bh(&masq_lock);
108
109 return ret;
110} 100}
111 101
112static int masq_device_event(struct notifier_block *this, 102static int masq_device_event(struct notifier_block *this,
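Dropping masq_lock works because masq_index is a single word written only from the packet path while the conntrack is being set up and read later by the cleanup walker; a rwlock around an int assignment and an int compare bought nothing. A standalone model of the comparison the walker now runs per conntrack, mirroring device_cmp() above:

    #include <stdio.h>

    struct conn {
        int masq_index; /* ifindex recorded by the MASQUERADE target */
        struct conn *next;
    };

    static int device_cmp(const struct conn *c, long ifindex)
    {
        return c->masq_index == (int)ifindex; /* word-sized read, no lock */
    }

    int main(void)
    {
        struct conn b = { .masq_index = 2 };
        struct conn a = { .masq_index = 3, .next = &b };

        for (const struct conn *c = &a; c; c = c->next)
            if (device_cmp(c, 3))
                puts("would tear down conntrack bound to ifindex 3");
        return 0;
    }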
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 0b4b6e0ff2b9..c93ae44bff2a 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -108,17 +108,16 @@ static void send_reset(struct sk_buff *oldskb, int hook)
108 addr_type = RTN_LOCAL; 108 addr_type = RTN_LOCAL;
109 109
110 /* ip_route_me_harder expects skb->dst to be set */ 110 /* ip_route_me_harder expects skb->dst to be set */
111 dst_hold(oldskb->dst); 111 skb_dst_set(nskb, dst_clone(skb_dst(oldskb)));
112 nskb->dst = oldskb->dst;
113 112
114 if (ip_route_me_harder(nskb, addr_type)) 113 if (ip_route_me_harder(nskb, addr_type))
115 goto free_nskb; 114 goto free_nskb;
116 115
117 niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); 116 niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
118 nskb->ip_summed = CHECKSUM_NONE; 117 nskb->ip_summed = CHECKSUM_NONE;
119 118
120 /* "Never happens" */ 119 /* "Never happens" */
121 if (nskb->len > dst_mtu(nskb->dst)) 120 if (nskb->len > dst_mtu(skb_dst(nskb)))
122 goto free_nskb; 121 goto free_nskb;
123 122
124 nf_ct_attach(nskb, oldskb); 123 nf_ct_attach(nskb, oldskb);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 23b2c2ee869a..d71ba7677344 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -82,18 +82,10 @@ static int icmp_packet(struct nf_conn *ct,
82 u_int8_t pf, 82 u_int8_t pf,
83 unsigned int hooknum) 83 unsigned int hooknum)
84{ 84{
85 /* Try to delete connection immediately after all replies: 85 /* Do not immediately delete the connection after the first
86 won't actually vanish as we still have skb, and del_timer 86 successful reply to avoid excessive conntrackd traffic
87 means this will only run once even if count hits zero twice 87 and also to handle correctly ICMP echo reply duplicates. */
88 (theoretically possible with SMP) */ 88 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
89 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
90 if (atomic_dec_and_test(&ct->proto.icmp.count))
91 nf_ct_kill_acct(ct, ctinfo, skb);
92 } else {
93 atomic_inc(&ct->proto.icmp.count);
94 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, ct);
95 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
96 }
97 89
98 return NF_ACCEPT; 90 return NF_ACCEPT;
99} 91}
@@ -117,7 +109,6 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
117 nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); 109 nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
118 return false; 110 return false;
119 } 111 }
120 atomic_set(&ct->proto.icmp.count, 0);
121 return true; 112 return true;
122} 113}
123 114
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index cf7a42bf9820..155c008626c8 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -140,7 +140,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
140 const char *rep_buffer, 140 const char *rep_buffer,
141 unsigned int rep_len) 141 unsigned int rep_len)
142{ 142{
143 struct rtable *rt = skb->rtable; 143 struct rtable *rt = skb_rtable(skb);
144 struct iphdr *iph; 144 struct iphdr *iph;
145 struct tcphdr *tcph; 145 struct tcphdr *tcph;
146 int oldlen, datalen; 146 int oldlen, datalen;
@@ -218,7 +218,7 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
218 const char *rep_buffer, 218 const char *rep_buffer,
219 unsigned int rep_len) 219 unsigned int rep_len)
220{ 220{
221 struct rtable *rt = skb->rtable; 221 struct rtable *rt = skb_rtable(skb);
222 struct iphdr *iph; 222 struct iphdr *iph;
223 struct udphdr *udph; 223 struct udphdr *udph;
224 int datalen, oldlen; 224 int datalen, oldlen;
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 65e470bc6123..3fc598eeeb1a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -33,6 +33,7 @@ sctp_manip_pkt(struct sk_buff *skb,
33 enum nf_nat_manip_type maniptype) 33 enum nf_nat_manip_type maniptype)
34{ 34{
35 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 35 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
36 struct sk_buff *frag;
36 sctp_sctphdr_t *hdr; 37 sctp_sctphdr_t *hdr;
37 unsigned int hdroff = iphdroff + iph->ihl*4; 38 unsigned int hdroff = iphdroff + iph->ihl*4;
38 __be32 oldip, newip; 39 __be32 oldip, newip;
@@ -57,8 +58,8 @@ sctp_manip_pkt(struct sk_buff *skb,
57 } 58 }
58 59
59 crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); 60 crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
60 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) 61 skb_walk_frags(skb, frag)
61 crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), 62 crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
62 crc32); 63 crc32);
63 crc32 = sctp_end_cksum(crc32); 64 crc32 = sctp_end_cksum(crc32);
64 hdr->checksum = crc32; 65 hdr->checksum = crc32;
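skb_walk_frags() abstracts the frag_list walk that several call sites open-coded; using a separate cursor also stops the head skb pointer from being clobbered by the loop, which is what the old version here did. A standalone model, with the macro written the way the replaced loop suggests it expands:

    #include <stdio.h>

    struct sk_buff {
        int len;
        struct sk_buff *next;
        struct sk_buff *frag_list;
    };

    /* Assumed expansion, modelled on the open-coded loop it replaces. */
    #define skb_walk_frags(skb, iter) \
        for ((iter) = (skb)->frag_list; (iter); (iter) = (iter)->next)

    int main(void)
    {
        struct sk_buff f2 = { .len = 200 };
        struct sk_buff f1 = { .len = 100, .next = &f2 };
        struct sk_buff head = { .len = 40, .frag_list = &f1 };
        struct sk_buff *frag;
        int total = head.len;

        skb_walk_frags(&head, frag)
            total += frag->len;   /* head stays usable after the loop */
        printf("total = %d\n", total); /* 340 */
        return 0;
    }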
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index b7dd695691a0..5567bd0d0750 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -167,10 +167,9 @@ nf_nat_in(unsigned int hooknum,
167 167
168 ret = nf_nat_fn(hooknum, skb, in, out, okfn); 168 ret = nf_nat_fn(hooknum, skb, in, out, okfn);
169 if (ret != NF_DROP && ret != NF_STOLEN && 169 if (ret != NF_DROP && ret != NF_STOLEN &&
170 daddr != ip_hdr(skb)->daddr) { 170 daddr != ip_hdr(skb)->daddr)
171 dst_release(skb->dst); 171 skb_dst_drop(skb);
172 skb->dst = NULL; 172
173 }
174 return ret; 173 return ret;
175} 174}
176 175
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index cf0cdeeb1db0..f25542c48b7d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -90,14 +90,14 @@ static const struct file_operations sockstat_seq_fops = {
90 90
91/* snmp items */ 91/* snmp items */
92static const struct snmp_mib snmp4_ipstats_list[] = { 92static const struct snmp_mib snmp4_ipstats_list[] = {
93 SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES), 93 SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
94 SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS), 94 SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
95 SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS), 95 SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
96 SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), 96 SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
97 SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), 97 SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
98 SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS), 98 SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
99 SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS), 99 SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
100 SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS), 100 SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS),
101 SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS), 101 SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
102 SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), 102 SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
103 SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), 103 SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
@@ -118,6 +118,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
118 SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), 118 SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
119 SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS), 119 SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS),
120 SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS), 120 SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS),
121 SNMP_MIB_ITEM("InOctets", IPSTATS_MIB_INOCTETS),
122 SNMP_MIB_ITEM("OutOctets", IPSTATS_MIB_OUTOCTETS),
123 SNMP_MIB_ITEM("InMcastOctets", IPSTATS_MIB_INMCASTOCTETS),
124 SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
125 SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
126 SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
121 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
122}; 128};
123 129
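The six new IPSTATS_MIB_*OCTETS counters surface in the IpExt: rows of /proc/net/netstat alongside the existing packet counters. A quick C snippet to pull just those rows:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/net/netstat", "r");
        char line[1024];

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strncmp(line, "IpExt:", 6) == 0)
                fputs(line, stdout); /* header row, then value row */
        fclose(f);
        return 0;
    }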
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index f774651f0a47..3dc9171a272f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -343,7 +343,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
343 343
344 skb->priority = sk->sk_priority; 344 skb->priority = sk->sk_priority;
345 skb->mark = sk->sk_mark; 345 skb->mark = sk->sk_mark;
346 skb->dst = dst_clone(&rt->u.dst); 346 skb_dst_set(skb, dst_clone(&rt->u.dst));
347 347
348 skb_reset_network_header(skb); 348 skb_reset_network_header(skb);
349 iph = ip_hdr(skb); 349 iph = ip_hdr(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 28205e5bfa9b..cd76b3cb7092 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,8 +131,8 @@ static int ip_rt_min_advmss __read_mostly = 256;
131static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; 131static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
132static int rt_chain_length_max __read_mostly = 20; 132static int rt_chain_length_max __read_mostly = 20;
133 133
134static void rt_worker_func(struct work_struct *work); 134static struct delayed_work expires_work;
135static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); 135static unsigned long expires_ljiffies;
136 136
137/* 137/*
138 * Interface to generic destination cache. 138 * Interface to generic destination cache.
@@ -787,9 +787,12 @@ static void rt_check_expire(void)
787 struct rtable *rth, *aux, **rthp; 787 struct rtable *rth, *aux, **rthp;
788 unsigned long samples = 0; 788 unsigned long samples = 0;
789 unsigned long sum = 0, sum2 = 0; 789 unsigned long sum = 0, sum2 = 0;
790 unsigned long delta;
790 u64 mult; 791 u64 mult;
791 792
792 mult = ((u64)ip_rt_gc_interval) << rt_hash_log; 793 delta = jiffies - expires_ljiffies;
794 expires_ljiffies = jiffies;
795 mult = ((u64)delta) << rt_hash_log;
793 if (ip_rt_gc_timeout > 1) 796 if (ip_rt_gc_timeout > 1)
794 do_div(mult, ip_rt_gc_timeout); 797 do_div(mult, ip_rt_gc_timeout);
795 goal = (unsigned int)mult; 798 goal = (unsigned int)mult;
@@ -1064,7 +1067,8 @@ work_done:
1064out: return 0; 1067out: return 0;
1065} 1068}
1066 1069
1067static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) 1070static int rt_intern_hash(unsigned hash, struct rtable *rt,
1071 struct rtable **rp, struct sk_buff *skb)
1068{ 1072{
1069 struct rtable *rth, **rthp; 1073 struct rtable *rth, **rthp;
1070 unsigned long now; 1074 unsigned long now;
@@ -1114,7 +1118,10 @@ restart:
1114 spin_unlock_bh(rt_hash_lock_addr(hash)); 1118 spin_unlock_bh(rt_hash_lock_addr(hash));
1115 1119
1116 rt_drop(rt); 1120 rt_drop(rt);
1117 *rp = rth; 1121 if (rp)
1122 *rp = rth;
1123 else
1124 skb_dst_set(skb, &rth->u.dst);
1118 return 0; 1125 return 0;
1119 } 1126 }
1120 1127
@@ -1210,7 +1217,10 @@ restart:
1210 rcu_assign_pointer(rt_hash_table[hash].chain, rt); 1217 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1211 1218
1212 spin_unlock_bh(rt_hash_lock_addr(hash)); 1219 spin_unlock_bh(rt_hash_lock_addr(hash));
1213 *rp = rt; 1220 if (rp)
1221 *rp = rt;
1222 else
1223 skb_dst_set(skb, &rt->u.dst);
1214 return 0; 1224 return 0;
1215} 1225}
1216 1226
@@ -1407,7 +1417,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1407 &netevent); 1417 &netevent);
1408 1418
1409 rt_del(hash, rth); 1419 rt_del(hash, rth);
1410 if (!rt_intern_hash(hash, rt, &rt)) 1420 if (!rt_intern_hash(hash, rt, &rt, NULL))
1411 ip_rt_put(rt); 1421 ip_rt_put(rt);
1412 goto do_next; 1422 goto do_next;
1413 } 1423 }
@@ -1473,7 +1483,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1473 1483
1474void ip_rt_send_redirect(struct sk_buff *skb) 1484void ip_rt_send_redirect(struct sk_buff *skb)
1475{ 1485{
1476 struct rtable *rt = skb->rtable; 1486 struct rtable *rt = skb_rtable(skb);
1477 struct in_device *in_dev = in_dev_get(rt->u.dst.dev); 1487 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1478 1488
1479 if (!in_dev) 1489 if (!in_dev)
@@ -1521,7 +1531,7 @@ out:
1521 1531
1522static int ip_error(struct sk_buff *skb) 1532static int ip_error(struct sk_buff *skb)
1523{ 1533{
1524 struct rtable *rt = skb->rtable; 1534 struct rtable *rt = skb_rtable(skb);
1525 unsigned long now; 1535 unsigned long now;
1526 int code; 1536 int code;
1527 1537
@@ -1698,7 +1708,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
1698 1708
1699 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1709 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1700 1710
1701 rt = skb->rtable; 1711 rt = skb_rtable(skb);
1702 if (rt) 1712 if (rt)
1703 dst_set_expires(&rt->u.dst, 0); 1713 dst_set_expires(&rt->u.dst, 0);
1704} 1714}
@@ -1858,7 +1868,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1858 1868
1859 in_dev_put(in_dev); 1869 in_dev_put(in_dev);
1860 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1870 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1861 return rt_intern_hash(hash, rth, &skb->rtable); 1871 return rt_intern_hash(hash, rth, NULL, skb);
1862 1872
1863e_nobufs: 1873e_nobufs:
1864 in_dev_put(in_dev); 1874 in_dev_put(in_dev);
@@ -2019,7 +2029,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2019 /* put it into the cache */ 2029 /* put it into the cache */
2020 hash = rt_hash(daddr, saddr, fl->iif, 2030 hash = rt_hash(daddr, saddr, fl->iif,
2021 rt_genid(dev_net(rth->u.dst.dev))); 2031 rt_genid(dev_net(rth->u.dst.dev)));
2022 return rt_intern_hash(hash, rth, &skb->rtable); 2032 return rt_intern_hash(hash, rth, NULL, skb);
2023} 2033}
2024 2034
2025/* 2035/*
@@ -2175,7 +2185,7 @@ local_input:
2175 } 2185 }
2176 rth->rt_type = res.type; 2186 rth->rt_type = res.type;
2177 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2187 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2178 err = rt_intern_hash(hash, rth, &skb->rtable); 2188 err = rt_intern_hash(hash, rth, NULL, skb);
2179 goto done; 2189 goto done;
2180 2190
2181no_route: 2191no_route:
@@ -2244,7 +2254,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2244 dst_use(&rth->u.dst, jiffies); 2254 dst_use(&rth->u.dst, jiffies);
2245 RT_CACHE_STAT_INC(in_hit); 2255 RT_CACHE_STAT_INC(in_hit);
2246 rcu_read_unlock(); 2256 rcu_read_unlock();
2247 skb->rtable = rth; 2257 skb_dst_set(skb, &rth->u.dst);
2248 return 0; 2258 return 0;
2249 } 2259 }
2250 RT_CACHE_STAT_INC(in_hlist_search); 2260 RT_CACHE_STAT_INC(in_hlist_search);
@@ -2420,7 +2430,7 @@ static int ip_mkroute_output(struct rtable **rp,
2420 if (err == 0) { 2430 if (err == 0) {
2421 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2431 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2422 rt_genid(dev_net(dev_out))); 2432 rt_genid(dev_net(dev_out)));
2423 err = rt_intern_hash(hash, rth, rp); 2433 err = rt_intern_hash(hash, rth, rp, NULL);
2424 } 2434 }
2425 2435
2426 return err; 2436 return err;
@@ -2763,7 +2773,7 @@ static int rt_fill_info(struct net *net,
2763 struct sk_buff *skb, u32 pid, u32 seq, int event, 2773 struct sk_buff *skb, u32 pid, u32 seq, int event,
2764 int nowait, unsigned int flags) 2774 int nowait, unsigned int flags)
2765{ 2775{
2766 struct rtable *rt = skb->rtable; 2776 struct rtable *rt = skb_rtable(skb);
2767 struct rtmsg *r; 2777 struct rtmsg *r;
2768 struct nlmsghdr *nlh; 2778 struct nlmsghdr *nlh;
2769 long expires; 2779 long expires;
@@ -2907,7 +2917,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2907 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); 2917 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2908 local_bh_enable(); 2918 local_bh_enable();
2909 2919
2910 rt = skb->rtable; 2920 rt = skb_rtable(skb);
2911 if (err == 0 && rt->u.dst.error) 2921 if (err == 0 && rt->u.dst.error)
2912 err = -rt->u.dst.error; 2922 err = -rt->u.dst.error;
2913 } else { 2923 } else {
@@ -2927,7 +2937,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2927 if (err) 2937 if (err)
2928 goto errout_free; 2938 goto errout_free;
2929 2939
2930 skb->rtable = rt; 2940 skb_dst_set(skb, &rt->u.dst);
2931 if (rtm->rtm_flags & RTM_F_NOTIFY) 2941 if (rtm->rtm_flags & RTM_F_NOTIFY)
2932 rt->rt_flags |= RTCF_NOTIFY; 2942 rt->rt_flags |= RTCF_NOTIFY;
2933 2943
@@ -2968,15 +2978,15 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2968 continue; 2978 continue;
2969 if (rt_is_expired(rt)) 2979 if (rt_is_expired(rt))
2970 continue; 2980 continue;
2971 skb->dst = dst_clone(&rt->u.dst); 2981 skb_dst_set(skb, dst_clone(&rt->u.dst));
2972 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, 2982 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
2973 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 2983 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2974 1, NLM_F_MULTI) <= 0) { 2984 1, NLM_F_MULTI) <= 0) {
2975 dst_release(xchg(&skb->dst, NULL)); 2985 skb_dst_drop(skb);
2976 rcu_read_unlock_bh(); 2986 rcu_read_unlock_bh();
2977 goto done; 2987 goto done;
2978 } 2988 }
2979 dst_release(xchg(&skb->dst, NULL)); 2989 skb_dst_drop(skb);
2980 } 2990 }
2981 rcu_read_unlock_bh(); 2991 rcu_read_unlock_bh();
2982 } 2992 }
@@ -3390,6 +3400,8 @@ int __init ip_rt_init(void)
3390 /* All the timers, started at system startup tend 3400 /* All the timers, started at system startup tend
3391 to synchronize. Perturb it a bit. 3401 to synchronize. Perturb it a bit.
3392 */ 3402 */
3403 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3404 expires_ljiffies = jiffies;
3393 schedule_delayed_work(&expires_work, 3405 schedule_delayed_work(&expires_work,
3394 net_random() % ip_rt_gc_interval + ip_rt_gc_interval); 3406 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3395 3407
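
The route.c hunks above belong to a tree-wide conversion: code stops dereferencing skb->dst and skb->rtable directly and goes through accessors, and rt_intern_hash() grows an skb argument so the input path can pass (hash, rth, NULL, skb) while the output path keeps (hash, rth, rp, NULL). A minimal sketch of the accessor idiom, assuming the <linux/skbuff.h> helpers of this era (the wrapper names are illustrative):

    #include <linux/skbuff.h>
    #include <net/route.h>

    /* attach: replaces "skb->rtable = rth" */
    static void attach_route(struct sk_buff *skb, struct rtable *rth)
    {
            skb_dst_set(skb, &rth->u.dst);
    }

    /* read back: replaces "skb->rtable"; rtable embeds its dst as u.dst */
    static struct rtable *read_route(const struct sk_buff *skb)
    {
            return skb_rtable(skb);
    }

    /* drop: replaces "dst_release(xchg(&skb->dst, NULL))" */
    static void drop_route(struct sk_buff *skb)
    {
            skb_dst_drop(skb);
    }

Keeping every dst touch behind skb_dst()/skb_dst_set()/skb_dst_drop() concentrates the field's representation in one small API, so it can change later without revisiting call sites.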
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b35a950d2e06..cd2b97f1b6e1 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -161,13 +161,12 @@ static __u16 const msstab[] = {
161 */ 161 */
162__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) 162__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
163{ 163{
164 struct tcp_sock *tp = tcp_sk(sk);
165 const struct iphdr *iph = ip_hdr(skb); 164 const struct iphdr *iph = ip_hdr(skb);
166 const struct tcphdr *th = tcp_hdr(skb); 165 const struct tcphdr *th = tcp_hdr(skb);
167 int mssind; 166 int mssind;
168 const __u16 mss = *mssp; 167 const __u16 mss = *mssp;
169 168
170 tp->last_synq_overflow = jiffies; 169 tcp_synq_overflow(sk);
171 170
172 /* XXX sort msstab[] by probability? Binary search? */ 171 /* XXX sort msstab[] by probability? Binary search? */
173 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 172 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
@@ -268,7 +267,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
268 if (!sysctl_tcp_syncookies || !th->ack) 267 if (!sysctl_tcp_syncookies || !th->ack)
269 goto out; 268 goto out;
270 269
271 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 270 if (tcp_synq_no_recent_overflow(sk) ||
272 (mss = cookie_check(skb, cookie)) == 0) { 271 (mss = cookie_check(skb, cookie)) == 0) {
273 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); 272 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
274 goto out; 273 goto out;
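
In syncookies.c the per-connection stamp tp->last_synq_overflow goes away; the listener instead records overflow through tcp_synq_overflow() and tests it with tcp_synq_no_recent_overflow(), folding the jiffies arithmetic into one place (the upstream helpers appear to reuse an existing timestamp field on the listening socket rather than dedicated storage). A hedged sketch of what the pair provides, expressed against the old open-coded test:

    #include <linux/jiffies.h>
    #include <net/tcp.h>

    /* roughly: last_overflow = jiffies */
    static void note_synq_overflow(struct sock *sk)
    {
            tcp_synq_overflow(sk);
    }

    /* roughly: time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT) */
    static int synq_overflow_stale(struct sock *sk)
    {
            return tcp_synq_no_recent_overflow(sk);
    }

A cookie ACK is only honoured while the overflow stamp is recent; once it is stale, cookie_v4_check() bumps LINUX_MIB_SYNCOOKIESFAILED and bails out, exactly as the old inline test did.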
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7a0f0b27bf1f..17b89c523f9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
439 !tp->urg_data || 439 !tp->urg_data ||
440 before(tp->urg_seq, tp->copied_seq) || 440 before(tp->urg_seq, tp->copied_seq) ||
441 !before(tp->urg_seq, tp->rcv_nxt)) { 441 !before(tp->urg_seq, tp->rcv_nxt)) {
442 struct sk_buff *skb;
443
442 answ = tp->rcv_nxt - tp->copied_seq; 444 answ = tp->rcv_nxt - tp->copied_seq;
443 445
444 /* Subtract 1, if FIN is in queue. */ 446 /* Subtract 1, if FIN is in queue. */
445 if (answ && !skb_queue_empty(&sk->sk_receive_queue)) 447 skb = skb_peek_tail(&sk->sk_receive_queue);
446 answ -= 448 if (answ && skb)
447 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; 449 answ -= tcp_hdr(skb)->fin;
448 } else 450 } else
449 answ = tp->urg_seq - tp->copied_seq; 451 answ = tp->urg_seq - tp->copied_seq;
450 release_sock(sk); 452 release_sock(sk);
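
The SIOCINQ hunk retires an open-coded cast of the queue head to an sk_buff: peeking the tail via the list-head layout trick becomes skb_peek_tail(), which returns NULL on an empty queue, so the emptiness test and the FIN peek merge into one check. The resulting idiom in isolation (the helper name is illustrative):

    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/sock.h>

    /* 1 if the last segment sitting in the receive queue carries FIN */
    static int fin_is_queued(struct sock *sk)
    {
            struct sk_buff *skb = skb_peek_tail(&sk->sk_receive_queue);

            return skb ? tcp_hdr(skb)->fin : 0;
    }

SIOCINQ subtracts that bit from the byte count because a FIN consumes a sequence number but carries no user data.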
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1382 1384
1383 /* Next get a buffer. */ 1385 /* Next get a buffer. */
1384 1386
1385 skb = skb_peek(&sk->sk_receive_queue); 1387 skb_queue_walk(&sk->sk_receive_queue, skb) {
1386 do {
1387 if (!skb)
1388 break;
1389
1390 /* Now that we have two receive queues this 1388 /* Now that we have two receive queues this
1391 * shouldn't happen. 1389 * shouldn't happen.
1392 */ 1390 */
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1403 if (tcp_hdr(skb)->fin) 1401 if (tcp_hdr(skb)->fin)
1404 goto found_fin_ok; 1402 goto found_fin_ok;
1405 WARN_ON(!(flags & MSG_PEEK)); 1403 WARN_ON(!(flags & MSG_PEEK));
1406 skb = skb->next; 1404 }
1407 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1408 1405
1409 /* Well, if we have backlog, try to process it now yet. */ 1406 /* Well, if we have backlog, try to process it now yet. */
1410 1407
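
tcp_recvmsg's hand-rolled "start at the first skb, stop when the cursor wraps back to the head" loop becomes skb_queue_walk(), which hides both the sentinel comparison and the empty-queue bootstrap. The shape of the macro on its own (a minimal sketch):

    #include <linux/skbuff.h>

    /* total payload bytes sitting on a queue */
    static unsigned int queued_bytes(struct sk_buff_head *q)
    {
            struct sk_buff *skb;
            unsigned int bytes = 0;

            skb_queue_walk(q, skb)
                    bytes += skb->len;
            return bytes;
    }

The body must not free or unlink skb during the walk; the _safe variants exist for that, which is why the tcp_collapse() rework further down switches to skb_queue_walk_from_safe().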
@@ -2518,20 +2515,30 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2518 unsigned int thlen; 2515 unsigned int thlen;
2519 unsigned int flags; 2516 unsigned int flags;
2520 unsigned int mss = 1; 2517 unsigned int mss = 1;
2518 unsigned int hlen;
2519 unsigned int off;
2521 int flush = 1; 2520 int flush = 1;
2522 int i; 2521 int i;
2523 2522
2524 th = skb_gro_header(skb, sizeof(*th)); 2523 off = skb_gro_offset(skb);
2525 if (unlikely(!th)) 2524 hlen = off + sizeof(*th);
2526 goto out; 2525 th = skb_gro_header_fast(skb, off);
2526 if (skb_gro_header_hard(skb, hlen)) {
2527 th = skb_gro_header_slow(skb, hlen, off);
2528 if (unlikely(!th))
2529 goto out;
2530 }
2527 2531
2528 thlen = th->doff * 4; 2532 thlen = th->doff * 4;
2529 if (thlen < sizeof(*th)) 2533 if (thlen < sizeof(*th))
2530 goto out; 2534 goto out;
2531 2535
2532 th = skb_gro_header(skb, thlen); 2536 hlen = off + thlen;
2533 if (unlikely(!th)) 2537 if (skb_gro_header_hard(skb, hlen)) {
2534 goto out; 2538 th = skb_gro_header_slow(skb, hlen, off);
2539 if (unlikely(!th))
2540 goto out;
2541 }
2535 2542
2536 skb_gro_pull(skb, thlen); 2543 skb_gro_pull(skb, thlen);
2537 2544
@@ -2544,7 +2551,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2544 2551
2545 th2 = tcp_hdr(p); 2552 th2 = tcp_hdr(p);
2546 2553
2547 if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) { 2554 if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2548 NAPI_GRO_CB(p)->same_flow = 0; 2555 NAPI_GRO_CB(p)->same_flow = 0;
2549 continue; 2556 continue;
2550 } 2557 }
@@ -2559,14 +2566,14 @@ found:
2559 flush |= flags & TCP_FLAG_CWR; 2566 flush |= flags & TCP_FLAG_CWR;
2560 flush |= (flags ^ tcp_flag_word(th2)) & 2567 flush |= (flags ^ tcp_flag_word(th2)) &
2561 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2568 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
2562 flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window); 2569 flush |= th->ack_seq ^ th2->ack_seq;
2563 for (i = sizeof(*th); !flush && i < thlen; i += 4) 2570 for (i = sizeof(*th); i < thlen; i += 4)
2564 flush |= *(u32 *)((u8 *)th + i) ^ 2571 flush |= *(u32 *)((u8 *)th + i) ^
2565 *(u32 *)((u8 *)th2 + i); 2572 *(u32 *)((u8 *)th2 + i);
2566 2573
2567 mss = skb_shinfo(p)->gso_size; 2574 mss = skb_shinfo(p)->gso_size;
2568 2575
2569 flush |= (len > mss) | !len; 2576 flush |= (len - 1) >= mss;
2570 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); 2577 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2571 2578
2572 if (flush || skb_gro_receive(head, skb)) { 2579 if (flush || skb_gro_receive(head, skb)) {
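
Three related micro-optimisations land in tcp_gro_receive(). First, the single skb_gro_header() is split into a fast path that merely computes a pointer when the bytes are already available, plus a pulling slow path. Second, the adjacent 16-bit source and destination ports are compared with one 32-bit load, "*(u32 *)&th->source ^ *(u32 *)&th2->source". Third, "flush |= (len > mss) | !len" becomes "flush |= (len - 1) >= mss", folding the zero-length test into the comparison via unsigned wraparound: a zero length wraps to UINT_MAX and forces a flush. A sketch of the new header-access idiom, assuming the <linux/netdevice.h> GRO helpers of this era:

    #include <linux/netdevice.h>
    #include <linux/tcp.h>

    static struct tcphdr *gro_tcp_hdr(struct sk_buff *skb)
    {
            unsigned int off = skb_gro_offset(skb);
            unsigned int hlen = off + sizeof(struct tcphdr);
            struct tcphdr *th = skb_gro_header_fast(skb, off);

            /* the fast-path pointer is usable only when hlen bytes are
             * available up front; otherwise take the pulling slow path,
             * which may return NULL on failure */
            if (skb_gro_header_hard(skb, hlen))
                    th = skb_gro_header_slow(skb, hlen, off);
            return th;
    }

tcp_gro_receive() applies this twice, once for the fixed header and again once th->doff reveals the full option length.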
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eec3e6f9956c..2bdb0da237e6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -77,7 +77,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
77int sysctl_tcp_sack __read_mostly = 1; 77int sysctl_tcp_sack __read_mostly = 1;
78int sysctl_tcp_fack __read_mostly = 1; 78int sysctl_tcp_fack __read_mostly = 1;
79int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 79int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
80int sysctl_tcp_ecn __read_mostly; 80int sysctl_tcp_ecn __read_mostly = 2;
81int sysctl_tcp_dsack __read_mostly = 1; 81int sysctl_tcp_dsack __read_mostly = 1;
82int sysctl_tcp_app_win __read_mostly = 31; 82int sysctl_tcp_app_win __read_mostly = 31;
83int sysctl_tcp_adv_win_scale __read_mostly = 2; 83int sysctl_tcp_adv_win_scale __read_mostly = 2;
@@ -4426,7 +4426,7 @@ drop:
4426 } 4426 }
4427 __skb_queue_head(&tp->out_of_order_queue, skb); 4427 __skb_queue_head(&tp->out_of_order_queue, skb);
4428 } else { 4428 } else {
4429 struct sk_buff *skb1 = tp->out_of_order_queue.prev; 4429 struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4430 u32 seq = TCP_SKB_CB(skb)->seq; 4430 u32 seq = TCP_SKB_CB(skb)->seq;
4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4432 4432
@@ -4443,15 +4443,18 @@ drop:
4443 } 4443 }
4444 4444
4445 /* Find place to insert this segment. */ 4445 /* Find place to insert this segment. */
4446 do { 4446 while (1) {
4447 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4447 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4448 break; 4448 break;
4449 } while ((skb1 = skb1->prev) != 4449 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4450 (struct sk_buff *)&tp->out_of_order_queue); 4450 skb1 = NULL;
4451 break;
4452 }
4453 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4454 }
4451 4455
4452 /* Do skb overlap to previous one? */ 4456 /* Do skb overlap to previous one? */
4453 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && 4457 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4454 before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4455 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4458 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4456 /* All the bits are present. Drop. */ 4459 /* All the bits are present. Drop. */
4457 __kfree_skb(skb); 4460 __kfree_skb(skb);
@@ -4463,15 +4466,26 @@ drop:
4463 tcp_dsack_set(sk, seq, 4466 tcp_dsack_set(sk, seq,
4464 TCP_SKB_CB(skb1)->end_seq); 4467 TCP_SKB_CB(skb1)->end_seq);
4465 } else { 4468 } else {
4466 skb1 = skb1->prev; 4469 if (skb_queue_is_first(&tp->out_of_order_queue,
4470 skb1))
4471 skb1 = NULL;
4472 else
4473 skb1 = skb_queue_prev(
4474 &tp->out_of_order_queue,
4475 skb1);
4467 } 4476 }
4468 } 4477 }
4469 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4478 if (!skb1)
4479 __skb_queue_head(&tp->out_of_order_queue, skb);
4480 else
4481 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4470 4482
4471 /* And clean segments covered by new one as whole. */ 4483 /* And clean segments covered by new one as whole. */
4472 while ((skb1 = skb->next) != 4484 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4473 (struct sk_buff *)&tp->out_of_order_queue && 4485 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4474 after(end_seq, TCP_SKB_CB(skb1)->seq)) { 4486
4487 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4488 break;
4475 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4489 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4476 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4490 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4477 end_seq); 4491 end_seq);
@@ -4492,7 +4506,10 @@ add_sack:
4492static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4506static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4493 struct sk_buff_head *list) 4507 struct sk_buff_head *list)
4494{ 4508{
4495 struct sk_buff *next = skb->next; 4509 struct sk_buff *next = NULL;
4510
4511 if (!skb_queue_is_last(list, skb))
4512 next = skb_queue_next(list, skb);
4496 4513
4497 __skb_unlink(skb, list); 4514 __skb_unlink(skb, list);
4498 __kfree_skb(skb); 4515 __kfree_skb(skb);
@@ -4503,6 +4520,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4503 4520
4504/* Collapse contiguous sequence of skbs head..tail with 4521/* Collapse contiguous sequence of skbs head..tail with
4505 * sequence numbers start..end. 4522 * sequence numbers start..end.
4523 *
4524 * If tail is NULL, this means until the end of the list.
4525 *
4506 * Segments with FIN/SYN are not collapsed (only because this 4526 * Segments with FIN/SYN are not collapsed (only because this
4507 * simplifies code) 4527 * simplifies code)
4508 */ 4528 */
@@ -4511,15 +4531,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4511 struct sk_buff *head, struct sk_buff *tail, 4531 struct sk_buff *head, struct sk_buff *tail,
4512 u32 start, u32 end) 4532 u32 start, u32 end)
4513{ 4533{
4514 struct sk_buff *skb; 4534 struct sk_buff *skb, *n;
4535 bool end_of_skbs;
4515 4536
4516 /* First, check that queue is collapsible and find 4537 /* First, check that queue is collapsible and find
4517 * the point where collapsing can be useful. */ 4538 * the point where collapsing can be useful. */
4518 for (skb = head; skb != tail;) { 4539 skb = head;
4540restart:
4541 end_of_skbs = true;
4542 skb_queue_walk_from_safe(list, skb, n) {
4543 if (skb == tail)
4544 break;
4519 /* No new bits? It is possible on ofo queue. */ 4545 /* No new bits? It is possible on ofo queue. */
4520 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4546 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4521 skb = tcp_collapse_one(sk, skb, list); 4547 skb = tcp_collapse_one(sk, skb, list);
4522 continue; 4548 if (!skb)
4549 break;
4550 goto restart;
4523 } 4551 }
4524 4552
4525 /* The first skb to collapse is: 4553 /* The first skb to collapse is:
@@ -4529,16 +4557,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4529 */ 4557 */
4530 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4558 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
4531 (tcp_win_from_space(skb->truesize) > skb->len || 4559 (tcp_win_from_space(skb->truesize) > skb->len ||
4532 before(TCP_SKB_CB(skb)->seq, start) || 4560 before(TCP_SKB_CB(skb)->seq, start))) {
4533 (skb->next != tail && 4561 end_of_skbs = false;
4534 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
4535 break; 4562 break;
4563 }
4564
4565 if (!skb_queue_is_last(list, skb)) {
4566 struct sk_buff *next = skb_queue_next(list, skb);
4567 if (next != tail &&
4568 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
4569 end_of_skbs = false;
4570 break;
4571 }
4572 }
4536 4573
4537 /* Decided to skip this, advance start seq. */ 4574 /* Decided to skip this, advance start seq. */
4538 start = TCP_SKB_CB(skb)->end_seq; 4575 start = TCP_SKB_CB(skb)->end_seq;
4539 skb = skb->next;
4540 } 4576 }
4541 if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4577 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
4542 return; 4578 return;
4543 4579
4544 while (before(start, end)) { 4580 while (before(start, end)) {
@@ -4583,7 +4619,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4583 } 4619 }
4584 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4620 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4585 skb = tcp_collapse_one(sk, skb, list); 4621 skb = tcp_collapse_one(sk, skb, list);
4586 if (skb == tail || 4622 if (!skb ||
4623 skb == tail ||
4587 tcp_hdr(skb)->syn || 4624 tcp_hdr(skb)->syn ||
4588 tcp_hdr(skb)->fin) 4625 tcp_hdr(skb)->fin)
4589 return; 4626 return;
@@ -4610,17 +4647,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4610 head = skb; 4647 head = skb;
4611 4648
4612 for (;;) { 4649 for (;;) {
4613 skb = skb->next; 4650 struct sk_buff *next = NULL;
4651
4652 if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
4653 next = skb_queue_next(&tp->out_of_order_queue, skb);
4654 skb = next;
4614 4655
4615 /* Segment is terminated when we see gap or when 4656 /* Segment is terminated when we see gap or when
4616 * we are at the end of all the queue. */ 4657 * we are at the end of all the queue. */
4617 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 4658 if (!skb ||
4618 after(TCP_SKB_CB(skb)->seq, end) || 4659 after(TCP_SKB_CB(skb)->seq, end) ||
4619 before(TCP_SKB_CB(skb)->end_seq, start)) { 4660 before(TCP_SKB_CB(skb)->end_seq, start)) {
4620 tcp_collapse(sk, &tp->out_of_order_queue, 4661 tcp_collapse(sk, &tp->out_of_order_queue,
4621 head, skb, start, end); 4662 head, skb, start, end);
4622 head = skb; 4663 head = skb;
4623 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 4664 if (!skb)
4624 break; 4665 break;
4625 /* Start new segment */ 4666 /* Start new segment */
4626 start = TCP_SKB_CB(skb)->seq; 4667 start = TCP_SKB_CB(skb)->seq;
@@ -4681,10 +4722,11 @@ static int tcp_prune_queue(struct sock *sk)
4681 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4722 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4682 4723
4683 tcp_collapse_ofo_queue(sk); 4724 tcp_collapse_ofo_queue(sk);
4684 tcp_collapse(sk, &sk->sk_receive_queue, 4725 if (!skb_queue_empty(&sk->sk_receive_queue))
4685 sk->sk_receive_queue.next, 4726 tcp_collapse(sk, &sk->sk_receive_queue,
4686 (struct sk_buff *)&sk->sk_receive_queue, 4727 skb_peek(&sk->sk_receive_queue),
4687 tp->copied_seq, tp->rcv_nxt); 4728 NULL,
4729 tp->copied_seq, tp->rcv_nxt);
4688 sk_mem_reclaim(sk); 4730 sk_mem_reclaim(sk);
4689 4731
4690 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4732 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
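
The out-of-order-queue rework in tcp_input.c retires every "skb == (struct sk_buff *)&queue" head comparison in favour of explicit queue navigation, with NULL as the end-of-list marker. The two helpers below capture the idiom the hunks repeat (names illustrative):

    #include <linux/skbuff.h>

    static struct sk_buff *ofo_next(struct sk_buff_head *list,
                                    struct sk_buff *skb)
    {
            /* skb_queue_next() must not be called on the last element */
            return skb_queue_is_last(list, skb) ? NULL
                                                : skb_queue_next(list, skb);
    }

    static struct sk_buff *ofo_prev(struct sk_buff_head *list,
                                    struct sk_buff *skb)
    {
            return skb_queue_is_first(list, skb) ? NULL
                                                 : skb_queue_prev(list, skb);
    }

This is what lets tcp_collapse() accept tail == NULL to mean "to the end of the list", and why tcp_collapse_one() and tcp_collapse_ofo_queue() now test a NULL cursor instead of comparing against the head.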
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5d427f86b414..5a1ca2698c88 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -546,7 +546,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
546 if (th->rst) 546 if (th->rst)
547 return; 547 return;
548 548
549 if (skb->rtable->rt_type != RTN_LOCAL) 549 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
550 return; 550 return;
551 551
552 /* Swap the send and the receive. */ 552 /* Swap the send and the receive. */
@@ -590,7 +590,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
590 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 590 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
591 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 591 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
592 592
593 net = dev_net(skb->dst->dev); 593 net = dev_net(skb_dst(skb)->dev);
594 ip_send_reply(net->ipv4.tcp_sock, skb, 594 ip_send_reply(net->ipv4.tcp_sock, skb,
595 &arg, arg.iov[0].iov_len); 595 &arg, arg.iov[0].iov_len);
596 596
@@ -617,7 +617,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
617 ]; 617 ];
618 } rep; 618 } rep;
619 struct ip_reply_arg arg; 619 struct ip_reply_arg arg;
620 struct net *net = dev_net(skb->dst->dev); 620 struct net *net = dev_net(skb_dst(skb)->dev);
621 621
622 memset(&rep.th, 0, sizeof(struct tcphdr)); 622 memset(&rep.th, 0, sizeof(struct tcphdr));
623 memset(&arg, 0, sizeof(arg)); 623 memset(&arg, 0, sizeof(arg));
@@ -1185,7 +1185,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1185#endif 1185#endif
1186 1186
1187 /* Never answer to SYNs send to broadcast or multicast */ 1187 /* Never answer to SYNs send to broadcast or multicast */
1188 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1188 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1189 goto drop; 1189 goto drop;
1190 1190
1191 /* TW buckets are converted to open requests without 1191 /* TW buckets are converted to open requests without
@@ -1593,7 +1593,7 @@ process:
1593#endif 1593#endif
1594 { 1594 {
1595 if (!tcp_prequeue(sk, skb)) 1595 if (!tcp_prequeue(sk, skb))
1596 ret = tcp_v4_do_rcv(sk, skb); 1596 ret = tcp_v4_do_rcv(sk, skb);
1597 } 1597 }
1598 } else 1598 } else
1599 sk_add_backlog(sk, skb); 1599 sk_add_backlog(sk, skb);
@@ -2343,7 +2343,7 @@ void tcp4_proc_exit(void)
2343 2343
2344struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2344struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2345{ 2345{
2346 struct iphdr *iph = ip_hdr(skb); 2346 struct iphdr *iph = skb_gro_network_header(skb);
2347 2347
2348 switch (skb->ip_summed) { 2348 switch (skb->ip_summed) {
2349 case CHECKSUM_COMPLETE: 2349 case CHECKSUM_COMPLETE:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 59aec609cec6..416fc4c2e7eb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -288,7 +288,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
288 struct tcp_sock *tp = tcp_sk(sk); 288 struct tcp_sock *tp = tcp_sk(sk);
289 289
290 tp->ecn_flags = 0; 290 tp->ecn_flags = 0;
291 if (sysctl_tcp_ecn) { 291 if (sysctl_tcp_ecn == 1) {
292 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; 292 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
293 tp->ecn_flags = TCP_ECN_OK; 293 tp->ecn_flags = TCP_ECN_OK;
294 } 294 }
@@ -2202,7 +2202,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2202 /* Reserve space for headers. */ 2202 /* Reserve space for headers. */
2203 skb_reserve(skb, MAX_TCP_HEADER); 2203 skb_reserve(skb, MAX_TCP_HEADER);
2204 2204
2205 skb->dst = dst_clone(dst); 2205 skb_dst_set(skb, dst_clone(dst));
2206 2206
2207 mss = dst_metric(dst, RTAX_ADVMSS); 2207 mss = dst_metric(dst, RTAX_ADVMSS);
2208 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2208 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
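
The tcp_output.c hunk pairs with the sysctl_tcp_ecn default change in tcp_input.c above: the knob becomes a tri-state, where 0 keeps ECN off, 1 preserves the old behaviour of requesting ECN on outgoing connections, and the new default 2 accepts ECN only when the peer asks for it. Hence the "== 1" test in TCP_ECN_send_syn(): in mode 2 our own SYN must not carry ECE|CWR. A one-line sketch of the distinction:

    #include <net/tcp.h>

    /* only mode 1 initiates ECN; modes 0 and 2 never mark our SYN */
    static int ecn_initiate(void)
    {
            return sysctl_tcp_ecn == 1;
    }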
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7a1d1ce22e66..8f4158d7c9a6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -328,7 +328,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
328 if (unlikely(sk = skb_steal_sock(skb))) 328 if (unlikely(sk = skb_steal_sock(skb)))
329 return sk; 329 return sk;
330 else 330 else
331 return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport, 331 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
332 iph->daddr, dport, inet_iif(skb), 332 iph->daddr, dport, inet_iif(skb),
333 udptable); 333 udptable);
334} 334}
@@ -1237,7 +1237,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1237 struct sock *sk; 1237 struct sock *sk;
1238 struct udphdr *uh; 1238 struct udphdr *uh;
1239 unsigned short ulen; 1239 unsigned short ulen;
1240 struct rtable *rt = (struct rtable*)skb->dst; 1240 struct rtable *rt = skb_rtable(skb);
1241 __be32 saddr, daddr; 1241 __be32 saddr, daddr;
1242 struct net *net = dev_net(skb->dev); 1242 struct net *net = dev_net(skb->dev);
1243 1243
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 4ec2162a437e..f9f922a0ba88 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -23,7 +23,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
23 23
24static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) 24static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
25{ 25{
26 if (skb->dst == NULL) { 26 if (skb_dst(skb) == NULL) {
27 const struct iphdr *iph = ip_hdr(skb); 27 const struct iphdr *iph = ip_hdr(skb);
28 28
29 if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, 29 if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 7135279f3f84..3444f3b34eca 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -28,7 +28,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
28 */ 28 */
29static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 29static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
30{ 30{
31 struct dst_entry *dst = skb->dst; 31 struct dst_entry *dst = skb_dst(skb);
32 struct iphdr *top_iph; 32 struct iphdr *top_iph;
33 int flags; 33 int flags;
34 34
@@ -41,7 +41,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
41 top_iph->ihl = 5; 41 top_iph->ihl = 5;
42 top_iph->version = 4; 42 top_iph->version = 4;
43 43
44 top_iph->protocol = xfrm_af2proto(skb->dst->ops->family); 44 top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);
45 45
46 /* DS disclosed */ 46 /* DS disclosed */
47 top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos, 47 top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos,
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 8c3180adddbf..c908bd99bcba 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -29,7 +29,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
30 goto out; 30 goto out;
31 31
32 dst = skb->dst; 32 dst = skb_dst(skb);
33 mtu = dst_mtu(dst); 33 mtu = dst_mtu(dst);
34 if (skb->len > mtu) { 34 if (skb->len > mtu) {
35 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 35 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
72static int xfrm4_output_finish(struct sk_buff *skb) 72static int xfrm4_output_finish(struct sk_buff *skb)
73{ 73{
74#ifdef CONFIG_NETFILTER 74#ifdef CONFIG_NETFILTER
75 if (!skb->dst->xfrm) { 75 if (!skb_dst(skb)->xfrm) {
76 IPCB(skb)->flags |= IPSKB_REROUTED; 76 IPCB(skb)->flags |= IPSKB_REROUTED;
77 return dst_output(skb); 77 return dst_output(skb);
78 } 78 }
@@ -87,6 +87,6 @@ static int xfrm4_output_finish(struct sk_buff *skb)
87int xfrm4_output(struct sk_buff *skb) 87int xfrm4_output(struct sk_buff *skb)
88{ 88{
89 return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, 89 return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb,
90 NULL, skb->dst->dev, xfrm4_output_finish, 90 NULL, skb_dst(skb)->dev, xfrm4_output_finish,
91 !(IPCB(skb)->flags & IPSKB_REROUTED)); 91 !(IPCB(skb)->flags & IPSKB_REROUTED));
92} 92}
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index ca8cb326d1d2..ead6c7a42f44 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -168,7 +168,7 @@ config IPV6_SIT
168 into IPv4 packets. This is useful if you want to connect two IPv6 168 into IPv4 packets. This is useful if you want to connect two IPv6
169 networks over an IPv4-only path. 169 networks over an IPv4-only path.
170 170
171 Saying M here will produce a module called sit.ko. If unsure, say Y. 171 Saying M here will produce a module called sit. If unsure, say Y.
172 172
173config IPV6_NDISC_NODETYPE 173config IPV6_NDISC_NODETYPE
174 bool 174 bool
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a8218bc1806a..8c1e86afbbf5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -503,7 +503,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
503 return 0; 503 return 0;
504 504
505 if (!rtnl_trylock()) 505 if (!rtnl_trylock())
506 return -ERESTARTSYS; 506 return restart_syscall();
507 507
508 if (p == &net->ipv6.devconf_all->forwarding) { 508 if (p == &net->ipv6.devconf_all->forwarding) {
509 __s32 newf = net->ipv6.devconf_all->forwarding; 509 __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -591,7 +591,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
591{ 591{
592 struct inet6_ifaddr *ifa = NULL; 592 struct inet6_ifaddr *ifa = NULL;
593 struct rt6_info *rt; 593 struct rt6_info *rt;
594 struct net *net = dev_net(idev->dev);
595 int hash; 594 int hash;
596 int err = 0; 595 int err = 0;
597 int addr_type = ipv6_addr_type(addr); 596 int addr_type = ipv6_addr_type(addr);
@@ -608,7 +607,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
608 goto out2; 607 goto out2;
609 } 608 }
610 609
611 if (idev->cnf.disable_ipv6 || net->ipv6.devconf_all->disable_ipv6) { 610 if (idev->cnf.disable_ipv6) {
612 err = -EACCES; 611 err = -EACCES;
613 goto out2; 612 goto out2;
614 } 613 }
@@ -1520,6 +1519,8 @@ static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
1520 1519
1521int __ipv6_isatap_ifid(u8 *eui, __be32 addr) 1520int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
1522{ 1521{
1522 if (addr == 0)
1523 return -1;
1523 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || 1524 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
1524 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || 1525 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
1525 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || 1526 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
@@ -1750,6 +1751,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1750 __u32 prefered_lft; 1751 __u32 prefered_lft;
1751 int addr_type; 1752 int addr_type;
1752 struct inet6_dev *in6_dev; 1753 struct inet6_dev *in6_dev;
1754 struct net *net = dev_net(dev);
1753 1755
1754 pinfo = (struct prefix_info *) opt; 1756 pinfo = (struct prefix_info *) opt;
1755 1757
@@ -1807,7 +1809,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1807 if (addrconf_finite_timeout(rt_expires)) 1809 if (addrconf_finite_timeout(rt_expires))
1808 rt_expires *= HZ; 1810 rt_expires *= HZ;
1809 1811
1810 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1812 rt = rt6_lookup(net, &pinfo->prefix, NULL,
1811 dev->ifindex, 1); 1813 dev->ifindex, 1);
1812 1814
1813 if (rt && addrconf_is_prefix_route(rt)) { 1815 if (rt && addrconf_is_prefix_route(rt)) {
@@ -1844,7 +1846,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1844 struct inet6_ifaddr * ifp; 1846 struct inet6_ifaddr * ifp;
1845 struct in6_addr addr; 1847 struct in6_addr addr;
1846 int create = 0, update_lft = 0; 1848 int create = 0, update_lft = 0;
1847 struct net *net = dev_net(dev);
1848 1849
1849 if (pinfo->prefix_len == 64) { 1850 if (pinfo->prefix_len == 64) {
1850 memcpy(&addr, &pinfo->prefix, 8); 1851 memcpy(&addr, &pinfo->prefix, 8);
@@ -2765,7 +2766,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2765 spin_unlock_bh(&ifp->lock); 2766 spin_unlock_bh(&ifp->lock);
2766 read_unlock_bh(&idev->lock); 2767 read_unlock_bh(&idev->lock);
2767 /* 2768 /*
2768 * If the defice is not ready: 2769 * If the device is not ready:
2769 * - keep it tentative if it is a permanent address. 2770 * - keep it tentative if it is a permanent address.
2770 * - otherwise, kill it. 2771 * - otherwise, kill it.
2771 */ 2772 */
@@ -3986,6 +3987,75 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
3986 return addrconf_fixup_forwarding(table, valp, val); 3987 return addrconf_fixup_forwarding(table, valp, val);
3987} 3988}
3988 3989
3990static void dev_disable_change(struct inet6_dev *idev)
3991{
3992 if (!idev || !idev->dev)
3993 return;
3994
3995 if (idev->cnf.disable_ipv6)
3996 addrconf_notify(NULL, NETDEV_DOWN, idev->dev);
3997 else
3998 addrconf_notify(NULL, NETDEV_UP, idev->dev);
3999}
4000
4001static void addrconf_disable_change(struct net *net, __s32 newf)
4002{
4003 struct net_device *dev;
4004 struct inet6_dev *idev;
4005
4006 read_lock(&dev_base_lock);
4007 for_each_netdev(net, dev) {
4008 rcu_read_lock();
4009 idev = __in6_dev_get(dev);
4010 if (idev) {
4011 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
4012 idev->cnf.disable_ipv6 = newf;
4013 if (changed)
4014 dev_disable_change(idev);
4015 }
4016 rcu_read_unlock();
4017 }
4018 read_unlock(&dev_base_lock);
4019}
4020
4021static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
4022{
4023 struct net *net;
4024
4025 net = (struct net *)table->extra2;
4026
4027 if (p == &net->ipv6.devconf_dflt->disable_ipv6)
4028 return 0;
4029
4030 if (!rtnl_trylock())
4031 return restart_syscall();
4032
4033 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4034 __s32 newf = net->ipv6.devconf_all->disable_ipv6;
4035 net->ipv6.devconf_dflt->disable_ipv6 = newf;
4036 addrconf_disable_change(net, newf);
4037 } else if ((!*p) ^ (!old))
4038 dev_disable_change((struct inet6_dev *)table->extra1);
4039
4040 rtnl_unlock();
4041 return 0;
4042}
4043
4044static
4045int addrconf_sysctl_disable(ctl_table *ctl, int write, struct file * filp,
4046 void __user *buffer, size_t *lenp, loff_t *ppos)
4047{
4048 int *valp = ctl->data;
4049 int val = *valp;
4050 int ret;
4051
4052 ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
4053
4054 if (write)
4055 ret = addrconf_disable_ipv6(ctl, valp, val);
4056 return ret;
4057}
4058
3989static struct addrconf_sysctl_table 4059static struct addrconf_sysctl_table
3990{ 4060{
3991 struct ctl_table_header *sysctl_header; 4061 struct ctl_table_header *sysctl_header;
@@ -4223,7 +4293,8 @@ static struct addrconf_sysctl_table
4223 .data = &ipv6_devconf.disable_ipv6, 4293 .data = &ipv6_devconf.disable_ipv6,
4224 .maxlen = sizeof(int), 4294 .maxlen = sizeof(int),
4225 .mode = 0644, 4295 .mode = 0644,
4226 .proc_handler = proc_dointvec, 4296 .proc_handler = addrconf_sysctl_disable,
4297 .strategy = sysctl_intvec,
4227 }, 4298 },
4228 { 4299 {
4229 .ctl_name = CTL_UNNUMBERED, 4300 .ctl_name = CTL_UNNUMBERED,
@@ -4344,6 +4415,10 @@ static int addrconf_init_net(struct net *net)
4344 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 4415 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
4345 if (dflt == NULL) 4416 if (dflt == NULL)
4346 goto err_alloc_dflt; 4417 goto err_alloc_dflt;
4418 } else {
4419 /* these will be inherited by all namespaces */
4420 dflt->autoconf = ipv6_defaults.autoconf;
4421 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4347 } 4422 }
4348 4423
4349 net->ipv6.devconf_all = all; 4424 net->ipv6.devconf_all = all;
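
addrconf.c gives disable_ipv6 a real handler instead of plain proc_dointvec, so a write to the "all" value propagates to the namespace default and to every device under RTNL, bringing interfaces down or up as the flag flips. Reduced to its skeleton, the control-flow pattern looks like the sketch below; the real handler defers the work to addrconf_disable_ipv6()/addrconf_disable_change(), and the side-effect hook here is a hypothetical stand-in:

    #include <linux/fs.h>
    #include <linux/sysctl.h>
    #include <linux/rtnetlink.h>
    #include <linux/sched.h>

    static void apply_flag_change(int newval)
    {
            /* hypothetical stand-in for addrconf_disable_change() et al. */
    }

    static int flag_sysctl(struct ctl_table *ctl, int write, struct file *filp,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            int *valp = ctl->data;
            int old = *valp;
            int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);

            if (!write || ret)
                    return ret;
            if (!rtnl_trylock())
                    return restart_syscall();  /* retry the write, don't fail it */
            if ((!*valp) ^ (!old))             /* act on 0 <-> non-0 edges only */
                    apply_flag_change(*valp);
            rtnl_unlock();
            return ret;
    }

Returning restart_syscall() where the old code returned -ERESTARTSYS is the same fix applied to addrconf_fixup_forwarding() earlier in this file: the write is transparently retried instead of surfacing an error to userspace.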
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 61f55386a236..85b3d0036afd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,9 +72,21 @@ MODULE_LICENSE("GPL");
72static struct list_head inetsw6[SOCK_MAX]; 72static struct list_head inetsw6[SOCK_MAX];
73static DEFINE_SPINLOCK(inetsw6_lock); 73static DEFINE_SPINLOCK(inetsw6_lock);
74 74
75static int disable_ipv6 = 0; 75struct ipv6_params ipv6_defaults = {
76module_param_named(disable, disable_ipv6, int, 0); 76 .disable_ipv6 = 0,
77MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional"); 77 .autoconf = 1,
78};
79
80static int disable_ipv6_mod = 0;
81
82module_param_named(disable, disable_ipv6_mod, int, 0444);
83MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
84
85module_param_named(disable_ipv6, ipv6_defaults.disable_ipv6, int, 0444);
86MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces");
87
88module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
89MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
78 90
79static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 91static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
80{ 92{
@@ -817,13 +829,20 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
817 struct sk_buff *p; 829 struct sk_buff *p;
818 struct ipv6hdr *iph; 830 struct ipv6hdr *iph;
819 unsigned int nlen; 831 unsigned int nlen;
832 unsigned int hlen;
833 unsigned int off;
820 int flush = 1; 834 int flush = 1;
821 int proto; 835 int proto;
822 __wsum csum; 836 __wsum csum;
823 837
824 iph = skb_gro_header(skb, sizeof(*iph)); 838 off = skb_gro_offset(skb);
825 if (unlikely(!iph)) 839 hlen = off + sizeof(*iph);
826 goto out; 840 iph = skb_gro_header_fast(skb, off);
841 if (skb_gro_header_hard(skb, hlen)) {
842 iph = skb_gro_header_slow(skb, hlen, off);
843 if (unlikely(!iph))
844 goto out;
845 }
827 846
828 skb_gro_pull(skb, sizeof(*iph)); 847 skb_gro_pull(skb, sizeof(*iph));
829 skb_set_transport_header(skb, skb_gro_offset(skb)); 848 skb_set_transport_header(skb, skb_gro_offset(skb));
@@ -1031,7 +1050,7 @@ static int __init inet6_init(void)
1031 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) 1050 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
1032 INIT_LIST_HEAD(r); 1051 INIT_LIST_HEAD(r);
1033 1052
1034 if (disable_ipv6) { 1053 if (disable_ipv6_mod) {
1035 printk(KERN_INFO 1054 printk(KERN_INFO
1036 "IPv6: Loaded, but administratively disabled, " 1055 "IPv6: Loaded, but administratively disabled, "
1037 "reboot required to enable\n"); 1056 "reboot required to enable\n");
@@ -1220,7 +1239,7 @@ module_init(inet6_init);
1220 1239
1221static void __exit inet6_exit(void) 1240static void __exit inet6_exit(void)
1222{ 1241{
1223 if (disable_ipv6) 1242 if (disable_ipv6_mod)
1224 return; 1243 return;
1225 1244
1226 /* First of all disallow new sockets creation. */ 1245 /* First of all disallow new sockets creation. */
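
af_inet6.c splits the old single "disable" module option into three and parks the per-interface defaults in a struct, so the addrconf_init_net() hunk above can copy them into every new network namespace. The wiring pattern in isolation (struct and names are illustrative stand-ins for ipv6_params):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    struct feature_params {
            int disable;
            int autoconf;
    };

    static struct feature_params feature_defaults = {
            .disable  = 0,
            .autoconf = 1,
    };

    /* 0444: visible but read-only under /sys/module/<mod>/parameters/ */
    module_param_named(disable, feature_defaults.disable, int, 0444);
    MODULE_PARM_DESC(disable, "Start with the feature disabled");

The practical difference: "modprobe ipv6 disable=1" still leaves the module inert until reboot, while the new "disable_ipv6=1" loads the stack normally but starts every interface with disable_ipv6 set, which remains reversible at runtime through the per-device sysctl handler added above.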
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 1c7f400a3cfe..4aae658e5501 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -277,7 +277,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
277 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 277 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
278 !pskb_may_pull(skb, (skb_transport_offset(skb) + 278 !pskb_may_pull(skb, (skb_transport_offset(skb) +
279 ((skb_transport_header(skb)[1] + 1) << 3)))) { 279 ((skb_transport_header(skb)[1] + 1) << 3)))) {
280 IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst), 280 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
281 IPSTATS_MIB_INHDRERRORS); 281 IPSTATS_MIB_INHDRERRORS);
282 kfree_skb(skb); 282 kfree_skb(skb);
283 return -1; 283 return -1;
@@ -288,7 +288,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
288 dstbuf = opt->dst1; 288 dstbuf = opt->dst1;
289#endif 289#endif
290 290
291 dst = dst_clone(skb->dst); 291 dst = dst_clone(skb_dst(skb));
292 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { 292 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
293 dst_release(dst); 293 dst_release(dst);
294 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; 294 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
@@ -333,7 +333,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
333 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 333 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
334 !pskb_may_pull(skb, (skb_transport_offset(skb) + 334 !pskb_may_pull(skb, (skb_transport_offset(skb) +
335 ((skb_transport_header(skb)[1] + 1) << 3)))) { 335 ((skb_transport_header(skb)[1] + 1) << 3)))) {
336 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 336 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
337 IPSTATS_MIB_INHDRERRORS); 337 IPSTATS_MIB_INHDRERRORS);
338 kfree_skb(skb); 338 kfree_skb(skb);
339 return -1; 339 return -1;
@@ -343,7 +343,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
343 343
344 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || 344 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
345 skb->pkt_type != PACKET_HOST) { 345 skb->pkt_type != PACKET_HOST) {
346 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 346 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
347 IPSTATS_MIB_INADDRERRORS); 347 IPSTATS_MIB_INADDRERRORS);
348 kfree_skb(skb); 348 kfree_skb(skb);
349 return -1; 349 return -1;
@@ -358,7 +358,7 @@ looped_back:
358 * processed by own 358 * processed by own
359 */ 359 */
360 if (!addr) { 360 if (!addr) {
361 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 361 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
362 IPSTATS_MIB_INADDRERRORS); 362 IPSTATS_MIB_INADDRERRORS);
363 kfree_skb(skb); 363 kfree_skb(skb);
364 return -1; 364 return -1;
@@ -384,7 +384,7 @@ looped_back:
384 goto unknown_rh; 384 goto unknown_rh;
385 /* Silently discard invalid RTH type 2 */ 385 /* Silently discard invalid RTH type 2 */
386 if (hdr->hdrlen != 2 || hdr->segments_left != 1) { 386 if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
387 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 387 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
388 IPSTATS_MIB_INHDRERRORS); 388 IPSTATS_MIB_INHDRERRORS);
389 kfree_skb(skb); 389 kfree_skb(skb);
390 return -1; 390 return -1;
@@ -403,7 +403,7 @@ looped_back:
403 n = hdr->hdrlen >> 1; 403 n = hdr->hdrlen >> 1;
404 404
405 if (hdr->segments_left > n) { 405 if (hdr->segments_left > n) {
406 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 406 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
407 IPSTATS_MIB_INHDRERRORS); 407 IPSTATS_MIB_INHDRERRORS);
408 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 408 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
409 ((&hdr->segments_left) - 409 ((&hdr->segments_left) -
@@ -417,7 +417,7 @@ looped_back:
417 if (skb_cloned(skb)) { 417 if (skb_cloned(skb)) {
418 /* the copy is a forwarded packet */ 418 /* the copy is a forwarded packet */
419 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 419 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
420 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 420 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
421 IPSTATS_MIB_OUTDISCARDS); 421 IPSTATS_MIB_OUTDISCARDS);
422 kfree_skb(skb); 422 kfree_skb(skb);
423 return -1; 423 return -1;
@@ -440,13 +440,13 @@ looped_back:
440 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, 440 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
441 (xfrm_address_t *)&ipv6_hdr(skb)->saddr, 441 (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
442 IPPROTO_ROUTING) < 0) { 442 IPPROTO_ROUTING) < 0) {
443 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 443 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
444 IPSTATS_MIB_INADDRERRORS); 444 IPSTATS_MIB_INADDRERRORS);
445 kfree_skb(skb); 445 kfree_skb(skb);
446 return -1; 446 return -1;
447 } 447 }
448 if (!ipv6_chk_home_addr(dev_net(skb->dst->dev), addr)) { 448 if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
449 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 449 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
450 IPSTATS_MIB_INADDRERRORS); 450 IPSTATS_MIB_INADDRERRORS);
451 kfree_skb(skb); 451 kfree_skb(skb);
452 return -1; 452 return -1;
@@ -458,7 +458,7 @@ looped_back:
458 } 458 }
459 459
460 if (ipv6_addr_is_multicast(addr)) { 460 if (ipv6_addr_is_multicast(addr)) {
461 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 461 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
462 IPSTATS_MIB_INADDRERRORS); 462 IPSTATS_MIB_INADDRERRORS);
463 kfree_skb(skb); 463 kfree_skb(skb);
464 return -1; 464 return -1;
@@ -468,17 +468,17 @@ looped_back:
468 ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); 468 ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
469 ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); 469 ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
470 470
471 dst_release(xchg(&skb->dst, NULL)); 471 skb_dst_drop(skb);
472 ip6_route_input(skb); 472 ip6_route_input(skb);
473 if (skb->dst->error) { 473 if (skb_dst(skb)->error) {
474 skb_push(skb, skb->data - skb_network_header(skb)); 474 skb_push(skb, skb->data - skb_network_header(skb));
475 dst_input(skb); 475 dst_input(skb);
476 return -1; 476 return -1;
477 } 477 }
478 478
479 if (skb->dst->dev->flags&IFF_LOOPBACK) { 479 if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
480 if (ipv6_hdr(skb)->hop_limit <= 1) { 480 if (ipv6_hdr(skb)->hop_limit <= 1) {
481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
482 IPSTATS_MIB_INHDRERRORS); 482 IPSTATS_MIB_INHDRERRORS);
483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
484 0, skb->dev); 484 0, skb->dev);
@@ -494,7 +494,7 @@ looped_back:
494 return -1; 494 return -1;
495 495
496unknown_rh: 496unknown_rh:
497 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); 497 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
498 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 498 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
499 (&hdr->type) - skb_network_header(skb)); 499 (&hdr->type) - skb_network_header(skb));
500 return -1; 500 return -1;
@@ -552,11 +552,11 @@ void ipv6_exthdrs_exit(void)
552 **********************************/ 552 **********************************/
553 553
554/* 554/*
555 * Note: we cannot rely on skb->dst before we assign it in ip6_route_input(). 555 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
556 */ 556 */
557static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) 557static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
558{ 558{
559 return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev); 559 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
560} 560}
561 561
562/* Router Alert as of RFC 2711 */ 562/* Router Alert as of RFC 2711 */
@@ -581,7 +581,7 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
581{ 581{
582 const unsigned char *nh = skb_network_header(skb); 582 const unsigned char *nh = skb_network_header(skb);
583 u32 pkt_len; 583 u32 pkt_len;
584 struct net *net = dev_net(skb->dst->dev); 584 struct net *net = dev_net(skb_dst(skb)->dev);
585 585
586 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { 586 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
587 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 587 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index f5de3f9dc692..00a7a5e4ac97 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -151,7 +151,7 @@ static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = {
151}; 151};
152 152
153static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 153static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
154 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 154 struct fib_rule_hdr *frh,
155 struct nlattr **tb) 155 struct nlattr **tb)
156{ 156{
157 int err = -EINVAL; 157 int err = -EINVAL;
@@ -211,7 +211,7 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
211} 211}
212 212
213static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 213static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
214 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 214 struct fib_rule_hdr *frh)
215{ 215{
216 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 216 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
217 217
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 3c3732d50c1a..cc4797dd8325 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -228,7 +228,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
228 __inet6_csk_dst_store(sk, dst, NULL, NULL); 228 __inet6_csk_dst_store(sk, dst, NULL, NULL);
229 } 229 }
230 230
231 skb->dst = dst_clone(dst); 231 skb_dst_set(skb, dst_clone(dst));
232 232
233 /* Restore final destination back after routing done */ 233 /* Restore final destination back after routing done */
234 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 234 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 8f04bd9da274..c3a07d75b5f5 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -48,7 +48,7 @@
48 48
49inline int ip6_rcv_finish( struct sk_buff *skb) 49inline int ip6_rcv_finish( struct sk_buff *skb)
50{ 50{
51 if (skb->dst == NULL) 51 if (skb_dst(skb) == NULL)
52 ip6_route_input(skb); 52 ip6_route_input(skb);
53 53
54 return dst_input(skb); 54 return dst_input(skb);
@@ -70,7 +70,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
70 70
71 idev = __in6_dev_get(skb->dev); 71 idev = __in6_dev_get(skb->dev);
72 72
73 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INRECEIVES); 73 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
74 74
75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || 75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
76 !idev || unlikely(idev->cnf.disable_ipv6)) { 76 !idev || unlikely(idev->cnf.disable_ipv6)) {
@@ -91,7 +91,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
91 * arrived via the sending interface (ethX), because of the 91 * arrived via the sending interface (ethX), because of the
92 * nature of scoping architecture. --yoshfuji 92 * nature of scoping architecture. --yoshfuji
93 */ 93 */
94 IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex; 94 IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
95 95
96 if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) 96 if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
97 goto err; 97 goto err;
@@ -161,7 +161,7 @@ static int ip6_input_finish(struct sk_buff *skb)
161 int nexthdr, raw; 161 int nexthdr, raw;
162 u8 hash; 162 u8 hash;
163 struct inet6_dev *idev; 163 struct inet6_dev *idev;
164 struct net *net = dev_net(skb->dst->dev); 164 struct net *net = dev_net(skb_dst(skb)->dev);
165 165
166 /* 166 /*
167 * Parse extension headers 167 * Parse extension headers
@@ -169,7 +169,7 @@ static int ip6_input_finish(struct sk_buff *skb)
169 169
170 rcu_read_lock(); 170 rcu_read_lock();
171resubmit: 171resubmit:
172 idev = ip6_dst_idev(skb->dst); 172 idev = ip6_dst_idev(skb_dst(skb));
173 if (!pskb_pull(skb, skb_transport_offset(skb))) 173 if (!pskb_pull(skb, skb_transport_offset(skb)))
174 goto discard; 174 goto discard;
175 nhoff = IP6CB(skb)->nhoff; 175 nhoff = IP6CB(skb)->nhoff;
@@ -242,8 +242,9 @@ int ip6_mc_input(struct sk_buff *skb)
242 struct ipv6hdr *hdr; 242 struct ipv6hdr *hdr;
243 int deliver; 243 int deliver;
244 244
245 IP6_INC_STATS_BH(dev_net(skb->dst->dev), 245 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
246 ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); 246 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
247 skb->len);
247 248
248 hdr = ipv6_hdr(skb); 249 hdr = ipv6_hdr(skb);
249 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); 250 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
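
ip6_input.c moves several counters from IP6_INC_STATS_BH() to IP6_UPD_PO_STATS_BH(): the _UPD_PO (packets-and-octets) variants take a base field such as IPSTATS_MIB_IN or IPSTATS_MIB_INMCAST and bump both the packet counter and the octet counter derived from it, so byte statistics no longer need a second call site. Usage shape (a sketch; the wrapper function is illustrative):

    #include <net/ipv6.h>

    static void count_mcast_in(struct net *net, struct inet6_dev *idev,
                               const struct sk_buff *skb)
    {
            /* updates the INMCAST packet and octet MIBs together */
            IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_INMCAST, skb->len);
    }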
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9fb49c3b518a..7c76e3d18215 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -78,7 +78,7 @@ int __ip6_local_out(struct sk_buff *skb)
78 len = 0; 78 len = 0;
79 ipv6_hdr(skb)->payload_len = htons(len); 79 ipv6_hdr(skb)->payload_len = htons(len);
80 80
81 return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, 81 return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
82 dst_output); 82 dst_output);
83} 83}
84 84
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(ip6_local_out);
96 96
97static int ip6_output_finish(struct sk_buff *skb) 97static int ip6_output_finish(struct sk_buff *skb)
98{ 98{
99 struct dst_entry *dst = skb->dst; 99 struct dst_entry *dst = skb_dst(skb);
100 100
101 if (dst->hh) 101 if (dst->hh)
102 return neigh_hh_output(dst->hh, skb); 102 return neigh_hh_output(dst->hh, skb);
@@ -117,7 +117,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
117 __skb_pull(newskb, skb_network_offset(newskb)); 117 __skb_pull(newskb, skb_network_offset(newskb));
118 newskb->pkt_type = PACKET_LOOPBACK; 118 newskb->pkt_type = PACKET_LOOPBACK;
119 newskb->ip_summed = CHECKSUM_UNNECESSARY; 119 newskb->ip_summed = CHECKSUM_UNNECESSARY;
120 WARN_ON(!newskb->dst); 120 WARN_ON(!skb_dst(newskb));
121 121
122 netif_rx(newskb); 122 netif_rx(newskb);
123 return 0; 123 return 0;
@@ -126,7 +126,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
126 126
127static int ip6_output2(struct sk_buff *skb) 127static int ip6_output2(struct sk_buff *skb)
128{ 128{
129 struct dst_entry *dst = skb->dst; 129 struct dst_entry *dst = skb_dst(skb);
130 struct net_device *dev = dst->dev; 130 struct net_device *dev = dst->dev;
131 131
132 skb->protocol = htons(ETH_P_IPV6); 132 skb->protocol = htons(ETH_P_IPV6);
@@ -134,7 +134,7 @@ static int ip6_output2(struct sk_buff *skb)
134 134
135 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 135 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
136 struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; 136 struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
137 struct inet6_dev *idev = ip6_dst_idev(skb->dst); 137 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
138 138
139 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && 139 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
140 ((mroute6_socket(dev_net(dev)) && 140 ((mroute6_socket(dev_net(dev)) &&
@@ -159,7 +159,8 @@ static int ip6_output2(struct sk_buff *skb)
159 } 159 }
160 } 160 }
161 161
162 IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCASTPKTS); 162 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
163 skb->len);
163 } 164 }
164 165
165 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev, 166 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
@@ -171,21 +172,21 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
171 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 172 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
172 173
173 return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? 174 return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
174 skb->dst->dev->mtu : dst_mtu(skb->dst); 175 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
175} 176}
176 177
177int ip6_output(struct sk_buff *skb) 178int ip6_output(struct sk_buff *skb)
178{ 179{
179 struct inet6_dev *idev = ip6_dst_idev(skb->dst); 180 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
180 if (unlikely(idev->cnf.disable_ipv6)) { 181 if (unlikely(idev->cnf.disable_ipv6)) {
181 IP6_INC_STATS(dev_net(skb->dst->dev), idev, 182 IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev,
182 IPSTATS_MIB_OUTDISCARDS); 183 IPSTATS_MIB_OUTDISCARDS);
183 kfree_skb(skb); 184 kfree_skb(skb);
184 return 0; 185 return 0;
185 } 186 }
186 187
187 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 188 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
188 dst_allfrag(skb->dst)) 189 dst_allfrag(skb_dst(skb)))
189 return ip6_fragment(skb, ip6_output2); 190 return ip6_fragment(skb, ip6_output2);
190 else 191 else
191 return ip6_output2(skb); 192 return ip6_output2(skb);
@@ -201,7 +202,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
201 struct net *net = sock_net(sk); 202 struct net *net = sock_net(sk);
202 struct ipv6_pinfo *np = inet6_sk(sk); 203 struct ipv6_pinfo *np = inet6_sk(sk);
203 struct in6_addr *first_hop = &fl->fl6_dst; 204 struct in6_addr *first_hop = &fl->fl6_dst;
204 struct dst_entry *dst = skb->dst; 205 struct dst_entry *dst = skb_dst(skb);
205 struct ipv6hdr *hdr; 206 struct ipv6hdr *hdr;
206 u8 proto = fl->proto; 207 u8 proto = fl->proto;
207 int seg_len = skb->len; 208 int seg_len = skb->len;
@@ -221,7 +222,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
221 if (skb_headroom(skb) < head_room) { 222 if (skb_headroom(skb) < head_room) {
222 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); 223 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
223 if (skb2 == NULL) { 224 if (skb2 == NULL) {
224 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 225 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
225 IPSTATS_MIB_OUTDISCARDS); 226 IPSTATS_MIB_OUTDISCARDS);
226 kfree_skb(skb); 227 kfree_skb(skb);
227 return -ENOBUFS; 228 return -ENOBUFS;
@@ -275,8 +276,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
275 276
276 mtu = dst_mtu(dst); 277 mtu = dst_mtu(dst);
277 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { 278 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
278 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 279 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
279 IPSTATS_MIB_OUTREQUESTS); 280 IPSTATS_MIB_OUT, skb->len);
280 return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 281 return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
281 dst_output); 282 dst_output);
282 } 283 }
@@ -285,7 +286,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
285 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); 286 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
286 skb->dev = dst->dev; 287 skb->dev = dst->dev;
287 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 288 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
288 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); 289 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
289 kfree_skb(skb); 290 kfree_skb(skb);
290 return -EMSGSIZE; 291 return -EMSGSIZE;
291} 292}
@@ -415,7 +416,7 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
415 416
416int ip6_forward(struct sk_buff *skb) 417int ip6_forward(struct sk_buff *skb)
417{ 418{
418 struct dst_entry *dst = skb->dst; 419 struct dst_entry *dst = skb_dst(skb);
419 struct ipv6hdr *hdr = ipv6_hdr(skb); 420 struct ipv6hdr *hdr = ipv6_hdr(skb);
420 struct inet6_skb_parm *opt = IP6CB(skb); 421 struct inet6_skb_parm *opt = IP6CB(skb);
421 struct net *net = dev_net(dst->dev); 422 struct net *net = dev_net(dst->dev);
@@ -484,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
484 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); 485 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
485 goto drop; 486 goto drop;
486 } 487 }
487 dst = skb->dst; 488 dst = skb_dst(skb);
488 489
489 /* IPv6 specs say nothing about it, but it is clear that we cannot 490 /* IPv6 specs say nothing about it, but it is clear that we cannot
490 send redirects to source routed frames. 491 send redirects to source routed frames.
@@ -565,8 +566,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
565 to->pkt_type = from->pkt_type; 566 to->pkt_type = from->pkt_type;
566 to->priority = from->priority; 567 to->priority = from->priority;
567 to->protocol = from->protocol; 568 to->protocol = from->protocol;
568 dst_release(to->dst); 569 skb_dst_drop(to);
569 to->dst = dst_clone(from->dst); 570 skb_dst_set(to, dst_clone(skb_dst(from)));
570 to->dev = from->dev; 571 to->dev = from->dev;
571 to->mark = from->mark; 572 to->mark = from->mark;
572 573
@@ -623,7 +624,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
623static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 624static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
624{ 625{
625 struct sk_buff *frag; 626 struct sk_buff *frag;
626 struct rt6_info *rt = (struct rt6_info*)skb->dst; 627 struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
627 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 628 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
628 struct ipv6hdr *tmp_hdr; 629 struct ipv6hdr *tmp_hdr;
629 struct frag_hdr *fh; 630 struct frag_hdr *fh;
@@ -631,7 +632,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
631 __be32 frag_id = 0; 632 __be32 frag_id = 0;
632 int ptr, offset = 0, err=0; 633 int ptr, offset = 0, err=0;
633 u8 *prevhdr, nexthdr = 0; 634 u8 *prevhdr, nexthdr = 0;
634 struct net *net = dev_net(skb->dst->dev); 635 struct net *net = dev_net(skb_dst(skb)->dev);
635 636
636 hlen = ip6_find_1stfragopt(skb, &prevhdr); 637 hlen = ip6_find_1stfragopt(skb, &prevhdr);
637 nexthdr = *prevhdr; 638 nexthdr = *prevhdr;
@@ -643,9 +644,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
643 * check should be redundant, but it's free.) 644 * check should be redundant, but it's free.)
644 */ 645 */
645 if (!skb->local_df) { 646 if (!skb->local_df) {
646 skb->dev = skb->dst->dev; 647 skb->dev = skb_dst(skb)->dev;
647 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 648 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
648 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 649 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
649 IPSTATS_MIB_FRAGFAILS); 650 IPSTATS_MIB_FRAGFAILS);
650 kfree_skb(skb); 651 kfree_skb(skb);
651 return -EMSGSIZE; 652 return -EMSGSIZE;
@@ -657,7 +658,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
657 } 658 }
658 mtu -= hlen + sizeof(struct frag_hdr); 659 mtu -= hlen + sizeof(struct frag_hdr);
659 660
660 if (skb_shinfo(skb)->frag_list) { 661 if (skb_has_frags(skb)) {
661 int first_len = skb_pagelen(skb); 662 int first_len = skb_pagelen(skb);
662 int truesizes = 0; 663 int truesizes = 0;
663 664
@@ -666,7 +667,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
666 skb_cloned(skb)) 667 skb_cloned(skb))
667 goto slow_path; 668 goto slow_path;
668 669
669 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 670 skb_walk_frags(skb, frag) {
670 /* Correct geometry. */ 671 /* Correct geometry. */
671 if (frag->len > mtu || 672 if (frag->len > mtu ||
672 ((frag->len & 7) && frag->next) || 673 ((frag->len & 7) && frag->next) ||
@@ -679,7 +680,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
679 680
680 BUG_ON(frag->sk); 681 BUG_ON(frag->sk);
681 if (skb->sk) { 682 if (skb->sk) {
682 sock_hold(skb->sk);
683 frag->sk = skb->sk; 683 frag->sk = skb->sk;
684 frag->destructor = sock_wfree; 684 frag->destructor = sock_wfree;
685 truesizes += frag->truesize; 685 truesizes += frag->truesize;
@@ -689,13 +689,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
689 err = 0; 689 err = 0;
690 offset = 0; 690 offset = 0;
691 frag = skb_shinfo(skb)->frag_list; 691 frag = skb_shinfo(skb)->frag_list;
692 skb_shinfo(skb)->frag_list = NULL; 692 skb_frag_list_init(skb);
693 /* BUILD HEADER */ 693 /* BUILD HEADER */
694 694
695 *prevhdr = NEXTHDR_FRAGMENT; 695 *prevhdr = NEXTHDR_FRAGMENT;
696 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); 696 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
697 if (!tmp_hdr) { 697 if (!tmp_hdr) {
698 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 698 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
699 IPSTATS_MIB_FRAGFAILS); 699 IPSTATS_MIB_FRAGFAILS);
700 return -ENOMEM; 700 return -ENOMEM;
701 } 701 }
@@ -808,7 +808,7 @@ slow_path:
808 808
809 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 809 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
810 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); 810 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
811 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 811 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
812 IPSTATS_MIB_FRAGFAILS); 812 IPSTATS_MIB_FRAGFAILS);
813 err = -ENOMEM; 813 err = -ENOMEM;
814 goto fail; 814 goto fail;
@@ -872,16 +872,16 @@ slow_path:
872 if (err) 872 if (err)
873 goto fail; 873 goto fail;
874 874
875 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 875 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
876 IPSTATS_MIB_FRAGCREATES); 876 IPSTATS_MIB_FRAGCREATES);
877 } 877 }
878 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 878 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
879 IPSTATS_MIB_FRAGOKS); 879 IPSTATS_MIB_FRAGOKS);
880 kfree_skb(skb); 880 kfree_skb(skb);
881 return err; 881 return err;
882 882
883fail: 883fail:
884 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 884 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
885 IPSTATS_MIB_FRAGFAILS); 885 IPSTATS_MIB_FRAGFAILS);
886 kfree_skb(skb); 886 kfree_skb(skb);
887 return err; 887 return err;
@@ -1515,10 +1515,10 @@ int ip6_push_pending_frames(struct sock *sk)
1515 skb->priority = sk->sk_priority; 1515 skb->priority = sk->sk_priority;
1516 skb->mark = sk->sk_mark; 1516 skb->mark = sk->sk_mark;
1517 1517
1518 skb->dst = dst_clone(&rt->u.dst); 1518 skb_dst_set(skb, dst_clone(&rt->u.dst));
1519 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); 1519 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1520 if (proto == IPPROTO_ICMPV6) { 1520 if (proto == IPPROTO_ICMPV6) {
1521 struct inet6_dev *idev = ip6_dst_idev(skb->dst); 1521 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1522 1522
1523 ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); 1523 ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1524 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); 1524 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
@@ -1544,8 +1544,8 @@ void ip6_flush_pending_frames(struct sock *sk)
1544 struct sk_buff *skb; 1544 struct sk_buff *skb;
1545 1545
1546 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { 1546 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1547 if (skb->dst) 1547 if (skb_dst(skb))
1548 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb->dst), 1548 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1549 IPSTATS_MIB_OUTDISCARDS); 1549 IPSTATS_MIB_OUTDISCARDS);
1550 kfree_skb(skb); 1550 kfree_skb(skb);
1551 } 1551 }
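
The pattern repeated throughout ip6_output.c is mechanical: every read of skb->dst becomes skb_dst(skb), every assignment becomes skb_dst_set(), and the release-then-clear pair collapses into skb_dst_drop(), so the dst attachment can later change representation without touching callers. The byte-counting change rides along: IP6_INC_STATS(..., IPSTATS_MIB_OUTREQUESTS) becomes IP6_UPD_PO_STATS(..., IPSTATS_MIB_OUT, skb->len), bumping packet and octet counters together. A minimal sketch of the conversion; attach_route() is a hypothetical helper for illustration only:

        #include <linux/skbuff.h>
        #include <net/dst.h>

        static void attach_route(struct sk_buff *skb, struct dst_entry *dst)
        {
                /* old open-coded form:
                 *      dst_release(skb->dst);
                 *      skb->dst = dst_clone(dst);
                 */
                skb_dst_drop(skb);                      /* release old dst, if any, and clear */
                skb_dst_set(skb, dst_clone(dst));       /* attach the reference dst_clone() took */
        }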
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d994c55a5b16..404d16a97d5c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -532,8 +532,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
532 if (!skb2) 532 if (!skb2)
533 return 0; 533 return 0;
534 534
535 dst_release(skb2->dst); 535 skb_dst_drop(skb2);
536 skb2->dst = NULL; 536
537 skb_pull(skb2, offset); 537 skb_pull(skb2, offset);
538 skb_reset_network_header(skb2); 538 skb_reset_network_header(skb2);
539 eiph = ip_hdr(skb2); 539 eiph = ip_hdr(skb2);
@@ -560,21 +560,21 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
560 ip_rt_put(rt); 560 ip_rt_put(rt);
561 goto out; 561 goto out;
562 } 562 }
563 skb2->dst = (struct dst_entry *)rt; 563 skb_dst_set(skb2, (struct dst_entry *)rt);
564 } else { 564 } else {
565 ip_rt_put(rt); 565 ip_rt_put(rt);
566 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, 566 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
567 skb2->dev) || 567 skb2->dev) ||
568 skb2->dst->dev->type != ARPHRD_TUNNEL) 568 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
569 goto out; 569 goto out;
570 } 570 }
571 571
572 /* change mtu on this route */ 572 /* change mtu on this route */
573 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { 573 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
574 if (rel_info > dst_mtu(skb2->dst)) 574 if (rel_info > dst_mtu(skb_dst(skb2)))
575 goto out; 575 goto out;
576 576
577 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 577 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
578 } 578 }
579 579
580 icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); 580 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -606,8 +606,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
606 if (!skb2) 606 if (!skb2)
607 return 0; 607 return 0;
608 608
609 dst_release(skb2->dst); 609 skb_dst_drop(skb2);
610 skb2->dst = NULL;
611 skb_pull(skb2, offset); 610 skb_pull(skb2, offset);
612 skb_reset_network_header(skb2); 611 skb_reset_network_header(skb2);
613 612
@@ -720,8 +719,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
720 skb->pkt_type = PACKET_HOST; 719 skb->pkt_type = PACKET_HOST;
721 memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); 720 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
722 skb->dev = t->dev; 721 skb->dev = t->dev;
723 dst_release(skb->dst); 722 skb_dst_drop(skb);
724 skb->dst = NULL;
725 nf_reset(skb); 723 nf_reset(skb);
726 724
727 dscp_ecn_decapsulate(t, ipv6h, skb); 725 dscp_ecn_decapsulate(t, ipv6h, skb);
@@ -885,8 +883,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
885 } 883 }
886 if (mtu < IPV6_MIN_MTU) 884 if (mtu < IPV6_MIN_MTU)
887 mtu = IPV6_MIN_MTU; 885 mtu = IPV6_MIN_MTU;
888 if (skb->dst) 886 if (skb_dst(skb))
889 skb->dst->ops->update_pmtu(skb->dst, mtu); 887 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
890 if (skb->len > mtu) { 888 if (skb->len > mtu) {
891 *pmtu = mtu; 889 *pmtu = mtu;
892 err = -EMSGSIZE; 890 err = -EMSGSIZE;
@@ -910,8 +908,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
910 kfree_skb(skb); 908 kfree_skb(skb);
911 skb = new_skb; 909 skb = new_skb;
912 } 910 }
913 dst_release(skb->dst); 911 skb_dst_drop(skb);
914 skb->dst = dst_clone(dst); 912 skb_dst_set(skb, dst_clone(dst));
915 913
916 skb->transport_header = skb->network_header; 914 skb->transport_header = skb->network_header;
917 915
@@ -1100,8 +1098,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1100 struct ip6_tnl_parm *p = &t->parms; 1098 struct ip6_tnl_parm *p = &t->parms;
1101 struct flowi *fl = &t->fl; 1099 struct flowi *fl = &t->fl;
1102 1100
1103 memcpy(&dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 1101 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1104 memcpy(&dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 1102 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1105 1103
1106 /* Set up flowi template */ 1104 /* Set up flowi template */
1107 ipv6_addr_copy(&fl->fl6_src, &p->laddr); 1105 ipv6_addr_copy(&fl->fl6_src, &p->laddr);
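
One hunk here is not part of the dst conversion: ip6_tnl_link_config() loses the `&` on its memcpy arguments. With the net_device address storage reworked in this kernel series, dev->dev_addr is a pointer into the device's address list rather than an embedded array, so &dev->dev_addr names the pointer itself and the old memcpy would have overwritten it. A hedged sketch of the distinction (tnl_copy_addr() is hypothetical):

        #include <linux/netdevice.h>
        #include <linux/string.h>
        #include <linux/in6.h>

        static void tnl_copy_addr(struct net_device *dev, const struct in6_addr *laddr)
        {
                /* dev->dev_addr is unsigned char *; &dev->dev_addr would be the
                 * address of the pointer, so memcpy() would clobber the pointer
                 * instead of filling in the 16 address bytes it points at. */
                memcpy(dev->dev_addr, laddr, sizeof(struct in6_addr));
        }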
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 228be551e9c1..c769f155c698 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -398,10 +398,9 @@ static int pim6_rcv(struct sk_buff *skb)
398 skb->protocol = htons(ETH_P_IPV6); 398 skb->protocol = htons(ETH_P_IPV6);
399 skb->ip_summed = 0; 399 skb->ip_summed = 0;
400 skb->pkt_type = PACKET_HOST; 400 skb->pkt_type = PACKET_HOST;
401 dst_release(skb->dst); 401 skb_dst_drop(skb);
402 reg_dev->stats.rx_bytes += skb->len; 402 reg_dev->stats.rx_bytes += skb->len;
403 reg_dev->stats.rx_packets++; 403 reg_dev->stats.rx_packets++;
404 skb->dst = NULL;
405 nf_reset(skb); 404 nf_reset(skb);
406 netif_rx(skb); 405 netif_rx(skb);
407 dev_put(reg_dev); 406 dev_put(reg_dev);
@@ -442,6 +441,7 @@ static void reg_vif_setup(struct net_device *dev)
442 dev->flags = IFF_NOARP; 441 dev->flags = IFF_NOARP;
443 dev->netdev_ops = &reg_vif_netdev_ops; 442 dev->netdev_ops = &reg_vif_netdev_ops;
444 dev->destructor = free_netdev; 443 dev->destructor = free_netdev;
444 dev->features |= NETIF_F_NETNS_LOCAL;
445} 445}
446 446
447static struct net_device *ip6mr_reg_vif(struct net *net) 447static struct net_device *ip6mr_reg_vif(struct net *net)
@@ -849,7 +849,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
849 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); 849 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
850 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); 850 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
851 851
852 skb->dst = dst_clone(pkt->dst); 852 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
853 skb->ip_summed = CHECKSUM_UNNECESSARY; 853 skb->ip_summed = CHECKSUM_UNNECESSARY;
854 } 854 }
855 855
@@ -1078,7 +1078,18 @@ int __init ip6_mr_init(void)
1078 err = register_netdevice_notifier(&ip6_mr_notifier); 1078 err = register_netdevice_notifier(&ip6_mr_notifier);
1079 if (err) 1079 if (err)
1080 goto reg_notif_fail; 1080 goto reg_notif_fail;
1081#ifdef CONFIG_IPV6_PIMSM_V2
1082 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1083 printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
1084 err = -EAGAIN;
1085 goto add_proto_fail;
1086 }
1087#endif
1081 return 0; 1088 return 0;
1089#ifdef CONFIG_IPV6_PIMSM_V2
1090add_proto_fail:
1091 unregister_netdevice_notifier(&ip6_mr_notifier);
1092#endif
1082reg_notif_fail: 1093reg_notif_fail:
1083 del_timer(&ipmr_expire_timer); 1094 del_timer(&ipmr_expire_timer);
1084 unregister_pernet_subsys(&ip6mr_net_ops); 1095 unregister_pernet_subsys(&ip6mr_net_ops);
@@ -1364,14 +1375,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1364 if (v != net->ipv6.mroute_do_pim) { 1375 if (v != net->ipv6.mroute_do_pim) {
1365 net->ipv6.mroute_do_pim = v; 1376 net->ipv6.mroute_do_pim = v;
1366 net->ipv6.mroute_do_assert = v; 1377 net->ipv6.mroute_do_assert = v;
1367 if (net->ipv6.mroute_do_pim)
1368 ret = inet6_add_protocol(&pim6_protocol,
1369 IPPROTO_PIM);
1370 else
1371 ret = inet6_del_protocol(&pim6_protocol,
1372 IPPROTO_PIM);
1373 if (ret < 0)
1374 ret = -EAGAIN;
1375 } 1378 }
1376 rtnl_unlock(); 1379 rtnl_unlock();
1377 return ret; 1380 return ret;
@@ -1487,7 +1490,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1487 1490
1488static inline int ip6mr_forward2_finish(struct sk_buff *skb) 1491static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1489{ 1492{
1490 IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst), 1493 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1491 IPSTATS_MIB_OUTFORWDATAGRAMS); 1494 IPSTATS_MIB_OUTFORWDATAGRAMS);
1492 return dst_output(skb); 1495 return dst_output(skb);
1493} 1496}
@@ -1532,8 +1535,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1532 if (!dst) 1535 if (!dst)
1533 goto out_free; 1536 goto out_free;
1534 1537
1535 dst_release(skb->dst); 1538 skb_dst_drop(skb);
1536 skb->dst = dst; 1539 skb_dst_set(skb, dst);
1537 1540
1538 /* 1541 /*
1539 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1542 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
@@ -1722,7 +1725,7 @@ int ip6mr_get_route(struct net *net,
1722{ 1725{
1723 int err; 1726 int err;
1724 struct mfc6_cache *cache; 1727 struct mfc6_cache *cache;
1725 struct rt6_info *rt = (struct rt6_info *)skb->dst; 1728 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1726 1729
1727 read_lock(&mrt_lock); 1730 read_lock(&mrt_lock);
1728 cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); 1731 cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
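
Registering the PIM protocol at ip6_mr_init() time instead of on demand from setsockopt() means it joins the init function's unwind ladder: each step that can fail jumps to a label that undoes only the steps completed before it, in reverse order. The shape, sketched with hypothetical step names:

        static int __init demo_init(void)
        {
                int err;

                err = register_step_a();        /* hypothetical setup steps */
                if (err)
                        goto a_fail;
                err = register_step_b();
                if (err)
                        goto b_fail;
                return 0;

        b_fail:
                unregister_step_a();            /* unwind strictly in reverse */
        a_fail:
                return err;
        }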
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a51fb33e6864..4b264ed40a8c 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1448,8 +1448,10 @@ static void mld_sendpack(struct sk_buff *skb)
1448 struct net *net = dev_net(skb->dev); 1448 struct net *net = dev_net(skb->dev);
1449 int err; 1449 int err;
1450 struct flowi fl; 1450 struct flowi fl;
1451 struct dst_entry *dst;
1452
1453 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1451 1454
1452 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
1453 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1455 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
1454 mldlen = skb->tail - skb->transport_header; 1456 mldlen = skb->tail - skb->transport_header;
1455 pip6->payload_len = htons(payload_len); 1457 pip6->payload_len = htons(payload_len);
@@ -1458,9 +1460,9 @@ static void mld_sendpack(struct sk_buff *skb)
1458 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), 1460 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
1459 mldlen, 0)); 1461 mldlen, 0));
1460 1462
1461 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); 1463 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1462 1464
1463 if (!skb->dst) { 1465 if (!dst) {
1464 err = -ENOMEM; 1466 err = -ENOMEM;
1465 goto err_out; 1467 goto err_out;
1466 } 1468 }
@@ -1469,17 +1471,20 @@ static void mld_sendpack(struct sk_buff *skb)
1469 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1471 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1470 skb->dev->ifindex); 1472 skb->dev->ifindex);
1471 1473
1472 err = xfrm_lookup(net, &skb->dst, &fl, NULL, 0); 1474 err = xfrm_lookup(net, &dst, &fl, NULL, 0);
1475 skb_dst_set(skb, dst);
1473 if (err) 1476 if (err)
1474 goto err_out; 1477 goto err_out;
1475 1478
1479 payload_len = skb->len;
1480
1476 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1481 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1477 dst_output); 1482 dst_output);
1478out: 1483out:
1479 if (!err) { 1484 if (!err) {
1480 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT); 1485 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1481 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); 1486 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1482 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTMCASTPKTS); 1487 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1483 } else 1488 } else
1484 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); 1489 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1485 1490
@@ -1772,11 +1777,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1772 IPV6_TLV_ROUTERALERT, 2, 0, 0, 1777 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1773 IPV6_TLV_PADN, 0 }; 1778 IPV6_TLV_PADN, 0 };
1774 struct flowi fl; 1779 struct flowi fl;
1780 struct dst_entry *dst;
1775 1781
1776 rcu_read_lock();
1777 IP6_INC_STATS(net, __in6_dev_get(dev),
1778 IPSTATS_MIB_OUTREQUESTS);
1779 rcu_read_unlock();
1780 if (type == ICMPV6_MGM_REDUCTION) 1782 if (type == ICMPV6_MGM_REDUCTION)
1781 snd_addr = &in6addr_linklocal_allrouters; 1783 snd_addr = &in6addr_linklocal_allrouters;
1782 else 1784 else
@@ -1786,6 +1788,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1786 payload_len = len + sizeof(ra); 1788 payload_len = len + sizeof(ra);
1787 full_len = sizeof(struct ipv6hdr) + payload_len; 1789 full_len = sizeof(struct ipv6hdr) + payload_len;
1788 1790
1791 rcu_read_lock();
1792 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1793 IPSTATS_MIB_OUT, full_len);
1794 rcu_read_unlock();
1795
1789 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); 1796 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
1790 1797
1791 if (skb == NULL) { 1798 if (skb == NULL) {
@@ -1824,8 +1831,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1824 1831
1825 idev = in6_dev_get(skb->dev); 1832 idev = in6_dev_get(skb->dev);
1826 1833
1827 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); 1834 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1828 if (!skb->dst) { 1835 if (!dst) {
1829 err = -ENOMEM; 1836 err = -ENOMEM;
1830 goto err_out; 1837 goto err_out;
1831 } 1838 }
@@ -1834,17 +1841,18 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1834 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1841 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1835 skb->dev->ifindex); 1842 skb->dev->ifindex);
1836 1843
1837 err = xfrm_lookup(net, &skb->dst, &fl, NULL, 0); 1844 err = xfrm_lookup(net, &dst, &fl, NULL, 0);
1838 if (err) 1845 if (err)
1839 goto err_out; 1846 goto err_out;
1840 1847
1848 skb_dst_set(skb, dst);
1841 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1849 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1842 dst_output); 1850 dst_output);
1843out: 1851out:
1844 if (!err) { 1852 if (!err) {
1845 ICMP6MSGOUT_INC_STATS(net, idev, type); 1853 ICMP6MSGOUT_INC_STATS(net, idev, type);
1846 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 1854 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1847 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTMCASTPKTS); 1855 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1848 } else 1856 } else
1849 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 1857 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1850 1858
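
With the dst field now private, mld_sendpack() and igmp6_send() can no longer hand &skb->dst to xfrm_lookup(); the route lives in a local variable until it is final and is only then attached with skb_dst_set(). Note also that payload_len is sampled before NF_HOOK(), since the skb may already be consumed when the statistics are bumped afterwards. A condensed, non-verbatim sketch of the send path (mld_xmit() is a hypothetical name):

        static int mld_xmit(struct net *net, struct inet6_dev *idev,
                            struct sk_buff *skb, struct flowi *fl)
        {
                struct dst_entry *dst;
                int payload_len, err;

                dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
                if (!dst)
                        return -ENOMEM;

                err = xfrm_lookup(net, &dst, fl, NULL, 0);      /* may replace dst */
                skb_dst_set(skb, dst);
                if (err) {
                        kfree_skb(skb);
                        return err;
                }

                payload_len = skb->len;         /* sample before NF_HOOK() may free skb */
                err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
                              dst_output);
                if (!err)
                        IP6_UPD_PO_STATS_BH(net, idev,
                                            IPSTATS_MIB_OUTMCAST, payload_len);
                return err;
        }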
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9f061d1adbc2..9eb68e92cc18 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -465,8 +465,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
465 1, &err); 465 1, &err);
466 if (!skb) { 466 if (!skb) {
467 ND_PRINTK0(KERN_ERR 467 ND_PRINTK0(KERN_ERR
468 "ICMPv6 ND: %s() failed to allocate an skb.\n", 468 "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
469 __func__); 469 __func__, err);
470 return NULL; 470 return NULL;
471 } 471 }
472 472
@@ -530,10 +530,10 @@ void ndisc_send_skb(struct sk_buff *skb,
530 return; 530 return;
531 } 531 }
532 532
533 skb->dst = dst; 533 skb_dst_set(skb, dst);
534 534
535 idev = in6_dev_get(dst->dev); 535 idev = in6_dev_get(dst->dev);
536 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS); 536 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
537 537
538 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 538 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
539 dst_output); 539 dst_output);
@@ -658,6 +658,7 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
658 &icmp6h, NULL, 658 &icmp6h, NULL,
659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0); 659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0);
660} 660}
661EXPORT_SYMBOL(ndisc_send_rs);
661 662
662 663
663static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb) 664static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
@@ -1561,8 +1562,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1561 1, &err); 1562 1, &err);
1562 if (buff == NULL) { 1563 if (buff == NULL) {
1563 ND_PRINTK0(KERN_ERR 1564 ND_PRINTK0(KERN_ERR
1564 "ICMPv6 Redirect: %s() failed to allocate an skb.\n", 1565 "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n",
1565 __func__); 1566 __func__, err);
1566 goto release; 1567 goto release;
1567 } 1568 }
1568 1569
@@ -1611,9 +1612,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1611 len, IPPROTO_ICMPV6, 1612 len, IPPROTO_ICMPV6,
1612 csum_partial(icmph, len, 0)); 1613 csum_partial(icmph, len, 0));
1613 1614
1614 buff->dst = dst; 1615 skb_dst_set(buff, dst);
1615 idev = in6_dev_get(dst->dev); 1616 idev = in6_dev_get(dst->dev);
1616	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);	1617	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, buff->len);
1617 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, 1618 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
1618 dst_output); 1619 dst_output);
1619 if (!err) { 1620 if (!err) {
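
Two small fixes ride along in ndisc.c: the allocation-failure messages now include the errno that sock_alloc_send_skb() returns through its out-parameter, so the specific failure reason is visible in the log, and ndisc_send_rs() gains an EXPORT_SYMBOL for the ISATAP router-solicitation timer added to net/ipv6/sit.c later in this patch. The out-parameter idiom:

        struct sk_buff *skb;
        int err;

        skb = sock_alloc_send_skb(sk, len, 1, &err);    /* err filled on failure */
        if (!skb) {
                ND_PRINTK0(KERN_ERR
                           "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
                           __func__, err);
                return NULL;
        }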
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 834cea69fb53..d5ed92b14346 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -12,7 +12,7 @@
12 12
13int ip6_route_me_harder(struct sk_buff *skb) 13int ip6_route_me_harder(struct sk_buff *skb)
14{ 14{
15 struct net *net = dev_net(skb->dst->dev); 15 struct net *net = dev_net(skb_dst(skb)->dev);
16 struct ipv6hdr *iph = ipv6_hdr(skb); 16 struct ipv6hdr *iph = ipv6_hdr(skb);
17 struct dst_entry *dst; 17 struct dst_entry *dst;
18 struct flowi fl = { 18 struct flowi fl = {
@@ -28,9 +28,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
28 28
29#ifdef CONFIG_XFRM 29#ifdef CONFIG_XFRM
30 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 30 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
31 xfrm_decode_session(skb, &fl, AF_INET6) == 0) 31 xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
32 if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0)) 32 struct dst_entry *dst2 = skb_dst(skb);
33
34 if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) {
35 skb_dst_set(skb, NULL);
33 return -1; 36 return -1;
37 }
38 skb_dst_set(skb, dst2);
39 }
34#endif 40#endif
35 41
36 if (dst->error) { 42 if (dst->error) {
@@ -41,9 +47,9 @@ int ip6_route_me_harder(struct sk_buff *skb)
41 } 47 }
42 48
43 /* Drop old route. */ 49 /* Drop old route. */
44 dst_release(skb->dst); 50 skb_dst_drop(skb);
45 51
46 skb->dst = dst; 52 skb_dst_set(skb, dst);
47 return 0; 53 return 0;
48} 54}
49EXPORT_SYMBOL(ip6_route_me_harder); 55EXPORT_SYMBOL(ip6_route_me_harder);
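
ip6_route_me_harder() is the one place where the conversion is not mechanical. xfrm_lookup() wants a struct dst_entry ** it can overwrite with an XFRM bundle, and on failure it is expected to have released the reference it was given, so leaving the skb pointing at it would risk a use-after-free. Hence the dst2 temporary, the explicit skb_dst_set(skb, NULL) on the error path, and the re-attach on success, condensed here:

        struct dst_entry *dst2 = skb_dst(skb);

        if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) {
                skb_dst_set(skb, NULL);         /* reference dropped by xfrm_lookup() */
                return -1;
        }
        skb_dst_set(skb, dst2);                 /* possibly an XFRM bundle now */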
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index b693f841aeb4..1cf3f0c6a959 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -598,7 +598,7 @@ static int __init ip6_queue_init(void)
598#ifdef CONFIG_SYSCTL 598#ifdef CONFIG_SYSCTL
599 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table); 599 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
600#endif 600#endif
601 status = nf_register_queue_handler(PF_INET6, &nfqh); 601 status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
602 if (status < 0) { 602 if (status < 0) {
603 printk(KERN_ERR "ip6_queue: failed to register queue handler\n"); 603 printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
604 goto cleanup_sysctl; 604 goto cleanup_sysctl;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 219e165aea10..ced1f2c0cb65 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -270,8 +270,8 @@ static struct nf_loginfo trace_loginfo = {
270/* Mildly perf critical (only if packet tracing is on) */ 270/* Mildly perf critical (only if packet tracing is on) */
271static inline int 271static inline int
272get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, 272get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname, 273 const char *hookname, const char **chainname,
274 char **comment, unsigned int *rulenum) 274 const char **comment, unsigned int *rulenum)
275{ 275{
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s); 276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
277 277
@@ -289,8 +289,8 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
289 && unconditional(&s->ipv6)) { 289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */ 290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname 291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY] 292 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN]; 293 : comments[NF_IP6_TRACE_COMMENT_RETURN];
294 } 294 }
295 return 1; 295 return 1;
296 } else 296 } else
@@ -309,14 +309,14 @@ static void trace_packet(struct sk_buff *skb,
309{ 309{
310 void *table_base; 310 void *table_base;
311 const struct ip6t_entry *root; 311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment; 312 const char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0; 313 unsigned int rulenum = 0;
314 314
315 table_base = (void *)private->entries[smp_processor_id()]; 315 table_base = private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]); 316 root = get_entry(table_base, private->hook_entry[hook]);
317 317
318 hookname = chainname = (char *)hooknames[hook]; 318 hookname = chainname = hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE]; 319 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
320 320
321 IP6T_ENTRY_ITERATE(root, 321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook], 322 private->size - private->hook_entry[hook],
@@ -329,6 +329,12 @@ static void trace_packet(struct sk_buff *skb,
329} 329}
330#endif 330#endif
331 331
332static inline __pure struct ip6t_entry *
333ip6t_next_entry(const struct ip6t_entry *entry)
334{
335 return (void *)entry + entry->next_offset;
336}
337
332/* Returns one of the generic firewall policies, like NF_ACCEPT. */ 338/* Returns one of the generic firewall policies, like NF_ACCEPT. */
333unsigned int 339unsigned int
334ip6t_do_table(struct sk_buff *skb, 340ip6t_do_table(struct sk_buff *skb,
@@ -337,6 +343,8 @@ ip6t_do_table(struct sk_buff *skb,
337 const struct net_device *out, 343 const struct net_device *out,
338 struct xt_table *table) 344 struct xt_table *table)
339{ 345{
346#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
347
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 348 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false; 349 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */ 350 /* Initializing verdict to NF_DROP keeps gcc happy. */
@@ -361,7 +369,7 @@ ip6t_do_table(struct sk_buff *skb,
361 mtpar.in = tgpar.in = in; 369 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out; 370 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6; 371 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook; 372 mtpar.hooknum = tgpar.hooknum = hook;
365 373
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 374 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
367 375
@@ -375,96 +383,86 @@ ip6t_do_table(struct sk_buff *skb,
375 back = get_entry(table_base, private->underflow[hook]); 383 back = get_entry(table_base, private->underflow[hook]);
376 384
377 do { 385 do {
386 struct ip6t_entry_target *t;
387
378 IP_NF_ASSERT(e); 388 IP_NF_ASSERT(e);
379 IP_NF_ASSERT(back); 389 IP_NF_ASSERT(back);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6, 390 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) { 391 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
382 struct ip6t_entry_target *t; 392 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
383 393 e = ip6t_next_entry(e);
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) 394 continue;
385 goto no_match; 395 }
386 396
387 ADD_COUNTER(e->counters, 397 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) + 398 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1); 399 sizeof(struct ipv6hdr), 1);
390 400
391 t = ip6t_get_target(e); 401 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target); 402 IP_NF_ASSERT(t->u.kernel.target);
393 403
394#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 404#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 405 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */ 406 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace)) 407 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out, 408 trace_packet(skb, hook, in, out,
399 table->name, private, e); 409 table->name, private, e);
400#endif 410#endif
401 /* Standard target? */ 411 /* Standard target? */
402 if (!t->u.kernel.target->target) { 412 if (!t->u.kernel.target->target) {
403 int v; 413 int v;
404 414
405 v = ((struct ip6t_standard_target *)t)->verdict; 415 v = ((struct ip6t_standard_target *)t)->verdict;
406 if (v < 0) { 416 if (v < 0) {
407 /* Pop from stack? */ 417 /* Pop from stack? */
408 if (v != IP6T_RETURN) { 418 if (v != IP6T_RETURN) {
409 verdict = (unsigned)(-v) - 1; 419 verdict = (unsigned)(-v) - 1;
410 break; 420 break;
411 }
412 e = back;
413 back = get_entry(table_base,
414 back->comefrom);
415 continue;
416 }
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
422 next->comefrom
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
425 back = next;
426 } 421 }
422 e = back;
423 back = get_entry(table_base, back->comefrom);
424 continue;
425 }
426 if (table_base + v != ip6t_next_entry(e)
427 && !(e->ipv6.flags & IP6T_F_GOTO)) {
428 /* Save old back ptr in next entry */
429 struct ip6t_entry *next = ip6t_next_entry(e);
430 next->comefrom = (void *)back - table_base;
431 /* set back pointer to next entry */
432 back = next;
433 }
427 434
428 e = get_entry(table_base, v); 435 e = get_entry(table_base, v);
429 } else { 436 continue;
430 /* Targets which reenter must return 437 }
431 abs. verdicts */
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
434 438
435#ifdef CONFIG_NETFILTER_DEBUG 439 /* Targets which reenter must return
436 ((struct ip6t_entry *)table_base)->comefrom 440 abs. verdicts */
437 = 0xeeeeeeec; 441 tgpar.target = t->u.kernel.target;
438#endif 442 tgpar.targinfo = t->data;
439 verdict = t->u.kernel.target->target(skb,
440 &tgpar);
441 443
442#ifdef CONFIG_NETFILTER_DEBUG 444#ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom 445 tb_comefrom = 0xeeeeeeec;
444 != 0xeeeeeeec
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
448 verdict = NF_DROP;
449 }
450 ((struct ip6t_entry *)table_base)->comefrom
451 = 0x57acc001;
452#endif 446#endif
453 if (verdict == IP6T_CONTINUE) 447 verdict = t->u.kernel.target->target(skb, &tgpar);
454 e = (void *)e + e->next_offset;
455 else
456 /* Verdict */
457 break;
458 }
459 } else {
460 448
461 no_match: 449#ifdef CONFIG_NETFILTER_DEBUG
462 e = (void *)e + e->next_offset; 450 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
451 printk("Target %s reentered!\n",
452 t->u.kernel.target->name);
453 verdict = NF_DROP;
463 } 454 }
455 tb_comefrom = 0x57acc001;
456#endif
457 if (verdict == IP6T_CONTINUE)
458 e = ip6t_next_entry(e);
459 else
460 /* Verdict */
461 break;
464 } while (!hotdrop); 462 } while (!hotdrop);
465 463
466#ifdef CONFIG_NETFILTER_DEBUG 464#ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; 465 tb_comefrom = NETFILTER_LINK_POISON;
468#endif 466#endif
469 xt_info_rdunlock_bh(); 467 xt_info_rdunlock_bh();
470 468
@@ -475,6 +473,8 @@ ip6t_do_table(struct sk_buff *skb,
475 return NF_DROP; 473 return NF_DROP;
476 else return verdict; 474 else return verdict;
477#endif 475#endif
476
477#undef tb_comefrom
478} 478}
479 479
480/* Figures out from what hook each rule can be called: returns 0 if 480/* Figures out from what hook each rule can be called: returns 0 if
@@ -2191,7 +2191,7 @@ static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2191static struct xt_target ip6t_standard_target __read_mostly = { 2191static struct xt_target ip6t_standard_target __read_mostly = {
2192 .name = IP6T_STANDARD_TARGET, 2192 .name = IP6T_STANDARD_TARGET,
2193 .targetsize = sizeof(int), 2193 .targetsize = sizeof(int),
2194 .family = AF_INET6, 2194 .family = NFPROTO_IPV6,
2195#ifdef CONFIG_COMPAT 2195#ifdef CONFIG_COMPAT
2196 .compatsize = sizeof(compat_int_t), 2196 .compatsize = sizeof(compat_int_t),
2197 .compat_from_user = compat_standard_from_user, 2197 .compat_from_user = compat_standard_from_user,
@@ -2203,7 +2203,7 @@ static struct xt_target ip6t_error_target __read_mostly = {
2203 .name = IP6T_ERROR_TARGET, 2203 .name = IP6T_ERROR_TARGET,
2204 .target = ip6t_error, 2204 .target = ip6t_error,
2205 .targetsize = IP6T_FUNCTION_MAXNAMELEN, 2205 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2206 .family = AF_INET6, 2206 .family = NFPROTO_IPV6,
2207}; 2207};
2208 2208
2209static struct nf_sockopt_ops ip6t_sockopts = { 2209static struct nf_sockopt_ops ip6t_sockopts = {
@@ -2229,17 +2229,17 @@ static struct xt_match icmp6_matchstruct __read_mostly = {
2229 .matchsize = sizeof(struct ip6t_icmp), 2229 .matchsize = sizeof(struct ip6t_icmp),
2230 .checkentry = icmp6_checkentry, 2230 .checkentry = icmp6_checkentry,
2231 .proto = IPPROTO_ICMPV6, 2231 .proto = IPPROTO_ICMPV6,
2232 .family = AF_INET6, 2232 .family = NFPROTO_IPV6,
2233}; 2233};
2234 2234
2235static int __net_init ip6_tables_net_init(struct net *net) 2235static int __net_init ip6_tables_net_init(struct net *net)
2236{ 2236{
2237 return xt_proto_init(net, AF_INET6); 2237 return xt_proto_init(net, NFPROTO_IPV6);
2238} 2238}
2239 2239
2240static void __net_exit ip6_tables_net_exit(struct net *net) 2240static void __net_exit ip6_tables_net_exit(struct net *net)
2241{ 2241{
2242 xt_proto_fini(net, AF_INET6); 2242 xt_proto_fini(net, NFPROTO_IPV6);
2243} 2243}
2244 2244
2245static struct pernet_operations ip6_tables_net_ops = { 2245static struct pernet_operations ip6_tables_net_ops = {
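
The large ip6t_do_table() hunk changes no behaviour; it is a de-nesting pass. The non-matching case becomes an early continue instead of an else branch, the repeated `(void *)e + e->next_offset` arithmetic moves into the __pure ip6t_next_entry() helper, and the casts around the comefrom debug word hide behind the function-scoped tb_comefrom macro. The general shape of the transformation, sketched with hypothetical match()/next()/act() helpers:

        /* before: the whole body sits one level deep inside if (match(e)) */
        while (e) {
                if (match(e)) {
                        act(e);
                        e = next(e);
                } else {
                        e = next(e);
                }
        }

        /* after: invert the test, continue early, the body stays flat */
        while (e) {
                if (!match(e)) {
                        e = next(e);
                        continue;
                }
                act(e);
                e = next(e);
        }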
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 5a2d0a41694a..5a7f00cd15ce 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -112,7 +112,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
112 return; 112 return;
113 } 113 }
114 114
115 nskb->dst = dst; 115 skb_dst_set(nskb, dst);
116 116
117 skb_reserve(nskb, hh_len + dst->header_len); 117 skb_reserve(nskb, hh_len + dst->header_len);
118 118
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9903227bf37c..642dcb127bab 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -95,18 +95,10 @@ static int icmpv6_packet(struct nf_conn *ct,
95 u_int8_t pf, 95 u_int8_t pf,
96 unsigned int hooknum) 96 unsigned int hooknum)
97{ 97{
98 /* Try to delete connection immediately after all replies: 98 /* Do not immediately delete the connection after the first
99 won't actually vanish as we still have skb, and del_timer 99 successful reply to avoid excessive conntrackd traffic
100	 means this will only run once even if count hits zero twice	100	 and to correctly handle duplicated ICMP echo replies. */
101 (theoretically possible with SMP) */ 101 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
102 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
103 if (atomic_dec_and_test(&ct->proto.icmp.count))
104 nf_ct_kill_acct(ct, ctinfo, skb);
105 } else {
106 atomic_inc(&ct->proto.icmp.count);
107 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, ct);
108 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
109 }
110 102
111 return NF_ACCEPT; 103 return NF_ACCEPT;
112} 104}
@@ -132,7 +124,6 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
132 type + 128); 124 type + 128);
133 return false; 125 return false;
134 } 126 }
135 atomic_set(&ct->proto.icmp.count, 0);
136 return true; 127 return true;
137} 128}
138 129
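
The ICMPv6 tracker used to count requests and kill the conntrack entry as soon as the reply count dropped to zero, which generated needless conntrackd events and broke on duplicated echo replies. The replacement lets every packet, in either direction, simply refresh the entry's timeout and allows the entry to age out naturally. The whole simplified handler, reconstructed from the hunks above (the parameter list is hedged to match the signature shown):

        static int icmpv6_packet(struct nf_conn *ct,
                                 const struct sk_buff *skb,
                                 unsigned int dataoff,
                                 enum ip_conntrack_info ctinfo,
                                 u_int8_t pf,
                                 unsigned int hooknum)
        {
                /* no per-direction bookkeeping: refresh and let it expire */
                nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
                return NF_ACCEPT;
        }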
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 058a5e4a60c3..f3aba255ad9f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -409,7 +409,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
409 /* If the first fragment is fragmented itself, we split 409 /* If the first fragment is fragmented itself, we split
410 * it to two chunks: the first with data and paged part 410 * it to two chunks: the first with data and paged part
411 * and the second, holding only fragments. */ 411 * and the second, holding only fragments. */
412 if (skb_shinfo(head)->frag_list) { 412 if (skb_has_frags(head)) {
413 struct sk_buff *clone; 413 struct sk_buff *clone;
414 int i, plen = 0; 414 int i, plen = 0;
415 415
@@ -420,7 +420,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
420 clone->next = head->next; 420 clone->next = head->next;
421 head->next = clone; 421 head->next = clone;
422 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 422 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
423 skb_shinfo(head)->frag_list = NULL; 423 skb_frag_list_init(head);
424 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 424 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
425 plen += skb_shinfo(head)->frags[i].size; 425 plen += skb_shinfo(head)->frags[i].size;
426 clone->len = clone->data_len = head->data_len - plen; 426 clone->len = clone->data_len = head->data_len - plen;
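
skb_has_frags(), skb_frag_list_init() and the skb_walk_frags() iterator seen in ip6_output.c replace open-coded pokes at skb_shinfo(skb)->frag_list. This patch only shows the call sites; their likely definitions in <linux/skbuff.h> are thin wrappers along these lines:

        static inline bool skb_has_frags(const struct sk_buff *skb)
        {
                return skb_shinfo(skb)->frag_list != NULL;
        }

        static inline void skb_frag_list_init(struct sk_buff *skb)
        {
                skb_shinfo(skb)->frag_list = NULL;
        }

        #define skb_walk_frags(skb, iter)       \
                for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)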
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 97c17fdd6f75..590ddefb7ffc 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -61,7 +61,7 @@ static const struct file_operations sockstat6_seq_fops = {
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
66 SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS), 66 SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS),
67 SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES), 67 SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES),
@@ -71,7 +71,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
71 SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS), 71 SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
72 SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS), 72 SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
73 SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), 73 SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
74 SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS), 74 SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTPKTS),
75 SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS), 75 SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
76 SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), 76 SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
77 SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), 77 SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
@@ -83,6 +83,12 @@ static struct snmp_mib snmp6_ipstats_list[] = {
83 SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES), 83 SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES),
84 SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS), 84 SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
85 SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), 85 SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
86 SNMP_MIB_ITEM("Ip6InOctets", IPSTATS_MIB_INOCTETS),
87 SNMP_MIB_ITEM("Ip6OutOctets", IPSTATS_MIB_OUTOCTETS),
88 SNMP_MIB_ITEM("Ip6InMcastOctets", IPSTATS_MIB_INMCASTOCTETS),
89 SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
90 SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
91 SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
86 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
87}; 93};
88 94
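
The six new Ip6*Octets rows are the user-visible payoff of the IP6_UPD_PO_STATS conversions: each transmit and receive path bumps a packet counter and an octet counter in one call, and Ip6InReceives/Ip6OutRequests now alias the renamed IPSTATS_MIB_INPKTS/OUTPKTS. A hypothetical expansion of the macro, for intuition only; the real one goes through the per-cpu SNMP helpers and token-pastes PKTS/OCTETS onto the base field:

        #define DEMO_UPD_PO_STATS(net, idev, basefield, octets)                 \
                do {                                                            \
                        IP6_INC_STATS(net, idev, basefield##PKTS);              \
                        IP6_ADD_STATS(net, idev, basefield##OCTETS, octets);    \
                } while (0)

        /* so IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len) counts one
         * packet in IPSTATS_MIB_OUTPKTS and skb->len octets in
         * IPSTATS_MIB_OUTOCTETS */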
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 61f6827e5906..36a090d87a3d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -625,7 +625,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
625 625
626 skb->priority = sk->sk_priority; 626 skb->priority = sk->sk_priority;
627 skb->mark = sk->sk_mark; 627 skb->mark = sk->sk_mark;
628 skb->dst = dst_clone(&rt->u.dst); 628 skb_dst_set(skb, dst_clone(&rt->u.dst));
629 629
630 skb_put(skb, length); 630 skb_put(skb, length);
631 skb_reset_network_header(skb); 631 skb_reset_network_header(skb);
@@ -638,7 +638,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
638 if (err) 638 if (err)
639 goto error_fault; 639 goto error_fault;
640 640
641 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); 641 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, 642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
643 dst_output); 643 dst_output);
644 if (err > 0) 644 if (err > 0)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e9ac7a12f595..2642a41a8535 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -267,7 +267,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
267 struct sk_buff *prev, *next; 267 struct sk_buff *prev, *next;
268 struct net_device *dev; 268 struct net_device *dev;
269 int offset, end; 269 int offset, end;
270 struct net *net = dev_net(skb->dst->dev); 270 struct net *net = dev_net(skb_dst(skb)->dev);
271 271
272 if (fq->q.last_in & INET_FRAG_COMPLETE) 272 if (fq->q.last_in & INET_FRAG_COMPLETE)
273 goto err; 273 goto err;
@@ -277,7 +277,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
277 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); 277 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
278 278
279 if ((unsigned int)end > IPV6_MAXPLEN) { 279 if ((unsigned int)end > IPV6_MAXPLEN) {
280 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 280 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
281 IPSTATS_MIB_INHDRERRORS); 281 IPSTATS_MIB_INHDRERRORS);
282 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 282 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
283 ((u8 *)&fhdr->frag_off - 283 ((u8 *)&fhdr->frag_off -
@@ -310,7 +310,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
310 /* RFC2460 says always send parameter problem in 310 /* RFC2460 says always send parameter problem in
311 * this case. -DaveM 311 * this case. -DaveM
312 */ 312 */
313 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), 313 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
314 IPSTATS_MIB_INHDRERRORS); 314 IPSTATS_MIB_INHDRERRORS);
315 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 315 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
316 offsetof(struct ipv6hdr, payload_len)); 316 offsetof(struct ipv6hdr, payload_len));
@@ -434,7 +434,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
434 return -1; 434 return -1;
435 435
436err: 436err:
437 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 437 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
438 IPSTATS_MIB_REASMFAILS); 438 IPSTATS_MIB_REASMFAILS);
439 kfree_skb(skb); 439 kfree_skb(skb);
440 return -1; 440 return -1;
@@ -494,7 +494,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
494 /* If the first fragment is fragmented itself, we split 494 /* If the first fragment is fragmented itself, we split
495 * it to two chunks: the first with data and paged part 495 * it to two chunks: the first with data and paged part
496 * and the second, holding only fragments. */ 496 * and the second, holding only fragments. */
497 if (skb_shinfo(head)->frag_list) { 497 if (skb_has_frags(head)) {
498 struct sk_buff *clone; 498 struct sk_buff *clone;
499 int i, plen = 0; 499 int i, plen = 0;
500 500
@@ -503,7 +503,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
503 clone->next = head->next; 503 clone->next = head->next;
504 head->next = clone; 504 head->next = clone;
505 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 505 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
506 skb_shinfo(head)->frag_list = NULL; 506 skb_frag_list_init(head);
507 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 507 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
508 plen += skb_shinfo(head)->frags[i].size; 508 plen += skb_shinfo(head)->frags[i].size;
509 clone->len = clone->data_len = head->data_len - plen; 509 clone->len = clone->data_len = head->data_len - plen;
@@ -576,9 +576,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
576 struct frag_hdr *fhdr; 576 struct frag_hdr *fhdr;
577 struct frag_queue *fq; 577 struct frag_queue *fq;
578 struct ipv6hdr *hdr = ipv6_hdr(skb); 578 struct ipv6hdr *hdr = ipv6_hdr(skb);
579 struct net *net = dev_net(skb->dst->dev); 579 struct net *net = dev_net(skb_dst(skb)->dev);
580 580
581 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS); 581 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
582 582
583 /* Jumbo payload inhibits frag. header */ 583 /* Jumbo payload inhibits frag. header */
584 if (hdr->payload_len==0) 584 if (hdr->payload_len==0)
@@ -595,17 +595,17 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
595 /* It is not a fragmented frame */ 595 /* It is not a fragmented frame */
596 skb->transport_header += sizeof(struct frag_hdr); 596 skb->transport_header += sizeof(struct frag_hdr);
597 IP6_INC_STATS_BH(net, 597 IP6_INC_STATS_BH(net,
598 ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); 598 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
599 599
600 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 600 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
601 return 1; 601 return 1;
602 } 602 }
603 603
604 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 604 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
605 ip6_evictor(net, ip6_dst_idev(skb->dst)); 605 ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
606 606
607 if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, 607 if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
608 ip6_dst_idev(skb->dst))) != NULL) { 608 ip6_dst_idev(skb_dst(skb)))) != NULL) {
609 int ret; 609 int ret;
610 610
611 spin_lock(&fq->q.lock); 611 spin_lock(&fq->q.lock);
@@ -617,12 +617,12 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
617 return ret; 617 return ret;
618 } 618 }
619 619
620 IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); 620 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
621 kfree_skb(skb); 621 kfree_skb(skb);
622 return -1; 622 return -1;
623 623
624fail_hdr: 624fail_hdr:
625 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); 625 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
626 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb)); 626 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
627 return -1; 627 return -1;
628} 628}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 032a5ec391c5..658293ea05ba 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -800,7 +800,7 @@ void ip6_route_input(struct sk_buff *skb)
800 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) 800 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
801 flags |= RT6_LOOKUP_F_IFACE; 801 flags |= RT6_LOOKUP_F_IFACE;
802 802
803 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); 803 skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input));
804} 804}
805 805
806static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, 806static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
@@ -911,7 +911,7 @@ static void ip6_link_failure(struct sk_buff *skb)
911 911
912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); 912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
913 913
914 rt = (struct rt6_info *) skb->dst; 914 rt = (struct rt6_info *) skb_dst(skb);
915 if (rt) { 915 if (rt) {
916 if (rt->rt6i_flags&RTF_CACHE) { 916 if (rt->rt6i_flags&RTF_CACHE) {
917 dst_set_expires(&rt->u.dst, 0); 917 dst_set_expires(&rt->u.dst, 0);
@@ -1868,7 +1868,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1868static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes) 1868static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
1869{ 1869{
1870 int type; 1870 int type;
1871 struct dst_entry *dst = skb->dst; 1871 struct dst_entry *dst = skb_dst(skb);
1872 switch (ipstats_mib_noroutes) { 1872 switch (ipstats_mib_noroutes) {
1873 case IPSTATS_MIB_INNOROUTES: 1873 case IPSTATS_MIB_INNOROUTES:
1874 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 1874 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
@@ -1895,7 +1895,7 @@ static int ip6_pkt_discard(struct sk_buff *skb)
1895 1895
1896static int ip6_pkt_discard_out(struct sk_buff *skb) 1896static int ip6_pkt_discard_out(struct sk_buff *skb)
1897{ 1897{
1898 skb->dev = skb->dst->dev; 1898 skb->dev = skb_dst(skb)->dev;
1899 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 1899 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1900} 1900}
1901 1901
@@ -1908,7 +1908,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb)
1908 1908
1909static int ip6_pkt_prohibit_out(struct sk_buff *skb) 1909static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1910{ 1910{
1911 skb->dev = skb->dst->dev; 1911 skb->dev = skb_dst(skb)->dev;
1912 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 1912 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1913} 1913}
1914 1914
@@ -2366,7 +2366,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2366 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); 2366 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2367 2367
2368 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); 2368 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2369 skb->dst = &rt->u.dst; 2369 skb_dst_set(skb, &rt->u.dst);
2370 2370
2371 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2371 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2372 RTM_NEWROUTE, NETLINK_CB(in_skb).pid, 2372 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 664ab82e03b2..68e52308e552 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -15,6 +15,7 @@
15 * Roger Venning <r.venning@telstra.com>: 6to4 support 15 * Roger Venning <r.venning@telstra.com>: 6to4 support
16 * Nate Thompson <nate@thebog.net>: 6to4 support 16 * Nate Thompson <nate@thebog.net>: 6to4 support
17 * Fred Templin <fred.l.templin@boeing.com>: isatap support 17 * Fred Templin <fred.l.templin@boeing.com>: isatap support
18 * Sascha Hlusiak <mail@saschahlusiak.de>: stateless autoconf for isatap
18 */ 19 */
19 20
20#include <linux/module.h> 21#include <linux/module.h>
@@ -80,7 +81,7 @@ struct sit_net {
80static DEFINE_RWLOCK(ipip6_lock); 81static DEFINE_RWLOCK(ipip6_lock);
81 82
82static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 83static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
83 __be32 remote, __be32 local) 84 struct net_device *dev, __be32 remote, __be32 local)
84{ 85{
85 unsigned h0 = HASH(remote); 86 unsigned h0 = HASH(remote);
86 unsigned h1 = HASH(local); 87 unsigned h1 = HASH(local);
@@ -89,18 +90,25 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
89 90
90 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) { 91 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) {
91 if (local == t->parms.iph.saddr && 92 if (local == t->parms.iph.saddr &&
92 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 93 remote == t->parms.iph.daddr &&
94 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
95 (t->dev->flags & IFF_UP))
93 return t; 96 return t;
94 } 97 }
95 for (t = sitn->tunnels_r[h0]; t; t = t->next) { 98 for (t = sitn->tunnels_r[h0]; t; t = t->next) {
96 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 99 if (remote == t->parms.iph.daddr &&
100 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
101 (t->dev->flags & IFF_UP))
97 return t; 102 return t;
98 } 103 }
99 for (t = sitn->tunnels_l[h1]; t; t = t->next) { 104 for (t = sitn->tunnels_l[h1]; t; t = t->next) {
100 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 105 if (local == t->parms.iph.saddr &&
106 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
107 (t->dev->flags & IFF_UP))
101 return t; 108 return t;
102 } 109 }
103 if ((t = sitn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) 110 t = sitn->tunnels_wc[0];
111 if ((t != NULL) && (t->dev->flags & IFF_UP))
104 return t; 112 return t;
105 return NULL; 113 return NULL;
106} 114}
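
The three-line condition now repeated in each lookup loop is a single predicate: a tunnel matches the receiving device when it is not bound to any underlying link (parms.link == 0), or when the device's underlying link index (iflink) equals the bound link. A sketch with stand-in types (names are illustrative, not kernel identifiers):

struct tnl_parms { int link; };		/* 0 = not bound to a link */
struct tnl { struct tnl_parms parms; };
struct net_dev { int iflink; };		/* index of the underlying device */

static int tnl_link_match(const struct tnl *t, const struct net_dev *dev)
{
	/* a NULL device (lookup not tied to an ingress device) matches */
	return !dev || !t->parms.link || dev->iflink == t->parms.link;
}
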
@@ -165,8 +173,14 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
165 struct sit_net *sitn = net_generic(net, sit_net_id); 173 struct sit_net *sitn = net_generic(net, sit_net_id);
166 174
167 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) { 175 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
168 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) 176 if (local == t->parms.iph.saddr &&
169 return t; 177 remote == t->parms.iph.daddr &&
178 parms->link == t->parms.link) {
179 if (create)
180 return NULL;
181 else
182 return t;
183 }
170 } 184 }
171 if (!create) 185 if (!create)
172 goto failed; 186 goto failed;
@@ -209,6 +223,44 @@ failed:
209 return NULL; 223 return NULL;
210} 224}
211 225
226static void ipip6_tunnel_rs_timer(unsigned long data)
227{
228 struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *) data;
229 struct inet6_dev *ifp;
230 struct inet6_ifaddr *addr;
231
232 spin_lock(&p->lock);
233 ifp = __in6_dev_get(p->tunnel->dev);
234
235 read_lock_bh(&ifp->lock);
236 for (addr = ifp->addr_list; addr; addr = addr->if_next) {
237 struct in6_addr rtr;
238
239 if (!(ipv6_addr_type(&addr->addr) & IPV6_ADDR_LINKLOCAL))
240 continue;
241
242 /* Send RS to guessed linklocal address of router
243 *
 244 * Better: send to ff02::2 encapsulated in unicast directly
245 * to router-v4 instead of guessing the v6 address.
246 *
247 * Cisco/Windows seem to not set the u/l bit correctly,
248 * so we won't guess right.
249 */
250 ipv6_addr_set(&rtr, htonl(0xFE800000), 0, 0, 0);
251 if (!__ipv6_isatap_ifid(rtr.s6_addr + 8,
252 p->addr)) {
253 ndisc_send_rs(p->tunnel->dev, &addr->addr, &rtr);
254 }
255 }
256 read_unlock_bh(&ifp->lock);
257
258 mod_timer(&p->rs_timer, jiffies + HZ * p->rs_delay);
259 spin_unlock(&p->lock);
260
261 return;
262}
263
212static struct ip_tunnel_prl_entry * 264static struct ip_tunnel_prl_entry *
213__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) 265__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
214{ 266{
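
ipip6_tunnel_rs_timer() above guesses each PRL router's link-local address by joining the fe80::/64 prefix set via ipv6_addr_set() with the ISATAP interface identifier that __ipv6_isatap_ifid() derives from the router's IPv4 address. A hedged sketch of the resulting layout; the real helper also decides whether to set the u/l bit (0x02 in byte 8), which is exactly the guess the comment warns can go wrong:

#include <stdint.h>
#include <string.h>

/* Build fe80::[02]00:5efe:a.b.c.d for a router whose IPv4 address is
 * v4 (network byte order). Byte 8 is left at 0x00 here; the kernel
 * helper may set 0x02 (the u/l bit) depending on the address class. */
static void isatap_guess_router(uint8_t addr[16], uint32_t v4)
{
	memset(addr, 0, 16);
	addr[0] = 0xfe;			/* fe80::/64 link-local prefix */
	addr[1] = 0x80;
	addr[10] = 0x5e;		/* ISATAP IFID: ...:5efe:v4 */
	addr[11] = 0xfe;
	memcpy(addr + 12, &v4, 4);
}
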
@@ -267,6 +319,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
267 continue; 319 continue;
268 kp[c].addr = prl->addr; 320 kp[c].addr = prl->addr;
269 kp[c].flags = prl->flags; 321 kp[c].flags = prl->flags;
322 kp[c].rs_delay = prl->rs_delay;
270 c++; 323 c++;
271 if (kprl.addr != htonl(INADDR_ANY)) 324 if (kprl.addr != htonl(INADDR_ANY))
272 break; 325 break;
@@ -316,11 +369,23 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
316 } 369 }
317 370
318 p->next = t->prl; 371 p->next = t->prl;
372 p->tunnel = t;
319 t->prl = p; 373 t->prl = p;
320 t->prl_count++; 374 t->prl_count++;
375
376 spin_lock_init(&p->lock);
377 setup_timer(&p->rs_timer, ipip6_tunnel_rs_timer, (unsigned long) p);
321update: 378update:
322 p->addr = a->addr; 379 p->addr = a->addr;
323 p->flags = a->flags; 380 p->flags = a->flags;
381 p->rs_delay = a->rs_delay;
382 if (p->rs_delay == 0)
383 p->rs_delay = IPTUNNEL_RS_DEFAULT_DELAY;
384 spin_lock(&p->lock);
385 del_timer(&p->rs_timer);
386 if (p->flags & PRL_DEFAULT)
387 mod_timer(&p->rs_timer, jiffies + 1);
388 spin_unlock(&p->lock);
324out: 389out:
325 write_unlock(&ipip6_lock); 390 write_unlock(&ipip6_lock);
326 return err; 391 return err;
@@ -339,6 +404,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
339 if ((*p)->addr == a->addr) { 404 if ((*p)->addr == a->addr) {
340 x = *p; 405 x = *p;
341 *p = x->next; 406 *p = x->next;
407 spin_lock(&x->lock);
408 del_timer(&x->rs_timer);
409 spin_unlock(&x->lock);
342 kfree(x); 410 kfree(x);
343 t->prl_count--; 411 t->prl_count--;
344 goto out; 412 goto out;
@@ -349,13 +417,16 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
349 while (t->prl) { 417 while (t->prl) {
350 x = t->prl; 418 x = t->prl;
351 t->prl = t->prl->next; 419 t->prl = t->prl->next;
420 spin_lock(&x->lock);
421 del_timer(&x->rs_timer);
422 spin_unlock(&x->lock);
352 kfree(x); 423 kfree(x);
353 t->prl_count--; 424 t->prl_count--;
354 } 425 }
355 } 426 }
356out: 427out:
357 write_unlock(&ipip6_lock); 428 write_unlock(&ipip6_lock);
358 return 0; 429 return err;
359} 430}
360 431
361static int 432static int
@@ -446,7 +517,10 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
446 err = -ENOENT; 517 err = -ENOENT;
447 518
448 read_lock(&ipip6_lock); 519 read_lock(&ipip6_lock);
449 t = ipip6_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 520 t = ipip6_tunnel_lookup(dev_net(skb->dev),
521 skb->dev,
522 iph->daddr,
523 iph->saddr);
450 if (t == NULL || t->parms.iph.daddr == 0) 524 if (t == NULL || t->parms.iph.daddr == 0)
451 goto out; 525 goto out;
452 526
@@ -481,8 +555,9 @@ static int ipip6_rcv(struct sk_buff *skb)
481 iph = ip_hdr(skb); 555 iph = ip_hdr(skb);
482 556
483 read_lock(&ipip6_lock); 557 read_lock(&ipip6_lock);
484 if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), 558 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
485 iph->saddr, iph->daddr)) != NULL) { 559 iph->saddr, iph->daddr);
560 if (tunnel != NULL) {
486 secpath_reset(skb); 561 secpath_reset(skb);
487 skb->mac_header = skb->network_header; 562 skb->mac_header = skb->network_header;
488 skb_reset_network_header(skb); 563 skb_reset_network_header(skb);
@@ -500,8 +575,7 @@ static int ipip6_rcv(struct sk_buff *skb)
500 tunnel->dev->stats.rx_packets++; 575 tunnel->dev->stats.rx_packets++;
501 tunnel->dev->stats.rx_bytes += skb->len; 576 tunnel->dev->stats.rx_bytes += skb->len;
502 skb->dev = tunnel->dev; 577 skb->dev = tunnel->dev;
503 dst_release(skb->dst); 578 skb_dst_drop(skb);
504 skb->dst = NULL;
505 nf_reset(skb); 579 nf_reset(skb);
506 ipip6_ecn_decapsulate(iph, skb); 580 ipip6_ecn_decapsulate(iph, skb);
507 netif_rx(skb); 581 netif_rx(skb);
@@ -563,8 +637,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
563 if (dev->priv_flags & IFF_ISATAP) { 637 if (dev->priv_flags & IFF_ISATAP) {
564 struct neighbour *neigh = NULL; 638 struct neighbour *neigh = NULL;
565 639
566 if (skb->dst) 640 if (skb_dst(skb))
567 neigh = skb->dst->neighbour; 641 neigh = skb_dst(skb)->neighbour;
568 642
569 if (neigh == NULL) { 643 if (neigh == NULL) {
570 if (net_ratelimit()) 644 if (net_ratelimit())
@@ -588,8 +662,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
588 if (!dst) { 662 if (!dst) {
589 struct neighbour *neigh = NULL; 663 struct neighbour *neigh = NULL;
590 664
591 if (skb->dst) 665 if (skb_dst(skb))
592 neigh = skb->dst->neighbour; 666 neigh = skb_dst(skb)->neighbour;
593 667
594 if (neigh == NULL) { 668 if (neigh == NULL) {
595 if (net_ratelimit()) 669 if (net_ratelimit())
@@ -639,7 +713,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
639 if (tiph->frag_off) 713 if (tiph->frag_off)
640 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 714 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
641 else 715 else
642 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 716 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
643 717
644 if (mtu < 68) { 718 if (mtu < 68) {
645 stats->collisions++; 719 stats->collisions++;
@@ -648,8 +722,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
648 } 722 }
649 if (mtu < IPV6_MIN_MTU) 723 if (mtu < IPV6_MIN_MTU)
650 mtu = IPV6_MIN_MTU; 724 mtu = IPV6_MIN_MTU;
651 if (tunnel->parms.iph.daddr && skb->dst) 725 if (tunnel->parms.iph.daddr && skb_dst(skb))
652 skb->dst->ops->update_pmtu(skb->dst, mtu); 726 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
653 727
654 if (skb->len > mtu) { 728 if (skb->len > mtu) {
655 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 729 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
@@ -693,8 +767,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
693 skb_reset_network_header(skb); 767 skb_reset_network_header(skb);
694 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 768 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
695 IPCB(skb)->flags = 0; 769 IPCB(skb)->flags = 0;
696 dst_release(skb->dst); 770 skb_dst_drop(skb);
697 skb->dst = &rt->u.dst; 771 skb_dst_set(skb, &rt->u.dst);
698 772
699 /* 773 /*
700 * Push down and install the IPIP header. 774 * Push down and install the IPIP header.
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 711175e0571f..8c2513982b61 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -131,7 +131,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
131 int mssind; 131 int mssind;
132 const __u16 mss = *mssp; 132 const __u16 mss = *mssp;
133 133
134 tcp_sk(sk)->last_synq_overflow = jiffies; 134 tcp_synq_overflow(sk);
135 135
136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
137 ; 137 ;
@@ -175,7 +175,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
175 if (!sysctl_tcp_syncookies || !th->ack) 175 if (!sysctl_tcp_syncookies || !th->ack)
176 goto out; 176 goto out;
177 177
178 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 178 if (tcp_synq_no_recent_overflow(sk) ||
179 (mss = cookie_check(skb, cookie)) == 0) { 179 (mss = cookie_check(skb, cookie)) == 0) {
180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); 180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
181 goto out; 181 goto out;
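
tcp_synq_overflow() and tcp_synq_no_recent_overflow() replace the open-coded jiffies bookkeeping on last_synq_overflow, so the IPv4 and IPv6 syncookie paths share one definition of a "recent" listen-queue overflow. Roughly what the pair encapsulates, as a self-contained sketch (the timestamp type and timeout value are stand-ins; the real helpers live in the TCP headers):

#include <stdbool.h>

typedef unsigned long tick_t;		/* stand-in for jiffies */
#define SYNQ_WINDOW	(3 * 1000)	/* illustrative TCP_TIMEOUT_INIT */

static void synq_overflow(tick_t *last_overflow, tick_t now)
{
	*last_overflow = now;		/* remember the overflow time */
}

static bool synq_no_recent_overflow(tick_t last_overflow, tick_t now)
{
	/* wrap-safe "now is after last_overflow + SYNQ_WINDOW" */
	return (long)(now - (last_overflow + SYNQ_WINDOW)) > 0;
}
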
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4b5aa1854260..53b6a4192b16 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -941,9 +941,10 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
941 return 0; 941 return 0;
942} 942}
943 943
944struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) 944static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
945 struct sk_buff *skb)
945{ 946{
946 struct ipv6hdr *iph = ipv6_hdr(skb); 947 struct ipv6hdr *iph = skb_gro_network_header(skb);
947 948
948 switch (skb->ip_summed) { 949 switch (skb->ip_summed) {
949 case CHECKSUM_COMPLETE: 950 case CHECKSUM_COMPLETE:
@@ -961,9 +962,8 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
961 962
962 return tcp_gro_receive(head, skb); 963 return tcp_gro_receive(head, skb);
963} 964}
964EXPORT_SYMBOL(tcp6_gro_receive);
965 965
966int tcp6_gro_complete(struct sk_buff *skb) 966static int tcp6_gro_complete(struct sk_buff *skb)
967{ 967{
968 struct ipv6hdr *iph = ipv6_hdr(skb); 968 struct ipv6hdr *iph = ipv6_hdr(skb);
969 struct tcphdr *th = tcp_hdr(skb); 969 struct tcphdr *th = tcp_hdr(skb);
@@ -974,7 +974,6 @@ int tcp6_gro_complete(struct sk_buff *skb)
974 974
975 return tcp_gro_complete(skb); 975 return tcp_gro_complete(skb);
976} 976}
977EXPORT_SYMBOL(tcp6_gro_complete);
978 977
979static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 978static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
980 u32 ts, struct tcp_md5sig_key *key, int rst) 979 u32 ts, struct tcp_md5sig_key *key, int rst)
@@ -982,9 +981,10 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
982 struct tcphdr *th = tcp_hdr(skb), *t1; 981 struct tcphdr *th = tcp_hdr(skb), *t1;
983 struct sk_buff *buff; 982 struct sk_buff *buff;
984 struct flowi fl; 983 struct flowi fl;
985 struct net *net = dev_net(skb->dst->dev); 984 struct net *net = dev_net(skb_dst(skb)->dev);
986 struct sock *ctl_sk = net->ipv6.tcp_sk; 985 struct sock *ctl_sk = net->ipv6.tcp_sk;
987 unsigned int tot_len = sizeof(struct tcphdr); 986 unsigned int tot_len = sizeof(struct tcphdr);
987 struct dst_entry *dst;
988 __be32 *topt; 988 __be32 *topt;
989 989
990 if (ts) 990 if (ts)
@@ -1053,8 +1053,9 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1053 * Underlying function will use this to retrieve the network 1053 * Underlying function will use this to retrieve the network
1054 * namespace 1054 * namespace
1055 */ 1055 */
1056 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { 1056 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1057 if (xfrm_lookup(net, &buff->dst, &fl, NULL, 0) >= 0) { 1057 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1058 skb_dst_set(buff, dst);
1058 ip6_xmit(ctl_sk, buff, &fl, NULL, 0); 1059 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1059 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 1060 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1060 if (rst) 1061 if (rst)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8905712cfbb8..fc333d854728 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -177,10 +177,9 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
177 177
178 if (unlikely(sk = skb_steal_sock(skb))) 178 if (unlikely(sk = skb_steal_sock(skb)))
179 return sk; 179 return sk;
180 else 180 return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
181 return __udp6_lib_lookup(dev_net(skb->dst->dev), &iph->saddr, sport, 181 &iph->daddr, dport, inet6_iif(skb),
182 &iph->daddr, dport, inet6_iif(skb), 182 udptable);
183 udptable);
184} 183}
185 184
186/* 185/*
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index e20529b4c825..3927832227b9 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -31,7 +31,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
31 */ 31 */
32static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 32static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
33{ 33{
34 struct dst_entry *dst = skb->dst; 34 struct dst_entry *dst = skb_dst(skb);
35 struct ipv6hdr *top_iph; 35 struct ipv6hdr *top_iph;
36 int dsfield; 36 int dsfield;
37 37
@@ -45,7 +45,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
45 45
46 memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, 46 memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
47 sizeof(top_iph->flow_lbl)); 47 sizeof(top_iph->flow_lbl));
48 top_iph->nexthdr = xfrm_af2proto(skb->dst->ops->family); 48 top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);
49 49
50 dsfield = XFRM_MODE_SKB_CB(skb)->tos; 50 dsfield = XFRM_MODE_SKB_CB(skb)->tos;
51 dsfield = INET_ECN_encapsulate(dsfield, dsfield); 51 dsfield = INET_ECN_encapsulate(dsfield, dsfield);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5ee5a031bc93..c4f4eef032a3 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(xfrm6_find_1stfragopt);
30static int xfrm6_tunnel_check_size(struct sk_buff *skb) 30static int xfrm6_tunnel_check_size(struct sk_buff *skb)
31{ 31{
32 int mtu, ret = 0; 32 int mtu, ret = 0;
33 struct dst_entry *dst = skb->dst; 33 struct dst_entry *dst = skb_dst(skb);
34 34
35 mtu = dst_mtu(dst); 35 mtu = dst_mtu(dst);
36 if (mtu < IPV6_MIN_MTU) 36 if (mtu < IPV6_MIN_MTU)
@@ -90,6 +90,6 @@ static int xfrm6_output_finish(struct sk_buff *skb)
90 90
91int xfrm6_output(struct sk_buff *skb) 91int xfrm6_output(struct sk_buff *skb)
92{ 92{
93 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dst->dev, 93 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev,
94 xfrm6_output_finish); 94 xfrm6_output_finish);
95} 95}
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 2562ebc1b22c..7af2e74deda8 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -982,17 +982,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
982{ 982{
983 struct sk_buff *tx_skb; 983 struct sk_buff *tx_skb;
984 struct sk_buff *skb; 984 struct sk_buff *skb;
985 int count;
986 985
987 IRDA_ASSERT(self != NULL, return;); 986 IRDA_ASSERT(self != NULL, return;);
988 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 987 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
989 988
990 /* Initialize variables */
991 count = skb_queue_len(&self->wx_list);
992
993 /* Resend unacknowledged frame(s) */ 989 /* Resend unacknowledged frame(s) */
994 skb = skb_peek(&self->wx_list); 990 skb_queue_walk(&self->wx_list, skb) {
995 while (skb != NULL) {
996 irlap_wait_min_turn_around(self, &self->qos_tx); 991 irlap_wait_min_turn_around(self, &self->qos_tx);
997 992
998 /* We copy the skb to be retransmitted since we will have to 993 /* We copy the skb to be retransmitted since we will have to
@@ -1011,21 +1006,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
1011 /* 1006 /*
1012 * Set poll bit on the last frame retransmitted 1007 * Set poll bit on the last frame retransmitted
1013 */ 1008 */
1014 if (count-- == 1) 1009 if (skb_queue_is_last(&self->wx_list, skb))
1015 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ 1010 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
1016 else 1011 else
1017 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ 1012 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */
1018 1013
1019 irlap_send_i_frame(self, tx_skb, command); 1014 irlap_send_i_frame(self, tx_skb, command);
1020
1021 /*
1022 * If our skb is the last buffer in the list, then
1023 * we are finished, if not, move to the next sk-buffer
1024 */
1025 if (skb == skb_peek_tail(&self->wx_list))
1026 skb = NULL;
1027 else
1028 skb = skb->next;
1029 } 1015 }
1030#if 0 /* Not yet */ 1016#if 0 /* Not yet */
1031 /* 1017 /*
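
The rewrite drops the manual frame count and the peek-tail exit test in favour of the generic queue-walk helpers. Their essence, sketched over a stand-in of the kernel's circular sk_buff list, where the head sentinel is the queue structure itself, which is what makes the is-last test a single pointer compare:

struct skb_node { struct skb_node *next, *prev; };
struct skb_head { struct skb_node *next, *prev; };	/* list sentinel */

/* walk every node without dequeuing, like skb_queue_walk() */
#define skb_walk_sketch(queue, skb)				\
	for ((skb) = (queue)->next;				\
	     (skb) != (struct skb_node *)(queue);		\
	     (skb) = (skb)->next)

/* like skb_queue_is_last(): last node points back at the sentinel */
static int skb_is_last_sketch(const struct skb_head *q,
			      const struct skb_node *skb)
{
	return skb->next == (const struct skb_node *)q;
}
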
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 2f05ec1037ab..8dd7ed7e7c1f 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -87,7 +87,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
87 if (!dev) 87 if (!dev)
88 return -ENODEV; 88 return -ENODEV;
89 89
90 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 90 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
91 if (!msg) { 91 if (!msg) {
92 dev_put(dev); 92 dev_put(dev);
93 return -ENOMEM; 93 return -ENOMEM;
@@ -148,21 +148,8 @@ static struct genl_ops irda_nl_ops[] = {
148 148
149int irda_nl_register(void) 149int irda_nl_register(void)
150{ 150{
151 int err, i; 151 return genl_register_family_with_ops(&irda_nl_family,
152 152 irda_nl_ops, ARRAY_SIZE(irda_nl_ops));
153 err = genl_register_family(&irda_nl_family);
154 if (err)
155 return err;
156
157 for (i = 0; i < ARRAY_SIZE(irda_nl_ops); i++) {
158 err = genl_register_ops(&irda_nl_family, &irda_nl_ops[i]);
159 if (err)
160 goto err_out;
161 }
162 return 0;
163 err_out:
164 genl_unregister_family(&irda_nl_family);
165 return err;
166} 153}
167 154
168void irda_nl_unregister(void) 155void irda_nl_unregister(void)
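
genl_register_family_with_ops() centralizes exactly the boilerplate deleted above: register the family, register each op in turn, and unwind the family registration if any op fails. A sketch mirroring the removed code (not the helper's verbatim source):

int genl_register_family_with_ops_sketch(struct genl_family *family,
					 struct genl_ops *ops, size_t n_ops)
{
	size_t i;
	int err;

	err = genl_register_family(family);
	if (err)
		return err;

	for (i = 0; i < n_ops; i++) {
		err = genl_register_ops(family, &ops[i]);
		if (err)
			goto err_out;
	}
	return 0;

err_out:
	genl_unregister_family(family);	/* unwinds the partial setup */
	return err;
}
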
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index b51c9187c347..656cbd195825 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * linux/net/iucv/af_iucv.c
3 *
4 * IUCV protocol stack for Linux on zSeries 2 * IUCV protocol stack for Linux on zSeries
5 * 3 *
6 * Copyright 2006 IBM Corporation 4 * Copyright IBM Corp. 2006, 2009
7 * 5 *
8 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> 6 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
7 * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
8 * PM functions:
9 * Ursula Braun <ursula.braun@de.ibm.com>
9 */ 10 */
10 11
11#define KMSG_COMPONENT "af_iucv" 12#define KMSG_COMPONENT "af_iucv"
@@ -29,10 +30,7 @@
29#include <net/iucv/iucv.h> 30#include <net/iucv/iucv.h>
30#include <net/iucv/af_iucv.h> 31#include <net/iucv/af_iucv.h>
31 32
32#define CONFIG_IUCV_SOCK_DEBUG 1 33#define VERSION "1.1"
33
34#define IPRMDATA 0x80
35#define VERSION "1.0"
36 34
37static char iucv_userid[80]; 35static char iucv_userid[80];
38 36
@@ -44,6 +42,19 @@ static struct proto iucv_proto = {
44 .obj_size = sizeof(struct iucv_sock), 42 .obj_size = sizeof(struct iucv_sock),
45}; 43};
46 44
45/* special AF_IUCV IPRM messages */
46static const u8 iprm_shutdown[8] =
47 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
48
49#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
50
51/* macros to set/get socket control buffer at correct offset */
52#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
53#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
54#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
55#define CB_TRGCLS_LEN (TRGCLS_SIZE)
56
57
47static void iucv_sock_kill(struct sock *sk); 58static void iucv_sock_kill(struct sock *sk);
48static void iucv_sock_close(struct sock *sk); 59static void iucv_sock_close(struct sock *sk);
49 60
@@ -54,6 +65,7 @@ static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
54static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], 65static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
55 u8 ipuser[16]); 66 u8 ipuser[16]);
56static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 67static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
68static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
57 69
58static struct iucv_sock_list iucv_sk_list = { 70static struct iucv_sock_list iucv_sk_list = {
59 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), 71 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
@@ -65,7 +77,8 @@ static struct iucv_handler af_iucv_handler = {
65 .path_complete = iucv_callback_connack, 77 .path_complete = iucv_callback_connack,
66 .path_severed = iucv_callback_connrej, 78 .path_severed = iucv_callback_connrej,
67 .message_pending = iucv_callback_rx, 79 .message_pending = iucv_callback_rx,
68 .message_complete = iucv_callback_txdone 80 .message_complete = iucv_callback_txdone,
81 .path_quiesced = iucv_callback_shutdown,
69}; 82};
70 83
71static inline void high_nmcpy(unsigned char *dst, char *src) 84static inline void high_nmcpy(unsigned char *dst, char *src)
@@ -78,6 +91,153 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
78 memcpy(&dst[8], src, 8); 91 memcpy(&dst[8], src, 8);
79} 92}
80 93
94static int afiucv_pm_prepare(struct device *dev)
95{
96#ifdef CONFIG_PM_DEBUG
97 printk(KERN_WARNING "afiucv_pm_prepare\n");
98#endif
99 return 0;
100}
101
102static void afiucv_pm_complete(struct device *dev)
103{
104#ifdef CONFIG_PM_DEBUG
105 printk(KERN_WARNING "afiucv_pm_complete\n");
106#endif
107 return;
108}
109
110/**
111 * afiucv_pm_freeze() - Freeze PM callback
112 * @dev: AFIUCV dummy device
113 *
 114 * Sever all established IUCV communication paths
115 */
116static int afiucv_pm_freeze(struct device *dev)
117{
118 struct iucv_sock *iucv;
119 struct sock *sk;
120 struct hlist_node *node;
121 int err = 0;
122
123#ifdef CONFIG_PM_DEBUG
124 printk(KERN_WARNING "afiucv_pm_freeze\n");
125#endif
126 read_lock(&iucv_sk_list.lock);
127 sk_for_each(sk, node, &iucv_sk_list.head) {
128 iucv = iucv_sk(sk);
129 skb_queue_purge(&iucv->send_skb_q);
130 skb_queue_purge(&iucv->backlog_skb_q);
131 switch (sk->sk_state) {
132 case IUCV_SEVERED:
133 case IUCV_DISCONN:
134 case IUCV_CLOSING:
135 case IUCV_CONNECTED:
136 if (iucv->path) {
137 err = iucv_path_sever(iucv->path, NULL);
138 iucv_path_free(iucv->path);
139 iucv->path = NULL;
140 }
141 break;
142 case IUCV_OPEN:
143 case IUCV_BOUND:
144 case IUCV_LISTEN:
145 case IUCV_CLOSED:
146 default:
147 break;
148 }
149 }
150 read_unlock(&iucv_sk_list.lock);
151 return err;
152}
153
154/**
155 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
156 * @dev: AFIUCV dummy device
157 *
158 * socket clean up after freeze
159 */
160static int afiucv_pm_restore_thaw(struct device *dev)
161{
162 struct iucv_sock *iucv;
163 struct sock *sk;
164 struct hlist_node *node;
165
166#ifdef CONFIG_PM_DEBUG
167 printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
168#endif
169 read_lock(&iucv_sk_list.lock);
170 sk_for_each(sk, node, &iucv_sk_list.head) {
171 iucv = iucv_sk(sk);
172 switch (sk->sk_state) {
173 case IUCV_CONNECTED:
174 sk->sk_err = EPIPE;
175 sk->sk_state = IUCV_DISCONN;
176 sk->sk_state_change(sk);
177 break;
178 case IUCV_DISCONN:
179 case IUCV_SEVERED:
180 case IUCV_CLOSING:
181 case IUCV_LISTEN:
182 case IUCV_BOUND:
183 case IUCV_OPEN:
184 default:
185 break;
186 }
187 }
188 read_unlock(&iucv_sk_list.lock);
189 return 0;
190}
191
192static struct dev_pm_ops afiucv_pm_ops = {
193 .prepare = afiucv_pm_prepare,
194 .complete = afiucv_pm_complete,
195 .freeze = afiucv_pm_freeze,
196 .thaw = afiucv_pm_restore_thaw,
197 .restore = afiucv_pm_restore_thaw,
198};
199
200static struct device_driver af_iucv_driver = {
201 .owner = THIS_MODULE,
202 .name = "afiucv",
203 .bus = &iucv_bus,
204 .pm = &afiucv_pm_ops,
205};
206
207/* dummy device used as trigger for PM functions */
208static struct device *af_iucv_dev;
209
210/**
211 * iucv_msg_length() - Returns the length of an iucv message.
212 * @msg: Pointer to struct iucv_message, MUST NOT be NULL
213 *
 214 * The function returns the length of the specified iucv message @msg,
 215 * whether the data is stored in a buffer or in the parameter list (PRMDATA).
216 *
217 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
218 * data:
219 * PRMDATA[0..6] socket data (max 7 bytes);
220 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
221 *
 222 * The socket data length is computed by subtracting the socket data length
223 * value from 0xFF.
 224 * If the socket data len is greater than 7, then PRMDATA can be used for special
225 * notifications (see iucv_sock_shutdown); and further,
226 * if the socket data len is > 7, the function returns 8.
227 *
228 * Use this function to allocate socket buffers to store iucv message data.
229 */
230static inline size_t iucv_msg_length(struct iucv_message *msg)
231{
232 size_t datalen;
233
234 if (msg->flags & IUCV_IPRMDATA) {
235 datalen = 0xff - msg->rmmsg[7];
236 return (datalen < 8) ? datalen : 8;
237 }
238 return msg->length;
239}
240
81/* Timers */ 241/* Timers */
82static void iucv_sock_timeout(unsigned long arg) 242static void iucv_sock_timeout(unsigned long arg)
83{ 243{
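
A worked example of the IPRM convention documented in iucv_msg_length(): up to seven bytes of socket data travel in the parameter list itself, and the eighth byte carries 0xff minus the data length, so 5 bytes of data encode as 0xff - 5 = 0xfa. A sketch of the packing side, matching what iucv_send_iprm() does further down:

#include <stdint.h>
#include <string.h>

/* Pack len <= 7 bytes of socket data plus the encoded length byte;
 * the receiver recovers the length as 0xff - prmdata[7]. */
static void pack_iprm(uint8_t prmdata[8], const void *data, uint8_t len)
{
	memcpy(prmdata, data, len);
	prmdata[7] = 0xff - len;	/* len 5 -> 0xfa, len 0 -> 0xff */
}
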
@@ -225,6 +385,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
225 spin_lock_init(&iucv_sk(sk)->message_q.lock); 385 spin_lock_init(&iucv_sk(sk)->message_q.lock);
226 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 386 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
227 iucv_sk(sk)->send_tag = 0; 387 iucv_sk(sk)->send_tag = 0;
388 iucv_sk(sk)->flags = 0;
389 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
228 iucv_sk(sk)->path = NULL; 390 iucv_sk(sk)->path = NULL;
229 memset(&iucv_sk(sk)->src_user_id , 0, 32); 391 memset(&iucv_sk(sk)->src_user_id , 0, 32);
230 392
@@ -248,11 +410,22 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
248{ 410{
249 struct sock *sk; 411 struct sock *sk;
250 412
251 if (sock->type != SOCK_STREAM) 413 if (protocol && protocol != PF_IUCV)
252 return -ESOCKTNOSUPPORT; 414 return -EPROTONOSUPPORT;
253 415
254 sock->state = SS_UNCONNECTED; 416 sock->state = SS_UNCONNECTED;
255 sock->ops = &iucv_sock_ops; 417
418 switch (sock->type) {
419 case SOCK_STREAM:
420 sock->ops = &iucv_sock_ops;
421 break;
422 case SOCK_SEQPACKET:
423 /* currently, proto ops can handle both sk types */
424 sock->ops = &iucv_sock_ops;
425 break;
426 default:
427 return -ESOCKTNOSUPPORT;
428 }
256 429
257 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); 430 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
258 if (!sk) 431 if (!sk)
@@ -463,11 +636,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
463 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) 636 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
464 return -EBADFD; 637 return -EBADFD;
465 638
466 if (sk->sk_type != SOCK_STREAM) 639 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
467 return -EINVAL; 640 return -EINVAL;
468 641
469 iucv = iucv_sk(sk);
470
471 if (sk->sk_state == IUCV_OPEN) { 642 if (sk->sk_state == IUCV_OPEN) {
472 err = iucv_sock_autobind(sk); 643 err = iucv_sock_autobind(sk);
473 if (unlikely(err)) 644 if (unlikely(err))
@@ -486,8 +657,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
486 657
487 iucv = iucv_sk(sk); 658 iucv = iucv_sk(sk);
488 /* Create path. */ 659 /* Create path. */
489 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT, 660 iucv->path = iucv_path_alloc(iucv->msglimit,
490 IPRMDATA, GFP_KERNEL); 661 IUCV_IPRMDATA, GFP_KERNEL);
491 if (!iucv->path) { 662 if (!iucv->path) {
492 err = -ENOMEM; 663 err = -ENOMEM;
493 goto done; 664 goto done;
@@ -521,8 +692,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
521 } 692 }
522 693
523 if (sk->sk_state == IUCV_DISCONN) { 694 if (sk->sk_state == IUCV_DISCONN) {
524 release_sock(sk); 695 err = -ECONNREFUSED;
525 return -ECONNREFUSED;
526 } 696 }
527 697
528 if (err) { 698 if (err) {
@@ -545,7 +715,10 @@ static int iucv_sock_listen(struct socket *sock, int backlog)
545 lock_sock(sk); 715 lock_sock(sk);
546 716
547 err = -EINVAL; 717 err = -EINVAL;
548 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM) 718 if (sk->sk_state != IUCV_BOUND)
719 goto done;
720
721 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
549 goto done; 722 goto done;
550 723
551 sk->sk_max_ack_backlog = backlog; 724 sk->sk_max_ack_backlog = backlog;
@@ -636,6 +809,30 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
636 return 0; 809 return 0;
637} 810}
638 811
812/**
813 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
814 * @path: IUCV path
815 * @msg: Pointer to a struct iucv_message
816 * @skb: The socket data to send, skb->len MUST BE <= 7
817 *
818 * Send the socket data in the parameter list in the iucv message
819 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
820 * list and the socket data len at index 7 (last byte).
821 * See also iucv_msg_length().
822 *
823 * Returns the error code from the iucv_message_send() call.
824 */
825static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
826 struct sk_buff *skb)
827{
828 u8 prmdata[8];
829
830 memcpy(prmdata, (void *) skb->data, skb->len);
831 prmdata[7] = 0xff - (u8) skb->len;
832 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
833 (void *) prmdata, 8);
834}
835
639static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, 836static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
640 struct msghdr *msg, size_t len) 837 struct msghdr *msg, size_t len)
641{ 838{
@@ -643,6 +840,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
643 struct iucv_sock *iucv = iucv_sk(sk); 840 struct iucv_sock *iucv = iucv_sk(sk);
644 struct sk_buff *skb; 841 struct sk_buff *skb;
645 struct iucv_message txmsg; 842 struct iucv_message txmsg;
843 struct cmsghdr *cmsg;
844 int cmsg_done;
646 char user_id[9]; 845 char user_id[9];
647 char appl_id[9]; 846 char appl_id[9];
648 int err; 847 int err;
@@ -654,6 +853,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
654 if (msg->msg_flags & MSG_OOB) 853 if (msg->msg_flags & MSG_OOB)
655 return -EOPNOTSUPP; 854 return -EOPNOTSUPP;
656 855
856 /* SOCK_SEQPACKET: we do not support segmented records */
857 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
858 return -EOPNOTSUPP;
859
657 lock_sock(sk); 860 lock_sock(sk);
658 861
659 if (sk->sk_shutdown & SEND_SHUTDOWN) { 862 if (sk->sk_shutdown & SEND_SHUTDOWN) {
@@ -662,6 +865,52 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
662 } 865 }
663 866
664 if (sk->sk_state == IUCV_CONNECTED) { 867 if (sk->sk_state == IUCV_CONNECTED) {
868 /* initialize defaults */
869 cmsg_done = 0; /* check for duplicate headers */
870 txmsg.class = 0;
871
872 /* iterate over control messages */
873 for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
874 cmsg = CMSG_NXTHDR(msg, cmsg)) {
875
876 if (!CMSG_OK(msg, cmsg)) {
877 err = -EINVAL;
878 goto out;
879 }
880
881 if (cmsg->cmsg_level != SOL_IUCV)
882 continue;
883
884 if (cmsg->cmsg_type & cmsg_done) {
885 err = -EINVAL;
886 goto out;
887 }
888 cmsg_done |= cmsg->cmsg_type;
889
890 switch (cmsg->cmsg_type) {
891 case SCM_IUCV_TRGCLS:
892 if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
893 err = -EINVAL;
894 goto out;
895 }
896
897 /* set iucv message target class */
898 memcpy(&txmsg.class,
899 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
900
901 break;
902
903 default:
904 err = -EINVAL;
905 goto out;
906 break;
907 }
908 }
909
910 /* allocate one skb for each iucv message:
911 * this is fine for SOCK_SEQPACKET (unless we want to support
912 * segmented records using the MSG_EOR flag), but
 913 * for SOCK_STREAM we might want to improve it in the future */
665 if (!(skb = sock_alloc_send_skb(sk, len, 914 if (!(skb = sock_alloc_send_skb(sk, len,
666 msg->msg_flags & MSG_DONTWAIT, 915 msg->msg_flags & MSG_DONTWAIT,
667 &err))) 916 &err)))
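
From userspace, the control-message loop above is driven through sendmsg() with an SCM_IUCV_TRGCLS ancillary block carrying the 4-byte target class. A hypothetical sketch; the constant values are assumptions standing in for the AF_IUCV header definitions:

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

#ifndef SOL_IUCV
#define SOL_IUCV	277	/* assumption: value from linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS	0x0001	/* assumption: value from af_iucv.h */
#endif

static ssize_t send_with_trgcls(int fd, const void *buf, size_t len,
				uint32_t trgcls)
{
	char cbuf[CMSG_SPACE(sizeof(trgcls))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_IUCV;	/* other levels are skipped */
	cmsg->cmsg_type = SCM_IUCV_TRGCLS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(trgcls));
	memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));

	return sendmsg(fd, &msg, 0);
}
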
@@ -672,13 +921,33 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
672 goto fail; 921 goto fail;
673 } 922 }
674 923
675 txmsg.class = 0; 924 /* increment and save iucv message tag for msg_completion cbk */
676 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
677 txmsg.tag = iucv->send_tag++; 925 txmsg.tag = iucv->send_tag++;
678 memcpy(skb->cb, &txmsg.tag, 4); 926 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
679 skb_queue_tail(&iucv->send_skb_q, skb); 927 skb_queue_tail(&iucv->send_skb_q, skb);
680 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 928
681 (void *) skb->data, skb->len); 929 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
930 && skb->len <= 7) {
931 err = iucv_send_iprm(iucv->path, &txmsg, skb);
932
933 /* on success: there is no message_complete callback
934 * for an IPRMDATA msg; remove skb from send queue */
935 if (err == 0) {
936 skb_unlink(skb, &iucv->send_skb_q);
937 kfree_skb(skb);
938 }
939
940 /* this error should never happen since the
941 * IUCV_IPRMDATA path flag is set... sever path */
942 if (err == 0x15) {
943 iucv_path_sever(iucv->path, NULL);
944 skb_unlink(skb, &iucv->send_skb_q);
945 err = -EPIPE;
946 goto fail;
947 }
948 } else
949 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
950 (void *) skb->data, skb->len);
682 if (err) { 951 if (err) {
683 if (err == 3) { 952 if (err == 3) {
684 user_id[8] = 0; 953 user_id[8] = 0;
@@ -725,6 +994,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
725 if (!nskb) 994 if (!nskb)
726 return -ENOMEM; 995 return -ENOMEM;
727 996
997 /* copy target class to control buffer of new skb */
998 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
999
1000 /* copy data fragment */
728 memcpy(nskb->data, skb->data + copied, size); 1001 memcpy(nskb->data, skb->data + copied, size);
729 copied += size; 1002 copied += size;
730 dataleft -= size; 1003 dataleft -= size;
@@ -744,19 +1017,33 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
744 struct iucv_message *msg) 1017 struct iucv_message *msg)
745{ 1018{
746 int rc; 1019 int rc;
1020 unsigned int len;
1021
1022 len = iucv_msg_length(msg);
1023
1024 /* store msg target class in the second 4 bytes of skb ctrl buffer */
1025 /* Note: the first 4 bytes are reserved for msg tag */
1026 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
747 1027
748 if (msg->flags & IPRMDATA) { 1028 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
749 skb->data = NULL; 1029 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
750 skb->len = 0; 1030 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
1031 skb->data = NULL;
1032 skb->len = 0;
1033 }
751 } else { 1034 } else {
752 rc = iucv_message_receive(path, msg, 0, skb->data, 1035 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
753 msg->length, NULL); 1036 skb->data, len, NULL);
754 if (rc) { 1037 if (rc) {
755 kfree_skb(skb); 1038 kfree_skb(skb);
756 return; 1039 return;
757 } 1040 }
758 if (skb->truesize >= sk->sk_rcvbuf / 4) { 1041 /* we need to fragment iucv messages for SOCK_STREAM only;
759 rc = iucv_fragment_skb(sk, skb, msg->length); 1042 * for SOCK_SEQPACKET, it is only relevant if we support
1043 * record segmentation using MSG_EOR (see also recvmsg()) */
1044 if (sk->sk_type == SOCK_STREAM &&
1045 skb->truesize >= sk->sk_rcvbuf / 4) {
1046 rc = iucv_fragment_skb(sk, skb, len);
760 kfree_skb(skb); 1047 kfree_skb(skb);
761 skb = NULL; 1048 skb = NULL;
762 if (rc) { 1049 if (rc) {
@@ -767,7 +1054,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
767 } else { 1054 } else {
768 skb_reset_transport_header(skb); 1055 skb_reset_transport_header(skb);
769 skb_reset_network_header(skb); 1056 skb_reset_network_header(skb);
770 skb->len = msg->length; 1057 skb->len = len;
771 } 1058 }
772 } 1059 }
773 1060
@@ -782,7 +1069,7 @@ static void iucv_process_message_q(struct sock *sk)
782 struct sock_msg_q *p, *n; 1069 struct sock_msg_q *p, *n;
783 1070
784 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 1071 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
785 skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); 1072 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
786 if (!skb) 1073 if (!skb)
787 break; 1074 break;
788 iucv_process_message(sk, skb, p->path, &p->msg); 1075 iucv_process_message(sk, skb, p->path, &p->msg);
@@ -799,7 +1086,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
799 int noblock = flags & MSG_DONTWAIT; 1086 int noblock = flags & MSG_DONTWAIT;
800 struct sock *sk = sock->sk; 1087 struct sock *sk = sock->sk;
801 struct iucv_sock *iucv = iucv_sk(sk); 1088 struct iucv_sock *iucv = iucv_sk(sk);
802 int target, copied = 0; 1089 unsigned int copied, rlen;
803 struct sk_buff *skb, *rskb, *cskb; 1090 struct sk_buff *skb, *rskb, *cskb;
804 int err = 0; 1091 int err = 0;
805 1092
@@ -812,8 +1099,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
812 if (flags & (MSG_OOB)) 1099 if (flags & (MSG_OOB))
813 return -EOPNOTSUPP; 1100 return -EOPNOTSUPP;
814 1101
815 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
816
817 /* receive/dequeue next skb: 1102 /* receive/dequeue next skb:
818 * the function understands MSG_PEEK and, thus, does not dequeue skb */ 1103 * the function understands MSG_PEEK and, thus, does not dequeue skb */
819 skb = skb_recv_datagram(sk, flags, noblock, &err); 1104 skb = skb_recv_datagram(sk, flags, noblock, &err);
@@ -823,25 +1108,45 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
823 return err; 1108 return err;
824 } 1109 }
825 1110
826 copied = min_t(unsigned int, skb->len, len); 1111 rlen = skb->len; /* real length of skb */
1112 copied = min_t(unsigned int, rlen, len);
827 1113
828 cskb = skb; 1114 cskb = skb;
829 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 1115 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
830 skb_queue_head(&sk->sk_receive_queue, skb); 1116 if (!(flags & MSG_PEEK))
831 if (copied == 0) 1117 skb_queue_head(&sk->sk_receive_queue, skb);
832 return -EFAULT; 1118 return -EFAULT;
833 goto done; 1119 }
1120
1121 /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1122 if (sk->sk_type == SOCK_SEQPACKET) {
1123 if (copied < rlen)
1124 msg->msg_flags |= MSG_TRUNC;
1125 /* each iucv message contains a complete record */
1126 msg->msg_flags |= MSG_EOR;
834 } 1127 }
835 1128
836 len -= copied; 1129 /* create control message to store iucv msg target class:
1130 * get the trgcls from the control buffer of the skb due to
1131 * fragmentation of original iucv message. */
1132 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1133 CB_TRGCLS_LEN, CB_TRGCLS(skb));
1134 if (err) {
1135 if (!(flags & MSG_PEEK))
1136 skb_queue_head(&sk->sk_receive_queue, skb);
1137 return err;
1138 }
837 1139
838 /* Mark read part of skb as used */ 1140 /* Mark read part of skb as used */
839 if (!(flags & MSG_PEEK)) { 1141 if (!(flags & MSG_PEEK)) {
840 skb_pull(skb, copied);
841 1142
842 if (skb->len) { 1143 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
843 skb_queue_head(&sk->sk_receive_queue, skb); 1144 if (sk->sk_type == SOCK_STREAM) {
844 goto done; 1145 skb_pull(skb, copied);
1146 if (skb->len) {
1147 skb_queue_head(&sk->sk_receive_queue, skb);
1148 goto done;
1149 }
845 } 1150 }
846 1151
847 kfree_skb(skb); 1152 kfree_skb(skb);
@@ -866,7 +1171,11 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
866 } 1171 }
867 1172
868done: 1173done:
869 return err ? : copied; 1174 /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1175 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1176 copied = rlen;
1177
1178 return copied;
870} 1179}
871 1180
872static inline unsigned int iucv_accept_poll(struct sock *parent) 1181static inline unsigned int iucv_accept_poll(struct sock *parent)
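
On the receive side, the SOCK_SEQPACKET semantics added above mean: each recvmsg() consumes one complete record and sets MSG_EOR in msg_flags; a too-small buffer sets MSG_TRUNC in msg_flags; and only when the caller also passes MSG_TRUNC in flags does the call return the full record length instead of the copied length. A hypothetical userspace sketch that reads one record and reports truncation:

#include <sys/types.h>
#include <sys/socket.h>

static ssize_t recv_record(int fd, void *buf, size_t len, int *truncated)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n = recvmsg(fd, &msg, 0);	/* n = copied length here */

	if (n >= 0)
		*truncated = !!(msg.msg_flags & MSG_TRUNC);
	return n;
}
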
@@ -928,7 +1237,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
928 struct iucv_sock *iucv = iucv_sk(sk); 1237 struct iucv_sock *iucv = iucv_sk(sk);
929 struct iucv_message txmsg; 1238 struct iucv_message txmsg;
930 int err = 0; 1239 int err = 0;
931 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
932 1240
933 how++; 1241 how++;
934 1242
@@ -953,7 +1261,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
953 txmsg.class = 0; 1261 txmsg.class = 0;
954 txmsg.tag = 0; 1262 txmsg.tag = 0;
955 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1263 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
956 (void *) prmmsg, 8); 1264 (void *) iprm_shutdown, 8);
957 if (err) { 1265 if (err) {
958 switch (err) { 1266 switch (err) {
959 case 1: 1267 case 1:
@@ -1007,6 +1315,98 @@ static int iucv_sock_release(struct socket *sock)
1007 return err; 1315 return err;
1008} 1316}
1009 1317
1318/* getsockopt and setsockopt */
1319static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1320 char __user *optval, int optlen)
1321{
1322 struct sock *sk = sock->sk;
1323 struct iucv_sock *iucv = iucv_sk(sk);
1324 int val;
1325 int rc;
1326
1327 if (level != SOL_IUCV)
1328 return -ENOPROTOOPT;
1329
1330 if (optlen < sizeof(int))
1331 return -EINVAL;
1332
1333 if (get_user(val, (int __user *) optval))
1334 return -EFAULT;
1335
1336 rc = 0;
1337
1338 lock_sock(sk);
1339 switch (optname) {
1340 case SO_IPRMDATA_MSG:
1341 if (val)
1342 iucv->flags |= IUCV_IPRMDATA;
1343 else
1344 iucv->flags &= ~IUCV_IPRMDATA;
1345 break;
1346 case SO_MSGLIMIT:
1347 switch (sk->sk_state) {
1348 case IUCV_OPEN:
1349 case IUCV_BOUND:
1350 if (val < 1 || val > (u16)(~0))
1351 rc = -EINVAL;
1352 else
1353 iucv->msglimit = val;
1354 break;
1355 default:
1356 rc = -EINVAL;
1357 break;
1358 }
1359 break;
1360 default:
1361 rc = -ENOPROTOOPT;
1362 break;
1363 }
1364 release_sock(sk);
1365
1366 return rc;
1367}
1368
1369static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1370 char __user *optval, int __user *optlen)
1371{
1372 struct sock *sk = sock->sk;
1373 struct iucv_sock *iucv = iucv_sk(sk);
1374 int val, len;
1375
1376 if (level != SOL_IUCV)
1377 return -ENOPROTOOPT;
1378
1379 if (get_user(len, optlen))
1380 return -EFAULT;
1381
1382 if (len < 0)
1383 return -EINVAL;
1384
1385 len = min_t(unsigned int, len, sizeof(int));
1386
1387 switch (optname) {
1388 case SO_IPRMDATA_MSG:
1389 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1390 break;
1391 case SO_MSGLIMIT:
1392 lock_sock(sk);
1393 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1394 : iucv->msglimit; /* default */
1395 release_sock(sk);
1396 break;
1397 default:
1398 return -ENOPROTOOPT;
1399 }
1400
1401 if (put_user(len, optlen))
1402 return -EFAULT;
1403 if (copy_to_user(optval, &val, len))
1404 return -EFAULT;
1405
1406 return 0;
1407}
1408
1409
1010/* Callback wrappers - called from iucv base support */ 1410/* Callback wrappers - called from iucv base support */
1011static int iucv_callback_connreq(struct iucv_path *path, 1411static int iucv_callback_connreq(struct iucv_path *path,
1012 u8 ipvmid[8], u8 ipuser[16]) 1412 u8 ipvmid[8], u8 ipuser[16])
@@ -1060,7 +1460,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1060 } 1460 }
1061 1461
1062 /* Create the new socket */ 1462 /* Create the new socket */
1063 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); 1463 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1064 if (!nsk) { 1464 if (!nsk) {
1065 err = iucv_path_sever(path, user_data); 1465 err = iucv_path_sever(path, user_data);
1066 iucv_path_free(path); 1466 iucv_path_free(path);
@@ -1083,7 +1483,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1083 memcpy(nuser_data + 8, niucv->src_name, 8); 1483 memcpy(nuser_data + 8, niucv->src_name, 8);
1084 ASCEBC(nuser_data + 8, 8); 1484 ASCEBC(nuser_data + 8, 8);
1085 1485
1086 path->msglim = IUCV_QUEUELEN_DEFAULT; 1486 /* set message limit for path based on msglimit of accepting socket */
1487 niucv->msglimit = iucv->msglimit;
1488 path->msglim = iucv->msglimit;
1087 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1489 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
1088 if (err) { 1490 if (err) {
1089 err = iucv_path_sever(path, user_data); 1491 err = iucv_path_sever(path, user_data);
@@ -1131,19 +1533,17 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1131 goto save_message; 1533 goto save_message;
1132 1534
1133 len = atomic_read(&sk->sk_rmem_alloc); 1535 len = atomic_read(&sk->sk_rmem_alloc);
1134 len += msg->length + sizeof(struct sk_buff); 1536 len += iucv_msg_length(msg) + sizeof(struct sk_buff);
1135 if (len > sk->sk_rcvbuf) 1537 if (len > sk->sk_rcvbuf)
1136 goto save_message; 1538 goto save_message;
1137 1539
1138 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); 1540 skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1139 if (!skb) 1541 if (!skb)
1140 goto save_message; 1542 goto save_message;
1141 1543
1142 iucv_process_message(sk, skb, path, msg); 1544 iucv_process_message(sk, skb, path, msg);
1143 goto out_unlock; 1545 goto out_unlock;
1144 1546
1145 return;
1146
1147save_message: 1547save_message:
1148 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); 1548 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1149 if (!save_msg) 1549 if (!save_msg)
@@ -1170,7 +1570,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1170 spin_lock_irqsave(&list->lock, flags); 1570 spin_lock_irqsave(&list->lock, flags);
1171 1571
1172 while (list_skb != (struct sk_buff *)list) { 1572 while (list_skb != (struct sk_buff *)list) {
1173 if (!memcmp(&msg->tag, list_skb->cb, 4)) { 1573 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1174 this = list_skb; 1574 this = list_skb;
1175 break; 1575 break;
1176 } 1576 }
@@ -1206,6 +1606,21 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1206 sk->sk_state_change(sk); 1606 sk->sk_state_change(sk);
1207} 1607}
1208 1608
1609/* called if the other communication side shuts down its RECV direction;
1610 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1611 */
1612static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1613{
1614 struct sock *sk = path->private;
1615
1616 bh_lock_sock(sk);
1617 if (sk->sk_state != IUCV_CLOSED) {
1618 sk->sk_shutdown |= SEND_SHUTDOWN;
1619 sk->sk_state_change(sk);
1620 }
1621 bh_unlock_sock(sk);
1622}
1623
1209static struct proto_ops iucv_sock_ops = { 1624static struct proto_ops iucv_sock_ops = {
1210 .family = PF_IUCV, 1625 .family = PF_IUCV,
1211 .owner = THIS_MODULE, 1626 .owner = THIS_MODULE,
@@ -1222,8 +1637,8 @@ static struct proto_ops iucv_sock_ops = {
1222 .mmap = sock_no_mmap, 1637 .mmap = sock_no_mmap,
1223 .socketpair = sock_no_socketpair, 1638 .socketpair = sock_no_socketpair,
1224 .shutdown = iucv_sock_shutdown, 1639 .shutdown = iucv_sock_shutdown,
1225 .setsockopt = sock_no_setsockopt, 1640 .setsockopt = iucv_sock_setsockopt,
1226 .getsockopt = sock_no_getsockopt 1641 .getsockopt = iucv_sock_getsockopt,
1227}; 1642};
1228 1643
1229static struct net_proto_family iucv_sock_family_ops = { 1644static struct net_proto_family iucv_sock_family_ops = {
@@ -1258,8 +1673,30 @@ static int __init afiucv_init(void)
1258 err = sock_register(&iucv_sock_family_ops); 1673 err = sock_register(&iucv_sock_family_ops);
1259 if (err) 1674 if (err)
1260 goto out_proto; 1675 goto out_proto;
1676 /* establish dummy device */
1677 err = driver_register(&af_iucv_driver);
1678 if (err)
1679 goto out_sock;
1680 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1681 if (!af_iucv_dev) {
1682 err = -ENOMEM;
1683 goto out_driver;
1684 }
1685 dev_set_name(af_iucv_dev, "af_iucv");
1686 af_iucv_dev->bus = &iucv_bus;
1687 af_iucv_dev->parent = iucv_root;
1688 af_iucv_dev->release = (void (*)(struct device *))kfree;
1689 af_iucv_dev->driver = &af_iucv_driver;
1690 err = device_register(af_iucv_dev);
1691 if (err)
1692 goto out_driver;
1693
1261 return 0; 1694 return 0;
1262 1695
1696out_driver:
1697 driver_unregister(&af_iucv_driver);
1698out_sock:
1699 sock_unregister(PF_IUCV);
1263out_proto: 1700out_proto:
1264 proto_unregister(&iucv_proto); 1701 proto_unregister(&iucv_proto);
1265out_iucv: 1702out_iucv:
@@ -1270,6 +1707,8 @@ out:
1270 1707
1271static void __exit afiucv_exit(void) 1708static void __exit afiucv_exit(void)
1272{ 1709{
1710 device_unregister(af_iucv_dev);
1711 driver_unregister(&af_iucv_driver);
1273 sock_unregister(PF_IUCV); 1712 sock_unregister(PF_IUCV);
1274 proto_unregister(&iucv_proto); 1713 proto_unregister(&iucv_proto);
1275 iucv_unregister(&af_iucv_handler, 0); 1714 iucv_unregister(&af_iucv_handler, 0);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a35240f61ec3..c833481d32e3 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * IUCV base infrastructure. 2 * IUCV base infrastructure.
3 * 3 *
4 * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation 4 * Copyright IBM Corp. 2001, 2009
5 *
5 * Author(s): 6 * Author(s):
6 * Original source: 7 * Original source:
7 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 8 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
@@ -10,6 +11,8 @@
10 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 11 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
11 * Rewritten for af_iucv: 12 * Rewritten for af_iucv:
12 * Martin Schwidefsky <schwidefsky@de.ibm.com> 13 * Martin Schwidefsky <schwidefsky@de.ibm.com>
14 * PM functions:
15 * Ursula Braun (ursula.braun@de.ibm.com)
13 * 16 *
14 * Documentation used: 17 * Documentation used:
15 * The original source 18 * The original source
@@ -45,6 +48,7 @@
45#include <linux/err.h> 48#include <linux/err.h>
46#include <linux/device.h> 49#include <linux/device.h>
47#include <linux/cpu.h> 50#include <linux/cpu.h>
51#include <linux/reboot.h>
48#include <net/iucv/iucv.h> 52#include <net/iucv/iucv.h>
49#include <asm/atomic.h> 53#include <asm/atomic.h>
50#include <asm/ebcdic.h> 54#include <asm/ebcdic.h>
@@ -75,9 +79,24 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv)
75 return 0; 79 return 0;
76} 80}
77 81
82static int iucv_pm_prepare(struct device *);
83static void iucv_pm_complete(struct device *);
84static int iucv_pm_freeze(struct device *);
85static int iucv_pm_thaw(struct device *);
86static int iucv_pm_restore(struct device *);
87
88static struct dev_pm_ops iucv_pm_ops = {
89 .prepare = iucv_pm_prepare,
90 .complete = iucv_pm_complete,
91 .freeze = iucv_pm_freeze,
92 .thaw = iucv_pm_thaw,
93 .restore = iucv_pm_restore,
94};
95
78struct bus_type iucv_bus = { 96struct bus_type iucv_bus = {
79 .name = "iucv", 97 .name = "iucv",
80 .match = iucv_bus_match, 98 .match = iucv_bus_match,
99 .pm = &iucv_pm_ops,
81}; 100};
82EXPORT_SYMBOL(iucv_bus); 101EXPORT_SYMBOL(iucv_bus);
83 102
@@ -147,6 +166,7 @@ enum iucv_command_codes {
147 IUCV_RESUME = 14, 166 IUCV_RESUME = 14,
148 IUCV_SEVER = 15, 167 IUCV_SEVER = 15,
149 IUCV_SETMASK = 16, 168 IUCV_SETMASK = 16,
169 IUCV_SETCONTROLMASK = 17,
150}; 170};
151 171
152/* 172/*
@@ -280,6 +300,7 @@ union iucv_param {
280 * Anchor for per-cpu IUCV command parameter block. 300 * Anchor for per-cpu IUCV command parameter block.
281 */ 301 */
282static union iucv_param *iucv_param[NR_CPUS]; 302static union iucv_param *iucv_param[NR_CPUS];
303static union iucv_param *iucv_param_irq[NR_CPUS];
283 304
284/** 305/**
285 * iucv_call_b2f0 306 * iucv_call_b2f0
@@ -358,11 +379,23 @@ static void iucv_allow_cpu(void *data)
358 * 0x10 - Flag to allow priority message completion interrupts 379 * 0x10 - Flag to allow priority message completion interrupts
359 * 0x08 - Flag to allow IUCV control interrupts 380 * 0x08 - Flag to allow IUCV control interrupts
360 */ 381 */
361 parm = iucv_param[cpu]; 382 parm = iucv_param_irq[cpu];
362 memset(parm, 0, sizeof(union iucv_param)); 383 memset(parm, 0, sizeof(union iucv_param));
363 parm->set_mask.ipmask = 0xf8; 384 parm->set_mask.ipmask = 0xf8;
364 iucv_call_b2f0(IUCV_SETMASK, parm); 385 iucv_call_b2f0(IUCV_SETMASK, parm);
365 386
387 /*
388 * Enable all iucv control interrupts.
389 * ipmask contains bits for the different interrupts
390 * 0x80 - Flag to allow pending connections interrupts
391 * 0x40 - Flag to allow connection complete interrupts
392 * 0x20 - Flag to allow connection severed interrupts
393 * 0x10 - Flag to allow connection quiesced interrupts
394 * 0x08 - Flag to allow connection resumed interrupts
395 */
396 memset(parm, 0, sizeof(union iucv_param));
397 parm->set_mask.ipmask = 0xf8;
398 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
366 /* Set indication that iucv interrupts are allowed for this cpu. */ 399 /* Set indication that iucv interrupts are allowed for this cpu. */
367 cpu_set(cpu, iucv_irq_cpumask); 400 cpu_set(cpu, iucv_irq_cpumask);
368} 401}
@@ -379,7 +412,7 @@ static void iucv_block_cpu(void *data)
379 union iucv_param *parm; 412 union iucv_param *parm;
380 413
381 /* Disable all iucv interrupts. */ 414 /* Disable all iucv interrupts. */
382 parm = iucv_param[cpu]; 415 parm = iucv_param_irq[cpu];
383 memset(parm, 0, sizeof(union iucv_param)); 416 memset(parm, 0, sizeof(union iucv_param));
384 iucv_call_b2f0(IUCV_SETMASK, parm); 417 iucv_call_b2f0(IUCV_SETMASK, parm);
385 418
@@ -388,6 +421,31 @@ static void iucv_block_cpu(void *data)
388} 421}
389 422
390/** 423/**
424 * iucv_block_cpu_almost
425 * @data: unused
426 *
427 * Allow connection-severed interrupts only on this cpu.
428 */
429static void iucv_block_cpu_almost(void *data)
430{
431 int cpu = smp_processor_id();
432 union iucv_param *parm;
433
434 /* Allow iucv control interrupts only */
435 parm = iucv_param_irq[cpu];
436 memset(parm, 0, sizeof(union iucv_param));
437 parm->set_mask.ipmask = 0x08;
438 iucv_call_b2f0(IUCV_SETMASK, parm);
439 /* Allow iucv-severed interrupt only */
440 memset(parm, 0, sizeof(union iucv_param));
441 parm->set_mask.ipmask = 0x20;
442 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
443
444 /* Clear indication that iucv interrupts are allowed for this cpu. */
445 cpu_clear(cpu, iucv_irq_cpumask);
446}
447
448/**
391 * iucv_declare_cpu 449 * iucv_declare_cpu
392 * @data: unused 450 * @data: unused
393 * 451 *
@@ -403,7 +461,7 @@ static void iucv_declare_cpu(void *data)
403 return; 461 return;
404 462
405 /* Declare interrupt buffer. */ 463 /* Declare interrupt buffer. */
406 parm = iucv_param[cpu]; 464 parm = iucv_param_irq[cpu];
407 memset(parm, 0, sizeof(union iucv_param)); 465 memset(parm, 0, sizeof(union iucv_param));
408 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 466 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
409 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 467 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
@@ -460,7 +518,7 @@ static void iucv_retrieve_cpu(void *data)
460 iucv_block_cpu(NULL); 518 iucv_block_cpu(NULL);
461 519
462 /* Retrieve interrupt buffer. */ 520 /* Retrieve interrupt buffer. */
463 parm = iucv_param[cpu]; 521 parm = iucv_param_irq[cpu];
464 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 522 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
465 523
466 /* Clear indication that an iucv buffer exists for this cpu. */ 524 /* Clear indication that an iucv buffer exists for this cpu. */
@@ -574,11 +632,22 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
574 iucv_irq_data[cpu] = NULL; 632 iucv_irq_data[cpu] = NULL;
575 return NOTIFY_BAD; 633 return NOTIFY_BAD;
576 } 634 }
635 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
636 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
637 if (!iucv_param_irq[cpu]) {
638 kfree(iucv_param[cpu]);
639 iucv_param[cpu] = NULL;
640 kfree(iucv_irq_data[cpu]);
641 iucv_irq_data[cpu] = NULL;
642 return NOTIFY_BAD;
643 }
577 break; 644 break;
578 case CPU_UP_CANCELED: 645 case CPU_UP_CANCELED:
579 case CPU_UP_CANCELED_FROZEN: 646 case CPU_UP_CANCELED_FROZEN:
580 case CPU_DEAD: 647 case CPU_DEAD:
581 case CPU_DEAD_FROZEN: 648 case CPU_DEAD_FROZEN:
649 kfree(iucv_param_irq[cpu]);
650 iucv_param_irq[cpu] = NULL;
582 kfree(iucv_param[cpu]); 651 kfree(iucv_param[cpu]);
583 iucv_param[cpu] = NULL; 652 iucv_param[cpu] = NULL;
584 kfree(iucv_irq_data[cpu]); 653 kfree(iucv_irq_data[cpu]);
@@ -625,7 +694,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
625{ 694{
626 union iucv_param *parm; 695 union iucv_param *parm;
627 696
628 parm = iucv_param[smp_processor_id()]; 697 parm = iucv_param_irq[smp_processor_id()];
629 memset(parm, 0, sizeof(union iucv_param)); 698 memset(parm, 0, sizeof(union iucv_param));
630 if (userdata) 699 if (userdata)
631 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 700 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -746,6 +815,28 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
746} 815}
747EXPORT_SYMBOL(iucv_unregister); 816EXPORT_SYMBOL(iucv_unregister);
748 817
818static int iucv_reboot_event(struct notifier_block *this,
819 unsigned long event, void *ptr)
820{
821 int i, rc;
822
823 get_online_cpus();
824 on_each_cpu(iucv_block_cpu, NULL, 1);
825 preempt_disable();
826 for (i = 0; i < iucv_max_pathid; i++) {
827 if (iucv_path_table[i])
828 rc = iucv_sever_pathid(i, NULL);
829 }
830 preempt_enable();
831 put_online_cpus();
832 iucv_disable();
833 return NOTIFY_DONE;
834}
835
836static struct notifier_block iucv_reboot_notifier = {
837 .notifier_call = iucv_reboot_event,
838};
839
749/** 840/**
750 * iucv_path_accept 841 * iucv_path_accept
751 * @path: address of iucv path structure 842 * @path: address of iucv path structure
@@ -765,6 +856,10 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
765 int rc; 856 int rc;
766 857
767 local_bh_disable(); 858 local_bh_disable();
859 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
860 rc = -EIO;
861 goto out;
862 }
768 /* Prepare parameter block. */ 863 /* Prepare parameter block. */
769 parm = iucv_param[smp_processor_id()]; 864 parm = iucv_param[smp_processor_id()];
770 memset(parm, 0, sizeof(union iucv_param)); 865 memset(parm, 0, sizeof(union iucv_param));
@@ -780,6 +875,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
780 path->msglim = parm->ctrl.ipmsglim; 875 path->msglim = parm->ctrl.ipmsglim;
781 path->flags = parm->ctrl.ipflags1; 876 path->flags = parm->ctrl.ipflags1;
782 } 877 }
878out:
783 local_bh_enable(); 879 local_bh_enable();
784 return rc; 880 return rc;
785} 881}
@@ -809,6 +905,10 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
809 905
810 spin_lock_bh(&iucv_table_lock); 906 spin_lock_bh(&iucv_table_lock);
811 iucv_cleanup_queue(); 907 iucv_cleanup_queue();
908 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
909 rc = -EIO;
910 goto out;
911 }
812 parm = iucv_param[smp_processor_id()]; 912 parm = iucv_param[smp_processor_id()];
813 memset(parm, 0, sizeof(union iucv_param)); 913 memset(parm, 0, sizeof(union iucv_param));
814 parm->ctrl.ipmsglim = path->msglim; 914 parm->ctrl.ipmsglim = path->msglim;
@@ -843,6 +943,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
843 rc = -EIO; 943 rc = -EIO;
844 } 944 }
845 } 945 }
946out:
846 spin_unlock_bh(&iucv_table_lock); 947 spin_unlock_bh(&iucv_table_lock);
847 return rc; 948 return rc;
848} 949}
@@ -864,12 +965,17 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
864 int rc; 965 int rc;
865 966
866 local_bh_disable(); 967 local_bh_disable();
968 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
969 rc = -EIO;
970 goto out;
971 }
867 parm = iucv_param[smp_processor_id()]; 972 parm = iucv_param[smp_processor_id()];
868 memset(parm, 0, sizeof(union iucv_param)); 973 memset(parm, 0, sizeof(union iucv_param));
869 if (userdata) 974 if (userdata)
870 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 975 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
871 parm->ctrl.ippathid = path->pathid; 976 parm->ctrl.ippathid = path->pathid;
872 rc = iucv_call_b2f0(IUCV_QUIESCE, parm); 977 rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
978out:
873 local_bh_enable(); 979 local_bh_enable();
874 return rc; 980 return rc;
875} 981}
@@ -891,12 +997,17 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
891 int rc; 997 int rc;
892 998
893 local_bh_disable(); 999 local_bh_disable();
1000 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1001 rc = -EIO;
1002 goto out;
1003 }
894 parm = iucv_param[smp_processor_id()]; 1004 parm = iucv_param[smp_processor_id()];
895 memset(parm, 0, sizeof(union iucv_param)); 1005 memset(parm, 0, sizeof(union iucv_param));
896 if (userdata) 1006 if (userdata)
897 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 1007 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
898 parm->ctrl.ippathid = path->pathid; 1008 parm->ctrl.ippathid = path->pathid;
899 rc = iucv_call_b2f0(IUCV_RESUME, parm); 1009 rc = iucv_call_b2f0(IUCV_RESUME, parm);
1010out:
900 local_bh_enable(); 1011 local_bh_enable();
901 return rc; 1012 return rc;
902} 1013}
@@ -915,15 +1026,18 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
915 int rc; 1026 int rc;
916 1027
917 preempt_disable(); 1028 preempt_disable();
1029 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1030 rc = -EIO;
1031 goto out;
1032 }
918 if (iucv_active_cpu != smp_processor_id()) 1033 if (iucv_active_cpu != smp_processor_id())
919 spin_lock_bh(&iucv_table_lock); 1034 spin_lock_bh(&iucv_table_lock);
920 rc = iucv_sever_pathid(path->pathid, userdata); 1035 rc = iucv_sever_pathid(path->pathid, userdata);
921 if (!rc) { 1036 iucv_path_table[path->pathid] = NULL;
922 iucv_path_table[path->pathid] = NULL; 1037 list_del_init(&path->list);
923 list_del_init(&path->list);
924 }
925 if (iucv_active_cpu != smp_processor_id()) 1038 if (iucv_active_cpu != smp_processor_id())
926 spin_unlock_bh(&iucv_table_lock); 1039 spin_unlock_bh(&iucv_table_lock);
1040out:
927 preempt_enable(); 1041 preempt_enable();
928 return rc; 1042 return rc;
929} 1043}
@@ -946,6 +1060,10 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
946 int rc; 1060 int rc;
947 1061
948 local_bh_disable(); 1062 local_bh_disable();
1063 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1064 rc = -EIO;
1065 goto out;
1066 }
949 parm = iucv_param[smp_processor_id()]; 1067 parm = iucv_param[smp_processor_id()];
950 memset(parm, 0, sizeof(union iucv_param)); 1068 memset(parm, 0, sizeof(union iucv_param));
951 parm->purge.ippathid = path->pathid; 1069 parm->purge.ippathid = path->pathid;
@@ -957,6 +1075,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
957 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; 1075 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
958 msg->tag = parm->purge.ipmsgtag; 1076 msg->tag = parm->purge.ipmsgtag;
959 } 1077 }
1078out:
960 local_bh_enable(); 1079 local_bh_enable();
961 return rc; 1080 return rc;
962} 1081}
@@ -1033,6 +1152,10 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1033 if (msg->flags & IUCV_IPRMDATA) 1152 if (msg->flags & IUCV_IPRMDATA)
1034 return iucv_message_receive_iprmdata(path, msg, flags, 1153 return iucv_message_receive_iprmdata(path, msg, flags,
1035 buffer, size, residual); 1154 buffer, size, residual);
1155 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1156 rc = -EIO;
1157 goto out;
1158 }
1036 parm = iucv_param[smp_processor_id()]; 1159 parm = iucv_param[smp_processor_id()];
1037 memset(parm, 0, sizeof(union iucv_param)); 1160 memset(parm, 0, sizeof(union iucv_param));
1038 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1161 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
@@ -1048,6 +1171,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1048 if (residual) 1171 if (residual)
1049 *residual = parm->db.ipbfln1f; 1172 *residual = parm->db.ipbfln1f;
1050 } 1173 }
1174out:
1051 return rc; 1175 return rc;
1052} 1176}
1053EXPORT_SYMBOL(__iucv_message_receive); 1177EXPORT_SYMBOL(__iucv_message_receive);
@@ -1101,6 +1225,10 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1101 int rc; 1225 int rc;
1102 1226
1103 local_bh_disable(); 1227 local_bh_disable();
1228 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1229 rc = -EIO;
1230 goto out;
1231 }
1104 parm = iucv_param[smp_processor_id()]; 1232 parm = iucv_param[smp_processor_id()];
1105 memset(parm, 0, sizeof(union iucv_param)); 1233 memset(parm, 0, sizeof(union iucv_param));
1106 parm->db.ippathid = path->pathid; 1234 parm->db.ippathid = path->pathid;
@@ -1108,6 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1108 parm->db.iptrgcls = msg->class; 1236 parm->db.iptrgcls = msg->class;
1109 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); 1237 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
1110 rc = iucv_call_b2f0(IUCV_REJECT, parm); 1238 rc = iucv_call_b2f0(IUCV_REJECT, parm);
1239out:
1111 local_bh_enable(); 1240 local_bh_enable();
1112 return rc; 1241 return rc;
1113} 1242}
@@ -1135,6 +1264,10 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1135 int rc; 1264 int rc;
1136 1265
1137 local_bh_disable(); 1266 local_bh_disable();
1267 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1268 rc = -EIO;
1269 goto out;
1270 }
1138 parm = iucv_param[smp_processor_id()]; 1271 parm = iucv_param[smp_processor_id()];
1139 memset(parm, 0, sizeof(union iucv_param)); 1272 memset(parm, 0, sizeof(union iucv_param));
1140 if (flags & IUCV_IPRMDATA) { 1273 if (flags & IUCV_IPRMDATA) {
@@ -1152,6 +1285,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1152 parm->db.iptrgcls = msg->class; 1285 parm->db.iptrgcls = msg->class;
1153 } 1286 }
1154 rc = iucv_call_b2f0(IUCV_REPLY, parm); 1287 rc = iucv_call_b2f0(IUCV_REPLY, parm);
1288out:
1155 local_bh_enable(); 1289 local_bh_enable();
1156 return rc; 1290 return rc;
1157} 1291}
@@ -1180,6 +1314,10 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1180 union iucv_param *parm; 1314 union iucv_param *parm;
1181 int rc; 1315 int rc;
1182 1316
1317 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1318 rc = -EIO;
1319 goto out;
1320 }
1183 parm = iucv_param[smp_processor_id()]; 1321 parm = iucv_param[smp_processor_id()];
1184 memset(parm, 0, sizeof(union iucv_param)); 1322 memset(parm, 0, sizeof(union iucv_param));
1185 if (flags & IUCV_IPRMDATA) { 1323 if (flags & IUCV_IPRMDATA) {
@@ -1202,6 +1340,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1202 rc = iucv_call_b2f0(IUCV_SEND, parm); 1340 rc = iucv_call_b2f0(IUCV_SEND, parm);
1203 if (!rc) 1341 if (!rc)
1204 msg->id = parm->db.ipmsgid; 1342 msg->id = parm->db.ipmsgid;
1343out:
1205 return rc; 1344 return rc;
1206} 1345}
1207EXPORT_SYMBOL(__iucv_message_send); 1346EXPORT_SYMBOL(__iucv_message_send);
@@ -1262,6 +1401,10 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1262 int rc; 1401 int rc;
1263 1402
1264 local_bh_disable(); 1403 local_bh_disable();
1404 if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
1405 rc = -EIO;
1406 goto out;
1407 }
1265 parm = iucv_param[smp_processor_id()]; 1408 parm = iucv_param[smp_processor_id()];
1266 memset(parm, 0, sizeof(union iucv_param)); 1409 memset(parm, 0, sizeof(union iucv_param));
1267 if (flags & IUCV_IPRMDATA) { 1410 if (flags & IUCV_IPRMDATA) {
@@ -1287,6 +1430,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1287 rc = iucv_call_b2f0(IUCV_SEND, parm); 1430 rc = iucv_call_b2f0(IUCV_SEND, parm);
1288 if (!rc) 1431 if (!rc)
1289 msg->id = parm->db.ipmsgid; 1432 msg->id = parm->db.ipmsgid;
1433out:
1290 local_bh_enable(); 1434 local_bh_enable();
1291 return rc; 1435 return rc;
1292} 1436}
@@ -1378,6 +1522,8 @@ static void iucv_path_complete(struct iucv_irq_data *data)
1378 struct iucv_path_complete *ipc = (void *) data; 1522 struct iucv_path_complete *ipc = (void *) data;
1379 struct iucv_path *path = iucv_path_table[ipc->ippathid]; 1523 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1380 1524
1525 if (path)
1526 path->flags = ipc->ipflags1;
1381 if (path && path->handler && path->handler->path_complete) 1527 if (path && path->handler && path->handler->path_complete)
1382 path->handler->path_complete(path, ipc->ipuser); 1528 path->handler->path_complete(path, ipc->ipuser);
1383} 1529}
@@ -1413,7 +1559,7 @@ static void iucv_path_severed(struct iucv_irq_data *data)
1413 else { 1559 else {
1414 iucv_sever_pathid(path->pathid, NULL); 1560 iucv_sever_pathid(path->pathid, NULL);
1415 iucv_path_table[path->pathid] = NULL; 1561 iucv_path_table[path->pathid] = NULL;
1416 list_del_init(&path->list); 1562 list_del(&path->list);
1417 iucv_path_free(path); 1563 iucv_path_free(path);
1418 } 1564 }
1419} 1565}
@@ -1675,6 +1821,130 @@ static void iucv_external_interrupt(u16 code)
1675 spin_unlock(&iucv_queue_lock); 1821 spin_unlock(&iucv_queue_lock);
1676} 1822}
1677 1823
1824static int iucv_pm_prepare(struct device *dev)
1825{
1826 int rc = 0;
1827
1828#ifdef CONFIG_PM_DEBUG
1829 printk(KERN_INFO "iucv_pm_prepare\n");
1830#endif
1831 if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
1832 rc = dev->driver->pm->prepare(dev);
1833 return rc;
1834}
1835
1836static void iucv_pm_complete(struct device *dev)
1837{
1838#ifdef CONFIG_PM_DEBUG
1839 printk(KERN_INFO "iucv_pm_complete\n");
1840#endif
1841 if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
1842 dev->driver->pm->complete(dev);
1843}
1844
1845/**
1846 * iucv_path_table_empty() - determine if iucv path table is empty
1847 *
1848 * Returns 0 if there are still iucv paths defined
1849 * 1 if there are no iucv paths defined
1850 */
1851int iucv_path_table_empty(void)
1852{
1853 int i;
1854
1855 for (i = 0; i < iucv_max_pathid; i++) {
1856 if (iucv_path_table[i])
1857 return 0;
1858 }
1859 return 1;
1860}
1861
1862/**
1863 * iucv_pm_freeze() - Freeze PM callback
1864 * @dev: iucv-based device
1865 *
1866 * disable iucv interrupts
1867 * invoke callback function of the iucv-based driver
1868 * shut down iucv if no iucv paths are established anymore
1869 */
1870static int iucv_pm_freeze(struct device *dev)
1871{
1872 int cpu;
1873 int rc = 0;
1874
1875#ifdef CONFIG_PM_DEBUG
1876 printk(KERN_WARNING "iucv_pm_freeze\n");
1877#endif
1878 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
1879 smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
1880 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
1881 rc = dev->driver->pm->freeze(dev);
1882 if (iucv_path_table_empty())
1883 iucv_disable();
1884 return rc;
1885}
1886
1887/**
1888 * iucv_pm_thaw() - Thaw PM callback
1889 * @dev: iucv-based device
1890 *
1891 * make iucv ready for use again: allocate path table, declare interrupt buffers
1892 * and enable iucv interrupts
1893 * invoke callback function of the iucv-based driver
1894 */
1895static int iucv_pm_thaw(struct device *dev)
1896{
1897 int rc = 0;
1898
1899#ifdef CONFIG_PM_DEBUG
1900 printk(KERN_WARNING "iucv_pm_thaw\n");
1901#endif
1902 if (!iucv_path_table) {
1903 rc = iucv_enable();
1904 if (rc)
1905 goto out;
1906 }
1907 if (cpus_empty(iucv_irq_cpumask)) {
1908 if (iucv_nonsmp_handler)
1909 /* enable interrupts on one cpu */
1910 iucv_allow_cpu(NULL);
1911 else
1912 /* enable interrupts on all cpus */
1913 iucv_setmask_mp();
1914 }
1915 if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
1916 rc = dev->driver->pm->thaw(dev);
1917out:
1918 return rc;
1919}
1920
1921/**
1922 * iucv_pm_restore() - Restore PM callback
1923 * @dev: iucv-based device
1924 *
1925 * make iucv ready for use again: allocate path table, declare interrupt buffers
1926 * and enable iucv interrupts
1927 * invoke callback function of the iucv-based driver
1928 */
1929static int iucv_pm_restore(struct device *dev)
1930{
1931 int rc = 0;
1932
1933#ifdef CONFIG_PM_DEBUG
1934 printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
1935#endif
1936 if (cpus_empty(iucv_irq_cpumask)) {
1937 rc = iucv_query_maxconn();
1938 rc = iucv_enable();
1939 if (rc)
1940 goto out;
1941 }
1942 if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
1943 rc = dev->driver->pm->restore(dev);
1944out:
1945 return rc;
1946}
1947
1678/** 1948/**
1679 * iucv_init 1949 * iucv_init
1680 * 1950 *
@@ -1717,23 +1987,37 @@ static int __init iucv_init(void)
1717 rc = -ENOMEM; 1987 rc = -ENOMEM;
1718 goto out_free; 1988 goto out_free;
1719 } 1989 }
1990 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
1991 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
1992 if (!iucv_param_irq[cpu]) {
1993 rc = -ENOMEM;
1994 goto out_free;
1995 }
1996
1720 } 1997 }
1721 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 1998 rc = register_hotcpu_notifier(&iucv_cpu_notifier);
1722 if (rc) 1999 if (rc)
1723 goto out_free; 2000 goto out_free;
2001 rc = register_reboot_notifier(&iucv_reboot_notifier);
2002 if (rc)
2003 goto out_cpu;
1724 ASCEBC(iucv_error_no_listener, 16); 2004 ASCEBC(iucv_error_no_listener, 16);
1725 ASCEBC(iucv_error_no_memory, 16); 2005 ASCEBC(iucv_error_no_memory, 16);
1726 ASCEBC(iucv_error_pathid, 16); 2006 ASCEBC(iucv_error_pathid, 16);
1727 iucv_available = 1; 2007 iucv_available = 1;
1728 rc = bus_register(&iucv_bus); 2008 rc = bus_register(&iucv_bus);
1729 if (rc) 2009 if (rc)
1730 goto out_cpu; 2010 goto out_reboot;
1731 return 0; 2011 return 0;
1732 2012
2013out_reboot:
2014 unregister_reboot_notifier(&iucv_reboot_notifier);
1733out_cpu: 2015out_cpu:
1734 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2016 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1735out_free: 2017out_free:
1736 for_each_possible_cpu(cpu) { 2018 for_each_possible_cpu(cpu) {
2019 kfree(iucv_param_irq[cpu]);
2020 iucv_param_irq[cpu] = NULL;
1737 kfree(iucv_param[cpu]); 2021 kfree(iucv_param[cpu]);
1738 iucv_param[cpu] = NULL; 2022 iucv_param[cpu] = NULL;
1739 kfree(iucv_irq_data[cpu]); 2023 kfree(iucv_irq_data[cpu]);
@@ -1762,8 +2046,11 @@ static void __exit iucv_exit(void)
1762 list_for_each_entry_safe(p, n, &iucv_work_queue, list) 2046 list_for_each_entry_safe(p, n, &iucv_work_queue, list)
1763 kfree(p); 2047 kfree(p);
1764 spin_unlock_irq(&iucv_queue_lock); 2048 spin_unlock_irq(&iucv_queue_lock);
2049 unregister_reboot_notifier(&iucv_reboot_notifier);
1765 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2050 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1766 for_each_possible_cpu(cpu) { 2051 for_each_possible_cpu(cpu) {
2052 kfree(iucv_param_irq[cpu]);
2053 iucv_param_irq[cpu] = NULL;
1767 kfree(iucv_param[cpu]); 2054 kfree(iucv_param[cpu]);
1768 iucv_param[cpu] = NULL; 2055 iucv_param[cpu] = NULL;
1769 kfree(iucv_irq_data[cpu]); 2056 kfree(iucv_irq_data[cpu]);
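Every guarded IUCV primitive above follows the same shape: check that this cpu still has a declared interrupt buffer before touching its per-cpu parameter block, and bail out with -EIO otherwise. A minimal sketch of that pattern, using only names that appear in the hunks above:

    local_bh_disable();
    if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
            rc = -EIO;      /* no interrupt buffer declared on this cpu */
            goto out;
    }
    parm = iucv_param[smp_processor_id()];
    memset(parm, 0, sizeof(union iucv_param));
    /* ... fill parm for the specific primitive ... */
    rc = iucv_call_b2f0(IUCV_SEND, parm);
out:
    local_bh_enable();
    return rc;

Control operations (declare/retrieve buffer, sever path, set masks) switch to the separate iucv_param_irq buffer, so they cannot trample a data operation that is still using iucv_param on the same cpu.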
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index febae702685c..9208cf5f2bd5 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -935,7 +935,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
935 935
936 if (llc->dev) { 936 if (llc->dev) {
937 sllc.sllc_arphrd = llc->dev->type; 937 sllc.sllc_arphrd = llc->dev->type;
938 memcpy(&sllc.sllc_mac, &llc->dev->dev_addr, 938 memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
939 IFHWADDRLEN); 939 IFHWADDRLEN);
940 } 940 }
941 } 941 }
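The af_llc.c change is a classic address-of bug: in kernels where net_device's dev_addr is a pointer member rather than an embedded array (an assumption here; the declaration is not visible in this hunk), &llc->dev->dev_addr is the address of the pointer itself, so the memcpy() exported the first IFHWADDRLEN bytes of a kernel pointer instead of the MAC address. Illustration with hypothetical locals:

    unsigned char mac[IFHWADDRLEN];
    unsigned char *dev_addr = hw_addr;       /* pointer member, as assumed */
    memcpy(mac, &dev_addr, IFHWADDRLEN);     /* bug: copies pointer bytes */
    memcpy(mac, dev_addr, IFHWADDRLEN);      /* fix: copies the MAC bytes */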
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3477624a4906..c6bab39b018e 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -79,10 +79,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
79 79
80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { 80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
81 /* indicate or confirm not required */ 81 /* indicate or confirm not required */
82 /* XXX this is not very pretty, perhaps we should store
83 * XXX indicate/confirm-needed state in the llc_conn_state_ev
84 * XXX control block of the SKB instead? -DaveM
85 */
86 if (!skb->next) 82 if (!skb->next)
87 goto out_kfree_skb; 83 goto out_kfree_skb;
88 goto out_skb_put; 84 goto out_skb_put;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index ecc3faf9f11a..ba2643a43c73 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -1,16 +1,35 @@
1config MAC80211 1config MAC80211
2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)" 2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
3 depends on CFG80211
3 select CRYPTO 4 select CRYPTO
4 select CRYPTO_ECB 5 select CRYPTO_ECB
5 select CRYPTO_ARC4 6 select CRYPTO_ARC4
6 select CRYPTO_AES 7 select CRYPTO_AES
7 select CRC32 8 select CRC32
8 select WIRELESS_EXT 9 select WIRELESS_EXT
9 select CFG80211
10 ---help--- 10 ---help---
11 This option enables the hardware independent IEEE 802.11 11 This option enables the hardware independent IEEE 802.11
12 networking stack. 12 networking stack.
13 13
14comment "CFG80211 needs to be enabled for MAC80211"
15 depends on CFG80211=n
16
17config MAC80211_DEFAULT_PS
18 bool "enable powersave by default"
19 depends on MAC80211
20 default y
21 help
22 This option enables powersave mode by default.
23
24 If this causes your applications to misbehave you should fix your
25 applications instead -- they need to register their network
26 latency requirement, see Documentation/power/pm_qos_interface.txt.
27
28config MAC80211_DEFAULT_PS_VALUE
29 int
30 default 1 if MAC80211_DEFAULT_PS
31 default 0
32
14menu "Rate control algorithm selection" 33menu "Rate control algorithm selection"
15 depends on MAC80211 != n 34 depends on MAC80211 != n
16 35
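Two things change in this Kconfig hunk: MAC80211 now depends on CFG80211 instead of selecting it (with a comment entry that surfaces when CFG80211 is off), and the powersave default becomes a user-visible bool folded into the always-defined int MAC80211_DEFAULT_PS_VALUE, so C code can read it without an #ifdef. A plausible consumer, purely for illustration (the field name is an assumption, not part of this diff):

    /* hypothetical: seed the runtime powersave default from Kconfig */
    local->powersave = CONFIG_MAC80211_DEFAULT_PS_VALUE ? true : false;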
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 07656d830bc4..bc064d7933ff 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -16,12 +16,12 @@
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19 20
20void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 21void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
21 u16 initiator, u16 reason) 22 u16 initiator, u16 reason)
22{ 23{
23 struct ieee80211_local *local = sta->local; 24 struct ieee80211_local *local = sta->local;
24 struct ieee80211_hw *hw = &local->hw;
25 int i; 25 int i;
26 26
27 /* check if TID is in operational state */ 27 /* check if TID is in operational state */
@@ -41,8 +41,8 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, 44 if (drv_ampdu_action(local, IEEE80211_AMPDU_RX_STOP,
45 &sta->sta, tid, NULL)) 45 &sta->sta, tid, NULL))
46 printk(KERN_DEBUG "HW problem - can not stop rx " 46 printk(KERN_DEBUG "HW problem - can not stop rx "
47 "aggregation for tid %d\n", tid); 47 "aggregation for tid %d\n", tid);
48 48
@@ -68,6 +68,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
68 spin_lock_bh(&sta->lock); 68 spin_lock_bh(&sta->lock);
69 /* free resources */ 69 /* free resources */
70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); 70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
71 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time);
71 72
72 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) { 73 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
73 kfree(sta->ampdu_mlme.tid_rx[tid]); 74 kfree(sta->ampdu_mlme.tid_rx[tid]);
@@ -268,19 +269,23 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
268 /* prepare reordering buffer */ 269 /* prepare reordering buffer */
269 tid_agg_rx->reorder_buf = 270 tid_agg_rx->reorder_buf =
270 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); 271 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
271 if (!tid_agg_rx->reorder_buf) { 272 tid_agg_rx->reorder_time =
273 kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC);
274 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
272#ifdef CONFIG_MAC80211_HT_DEBUG 275#ifdef CONFIG_MAC80211_HT_DEBUG
273 if (net_ratelimit()) 276 if (net_ratelimit())
274 printk(KERN_ERR "can not allocate reordering buffer " 277 printk(KERN_ERR "can not allocate reordering buffer "
275 "to tid %d\n", tid); 278 "to tid %d\n", tid);
276#endif 279#endif
280 kfree(tid_agg_rx->reorder_buf);
281 kfree(tid_agg_rx->reorder_time);
277 kfree(sta->ampdu_mlme.tid_rx[tid]); 282 kfree(sta->ampdu_mlme.tid_rx[tid]);
283 sta->ampdu_mlme.tid_rx[tid] = NULL;
278 goto end; 284 goto end;
279 } 285 }
280 286
281 if (local->ops->ampdu_action) 287 ret = drv_ampdu_action(local, IEEE80211_AMPDU_RX_START,
282 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, 288 &sta->sta, tid, &start_seq_num);
283 &sta->sta, tid, &start_seq_num);
284#ifdef CONFIG_MAC80211_HT_DEBUG 289#ifdef CONFIG_MAC80211_HT_DEBUG
285 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 290 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
286#endif /* CONFIG_MAC80211_HT_DEBUG */ 291#endif /* CONFIG_MAC80211_HT_DEBUG */
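The agg-rx.c error path now handles the paired per-TID allocations correctly: either array may have failed, kfree(NULL) is a no-op so both can be freed unconditionally, and the tid_rx slot is cleared so later teardown cannot free a stale pointer. Condensed from the hunk above:

    tid_agg_rx->reorder_buf =
            kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
    tid_agg_rx->reorder_time =
            kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC);
    if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
            kfree(tid_agg_rx->reorder_buf);      /* kfree(NULL) is safe */
            kfree(tid_agg_rx->reorder_time);
            kfree(sta->ampdu_mlme.tid_rx[tid]);
            sta->ampdu_mlme.tid_rx[tid] = NULL;  /* no dangling slot */
            goto end;
    }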
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 947aaaad35d2..9e5762ad307d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -16,6 +16,7 @@
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19#include "wme.h" 20#include "wme.h"
20 21
21/** 22/**
@@ -131,11 +132,14 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
131 132
132 state = &sta->ampdu_mlme.tid_state_tx[tid]; 133 state = &sta->ampdu_mlme.tid_state_tx[tid];
133 134
135 if (*state == HT_AGG_STATE_OPERATIONAL)
136 sta->ampdu_mlme.addba_req_num[tid] = 0;
137
134 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 138 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
135 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 139 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
136 140
137 ret = local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_STOP, 141 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_STOP,
138 &sta->sta, tid, NULL); 142 &sta->sta, tid, NULL);
139 143
140 /* HW shall not deny going back to legacy */ 144 /* HW shall not deny going back to legacy */
141 if (WARN_ON(ret)) { 145 if (WARN_ON(ret)) {
@@ -306,8 +310,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
306 310
307 start_seq_num = sta->tid_seq[tid]; 311 start_seq_num = sta->tid_seq[tid];
308 312
309 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, 313 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_START,
310 &sta->sta, tid, &start_seq_num); 314 &sta->sta, tid, &start_seq_num);
311 315
312 if (ret) { 316 if (ret) {
313#ifdef CONFIG_MAC80211_HT_DEBUG 317#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -336,6 +340,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
336 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 340 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
337 sta->ampdu_mlme.tid_tx[tid]->ssn, 341 sta->ampdu_mlme.tid_tx[tid]->ssn,
338 0x40, 5000); 342 0x40, 5000);
343 sta->ampdu_mlme.addba_req_num[tid]++;
339 /* activate the timer for the recipient's addBA response */ 344 /* activate the timer for the recipient's addBA response */
340 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = 345 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
341 jiffies + ADDBA_RESP_INTERVAL; 346 jiffies + ADDBA_RESP_INTERVAL;
@@ -418,8 +423,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
418 ieee80211_agg_splice_finish(local, sta, tid); 423 ieee80211_agg_splice_finish(local, sta, tid);
419 spin_unlock(&local->ampdu_lock); 424 spin_unlock(&local->ampdu_lock);
420 425
421 local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL, 426 drv_ampdu_action(local, IEEE80211_AMPDU_TX_OPERATIONAL,
422 &sta->sta, tid, NULL); 427 &sta->sta, tid, NULL);
423} 428}
424 429
425void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) 430void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
@@ -605,7 +610,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
605 610
606 *state = HT_AGG_STATE_IDLE; 611 *state = HT_AGG_STATE_IDLE;
607 /* from now on packets are no longer put onto sta->pending */ 612 /* from now on packets are no longer put onto sta->pending */
608 sta->ampdu_mlme.addba_req_num[tid] = 0;
609 kfree(sta->ampdu_mlme.tid_tx[tid]); 613 kfree(sta->ampdu_mlme.tid_tx[tid]);
610 sta->ampdu_mlme.tid_tx[tid] = NULL; 614 sta->ampdu_mlme.tid_tx[tid] = NULL;
611 615
@@ -688,7 +692,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
688 692
689 sta->ampdu_mlme.addba_req_num[tid] = 0; 693 sta->ampdu_mlme.addba_req_num[tid] = 0;
690 } else { 694 } else {
691 sta->ampdu_mlme.addba_req_num[tid]++;
692 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 695 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
693 } 696 }
694 spin_unlock_bh(&sta->lock); 697 spin_unlock_bh(&sta->lock);
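The addba_req_num moves in agg-tx.c change what the counter measures: it is bumped whenever an ADDBA request actually goes out, reset on a successful response or when tearing down a session that reached the operational state, and no longer wiped in the stop callback, so consecutive failed attempts accumulate. In outline:

    ieee80211_send_addba_request(...);        /* request transmitted */
    sta->ampdu_mlme.addba_req_num[tid]++;     /* every attempt counts */

    /* on a successful ADDBA response: */
    sta->ampdu_mlme.addba_req_num[tid] = 0;

    /* stopping a session from HT_AGG_STATE_OPERATIONAL also resets
     * the counter; a failed attempt no longer does. */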
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e677b751d468..3f47276caeb8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -13,6 +13,7 @@
13#include <linux/rcupdate.h> 13#include <linux/rcupdate.h>
14#include <net/cfg80211.h> 14#include <net/cfg80211.h>
15#include "ieee80211_i.h" 15#include "ieee80211_i.h"
16#include "driver-ops.h"
16#include "cfg.h" 17#include "cfg.h"
17#include "rate.h" 18#include "rate.h"
18#include "mesh.h" 19#include "mesh.h"
@@ -111,7 +112,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
111} 112}
112 113
113static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, 114static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
114 u8 key_idx, u8 *mac_addr, 115 u8 key_idx, const u8 *mac_addr,
115 struct key_params *params) 116 struct key_params *params)
116{ 117{
117 struct ieee80211_sub_if_data *sdata; 118 struct ieee80211_sub_if_data *sdata;
@@ -140,7 +141,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
140 return -EINVAL; 141 return -EINVAL;
141 } 142 }
142 143
143 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key); 144 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
145 params->seq_len, params->seq);
144 if (!key) 146 if (!key)
145 return -ENOMEM; 147 return -ENOMEM;
146 148
@@ -165,7 +167,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
165} 167}
166 168
167static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, 169static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
168 u8 key_idx, u8 *mac_addr) 170 u8 key_idx, const u8 *mac_addr)
169{ 171{
170 struct ieee80211_sub_if_data *sdata; 172 struct ieee80211_sub_if_data *sdata;
171 struct sta_info *sta; 173 struct sta_info *sta;
@@ -207,7 +209,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
207} 209}
208 210
209static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, 211static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
210 u8 key_idx, u8 *mac_addr, void *cookie, 212 u8 key_idx, const u8 *mac_addr, void *cookie,
211 void (*callback)(void *cookie, 213 void (*callback)(void *cookie,
212 struct key_params *params)) 214 struct key_params *params))
213{ 215{
@@ -245,12 +247,10 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
245 iv32 = key->u.tkip.tx.iv32; 247 iv32 = key->u.tkip.tx.iv32;
246 iv16 = key->u.tkip.tx.iv16; 248 iv16 = key->u.tkip.tx.iv16;
247 249
248 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 250 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
249 sdata->local->ops->get_tkip_seq) 251 drv_get_tkip_seq(sdata->local,
250 sdata->local->ops->get_tkip_seq( 252 key->conf.hw_key_idx,
251 local_to_hw(sdata->local), 253 &iv32, &iv16);
252 key->conf.hw_key_idx,
253 &iv32, &iv16);
254 254
255 seq[0] = iv16 & 0xff; 255 seq[0] = iv16 & 0xff;
256 seq[1] = (iv16 >> 8) & 0xff; 256 seq[1] = (iv16 >> 8) & 0xff;
@@ -451,18 +451,11 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
451 * This is a kludge. beacon interval should really be part 451 * This is a kludge. beacon interval should really be part
452 * of the beacon information. 452 * of the beacon information.
453 */ 453 */
454 if (params->interval && (sdata->local->hw.conf.beacon_int != 454 if (params->interval &&
455 params->interval)) { 455 (sdata->vif.bss_conf.beacon_int != params->interval)) {
456 sdata->local->hw.conf.beacon_int = params->interval; 456 sdata->vif.bss_conf.beacon_int = params->interval;
457 err = ieee80211_hw_config(sdata->local, 457 ieee80211_bss_info_change_notify(sdata,
458 IEEE80211_CONF_CHANGE_BEACON_INTERVAL); 458 BSS_CHANGED_BEACON_INT);
459 if (err < 0)
460 return err;
461 /*
462 * We updated some parameter so if below bails out
463 * it's not an error.
464 */
465 err = 0;
466 } 459 }
467 460
468 /* Need to have a beacon head if we don't have one yet */ 461 /* Need to have a beacon head if we don't have one yet */
@@ -528,8 +521,9 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
528 521
529 kfree(old); 522 kfree(old);
530 523
531 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON | 524 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
532 IEEE80211_IFCC_BEACON_ENABLED); 525 BSS_CHANGED_BEACON);
526 return 0;
533} 527}
534 528
535static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 529static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -580,7 +574,8 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
580 synchronize_rcu(); 574 synchronize_rcu();
581 kfree(old); 575 kfree(old);
582 576
583 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON_ENABLED); 577 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
578 return 0;
584} 579}
585 580
586/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ 581/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
@@ -635,34 +630,45 @@ static void sta_apply_parameters(struct ieee80211_local *local,
635 int i, j; 630 int i, j;
636 struct ieee80211_supported_band *sband; 631 struct ieee80211_supported_band *sband;
637 struct ieee80211_sub_if_data *sdata = sta->sdata; 632 struct ieee80211_sub_if_data *sdata = sta->sdata;
633 u32 mask, set;
638 634
639 sband = local->hw.wiphy->bands[local->oper_channel->band]; 635 sband = local->hw.wiphy->bands[local->oper_channel->band];
640 636
641 /* 637 spin_lock_bh(&sta->lock);
642 * FIXME: updating the flags is racy when this function is 638 mask = params->sta_flags_mask;
643 * called from ieee80211_change_station(), this will 639 set = params->sta_flags_set;
644 * be resolved in a future patch.
645 */
646 640
647 if (params->station_flags & STATION_FLAG_CHANGED) { 641 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
648 spin_lock_bh(&sta->lock);
649 sta->flags &= ~WLAN_STA_AUTHORIZED; 642 sta->flags &= ~WLAN_STA_AUTHORIZED;
650 if (params->station_flags & STATION_FLAG_AUTHORIZED) 643 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
651 sta->flags |= WLAN_STA_AUTHORIZED; 644 sta->flags |= WLAN_STA_AUTHORIZED;
645 }
652 646
647 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
653 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE; 648 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
654 if (params->station_flags & STATION_FLAG_SHORT_PREAMBLE) 649 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
655 sta->flags |= WLAN_STA_SHORT_PREAMBLE; 650 sta->flags |= WLAN_STA_SHORT_PREAMBLE;
651 }
656 652
653 if (mask & BIT(NL80211_STA_FLAG_WME)) {
657 sta->flags &= ~WLAN_STA_WME; 654 sta->flags &= ~WLAN_STA_WME;
658 if (params->station_flags & STATION_FLAG_WME) 655 if (set & BIT(NL80211_STA_FLAG_WME))
659 sta->flags |= WLAN_STA_WME; 656 sta->flags |= WLAN_STA_WME;
657 }
660 658
659 if (mask & BIT(NL80211_STA_FLAG_MFP)) {
661 sta->flags &= ~WLAN_STA_MFP; 660 sta->flags &= ~WLAN_STA_MFP;
662 if (params->station_flags & STATION_FLAG_MFP) 661 if (set & BIT(NL80211_STA_FLAG_MFP))
663 sta->flags |= WLAN_STA_MFP; 662 sta->flags |= WLAN_STA_MFP;
664 spin_unlock_bh(&sta->lock);
665 } 663 }
664 spin_unlock_bh(&sta->lock);
665
666 /*
667 * cfg80211 validates this (1-2007) and allows setting the AID
668 * only when creating a new station entry
669 */
670 if (params->aid)
671 sta->sta.aid = params->aid;
666 672
667 /* 673 /*
668 * FIXME: updating the following information is racy when this 674 * FIXME: updating the following information is racy when this
@@ -671,12 +677,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
671 * maybe we should just reject attempts to change it. 677
672 */ 678 */
673 679
674 if (params->aid) {
675 sta->sta.aid = params->aid;
676 if (sta->sta.aid > IEEE80211_MAX_AID)
677 sta->sta.aid = 0; /* XXX: should this be an error? */
678 }
679
680 if (params->listen_interval >= 0) 680 if (params->listen_interval >= 0)
681 sta->listen_interval = params->listen_interval; 681 sta->listen_interval = params->listen_interval;
682 682
@@ -1120,10 +1120,10 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1120 p.cw_max = params->cwmax; 1120 p.cw_max = params->cwmax;
1121 p.cw_min = params->cwmin; 1121 p.cw_min = params->cwmin;
1122 p.txop = params->txop; 1122 p.txop = params->txop;
1123 if (local->ops->conf_tx(local_to_hw(local), params->queue, &p)) { 1123 if (drv_conf_tx(local, params->queue, &p)) {
1124 printk(KERN_DEBUG "%s: failed to set TX queue " 1124 printk(KERN_DEBUG "%s: failed to set TX queue "
1125 "parameters for queue %d\n", local->mdev->name, 1125 "parameters for queue %d\n",
1126 params->queue); 1126 wiphy_name(local->hw.wiphy), params->queue);
1127 return -EINVAL; 1127 return -EINVAL;
1128 } 1128 }
1129 1129
@@ -1167,7 +1167,8 @@ static int ieee80211_scan(struct wiphy *wiphy,
1167 1167
1168 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1168 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1169 sdata->vif.type != NL80211_IFTYPE_ADHOC && 1169 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1170 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 1170 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
1171 (sdata->vif.type != NL80211_IFTYPE_AP || sdata->u.ap.beacon))
1171 return -EOPNOTSUPP; 1172 return -EOPNOTSUPP;
1172 1173
1173 return ieee80211_request_scan(sdata, req); 1174 return ieee80211_request_scan(sdata, req);
@@ -1255,9 +1256,22 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1255 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; 1256 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
1256 1257
1257 ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len); 1258 ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
1258 if (ret) 1259 if (ret && ret != -EALREADY)
1259 return ret; 1260 return ret;
1260 1261
1262 if (req->use_mfp) {
1263 sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED;
1264 sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED;
1265 } else {
1266 sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED;
1267 sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED;
1268 }
1269
1270 if (req->control_port)
1271 sdata->u.mgd.flags |= IEEE80211_STA_CONTROL_PORT;
1272 else
1273 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
1274
1261 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; 1275 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1262 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE; 1276 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
1263 ieee80211_sta_req_auth(sdata); 1277 ieee80211_sta_req_auth(sdata);
@@ -1267,25 +1281,106 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1267static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, 1281static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1268 struct cfg80211_deauth_request *req) 1282 struct cfg80211_deauth_request *req)
1269{ 1283{
1270 struct ieee80211_sub_if_data *sdata; 1284 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1271
1272 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1273 1285
1274 /* TODO: req->ie */ 1286 /* TODO: req->ie, req->peer_addr */
1275 return ieee80211_sta_deauthenticate(sdata, req->reason_code); 1287 return ieee80211_sta_deauthenticate(sdata, req->reason_code);
1276} 1288}
1277 1289
1278static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, 1290static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1279 struct cfg80211_disassoc_request *req) 1291 struct cfg80211_disassoc_request *req)
1280{ 1292{
1281 struct ieee80211_sub_if_data *sdata; 1293 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1282
1283 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1284 1294
1285 /* TODO: req->ie */ 1295 /* TODO: req->ie, req->peer_addr */
1286 return ieee80211_sta_disassociate(sdata, req->reason_code); 1296 return ieee80211_sta_disassociate(sdata, req->reason_code);
1287} 1297}
1288 1298
1299static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1300 struct cfg80211_ibss_params *params)
1301{
1302 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1303
1304 return ieee80211_ibss_join(sdata, params);
1305}
1306
1307static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1308{
1309 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1310
1311 return ieee80211_ibss_leave(sdata);
1312}
1313
1314static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1315{
1316 struct ieee80211_local *local = wiphy_priv(wiphy);
1317 int err;
1318
1319 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1320 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1321
1322 if (err)
1323 return err;
1324 }
1325
1326 if (changed & WIPHY_PARAM_RETRY_SHORT)
1327 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
1328 if (changed & WIPHY_PARAM_RETRY_LONG)
1329 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
1330 if (changed &
1331 (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
1332 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
1333
1334 return 0;
1335}
1336
1337static int ieee80211_set_tx_power(struct wiphy *wiphy,
1338 enum tx_power_setting type, int dbm)
1339{
1340 struct ieee80211_local *local = wiphy_priv(wiphy);
1341 struct ieee80211_channel *chan = local->hw.conf.channel;
1342 u32 changes = 0;
1343
1344 switch (type) {
1345 case TX_POWER_AUTOMATIC:
1346 local->user_power_level = -1;
1347 break;
1348 case TX_POWER_LIMITED:
1349 if (dbm < 0)
1350 return -EINVAL;
1351 local->user_power_level = dbm;
1352 break;
1353 case TX_POWER_FIXED:
1354 if (dbm < 0)
1355 return -EINVAL;
1356 /* TODO: move to cfg80211 when it knows the channel */
1357 if (dbm > chan->max_power)
1358 return -EINVAL;
1359 local->user_power_level = dbm;
1360 break;
1361 }
1362
1363 ieee80211_hw_config(local, changes);
1364
1365 return 0;
1366}
1367
1368static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm)
1369{
1370 struct ieee80211_local *local = wiphy_priv(wiphy);
1371
1372 *dbm = local->hw.conf.power_level;
1373
1374 return 0;
1375}
1376
1377static void ieee80211_rfkill_poll(struct wiphy *wiphy)
1378{
1379 struct ieee80211_local *local = wiphy_priv(wiphy);
1380
1381 drv_rfkill_poll(local);
1382}
1383
1289struct cfg80211_ops mac80211_config_ops = { 1384struct cfg80211_ops mac80211_config_ops = {
1290 .add_virtual_intf = ieee80211_add_iface, 1385 .add_virtual_intf = ieee80211_add_iface,
1291 .del_virtual_intf = ieee80211_del_iface, 1386 .del_virtual_intf = ieee80211_del_iface,
@@ -1322,4 +1417,10 @@ struct cfg80211_ops mac80211_config_ops = {
1322 .assoc = ieee80211_assoc, 1417 .assoc = ieee80211_assoc,
1323 .deauth = ieee80211_deauth, 1418 .deauth = ieee80211_deauth,
1324 .disassoc = ieee80211_disassoc, 1419 .disassoc = ieee80211_disassoc,
1420 .join_ibss = ieee80211_join_ibss,
1421 .leave_ibss = ieee80211_leave_ibss,
1422 .set_wiphy_params = ieee80211_set_wiphy_params,
1423 .set_tx_power = ieee80211_set_tx_power,
1424 .get_tx_power = ieee80211_get_tx_power,
1425 .rfkill_poll = ieee80211_rfkill_poll,
1325}; 1426};
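sta_apply_parameters() switches to cfg80211's mask/set contract: sta_flags_mask names the flags the caller wants to change and sta_flags_set carries their new values, so a partial update no longer clobbers unrelated flags (the old STATION_FLAG_CHANGED path rewrote all of them). A hypothetical caller-side illustration, not taken from this diff, that authorizes a station and touches nothing else:

    struct station_parameters params = {
            .sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED),
            .sta_flags_set  = BIT(NL80211_STA_FLAG_AUTHORIZED),
    };
    /* WME, MFP and short-preamble bits are outside the mask
     * and are therefore left exactly as they were. */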
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 210b9b6fecd2..11c72311f35b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -10,6 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/rtnetlink.h> 11#include <linux/rtnetlink.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "driver-ops.h"
13#include "rate.h" 14#include "rate.h"
14#include "debugfs.h" 15#include "debugfs.h"
15 16
@@ -51,14 +52,6 @@ static const struct file_operations name## _ops = { \
51 52
52DEBUGFS_READONLY_FILE(frequency, 20, "%d", 53DEBUGFS_READONLY_FILE(frequency, 20, "%d",
53 local->hw.conf.channel->center_freq); 54 local->hw.conf.channel->center_freq);
54DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
55 local->rts_threshold);
56DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
57 local->fragmentation_threshold);
58DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
59 local->hw.conf.short_frame_max_tx_count);
60DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
61 local->hw.conf.long_frame_max_tx_count);
62DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 55DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
63 local->total_ps_buffered); 56 local->total_ps_buffered);
64DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", 57DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
@@ -70,11 +63,10 @@ static ssize_t tsf_read(struct file *file, char __user *user_buf,
70 size_t count, loff_t *ppos) 63 size_t count, loff_t *ppos)
71{ 64{
72 struct ieee80211_local *local = file->private_data; 65 struct ieee80211_local *local = file->private_data;
73 u64 tsf = 0; 66 u64 tsf;
74 char buf[100]; 67 char buf[100];
75 68
76 if (local->ops->get_tsf) 69 tsf = drv_get_tsf(local);
77 tsf = local->ops->get_tsf(local_to_hw(local));
78 70
79 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf); 71 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf);
80 72
@@ -97,13 +89,13 @@ static ssize_t tsf_write(struct file *file,
97 89
98 if (strncmp(buf, "reset", 5) == 0) { 90 if (strncmp(buf, "reset", 5) == 0) {
99 if (local->ops->reset_tsf) { 91 if (local->ops->reset_tsf) {
100 local->ops->reset_tsf(local_to_hw(local)); 92 drv_reset_tsf(local);
101 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy)); 93 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy));
102 } 94 }
103 } else { 95 } else {
104 tsf = simple_strtoul(buf, NULL, 0); 96 tsf = simple_strtoul(buf, NULL, 0);
105 if (local->ops->set_tsf) { 97 if (local->ops->set_tsf) {
106 local->ops->set_tsf(local_to_hw(local), tsf); 98 drv_set_tsf(local, tsf);
107 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf); 99 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf);
108 } 100 }
109 } 101 }
@@ -135,6 +127,42 @@ static const struct file_operations reset_ops = {
135 .open = mac80211_open_file_generic, 127 .open = mac80211_open_file_generic,
136}; 128};
137 129
130static ssize_t noack_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
132{
133 struct ieee80211_local *local = file->private_data;
134 int res;
135 char buf[10];
136
137 res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test);
138
139 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
140}
141
142static ssize_t noack_write(struct file *file,
143 const char __user *user_buf,
144 size_t count, loff_t *ppos)
145{
146 struct ieee80211_local *local = file->private_data;
147 char buf[10];
148 size_t len;
149
150 len = min(count, sizeof(buf) - 1);
151 if (copy_from_user(buf, user_buf, len))
152 return -EFAULT;
153 buf[len] = '\0';
154
155 local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
156
157 return count;
158}
159
160static const struct file_operations noack_ops = {
161 .read = noack_read,
162 .write = noack_write,
163 .open = mac80211_open_file_generic
164};
165
138/* statistics stuff */ 166/* statistics stuff */
139 167
140#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 168#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -150,14 +178,12 @@ static ssize_t format_devstat_counter(struct ieee80211_local *local,
150 char buf[20]; 178 char buf[20];
151 int res; 179 int res;
152 180
153 if (!local->ops->get_stats)
154 return -EOPNOTSUPP;
155
156 rtnl_lock(); 181 rtnl_lock();
157 res = local->ops->get_stats(local_to_hw(local), &stats); 182 res = drv_get_stats(local, &stats);
158 rtnl_unlock(); 183 rtnl_unlock();
159 if (!res) 184 if (res)
160 res = printvalue(&stats, buf, sizeof(buf)); 185 return res;
186 res = printvalue(&stats, buf, sizeof(buf));
161 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 187 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
162} 188}
163 189
@@ -269,14 +295,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
269 local->debugfs.keys = debugfs_create_dir("keys", phyd); 295 local->debugfs.keys = debugfs_create_dir("keys", phyd);
270 296
271 DEBUGFS_ADD(frequency); 297 DEBUGFS_ADD(frequency);
272 DEBUGFS_ADD(rts_threshold);
273 DEBUGFS_ADD(fragmentation_threshold);
274 DEBUGFS_ADD(short_retry_limit);
275 DEBUGFS_ADD(long_retry_limit);
276 DEBUGFS_ADD(total_ps_buffered); 298 DEBUGFS_ADD(total_ps_buffered);
277 DEBUGFS_ADD(wep_iv); 299 DEBUGFS_ADD(wep_iv);
278 DEBUGFS_ADD(tsf); 300 DEBUGFS_ADD(tsf);
279 DEBUGFS_ADD_MODE(reset, 0200); 301 DEBUGFS_ADD_MODE(reset, 0200);
302 DEBUGFS_ADD(noack);
280 303
281 statsd = debugfs_create_dir("statistics", phyd); 304 statsd = debugfs_create_dir("statistics", phyd);
282 local->debugfs.statistics = statsd; 305 local->debugfs.statistics = statsd;
@@ -324,14 +347,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
324void debugfs_hw_del(struct ieee80211_local *local) 347void debugfs_hw_del(struct ieee80211_local *local)
325{ 348{
326 DEBUGFS_DEL(frequency); 349 DEBUGFS_DEL(frequency);
327 DEBUGFS_DEL(rts_threshold);
328 DEBUGFS_DEL(fragmentation_threshold);
329 DEBUGFS_DEL(short_retry_limit);
330 DEBUGFS_DEL(long_retry_limit);
331 DEBUGFS_DEL(total_ps_buffered); 350 DEBUGFS_DEL(total_ps_buffered);
332 DEBUGFS_DEL(wep_iv); 351 DEBUGFS_DEL(wep_iv);
333 DEBUGFS_DEL(tsf); 352 DEBUGFS_DEL(tsf);
334 DEBUGFS_DEL(reset); 353 DEBUGFS_DEL(reset);
354 DEBUGFS_DEL(noack);
335 355
336 DEBUGFS_STATS_DEL(transmitted_fragment_count); 356 DEBUGFS_STATS_DEL(transmitted_fragment_count);
337 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 357 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
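The new noack file is a standard two-handler debugfs attribute: the read side formats local->wifi_wme_noack_test, the write side parses a 0/1. DEBUGFS_ADD(noack) presumably expands to something like the following (an assumption based on the surrounding macros; mode and parent are illustrative):

    local->debugfs.noack =
            debugfs_create_file("noack", 0600, phyd, local, &noack_ops);

From userspace the flag would then be toggled by writing 1 or 0 to the noack file under the phy's debugfs directory.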
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
new file mode 100644
index 000000000000..b13446afd48f
--- /dev/null
+++ b/net/mac80211/driver-ops.h
@@ -0,0 +1,191 @@
1#ifndef __MAC80211_DRIVER_OPS
2#define __MAC80211_DRIVER_OPS
3
4#include <net/mac80211.h>
5#include "ieee80211_i.h"
6
7static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
8{
9 return local->ops->tx(&local->hw, skb);
10}
11
12static inline int drv_start(struct ieee80211_local *local)
13{
14 return local->ops->start(&local->hw);
15}
16
17static inline void drv_stop(struct ieee80211_local *local)
18{
19 local->ops->stop(&local->hw);
20}
21
22static inline int drv_add_interface(struct ieee80211_local *local,
23 struct ieee80211_if_init_conf *conf)
24{
25 return local->ops->add_interface(&local->hw, conf);
26}
27
28static inline void drv_remove_interface(struct ieee80211_local *local,
29 struct ieee80211_if_init_conf *conf)
30{
31 local->ops->remove_interface(&local->hw, conf);
32}
33
34static inline int drv_config(struct ieee80211_local *local, u32 changed)
35{
36 return local->ops->config(&local->hw, changed);
37}
38
39static inline void drv_bss_info_changed(struct ieee80211_local *local,
40 struct ieee80211_vif *vif,
41 struct ieee80211_bss_conf *info,
42 u32 changed)
43{
44 if (local->ops->bss_info_changed)
45 local->ops->bss_info_changed(&local->hw, vif, info, changed);
46}
47
48static inline void drv_configure_filter(struct ieee80211_local *local,
49 unsigned int changed_flags,
50 unsigned int *total_flags,
51 int mc_count,
52 struct dev_addr_list *mc_list)
53{
54 local->ops->configure_filter(&local->hw, changed_flags, total_flags,
55 mc_count, mc_list);
56}
57
58static inline int drv_set_tim(struct ieee80211_local *local,
59 struct ieee80211_sta *sta, bool set)
60{
61 if (local->ops->set_tim)
62 return local->ops->set_tim(&local->hw, sta, set);
63 return 0;
64}
65
66static inline int drv_set_key(struct ieee80211_local *local,
67 enum set_key_cmd cmd, struct ieee80211_vif *vif,
68 struct ieee80211_sta *sta,
69 struct ieee80211_key_conf *key)
70{
71 return local->ops->set_key(&local->hw, cmd, vif, sta, key);
72}
73
74static inline void drv_update_tkip_key(struct ieee80211_local *local,
75 struct ieee80211_key_conf *conf,
76 const u8 *address, u32 iv32,
77 u16 *phase1key)
78{
79 if (local->ops->update_tkip_key)
80 local->ops->update_tkip_key(&local->hw, conf, address,
81 iv32, phase1key);
82}
83
84static inline int drv_hw_scan(struct ieee80211_local *local,
85 struct cfg80211_scan_request *req)
86{
87 return local->ops->hw_scan(&local->hw, req);
88}
89
90static inline void drv_sw_scan_start(struct ieee80211_local *local)
91{
92 if (local->ops->sw_scan_start)
93 local->ops->sw_scan_start(&local->hw);
94}
95
96static inline void drv_sw_scan_complete(struct ieee80211_local *local)
97{
98 if (local->ops->sw_scan_complete)
99 local->ops->sw_scan_complete(&local->hw);
100}
101
102static inline int drv_get_stats(struct ieee80211_local *local,
103 struct ieee80211_low_level_stats *stats)
104{
105 if (!local->ops->get_stats)
106 return -EOPNOTSUPP;
107 return local->ops->get_stats(&local->hw, stats);
108}
109
110static inline void drv_get_tkip_seq(struct ieee80211_local *local,
111 u8 hw_key_idx, u32 *iv32, u16 *iv16)
112{
113 if (local->ops->get_tkip_seq)
114 local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
115}
116
117static inline int drv_set_rts_threshold(struct ieee80211_local *local,
118 u32 value)
119{
120 if (local->ops->set_rts_threshold)
121 return local->ops->set_rts_threshold(&local->hw, value);
122 return 0;
123}
124
125static inline void drv_sta_notify(struct ieee80211_local *local,
126 struct ieee80211_vif *vif,
127 enum sta_notify_cmd cmd,
128 struct ieee80211_sta *sta)
129{
130 if (local->ops->sta_notify)
131 local->ops->sta_notify(&local->hw, vif, cmd, sta);
132}
133
134static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
135 const struct ieee80211_tx_queue_params *params)
136{
137 if (local->ops->conf_tx)
138 return local->ops->conf_tx(&local->hw, queue, params);
139 return -EOPNOTSUPP;
140}
141
142static inline int drv_get_tx_stats(struct ieee80211_local *local,
143 struct ieee80211_tx_queue_stats *stats)
144{
145 return local->ops->get_tx_stats(&local->hw, stats);
146}
147
148static inline u64 drv_get_tsf(struct ieee80211_local *local)
149{
150 if (local->ops->get_tsf)
151 return local->ops->get_tsf(&local->hw);
152 return -1ULL;
153}
154
155static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
156{
157 if (local->ops->set_tsf)
158 local->ops->set_tsf(&local->hw, tsf);
159}
160
161static inline void drv_reset_tsf(struct ieee80211_local *local)
162{
163 if (local->ops->reset_tsf)
164 local->ops->reset_tsf(&local->hw);
165}
166
167static inline int drv_tx_last_beacon(struct ieee80211_local *local)
168{
169 if (local->ops->tx_last_beacon)
170 return local->ops->tx_last_beacon(&local->hw);
171 return 1;
172}
173
174static inline int drv_ampdu_action(struct ieee80211_local *local,
175 enum ieee80211_ampdu_mlme_action action,
176 struct ieee80211_sta *sta, u16 tid,
177 u16 *ssn)
178{
179 if (local->ops->ampdu_action)
180 return local->ops->ampdu_action(&local->hw, action,
181 sta, tid, ssn);
182 return -EOPNOTSUPP;
183}
184
185
186static inline void drv_rfkill_poll(struct ieee80211_local *local)
187{
188 if (local->ops->rfkill_poll)
189 local->ops->rfkill_poll(&local->hw);
190}
191#endif /* __MAC80211_DRIVER_OPS */
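The new driver-ops.h pulls the optional-callback boilerplate into one place: each drv_*() wrapper does the NULL check once and supplies the default the callers used to open-code (-EOPNOTSUPP, 0, 1, or -1ULL). The effect at a call site, taken from the agg-rx.c hunk earlier in this diff:

    /* before */
    if (local->ops->ampdu_action)
            ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
                                           &sta->sta, tid, &start_seq_num);

    /* after */
    ret = drv_ampdu_action(local, IEEE80211_AMPDU_RX_START,
                           &sta->sta, tid, &start_seq_num);

It also gives every driver call a single choke point, which would later lend itself to hooking instrumentation.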
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 0d95561c0ee0..f288d01a6344 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -12,12 +12,12 @@
 #include "ieee80211_i.h"
 
 /*
- * indicate a failed Michael MIC to userspace; the passed packet
- * (in the variable hdr) must be long enough to extract the TKIP
- * fields like TSC
+ * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of
+ * the frame that generated the MIC failure (i.e., if it was provided by the
+ * driver or is still in the frame), it should provide that information.
  */
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
-				     struct ieee80211_hdr *hdr)
+void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
+				     struct ieee80211_hdr *hdr, const u8 *tsc)
 {
 	union iwreq_data wrqu;
 	char *buf = kmalloc(128, GFP_ATOMIC);
@@ -34,8 +34,9 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
 		kfree(buf);
 	}
 
-	/*
-	 * TODO: re-add support for sending MIC failure indication
-	 * with all info via nl80211
-	 */
+	cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
+				     (hdr->addr1[0] & 0x01) ?
+				     NL80211_KEYTYPE_GROUP :
+				     NL80211_KEYTYPE_PAIRWISE,
+				     keyidx, tsc);
 }
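The replacement code above derives the nl80211 key type from the receiver address: bit 0 of the first octet of addr1 marks a group-addressed (multicast/broadcast) frame, so a MIC failure on such a frame implicates the group key, otherwise the pairwise key. A small self-contained sketch of just that test (illustrative helper, not a kernel function):

#include <stdio.h>

/* group-addressed iff the I/G bit (bit 0 of the first octet) is set */
static const char *keytype(const unsigned char *addr1)
{
	return (addr1[0] & 0x01) ? "GROUP" : "PAIRWISE";
}

int main(void)
{
	unsigned char ucast[6] = { 0x00, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f };
	unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%s\n", keytype(ucast));	/* PAIRWISE */
	printf("%s\n", keytype(bcast));	/* GROUP */
	return 0;
}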
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 4e3c72f20de7..0891bfb06996 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -14,7 +14,6 @@
  */
 
 #include <linux/ieee80211.h>
-#include <net/wireless.h>
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 #include "rate.h"
@@ -83,89 +82,6 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
 		ht_cap->mcs.rx_mask[32/8] |= 1;
 }
 
-/*
- * ieee80211_enable_ht should be called only after the operating band
- * has been determined as ht configuration depends on the hw's
- * HT abilities for a specific band.
- */
-u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
-			struct ieee80211_ht_info *hti,
-			u16 ap_ht_cap_flags)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_bss_ht_conf ht;
-	struct sta_info *sta;
-	u32 changed = 0;
-	bool enable_ht = true, ht_changed;
-	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
-	memset(&ht, 0, sizeof(ht));
-
-	/* HT is not supported */
-	if (!sband->ht_cap.ht_supported)
-		enable_ht = false;
-
-	/* check that channel matches the right operating channel */
-	if (local->hw.conf.channel->center_freq !=
-	    ieee80211_channel_to_frequency(hti->control_chan))
-		enable_ht = false;
-
-	if (enable_ht) {
-		channel_type = NL80211_CHAN_HT20;
-
-		if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
-		    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
-		    (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
-			switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				channel_type = NL80211_CHAN_HT40PLUS;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				channel_type = NL80211_CHAN_HT40MINUS;
-				break;
-			}
-		}
-	}
-
-	ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
-		     channel_type != local->hw.conf.channel_type;
-
-	local->oper_channel_type = channel_type;
-
-	if (ht_changed) {
-		/* channel_type change automatically detected */
-		ieee80211_hw_config(local, 0);
-
-		rcu_read_lock();
-
-		sta = sta_info_get(local, ifmgd->bssid);
-		if (sta)
-			rate_control_rate_update(local, sband, sta,
-						 IEEE80211_RC_HT_CHANGED);
-
-		rcu_read_unlock();
-
-	}
-
-	/* disable HT */
-	if (!enable_ht)
-		return 0;
-
-	ht.operation_mode = le16_to_cpu(hti->operation_mode);
-
-	/* if bss configuration changed store the new one */
-	if (memcmp(&sdata->vif.bss_conf.ht, &ht, sizeof(ht))) {
-		changed |= BSS_CHANGED_HT;
-		sdata->vif.bss_conf.ht = ht;
-	}
-
-	return changed;
-}
-
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
 {
 	int i;
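The removed ieee80211_enable_ht() derived the channel type from the HT information element: HT40+ when the secondary channel sits above the primary, HT40- when it sits below, HT20 otherwise. A standalone sketch of that mapping follows, with the two offset constants inlined under the assumption that they carry the usual 802.11n encoding (1 = above, 3 = below); it is an illustration, not the kernel definitions.

#include <stdio.h>

enum chan_type { CHAN_HT20, CHAN_HT40PLUS, CHAN_HT40MINUS };

#define SEC_OFFSET_MASK	0x3
#define SEC_ABOVE	1	/* secondary channel above primary */
#define SEC_BELOW	3	/* secondary channel below primary */

/* map the HT IE's secondary channel offset to a channel type */
static enum chan_type ht_param_to_chan_type(unsigned char ht_param)
{
	switch (ht_param & SEC_OFFSET_MASK) {
	case SEC_ABOVE:
		return CHAN_HT40PLUS;
	case SEC_BELOW:
		return CHAN_HT40MINUS;
	default:
		return CHAN_HT20;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       ht_param_to_chan_type(1),	/* HT40+ */
	       ht_param_to_chan_type(3),	/* HT40- */
	       ht_param_to_chan_type(0));	/* HT20  */
	return 0;
}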
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3201e1f96365..0b30277eb366 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -22,6 +22,7 @@
 #include <asm/unaligned.h>
 
 #include "ieee80211_i.h"
+#include "driver-ops.h"
 #include "rate.h"
 
 #define IEEE80211_SCAN_INTERVAL (2 * HZ)
@@ -59,74 +60,65 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 					sdata->u.ibss.bssid, 0);
 }
 
-static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
-				     const u8 *bssid, const int beacon_int,
-				     const int freq,
-				     const size_t supp_rates_len,
-				     const u8 *supp_rates,
-				     const u16 capability, u64 tsf)
+static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+				      const u8 *bssid, const int beacon_int,
+				      struct ieee80211_channel *chan,
+				      const u32 basic_rates,
+				      const u16 capability, u64 tsf)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
-	int res = 0, rates, i, j;
+	int rates, i;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos;
 	struct ieee80211_supported_band *sband;
-	union iwreq_data wrqu;
+	u32 bss_change;
+	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 
-	if (local->ops->reset_tsf) {
-		/* Reset own TSF to allow time synchronization work. */
-		local->ops->reset_tsf(local_to_hw(local));
-	}
+	/* Reset own TSF to allow time synchronization work. */
+	drv_reset_tsf(local);
 
-	if ((ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET) &&
-	    memcmp(ifibss->bssid, bssid, ETH_ALEN) == 0)
-		return res;
+	skb = ifibss->skb;
+	rcu_assign_pointer(ifibss->presp, NULL);
+	synchronize_rcu();
+	skb->data = skb->head;
+	skb->len = 0;
+	skb_reset_tail_pointer(skb);
+	skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
-		       "response\n", sdata->dev->name);
-		return -ENOMEM;
-	}
-
-	if (!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET)) {
-		/* Remove possible STA entries from other IBSS networks. */
-		sta_info_flush_delayed(sdata);
-	}
+	if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
+		sta_info_flush(sdata->local, sdata);
 
 	memcpy(ifibss->bssid, bssid, ETH_ALEN);
-	res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
-	if (res)
-		return res;
-
-	local->hw.conf.beacon_int = beacon_int >= 10 ? beacon_int : 10;
 
-	sdata->drop_unencrypted = capability &
-		WLAN_CAPABILITY_PRIVACY ? 1 : 0;
+	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-	res = ieee80211_set_freq(sdata, freq);
+	local->oper_channel = chan;
+	local->oper_channel_type = NL80211_CHAN_NO_HT;
+	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-	if (res)
-		return res;
+	sband = local->hw.wiphy->bands[chan->band];
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	/* build supported rates array */
+	pos = supp_rates;
+	for (i = 0; i < sband->n_bitrates; i++) {
+		int rate = sband->bitrates[i].bitrate;
+		u8 basic = 0;
+		if (basic_rates & BIT(i))
+			basic = 0x80;
+		*pos++ = basic | (u8) (rate / 5);
+	}
 
 	/* Build IBSS probe response */
-
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	mgmt = (struct ieee80211_mgmt *)
-		skb_put(skb, 24 + sizeof(mgmt->u.beacon));
+	mgmt = (void *) skb_put(skb, 24 + sizeof(mgmt->u.beacon));
 	memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_PROBE_RESP);
 	memset(mgmt->da, 0xff, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
-	mgmt->u.beacon.beacon_int =
-		cpu_to_le16(local->hw.conf.beacon_int);
+	mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
 	mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
 	mgmt->u.beacon.capab_info = cpu_to_le16(capability);
 
@@ -135,7 +127,7 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	*pos++ = ifibss->ssid_len;
 	memcpy(pos, ifibss->ssid, ifibss->ssid_len);
 
-	rates = supp_rates_len;
+	rates = sband->n_bitrates;
 	if (rates > 8)
 		rates = 8;
 	pos = skb_put(skb, 2 + rates);
@@ -147,7 +139,7 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 		pos = skb_put(skb, 2 + 1);
 		*pos++ = WLAN_EID_DS_PARAMS;
 		*pos++ = 1;
-		*pos++ = ieee80211_frequency_to_channel(freq);
+		*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
 	}
 
 	pos = skb_put(skb, 2 + 2);
@@ -157,51 +149,73 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	*pos++ = 0;
 	*pos++ = 0;
 
-	if (supp_rates_len > 8) {
-		rates = supp_rates_len - 8;
+	if (sband->n_bitrates > 8) {
+		rates = sband->n_bitrates - 8;
 		pos = skb_put(skb, 2 + rates);
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
 		*pos++ = rates;
 		memcpy(pos, &supp_rates[8], rates);
 	}
 
-	ifibss->probe_resp = skb;
+	if (ifibss->ie_len)
+		memcpy(skb_put(skb, ifibss->ie_len),
+		       ifibss->ie, ifibss->ie_len);
 
-	ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON |
-				   IEEE80211_IFCC_BEACON_ENABLED);
+	rcu_assign_pointer(ifibss->presp, skb);
 
+	sdata->vif.bss_conf.beacon_int = beacon_int;
+	bss_change = BSS_CHANGED_BEACON_INT;
+	bss_change |= ieee80211_reset_erp_info(sdata);
+	bss_change |= BSS_CHANGED_BSSID;
+	bss_change |= BSS_CHANGED_BEACON;
+	bss_change |= BSS_CHANGED_BEACON_ENABLED;
+	ieee80211_bss_info_change_notify(sdata, bss_change);
 
-	rates = 0;
-	for (i = 0; i < supp_rates_len; i++) {
-		int bitrate = (supp_rates[i] & 0x7f) * 5;
-		for (j = 0; j < sband->n_bitrates; j++)
-			if (sband->bitrates[j].bitrate == bitrate)
-				rates |= BIT(j);
-	}
+	ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
 
-	ieee80211_sta_def_wmm_params(sdata, supp_rates_len, supp_rates);
-
-	ifibss->flags |= IEEE80211_IBSS_PREV_BSSID_SET;
 	ifibss->state = IEEE80211_IBSS_MLME_JOINED;
-	mod_timer(&ifibss->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
-
-	memset(&wrqu, 0, sizeof(wrqu));
-	memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-	wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
+	mod_timer(&ifibss->timer,
+		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-	return res;
+	cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+				  mgmt, skb->len, 0, GFP_KERNEL);
+	cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
 }
 
-static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
-				   struct ieee80211_bss *bss)
+static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+				    struct ieee80211_bss *bss)
 {
-	return __ieee80211_sta_join_ibss(sdata,
-					 bss->cbss.bssid,
-					 bss->cbss.beacon_interval,
-					 bss->cbss.channel->center_freq,
-					 bss->supp_rates_len, bss->supp_rates,
-					 bss->cbss.capability,
-					 bss->cbss.tsf);
+	struct ieee80211_supported_band *sband;
+	u32 basic_rates;
+	int i, j;
+	u16 beacon_int = bss->cbss.beacon_interval;
+
+	if (beacon_int < 10)
+		beacon_int = 10;
+
+	sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band];
+
+	basic_rates = 0;
+
+	for (i = 0; i < bss->supp_rates_len; i++) {
+		int rate = (bss->supp_rates[i] & 0x7f) * 5;
+		bool is_basic = !!(bss->supp_rates[i] & 0x80);
+
+		for (j = 0; j < sband->n_bitrates; j++) {
+			if (sband->bitrates[j].bitrate == rate) {
+				if (is_basic)
+					basic_rates |= BIT(j);
+				break;
+			}
+		}
+	}
+
+	__ieee80211_sta_join_ibss(sdata, bss->cbss.bssid,
+				  beacon_int,
+				  bss->cbss.channel,
+				  basic_rates,
+				  bss->cbss.capability,
+				  bss->cbss.tsf);
 }
 
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -277,7 +291,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 		goto put_bss;
 
 	/* we use a fixed BSSID */
-	if (sdata->u.ibss.flags & IEEE80211_IBSS_BSSID_SET)
+	if (sdata->u.ibss.bssid)
 		goto put_bss;
 
 	/* not an IBSS */
@@ -322,12 +336,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 			bitrates[rx_status->rate_idx].bitrate;
 
 		rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
-	} else if (local && local->ops && local->ops->get_tsf)
-		/* second best option: get current TSF */
-		rx_timestamp = local->ops->get_tsf(local_to_hw(local));
-	else
-		/* can't merge without knowing the TSF */
-		rx_timestamp = -1LLU;
+	} else {
+		/*
+		 * second best option: get current TSF
+		 * (will return -1 if not supported)
+		 */
+		rx_timestamp = drv_get_tsf(local);
+	}
 
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
@@ -369,13 +384,14 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 	struct sta_info *sta;
 	int band = local->hw.conf.channel->band;
 
-	/* TODO: Could consider removing the least recently used entry and
-	 * allow new one to be added. */
+	/*
+	 * XXX: Consider removing the least recently used entry and
+	 * allow new one to be added.
+	 */
 	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: No room for a new IBSS STA "
-			       "entry %pM\n", sdata->dev->name, addr);
-		}
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
+			       sdata->dev->name, addr);
 		return NULL;
 	}
 
@@ -432,41 +448,33 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	mod_timer(&ifibss->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
+	mod_timer(&ifibss->timer,
+		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
 	ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+
 	if (ieee80211_sta_active_ibss(sdata))
 		return;
 
-	if ((ifibss->flags & IEEE80211_IBSS_BSSID_SET) &&
-	    (!(ifibss->flags & IEEE80211_IBSS_AUTO_CHANNEL_SEL)))
+	if (ifibss->fixed_channel)
 		return;
 
 	printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
 	       "IBSS networks with same SSID (merge)\n", sdata->dev->name);
 
-	/* XXX maybe racy? */
-	if (sdata->local->scan_req)
-		return;
-
-	memcpy(sdata->local->int_scan_req.ssids[0].ssid,
-	       ifibss->ssid, IEEE80211_MAX_SSID_LEN);
-	sdata->local->int_scan_req.ssids[0].ssid_len = ifibss->ssid_len;
-	ieee80211_request_scan(sdata, &sdata->local->int_scan_req);
+	ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
 }
 
-static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	u8 *pos;
 	u8 bssid[ETH_ALEN];
-	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 	u16 capability;
 	int i;
 
-	if (ifibss->flags & IEEE80211_IBSS_BSSID_SET) {
+	if (ifibss->fixed_bssid) {
 		memcpy(bssid, ifibss->bssid, ETH_ALEN);
 	} else {
 		/* Generate random, not broadcast, locally administered BSSID. Mix in
@@ -482,10 +490,7 @@ static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
 	       sdata->dev->name, bssid);
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
-	if (local->hw.conf.beacon_int == 0)
-		local->hw.conf.beacon_int = 100;
+	sband = local->hw.wiphy->bands[ifibss->channel->band];
 
 	capability = WLAN_CAPABILITY_IBSS;
 
@@ -494,29 +499,20 @@ static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	else
 		sdata->drop_unencrypted = 0;
 
-	pos = supp_rates;
-	for (i = 0; i < sband->n_bitrates; i++) {
-		int rate = sband->bitrates[i].bitrate;
-		*pos++ = (u8) (rate / 5);
-	}
-
-	return __ieee80211_sta_join_ibss(sdata,
-					 bssid, local->hw.conf.beacon_int,
-					 local->hw.conf.channel->center_freq,
-					 sband->n_bitrates, supp_rates,
-					 capability, 0);
+	__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
+				  ifibss->channel, 3, /* first two are basic */
+				  capability, 0);
 }
 
-static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_bss *bss;
+	struct ieee80211_channel *chan = NULL;
 	const u8 *bssid = NULL;
 	int active_ibss;
-
-	if (ifibss->ssid_len == 0)
-		return -EINVAL;
+	u16 capability;
 
 	active_ibss = ieee80211_sta_active_ibss(sdata);
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -525,14 +521,23 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
 	if (active_ibss)
-		return 0;
+		return;
 
-	if (ifibss->flags & IEEE80211_IBSS_BSSID_SET)
+	capability = WLAN_CAPABILITY_IBSS;
+	if (sdata->default_key)
+		capability |= WLAN_CAPABILITY_PRIVACY;
+
+	if (ifibss->fixed_bssid)
 		bssid = ifibss->bssid;
-	bss = (void *)cfg80211_get_bss(local->hw.wiphy, NULL, bssid,
+	if (ifibss->fixed_channel)
+		chan = ifibss->channel;
+	if (!is_zero_ether_addr(ifibss->bssid))
+		bssid = ifibss->bssid;
+	bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid,
 				       ifibss->ssid, ifibss->ssid_len,
-				       WLAN_CAPABILITY_IBSS,
-				       WLAN_CAPABILITY_IBSS);
+				       WLAN_CAPABILITY_IBSS |
+				       WLAN_CAPABILITY_PRIVACY,
+				       capability);
 
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	if (bss)
@@ -540,18 +545,14 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 		       "%pM\n", bss->cbss.bssid, ifibss->bssid);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-	if (bss &&
-	    (!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET) ||
-	     memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN))) {
-		int ret;
-
+	if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
 		printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
 		       " based on configured SSID\n",
 		       sdata->dev->name, bss->cbss.bssid);
 
-		ret = ieee80211_sta_join_ibss(sdata, bss);
+		ieee80211_sta_join_ibss(sdata, bss);
 		ieee80211_rx_bss_put(local, bss);
-		return ret;
+		return;
 	} else if (bss)
 		ieee80211_rx_bss_put(local, bss);
 
@@ -562,29 +563,24 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 	/* Selected IBSS not found in current scan results - try to scan */
 	if (ifibss->state == IEEE80211_IBSS_MLME_JOINED &&
 	    !ieee80211_sta_active_ibss(sdata)) {
-		mod_timer(&ifibss->timer, jiffies +
-				IEEE80211_IBSS_MERGE_INTERVAL);
-	} else if (time_after(jiffies, local->last_scan_completed +
+		mod_timer(&ifibss->timer,
+			  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
+	} else if (time_after(jiffies, ifibss->last_scan_completed +
 			      IEEE80211_SCAN_INTERVAL)) {
 		printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
 		       "join\n", sdata->dev->name);
 
-		/* XXX maybe racy? */
-		if (local->scan_req)
-			return -EBUSY;
-
-		memcpy(local->int_scan_req.ssids[0].ssid,
-		       ifibss->ssid, IEEE80211_MAX_SSID_LEN);
-		local->int_scan_req.ssids[0].ssid_len = ifibss->ssid_len;
-		return ieee80211_request_scan(sdata, &local->int_scan_req);
+		ieee80211_request_internal_scan(sdata, ifibss->ssid,
+						ifibss->ssid_len);
 	} else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) {
 		int interval = IEEE80211_SCAN_INTERVAL;
 
 		if (time_after(jiffies, ifibss->ibss_join_req +
 			       IEEE80211_IBSS_JOIN_TIMEOUT)) {
-			if (!(local->oper_channel->flags &
-			      IEEE80211_CHAN_NO_IBSS))
-				return ieee80211_sta_create_ibss(sdata);
+			if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) {
+				ieee80211_sta_create_ibss(sdata);
+				return;
+			}
 			printk(KERN_DEBUG "%s: IBSS not allowed on"
 			       " %d MHz\n", sdata->dev->name,
 			       local->hw.conf.channel->center_freq);
@@ -595,11 +591,9 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 		}
 
 		ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
-		mod_timer(&ifibss->timer, jiffies + interval);
-		return 0;
+		mod_timer(&ifibss->timer,
+			  round_jiffies(jiffies + interval));
 	}
-
-	return 0;
 }
 
 static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -614,13 +608,10 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	u8 *pos, *end;
 
 	if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
-	    len < 24 + 2 || !ifibss->probe_resp)
+	    len < 24 + 2 || !ifibss->presp)
 		return;
 
-	if (local->ops->tx_last_beacon)
-		tx_last_beacon = local->ops->tx_last_beacon(local_to_hw(local));
-	else
-		tx_last_beacon = 1;
+	tx_last_beacon = drv_tx_last_beacon(local);
 
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
@@ -649,13 +640,13 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	}
 	if (pos[1] != 0 &&
 	    (pos[1] != ifibss->ssid_len ||
 	     memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len) != 0)) {
 		/* Ignore ProbeReq for foreign SSID */
 		return;
 	}
 
 	/* Reply with ProbeResp */
-	skb = skb_copy(ifibss->probe_resp, GFP_KERNEL);
+	skb = skb_copy(ifibss->presp, GFP_KERNEL);
 	if (!skb)
 		return;
 
@@ -746,6 +737,9 @@ static void ieee80211_ibss_work(struct work_struct *work)
 	struct ieee80211_if_ibss *ifibss;
 	struct sk_buff *skb;
 
+	if (WARN_ON(local->suspended))
+		return;
+
 	if (!netif_running(sdata->dev))
 		return;
 
@@ -782,101 +776,63 @@ static void ieee80211_ibss_timer(unsigned long data)
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 
+	if (local->quiescing) {
+		ifibss->timer_running = true;
+		return;
+	}
+
 	set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
 	queue_work(local->hw.workqueue, &ifibss->work);
 }
 
-void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
+#ifdef CONFIG_PM
+void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	INIT_WORK(&ifibss->work, ieee80211_ibss_work);
-	setup_timer(&ifibss->timer, ieee80211_ibss_timer,
-		    (unsigned long) sdata);
-	skb_queue_head_init(&ifibss->skb_queue);
-
-	ifibss->flags |= IEEE80211_IBSS_AUTO_BSSID_SEL |
-			IEEE80211_IBSS_AUTO_CHANNEL_SEL;
+	cancel_work_sync(&ifibss->work);
+	if (del_timer_sync(&ifibss->timer))
+		ifibss->timer_running = true;
 }
 
-int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata)
+void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	ifibss->flags &= ~IEEE80211_IBSS_PREV_BSSID_SET;
-
-	if (ifibss->ssid_len)
-		ifibss->flags |= IEEE80211_IBSS_SSID_SET;
-	else
-		ifibss->flags &= ~IEEE80211_IBSS_SSID_SET;
-
-	ifibss->ibss_join_req = jiffies;
-	ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
-	set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
-
-	return 0;
-}
-
-int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
-{
-	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-
-	if (len > IEEE80211_MAX_SSID_LEN)
-		return -EINVAL;
-
-	if (ifibss->ssid_len != len || memcmp(ifibss->ssid, ssid, len) != 0) {
-		memset(ifibss->ssid, 0, sizeof(ifibss->ssid));
-		memcpy(ifibss->ssid, ssid, len);
-		ifibss->ssid_len = len;
+	if (ifibss->timer_running) {
+		add_timer(&ifibss->timer);
+		ifibss->timer_running = false;
 	}
-
-	return ieee80211_ibss_commit(sdata);
-}
-
-int ieee80211_ibss_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
-{
-	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-
-	memcpy(ssid, ifibss->ssid, ifibss->ssid_len);
-	*len = ifibss->ssid_len;
-
-	return 0;
 }
+#endif
 
-int ieee80211_ibss_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
+void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	if (is_valid_ether_addr(bssid)) {
-		memcpy(ifibss->bssid, bssid, ETH_ALEN);
-		ifibss->flags |= IEEE80211_IBSS_BSSID_SET;
-	} else {
-		memset(ifibss->bssid, 0, ETH_ALEN);
-		ifibss->flags &= ~IEEE80211_IBSS_BSSID_SET;
-	}
-
-	if (netif_running(sdata->dev)) {
-		if (ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID)) {
-			printk(KERN_DEBUG "%s: Failed to config new BSSID to "
			       "the low-level driver\n", sdata->dev->name);
-		}
-	}
-
-	return ieee80211_ibss_commit(sdata);
+	INIT_WORK(&ifibss->work, ieee80211_ibss_work);
+	setup_timer(&ifibss->timer, ieee80211_ibss_timer,
+		    (unsigned long) sdata);
+	skb_queue_head_init(&ifibss->skb_queue);
 }
 
 /* scan finished notification */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
 {
-	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
-	struct ieee80211_if_ibss *ifibss;
-
-	if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) {
-		ifibss = &sdata->u.ibss;
-		if ((!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET)) ||
-		    !ieee80211_sta_active_ibss(sdata))
-			ieee80211_sta_find_ibss(sdata);
+	struct ieee80211_sub_if_data *sdata;
+
+	mutex_lock(&local->iflist_mtx);
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (!netif_running(sdata->dev))
+			continue;
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+			continue;
+		if (!sdata->u.ibss.ssid_len)
+			continue;
+		sdata->u.ibss.last_scan_completed = jiffies;
+		ieee80211_sta_find_ibss(sdata);
 	}
+	mutex_unlock(&local->iflist_mtx);
 }
 
 ieee80211_rx_result
@@ -906,3 +862,86 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
 
 	return RX_DROP_MONITOR;
 }
+
+int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+			struct cfg80211_ibss_params *params)
+{
+	struct sk_buff *skb;
+
+	if (params->bssid) {
+		memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
+		sdata->u.ibss.fixed_bssid = true;
+	} else
+		sdata->u.ibss.fixed_bssid = false;
+
+	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+
+	sdata->u.ibss.channel = params->channel;
+	sdata->u.ibss.fixed_channel = params->channel_fixed;
+
+	if (params->ie) {
+		sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
+					   GFP_KERNEL);
+		if (sdata->u.ibss.ie)
+			sdata->u.ibss.ie_len = params->ie_len;
+	}
+
+	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
+			    36 /* bitrates */ +
+			    34 /* SSID */ +
+			    3  /* DS params */ +
+			    4  /* IBSS params */ +
+			    params->ie_len);
+	if (!skb)
+		return -ENOMEM;
+
+	sdata->u.ibss.skb = skb;
+	sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+	sdata->u.ibss.ibss_join_req = jiffies;
+
+	memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
+
+	/*
+	 * The ssid_len setting below is used to see whether
+	 * we are active, and we need all other settings
+	 * before that may get visible.
+	 */
+	mb();
+
+	sdata->u.ibss.ssid_len = params->ssid_len;
+
+	ieee80211_recalc_idle(sdata->local);
+
+	set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
+	queue_work(sdata->local->hw.workqueue, &sdata->u.ibss.work);
+
+	return 0;
+}
+
+int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+{
+	struct sk_buff *skb;
+
+	del_timer_sync(&sdata->u.ibss.timer);
+	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
+	cancel_work_sync(&sdata->u.ibss.work);
+	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
+
+	sta_info_flush(sdata->local, sdata);
+
+	/* remove beacon */
+	kfree(sdata->u.ibss.ie);
+	skb = sdata->u.ibss.presp;
+	rcu_assign_pointer(sdata->u.ibss.presp, NULL);
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+	synchronize_rcu();
+	kfree_skb(skb);
+
+	skb_queue_purge(&sdata->u.ibss.skb_queue);
+	memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+	sdata->u.ibss.ssid_len = 0;
+
+	ieee80211_recalc_idle(sdata->local);
+
+	return 0;
+}
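The rebuilt join/leave paths above publish the probe-response template with rcu_assign_pointer() and retire the old buffer only after synchronize_rcu(), so the TX path can keep reading ifibss->presp locklessly. The standalone model below shows the ordering of those steps with a C11 atomic pointer and an immediate free standing in for the grace period; it illustrates the idiom only, it is not the kernel's RCU implementation.

/*
 * Publish-then-retire sketch for a shared template pointer.
 * Kernel equivalents noted in comments; names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct presp {
	char data[64];
};

static _Atomic(struct presp *) presp;	/* the published template */

/* reader: loads the current template (kernel: rcu_dereference()) */
static void reader(void)
{
	struct presp *p = atomic_load(&presp);
	if (p)
		printf("tx: %s\n", p->data);
}

/* writer: unpublish, wait out readers, rebuild, republish */
static void rebuild(const char *ssid)
{
	struct presp *old = atomic_exchange(&presp, NULL);
	/* kernel: synchronize_rcu() guarantees no reader still sees 'old' */
	free(old);

	struct presp *new = malloc(sizeof(*new));
	snprintf(new->data, sizeof(new->data), "probe resp for %s", ssid);
	atomic_store(&presp, new);	/* kernel: rcu_assign_pointer() */
}

int main(void)
{
	rebuild("test-ibss");
	reader();
	rebuild("other-ibss");
	reader();
	free(atomic_exchange(&presp, NULL));
	return 0;
}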
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index e6ed78cb16b3..4dbc28964196 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/etherdevice.h>
 #include <net/cfg80211.h>
-#include <net/wireless.h>
 #include <net/iw_handler.h>
 #include <net/mac80211.h>
 #include "key.h"
@@ -236,7 +235,7 @@ struct mesh_preq_queue {
 #define IEEE80211_STA_ASSOCIATED	BIT(4)
 #define IEEE80211_STA_PROBEREQ_POLL	BIT(5)
 #define IEEE80211_STA_CREATE_IBSS	BIT(6)
-/* hole at 7, please re-use */
+#define IEEE80211_STA_CONTROL_PORT	BIT(7)
 #define IEEE80211_STA_WMM_ENABLED	BIT(8)
 /* hole at 9, please re-use */
 #define IEEE80211_STA_AUTO_SSID_SEL	BIT(10)
@@ -249,9 +248,8 @@ struct mesh_preq_queue {
 #define IEEE80211_STA_EXT_SME		BIT(17)
 /* flags for MLME request */
 #define IEEE80211_STA_REQ_SCAN 0
-#define IEEE80211_STA_REQ_DIRECT_PROBE 1
-#define IEEE80211_STA_REQ_AUTH 2
-#define IEEE80211_STA_REQ_RUN 3
+#define IEEE80211_STA_REQ_AUTH 1
+#define IEEE80211_STA_REQ_RUN 2
 
 /* bitfield of allowed auth algs */
 #define IEEE80211_AUTH_ALG_OPEN BIT(0)
@@ -295,6 +293,9 @@ struct ieee80211_if_managed {
 	int auth_tries; /* retries for auth req */
 	int assoc_tries; /* retries for assoc req */
 
+	unsigned long timers_running; /* used for quiesce/restart */
+	bool powersave; /* powersave requested for this iface */
+
 	unsigned long request;
 
 	unsigned long last_probe;
@@ -306,6 +307,8 @@ struct ieee80211_if_managed {
 	int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
 	int auth_transaction;
 
+	u32 beacon_crc;
+
 	enum {
 		IEEE80211_MFP_DISABLED,
 		IEEE80211_MFP_OPTIONAL,
@@ -319,14 +322,6 @@ struct ieee80211_if_managed {
 	size_t sme_auth_ie_len;
 };
 
-enum ieee80211_ibss_flags {
-	IEEE80211_IBSS_AUTO_CHANNEL_SEL		= BIT(0),
-	IEEE80211_IBSS_AUTO_BSSID_SEL		= BIT(1),
-	IEEE80211_IBSS_BSSID_SET		= BIT(2),
-	IEEE80211_IBSS_PREV_BSSID_SET		= BIT(3),
-	IEEE80211_IBSS_SSID_SET			= BIT(4),
-};
-
 enum ieee80211_ibss_request {
 	IEEE80211_IBSS_REQ_RUN	= 0,
 };
@@ -337,17 +332,23 @@ struct ieee80211_if_ibss {
 
 	struct sk_buff_head skb_queue;
 
-	u8 ssid[IEEE80211_MAX_SSID_LEN];
-	u8 ssid_len;
+	unsigned long request;
+	unsigned long last_scan_completed;
 
-	u32 flags;
+	bool timer_running;
 
-	u8 bssid[ETH_ALEN];
+	bool fixed_bssid;
+	bool fixed_channel;
 
-	unsigned long request;
+	u8 bssid[ETH_ALEN];
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 ssid_len, ie_len;
+	u8 *ie;
+	struct ieee80211_channel *channel;
 
 	unsigned long ibss_join_req;
-	struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
+	/* probe response/beacon for IBSS */
+	struct sk_buff *presp, *skb;
 
 	enum {
 		IEEE80211_IBSS_MLME_SEARCH,
@@ -361,6 +362,8 @@ struct ieee80211_if_mesh {
 	struct timer_list mesh_path_timer;
 	struct sk_buff_head skb_queue;
 
+	unsigned long timers_running;
+
 	bool housekeeping;
 
 	u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
@@ -430,6 +433,12 @@ struct ieee80211_sub_if_data {
 
 	int drop_unencrypted;
 
+	/*
+	 * keep track of whether the HT opmode (stored in
+	 * vif.bss_info.ht_operation_mode) is valid.
+	 */
+	bool ht_opmode_valid;
+
 	/* Fragment table for host-based reassembly */
 	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
@@ -580,6 +589,7 @@ enum queue_stop_reason {
 	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
 	IEEE80211_QUEUE_STOP_REASON_SUSPEND,
 	IEEE80211_QUEUE_STOP_REASON_PENDING,
+	IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
 };
 
 struct ieee80211_master_priv {
@@ -606,6 +616,21 @@ struct ieee80211_local {
 	unsigned int filter_flags; /* FIF_* */
 	struct iw_statistics wstats;
 	bool tim_in_locked_section; /* see ieee80211_beacon_get() */
+
+	/*
+	 * suspended is true if we finished all the suspend _and_ we have
+	 * not yet come up from resume. This is to be used by mac80211
+	 * to ensure driver sanity during suspend and mac80211's own
+	 * sanity. It can eventually be used for WoW as well.
+	 */
+	bool suspended;
+
+	/*
+	 * quiescing is true during the suspend process _only_ to
+	 * ease timer cancelling etc.
+	 */
+	bool quiescing;
+
 	int tx_headroom; /* required headroom for hardware/radiotap */
 
 	/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -626,8 +651,6 @@ struct ieee80211_local {
 	spinlock_t sta_lock;
 	unsigned long num_sta;
 	struct list_head sta_list;
-	struct list_head sta_flush_list;
-	struct work_struct sta_flush_work;
 	struct sta_info *sta_hash[STA_HASH_SIZE];
 	struct timer_list sta_cleanup;
 
@@ -647,9 +670,6 @@ struct ieee80211_local {
 
 	struct rate_control_ref *rate_ctrl;
 
-	int rts_threshold;
-	int fragmentation_threshold;
-
 	struct crypto_blkcipher *wep_tx_tfm;
 	struct crypto_blkcipher *wep_rx_tfm;
 	u32 wep_iv;
@@ -666,15 +686,18 @@ struct ieee80211_local {
 
 
 	/* Scanning and BSS list */
+	struct mutex scan_mtx;
 	bool sw_scanning, hw_scanning;
 	struct cfg80211_ssid scan_ssid;
 	struct cfg80211_scan_request int_scan_req;
 	struct cfg80211_scan_request *scan_req;
 	struct ieee80211_channel *scan_channel;
+	const u8 *orig_ies;
+	int orig_ies_len;
 	int scan_channel_idx;
+	int scan_ies_len;
 
 	enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
-	unsigned long last_scan_completed;
 	struct delayed_work scan_work;
 	struct ieee80211_sub_if_data *scan_sdata;
 	enum nl80211_channel_type oper_channel_type;
@@ -736,28 +759,32 @@ struct ieee80211_local {
 	int wifi_wme_noack_test;
 	unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
 
-	bool powersave;
 	bool pspolling;
+	/*
+	 * PS can only be enabled when we have exactly one managed
+	 * interface (and monitors) in PS, this then points there.
+	 */
+	struct ieee80211_sub_if_data *ps_sdata;
 	struct work_struct dynamic_ps_enable_work;
 	struct work_struct dynamic_ps_disable_work;
 	struct timer_list dynamic_ps_timer;
+	struct notifier_block network_latency_notifier;
 
 	int user_power_level; /* in dBm */
 	int power_constr_level; /* in dBm */
 
+	struct work_struct restart_work;
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct local_debugfsdentries {
 		struct dentry *rcdir;
 		struct dentry *rcname;
 		struct dentry *frequency;
-		struct dentry *rts_threshold;
-		struct dentry *fragmentation_threshold;
-		struct dentry *short_retry_limit;
-		struct dentry *long_retry_limit;
 		struct dentry *total_ps_buffered;
 		struct dentry *wep_iv;
 		struct dentry *tsf;
 		struct dentry *reset;
+		struct dentry *noack;
 		struct dentry *statistics;
 		struct local_debugfsdentries_statsdentries {
 			struct dentry *transmitted_fragment_count;
@@ -830,7 +857,7 @@ struct ieee802_11_elems {
 	u8 *fh_params;
 	u8 *ds_params;
 	u8 *cf_params;
-	u8 *tim;
+	struct ieee80211_tim_ie *tim;
 	u8 *ibss_params;
 	u8 *challenge;
 	u8 *wpa;
@@ -903,7 +930,6 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
 
 
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
-int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 				      u32 changed);
@@ -927,12 +953,16 @@ int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason
 int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
 void ieee80211_send_pspoll(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+int ieee80211_max_network_latency(struct notifier_block *nb,
+				  unsigned long data, void *dummy);
+void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+				      struct ieee80211_channel_sw_ie *sw_elem,
+				      struct ieee80211_bss *bss);
+void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
 
 /* IBSS code */
-int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata);
-int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
-int ieee80211_ibss_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
-int ieee80211_ibss_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
 ieee80211_rx_result
@@ -940,14 +970,22 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
 		       struct ieee80211_rx_status *rx_status);
 struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 					u8 *bssid, u8 *addr, u32 supp_rates);
+int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+			struct cfg80211_ibss_params *params);
+int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
+int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
+				    const u8 *ssid, u8 ssid_len);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
			   struct cfg80211_scan_request *req);
 int ieee80211_scan_results(struct ieee80211_local *local,
			   struct iw_request_info *info,
			   char *buf, size_t len);
+void ieee80211_scan_cancel(struct ieee80211_local *local);
 ieee80211_rx_result
 ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
		  struct sk_buff *skb,
@@ -956,9 +994,6 @@ int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
			       const char *ie, size_t len);
 
 void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
-void ieee80211_scan_failed(struct ieee80211_local *local);
-int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
-			 struct cfg80211_scan_request *req);
 struct ieee80211_bss *
 ieee80211_bss_info_update(struct ieee80211_local *local,
			  struct ieee80211_rx_status *rx_status,
@@ -983,6 +1018,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
			     enum nl80211_iftype type);
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
 void ieee80211_remove_interfaces(struct ieee80211_local *local);
+u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
+void ieee80211_recalc_idle(struct ieee80211_local *local);
 
 /* tx handling */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local);
@@ -995,9 +1032,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
 void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
				       struct ieee80211_ht_cap *ht_cap_ie,
				       struct ieee80211_sta_ht_cap *ht_cap);
-u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
-			struct ieee80211_ht_info *hti,
-			u16 ap_ht_cap_flags);
 void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
			  const u8 *da, u16 tid,
@@ -1027,24 +1061,23 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
				       struct ieee80211_mgmt *mgmt,
				       size_t len);
-void ieee80211_chswitch_timer(unsigned long data);
-void ieee80211_chswitch_work(struct work_struct *work);
-void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
-				  struct ieee80211_channel_sw_ie *sw_elem,
-				  struct ieee80211_bss *bss);
-void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
-				 u16 capab_info, u8 *pwr_constr_elem,
-				 u8 pwr_constr_elem_len);
-
-/* Suspend/resume */
+
+/* Suspend/resume and hw reconfiguration */
+int ieee80211_reconfig(struct ieee80211_local *local);
+
 #ifdef CONFIG_PM
 int __ieee80211_suspend(struct ieee80211_hw *hw);
-int __ieee80211_resume(struct ieee80211_hw *hw);
+
+static inline int __ieee80211_resume(struct ieee80211_hw *hw)
+{
+	return ieee80211_reconfig(hw_to_local(hw));
+}
 #else
 static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
 {
	return 0;
 }
+
 static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 {
	return 0;
@@ -1053,19 +1086,20 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
 /* utility functions/constants */
 extern void *mac80211_wiphy_privid; /* for wiphy privid */
-extern const unsigned char rfc1042_header[6];
-extern const unsigned char bridge_tunnel_header[6];
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			enum nl80211_iftype type);
 int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
			     int rate, int erp, int short_preamble);
 void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
-				     struct ieee80211_hdr *hdr);
+				     struct ieee80211_hdr *hdr, const u8 *tsc);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
 void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
		      int encrypt);
 void ieee802_11_parse_elems(u8 *start, size_t len,
			    struct ieee802_11_elems *elems);
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+			       struct ieee802_11_elems *elems,
+			       u64 filter, u32 crc);
 int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
 u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
			      enum ieee80211_band band);
@@ -1088,14 +1122,20 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason);
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason);
+void ieee80211_add_pending_skb(struct ieee80211_local *local,
+			       struct sk_buff *skb);
+int ieee80211_add_pending_skbs(struct ieee80211_local *local,
+			       struct sk_buff_head *skbs);
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
			 u16 transaction, u16 auth_alg,
			 u8 *extra, size_t extra_len,
			 const u8 *bssid, int encrypt);
+int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
+			     const u8 *ie, size_t ie_len);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
-			      u8 *ssid, size_t ssid_len,
-			      u8 *ie, size_t ie_len);
+			      const u8 *ssid, size_t ssid_len,
+			      const u8 *ie, size_t ie_len);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
				  const size_t supp_rates_len,
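Several of the new fields above (timer_running, timers_running, quiescing, suspended) exist to park timers across suspend: quiesce cancels a timer but records whether it was pending, and restart re-arms it only in that case. A hypothetical standalone model of that handshake follows, with plain booleans standing in for the kernel's timer_list API.

#include <stdbool.h>
#include <stdio.h>

struct iface_timer {
	bool pending;		/* stand-in for an armed timer_list */
	bool timer_running;	/* saved state across suspend */
};

static void quiesce(struct iface_timer *t)
{
	if (t->pending) {		/* kernel: del_timer_sync() != 0 */
		t->pending = false;
		t->timer_running = true;
	}
}

static void restart(struct iface_timer *t)
{
	if (t->timer_running) {		/* kernel: add_timer() */
		t->pending = true;
		t->timer_running = false;
	}
}

int main(void)
{
	struct iface_timer t = { .pending = true };

	quiesce(&t);
	printf("suspended: pending=%d saved=%d\n", t.pending, t.timer_running);
	restart(&t);
	printf("resumed:   pending=%d saved=%d\n", t.pending, t.timer_running);
	return 0;
}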
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 91e8e1bacaaa..b7c8a4484298 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -20,6 +20,7 @@
 #include "debugfs_netdev.h"
 #include "mesh.h"
 #include "led.h"
+#include "driver-ops.h"
 
 /**
  * DOC: Interface list locking
@@ -164,14 +165,12 @@ static int ieee80211_open(struct net_device *dev)
 	}
 
 	if (local->open_count == 0) {
-		res = 0;
-		if (local->ops->start)
-			res = local->ops->start(local_to_hw(local));
+		res = drv_start(local);
 		if (res)
 			goto err_del_bss;
 		/* we're brought up, everything changes */
 		hw_reconf_flags = ~0;
-		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
+		ieee80211_led_radio(local, true);
 	}
 
 	/*
@@ -199,8 +198,8 @@ static int ieee80211_open(struct net_device *dev)
 	 * Validate the MAC address for this device.
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		if (!local->open_count && local->ops->stop)
-			local->ops->stop(local_to_hw(local));
+		if (!local->open_count)
+			drv_stop(local);
 		return -EADDRNOTAVAIL;
 	}
 
@@ -235,17 +234,13 @@ static int ieee80211_open(struct net_device *dev)
 		netif_addr_unlock_bh(local->mdev);
 		break;
 	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_ADHOC:
-		if (sdata->vif.type == NL80211_IFTYPE_STATION)
-			sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
-		else
-			sdata->u.ibss.flags &= ~IEEE80211_IBSS_PREV_BSSID_SET;
+		sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
 		/* fall through */
 	default:
 		conf.vif = &sdata->vif;
 		conf.type = sdata->vif.type;
 		conf.mac_addr = dev->dev_addr;
-		res = local->ops->add_interface(local_to_hw(local), &conf);
+		res = drv_add_interface(local, &conf);
 		if (res)
 			goto err_stop;
 
@@ -306,6 +301,8 @@ static int ieee80211_open(struct net_device *dev)
 	if (sdata->flags & IEEE80211_SDATA_PROMISC)
 		atomic_inc(&local->iff_promiscs);
 
+	hw_reconf_flags |= __ieee80211_recalc_idle(local);
+
 	local->open_count++;
 	if (hw_reconf_flags) {
 		ieee80211_hw_config(local, hw_reconf_flags);
@@ -317,6 +314,8 @@ static int ieee80211_open(struct net_device *dev)
 		ieee80211_set_wmm_default(sdata);
 	}
 
+	ieee80211_recalc_ps(local, -1);
+
 	/*
 	 * ieee80211_sta_work is disabled while network interface
 	 * is down. Therefore, some configuration changes may not
@@ -325,17 +324,15 @@ static int ieee80211_open(struct net_device *dev)
 	 */
 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		queue_work(local->hw.workqueue, &sdata->u.mgd.work);
-	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
329 queue_work(local->hw.workqueue, &sdata->u.ibss.work);
330 327
331 netif_tx_start_all_queues(dev); 328 netif_tx_start_all_queues(dev);
332 329
333 return 0; 330 return 0;
334 err_del_interface: 331 err_del_interface:
335 local->ops->remove_interface(local_to_hw(local), &conf); 332 drv_remove_interface(local, &conf);
336 err_stop: 333 err_stop:
337 if (!local->open_count && local->ops->stop) 334 if (!local->open_count)
338 local->ops->stop(local_to_hw(local)); 335 drv_stop(local);
339 err_del_bss: 336 err_del_bss:
340 sdata->bss = NULL; 337 sdata->bss = NULL;
341 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 338 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -497,7 +494,6 @@ static int ieee80211_stop(struct net_device *dev)
497 /* fall through */ 494 /* fall through */
498 case NL80211_IFTYPE_ADHOC: 495 case NL80211_IFTYPE_ADHOC:
499 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 496 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
500 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
501 del_timer_sync(&sdata->u.ibss.timer); 497 del_timer_sync(&sdata->u.ibss.timer);
502 cancel_work_sync(&sdata->u.ibss.work); 498 cancel_work_sync(&sdata->u.ibss.work);
503 synchronize_rcu(); 499 synchronize_rcu();
@@ -549,19 +545,22 @@ static int ieee80211_stop(struct net_device *dev)
549 conf.mac_addr = dev->dev_addr; 545 conf.mac_addr = dev->dev_addr;
550 /* disable all keys for as long as this netdev is down */ 546 /* disable all keys for as long as this netdev is down */
551 ieee80211_disable_keys(sdata); 547 ieee80211_disable_keys(sdata);
552 local->ops->remove_interface(local_to_hw(local), &conf); 548 drv_remove_interface(local, &conf);
553 } 549 }
554 550
555 sdata->bss = NULL; 551 sdata->bss = NULL;
556 552
553 hw_reconf_flags |= __ieee80211_recalc_idle(local);
554
555 ieee80211_recalc_ps(local, -1);
556
557 if (local->open_count == 0) { 557 if (local->open_count == 0) {
558 if (netif_running(local->mdev)) 558 if (netif_running(local->mdev))
559 dev_close(local->mdev); 559 dev_close(local->mdev);
560 560
561 if (local->ops->stop) 561 drv_stop(local);
562 local->ops->stop(local_to_hw(local));
563 562
564 ieee80211_led_radio(local, 0); 563 ieee80211_led_radio(local, false);
565 564
566 flush_workqueue(local->hw.workqueue); 565 flush_workqueue(local->hw.workqueue);
567 566
@@ -649,7 +648,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
649 mesh_rmc_free(sdata); 648 mesh_rmc_free(sdata);
650 break; 649 break;
651 case NL80211_IFTYPE_ADHOC: 650 case NL80211_IFTYPE_ADHOC:
652 kfree_skb(sdata->u.ibss.probe_resp); 651 if (WARN_ON(sdata->u.ibss.presp))
652 kfree_skb(sdata->u.ibss.presp);
653 break; 653 break;
654 case NL80211_IFTYPE_STATION: 654 case NL80211_IFTYPE_STATION:
655 kfree(sdata->u.mgd.extra_ie); 655 kfree(sdata->u.mgd.extra_ie);
@@ -896,3 +896,74 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
896 unregister_netdevice(sdata->dev); 896 unregister_netdevice(sdata->dev);
897 } 897 }
898} 898}
899
900static u32 ieee80211_idle_off(struct ieee80211_local *local,
901 const char *reason)
902{
903 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
904 return 0;
905
906#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
907 printk(KERN_DEBUG "%s: device no longer idle - %s\n",
908 wiphy_name(local->hw.wiphy), reason);
909#endif
910
911 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
912 return IEEE80211_CONF_CHANGE_IDLE;
913}
914
915static u32 ieee80211_idle_on(struct ieee80211_local *local)
916{
917 if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
918 return 0;
919
920#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
921 printk(KERN_DEBUG "%s: device now idle\n",
922 wiphy_name(local->hw.wiphy));
923#endif
924
925 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
926 return IEEE80211_CONF_CHANGE_IDLE;
927}
928
929u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
930{
931 struct ieee80211_sub_if_data *sdata;
932 int count = 0;
933
934 if (local->hw_scanning || local->sw_scanning)
935 return ieee80211_idle_off(local, "scanning");
936
937 list_for_each_entry(sdata, &local->interfaces, list) {
938 if (!netif_running(sdata->dev))
939 continue;
940 /* do not count disabled managed interfaces */
941 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
942 sdata->u.mgd.state == IEEE80211_STA_MLME_DISABLED)
943 continue;
944 /* do not count unused IBSS interfaces */
945 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
946 !sdata->u.ibss.ssid_len)
947 continue;
948 /* count everything else */
949 count++;
950 }
951
952 if (!count)
953 return ieee80211_idle_on(local);
954 else
955 return ieee80211_idle_off(local, "in use");
956
957 return 0;
958}
959
960void ieee80211_recalc_idle(struct ieee80211_local *local)
961{
962 u32 chg;
963
964 mutex_lock(&local->iflist_mtx);
965 chg = __ieee80211_recalc_idle(local);
966 mutex_unlock(&local->iflist_mtx);
967 if (chg)
968 ieee80211_hw_config(local, chg);
969}
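
The __ieee80211_recalc_idle() logic above reduces to: scanning means busy, otherwise the device is idle exactly when no up-and-active interface remains, and a configuration change is reported only when the flag actually flips. A minimal userspace model of that bookkeeping (the struct and change flag are stand-ins, not the mac80211 types):

#include <stdbool.h>
#include <stdio.h>

#define CONF_CHANGE_IDLE 0x1	/* stand-in for IEEE80211_CONF_CHANGE_IDLE */

struct model_local {
	bool idle;
};

static unsigned int recalc_idle(struct model_local *local,
				int active_ifaces, bool scanning)
{
	bool idle = !scanning && active_ifaces == 0;

	if (idle == local->idle)
		return 0;		/* no flip, nothing for the driver */
	local->idle = idle;
	return CONF_CHANGE_IDLE;	/* caller pushes a hw reconfig */
}

int main(void)
{
	struct model_local local = { .idle = false };

	printf("%#x\n", recalc_idle(&local, 0, false));	/* 0x1: goes idle */
	printf("%#x\n", recalc_idle(&local, 0, false));	/* 0: already idle */
	printf("%#x\n", recalc_idle(&local, 0, true));	/* 0x1: scan = busy */
	return 0;
}

Reporting a change bit rather than calling the reconfig directly is what lets ieee80211_open()/ieee80211_stop() above OR the result into hw_reconf_flags and issue one combined ieee80211_hw_config() call.
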
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 687acf23054d..ce267565e180 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -16,6 +16,7 @@
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19#include "debugfs_key.h" 20#include "debugfs_key.h"
20#include "aes_ccm.h" 21#include "aes_ccm.h"
21#include "aes_cmac.h" 22#include "aes_cmac.h"
@@ -136,8 +137,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
136 struct ieee80211_sub_if_data, 137 struct ieee80211_sub_if_data,
137 u.ap); 138 u.ap);
138 139
139 ret = key->local->ops->set_key(local_to_hw(key->local), SET_KEY, 140 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
140 &sdata->vif, sta, &key->conf);
141 141
142 if (!ret) { 142 if (!ret) {
143 spin_lock(&todo_lock); 143 spin_lock(&todo_lock);
@@ -179,8 +179,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
179 struct ieee80211_sub_if_data, 179 struct ieee80211_sub_if_data,
180 u.ap); 180 u.ap);
181 181
182 ret = key->local->ops->set_key(local_to_hw(key->local), DISABLE_KEY, 182 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif,
183 &sdata->vif, sta, &key->conf); 183 sta, &key->conf);
184 184
185 if (ret) 185 if (ret)
186 printk(KERN_ERR "mac80211-%s: failed to remove key " 186 printk(KERN_ERR "mac80211-%s: failed to remove key "
@@ -290,9 +290,11 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
290struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 290struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
291 int idx, 291 int idx,
292 size_t key_len, 292 size_t key_len,
293 const u8 *key_data) 293 const u8 *key_data,
294 size_t seq_len, const u8 *seq)
294{ 295{
295 struct ieee80211_key *key; 296 struct ieee80211_key *key;
297 int i, j;
296 298
297 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); 299 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
298 300
@@ -318,14 +320,31 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
318 case ALG_TKIP: 320 case ALG_TKIP:
319 key->conf.iv_len = TKIP_IV_LEN; 321 key->conf.iv_len = TKIP_IV_LEN;
320 key->conf.icv_len = TKIP_ICV_LEN; 322 key->conf.icv_len = TKIP_ICV_LEN;
323 if (seq) {
324 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
325 key->u.tkip.rx[i].iv32 =
326 get_unaligned_le32(&seq[2]);
327 key->u.tkip.rx[i].iv16 =
328 get_unaligned_le16(seq);
329 }
330 }
321 break; 331 break;
322 case ALG_CCMP: 332 case ALG_CCMP:
323 key->conf.iv_len = CCMP_HDR_LEN; 333 key->conf.iv_len = CCMP_HDR_LEN;
324 key->conf.icv_len = CCMP_MIC_LEN; 334 key->conf.icv_len = CCMP_MIC_LEN;
335 if (seq) {
336 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
337 for (j = 0; j < CCMP_PN_LEN; j++)
338 key->u.ccmp.rx_pn[i][j] =
339 seq[CCMP_PN_LEN - j - 1];
340 }
325 break; 341 break;
326 case ALG_AES_CMAC: 342 case ALG_AES_CMAC:
327 key->conf.iv_len = 0; 343 key->conf.iv_len = 0;
328 key->conf.icv_len = sizeof(struct ieee80211_mmie); 344 key->conf.icv_len = sizeof(struct ieee80211_mmie);
345 if (seq)
346 for (j = 0; j < 6; j++)
347 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
329 break; 348 break;
330 } 349 }
331 memcpy(key->conf.key, key_data, key_len); 350 memcpy(key->conf.key, key_data, key_len);
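
The new seq handling in ieee80211_key_alloc() is byte-order plumbing: the sequence buffer arrives least-significant byte first, TKIP splits it into a 16-bit and a 32-bit counter, and CCMP (like AES-CMAC) stores the packet number reversed so index 0 holds the most significant byte. A self-contained sketch of those conversions, with local helpers standing in for the kernel's get_unaligned_le16/le32:

#include <stdint.h>
#include <stdio.h>

#define CCMP_PN_LEN 6

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t seq[CCMP_PN_LEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t rx_pn[CCMP_PN_LEN];
	int j;

	/* TKIP: low two bytes are iv16, upper four are iv32 */
	printf("iv16=%04x iv32=%08x\n", get_le16(seq), get_le32(&seq[2]));

	/* CCMP: reverse into most-significant-byte-first order */
	for (j = 0; j < CCMP_PN_LEN; j++)
		rx_pn[j] = seq[CCMP_PN_LEN - j - 1];
	for (j = 0; j < CCMP_PN_LEN; j++)
		printf("%02x", rx_pn[j]);
	printf("\n");
	return 0;
}
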
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 215d3ef42a4f..9572e00f532c 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -144,7 +144,8 @@ struct ieee80211_key {
144struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 144struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
145 int idx, 145 int idx,
146 size_t key_len, 146 size_t key_len,
147 const u8 *key_data); 147 const u8 *key_data,
148 size_t seq_len, const u8 *seq);
148/* 149/*
149 * Insert a key into data structures (sdata, sta if necessary) 150 * Insert a key into data structures (sdata, sta if necessary)
150 * to make it used, free old key. 151 * to make it used, free old key.
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 14134193cd17..092a017b237e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -21,10 +21,12 @@
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
23#include <linux/bitmap.h> 23#include <linux/bitmap.h>
24#include <linux/pm_qos_params.h>
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/cfg80211.h> 26#include <net/cfg80211.h>
26 27
27#include "ieee80211_i.h" 28#include "ieee80211_i.h"
29#include "driver-ops.h"
28#include "rate.h" 30#include "rate.h"
29#include "mesh.h" 31#include "mesh.h"
30#include "wep.h" 32#include "wep.h"
@@ -80,10 +82,9 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
80 /* be a bit nasty */ 82 /* be a bit nasty */
81 new_flags |= (1<<31); 83 new_flags |= (1<<31);
82 84
83 local->ops->configure_filter(local_to_hw(local), 85 drv_configure_filter(local, changed_flags, &new_flags,
84 changed_flags, &new_flags, 86 local->mdev->mc_count,
85 local->mdev->mc_count, 87 local->mdev->mc_list);
86 local->mdev->mc_list);
87 88
88 WARN_ON(new_flags & (1<<31)); 89 WARN_ON(new_flags & (1<<31));
89 90
@@ -151,93 +152,19 @@ static void ieee80211_master_set_multicast_list(struct net_device *dev)
151 ieee80211_configure_filter(local); 152 ieee80211_configure_filter(local);
152} 153}
153 154
154/* everything else */
155
156int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
157{
158 struct ieee80211_local *local = sdata->local;
159 struct ieee80211_if_conf conf;
160
161 if (WARN_ON(!netif_running(sdata->dev)))
162 return 0;
163
164 memset(&conf, 0, sizeof(conf));
165
166 if (sdata->vif.type == NL80211_IFTYPE_STATION)
167 conf.bssid = sdata->u.mgd.bssid;
168 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
169 conf.bssid = sdata->u.ibss.bssid;
170 else if (sdata->vif.type == NL80211_IFTYPE_AP)
171 conf.bssid = sdata->dev->dev_addr;
172 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
173 static const u8 zero[ETH_ALEN] = { 0 };
174 conf.bssid = zero;
175 } else {
176 WARN_ON(1);
177 return -EINVAL;
178 }
179
180 if (!local->ops->config_interface)
181 return 0;
182
183 switch (sdata->vif.type) {
184 case NL80211_IFTYPE_AP:
185 case NL80211_IFTYPE_ADHOC:
186 case NL80211_IFTYPE_MESH_POINT:
187 break;
188 default:
189 /* do not warn to simplify caller in scan.c */
190 changed &= ~IEEE80211_IFCC_BEACON_ENABLED;
191 if (WARN_ON(changed & IEEE80211_IFCC_BEACON))
192 return -EINVAL;
193 changed &= ~IEEE80211_IFCC_BEACON;
194 break;
195 }
196
197 if (changed & IEEE80211_IFCC_BEACON_ENABLED) {
198 if (local->sw_scanning) {
199 conf.enable_beacon = false;
200 } else {
201 /*
202 * Beacon should be enabled, but AP mode must
203 * check whether there is a beacon configured.
204 */
205 switch (sdata->vif.type) {
206 case NL80211_IFTYPE_AP:
207 conf.enable_beacon =
208 !!rcu_dereference(sdata->u.ap.beacon);
209 break;
210 case NL80211_IFTYPE_ADHOC:
211 conf.enable_beacon = !!sdata->u.ibss.probe_resp;
212 break;
213 case NL80211_IFTYPE_MESH_POINT:
214 conf.enable_beacon = true;
215 break;
216 default:
217 /* not reached */
218 WARN_ON(1);
219 break;
220 }
221 }
222 }
223
224 conf.changed = changed;
225
226 return local->ops->config_interface(local_to_hw(local),
227 &sdata->vif, &conf);
228}
229
230int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) 155int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
231{ 156{
232 struct ieee80211_channel *chan; 157 struct ieee80211_channel *chan, *scan_chan;
233 int ret = 0; 158 int ret = 0;
234 int power; 159 int power;
235 enum nl80211_channel_type channel_type; 160 enum nl80211_channel_type channel_type;
236 161
237 might_sleep(); 162 might_sleep();
238 163
239 if (local->sw_scanning) { 164 scan_chan = local->scan_channel;
240 chan = local->scan_channel; 165
166 if (scan_chan) {
167 chan = scan_chan;
241 channel_type = NL80211_CHAN_NO_HT; 168 channel_type = NL80211_CHAN_NO_HT;
242 } else { 169 } else {
243 chan = local->oper_channel; 170 chan = local->oper_channel;
@@ -251,7 +178,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
251 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 178 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
252 } 179 }
253 180
254 if (local->sw_scanning) 181 if (scan_chan)
255 power = chan->max_power; 182 power = chan->max_power;
256 else 183 else
257 power = local->power_constr_level ? 184 power = local->power_constr_level ?
@@ -267,7 +194,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
267 } 194 }
268 195
269 if (changed && local->open_count) { 196 if (changed && local->open_count) {
270 ret = local->ops->config(local_to_hw(local), changed); 197 ret = drv_config(local, changed);
271 /* 198 /*
272 * Goal: 199 * Goal:
273 * HW reconfiguration should never fail, the driver has told 200 * HW reconfiguration should never fail, the driver has told
@@ -292,18 +219,78 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
292 u32 changed) 219 u32 changed)
293{ 220{
294 struct ieee80211_local *local = sdata->local; 221 struct ieee80211_local *local = sdata->local;
222 static const u8 zero[ETH_ALEN] = { 0 };
295 223
296 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 224 if (!changed)
297 return; 225 return;
298 226
299 if (!changed) 227 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
228 /*
229 * While not associated, claim a BSSID of all-zeroes
230 * so that drivers don't do any weird things with the
231 * BSSID at that time.
232 */
233 if (sdata->vif.bss_conf.assoc)
234 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
235 else
236 sdata->vif.bss_conf.bssid = zero;
237 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
238 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
239 else if (sdata->vif.type == NL80211_IFTYPE_AP)
240 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr;
241 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
242 sdata->vif.bss_conf.bssid = zero;
243 } else {
244 WARN_ON(1);
300 return; 245 return;
246 }
247
248 switch (sdata->vif.type) {
249 case NL80211_IFTYPE_AP:
250 case NL80211_IFTYPE_ADHOC:
251 case NL80211_IFTYPE_MESH_POINT:
252 break;
253 default:
254 /* do not warn to simplify caller in scan.c */
255 changed &= ~BSS_CHANGED_BEACON_ENABLED;
256 if (WARN_ON(changed & BSS_CHANGED_BEACON))
257 return;
258 break;
259 }
260
261 if (changed & BSS_CHANGED_BEACON_ENABLED) {
262 if (local->sw_scanning) {
263 sdata->vif.bss_conf.enable_beacon = false;
264 } else {
265 /*
266 * Beacon should be enabled, but AP mode must
267 * check whether there is a beacon configured.
268 */
269 switch (sdata->vif.type) {
270 case NL80211_IFTYPE_AP:
271 sdata->vif.bss_conf.enable_beacon =
272 !!rcu_dereference(sdata->u.ap.beacon);
273 break;
274 case NL80211_IFTYPE_ADHOC:
275 sdata->vif.bss_conf.enable_beacon =
276 !!rcu_dereference(sdata->u.ibss.presp);
277 break;
278 case NL80211_IFTYPE_MESH_POINT:
279 sdata->vif.bss_conf.enable_beacon = true;
280 break;
281 default:
282 /* not reached */
283 WARN_ON(1);
284 break;
285 }
286 }
287 }
288
289 drv_bss_info_changed(local, &sdata->vif,
290 &sdata->vif.bss_conf, changed);
301 291
302 if (local->ops->bss_info_changed) 292 /* DEPRECATED */
303 local->ops->bss_info_changed(local_to_hw(local), 293 local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
304 &sdata->vif,
305 &sdata->vif.bss_conf,
306 changed);
307} 294}
308 295
309u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 296u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -382,60 +369,12 @@ static void ieee80211_tasklet_handler(unsigned long data)
382 } 369 }
383} 370}
384 371
385/* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
386 * make a prepared TX frame (one that has been given to hw) to look like brand
387 * new IEEE 802.11 frame that is ready to go through TX processing again.
388 */
389static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
390 struct ieee80211_key *key,
391 struct sk_buff *skb)
392{
393 unsigned int hdrlen, iv_len, mic_len;
394 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
395
396 hdrlen = ieee80211_hdrlen(hdr->frame_control);
397
398 if (!key)
399 goto no_key;
400
401 switch (key->conf.alg) {
402 case ALG_WEP:
403 iv_len = WEP_IV_LEN;
404 mic_len = WEP_ICV_LEN;
405 break;
406 case ALG_TKIP:
407 iv_len = TKIP_IV_LEN;
408 mic_len = TKIP_ICV_LEN;
409 break;
410 case ALG_CCMP:
411 iv_len = CCMP_HDR_LEN;
412 mic_len = CCMP_MIC_LEN;
413 break;
414 default:
415 goto no_key;
416 }
417
418 if (skb->len >= hdrlen + mic_len &&
419 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
420 skb_trim(skb, skb->len - mic_len);
421 if (skb->len >= hdrlen + iv_len) {
422 memmove(skb->data + iv_len, skb->data, hdrlen);
423 hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
424 }
425
426no_key:
427 if (ieee80211_is_data_qos(hdr->frame_control)) {
428 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
429 memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
430 hdrlen - IEEE80211_QOS_CTL_LEN);
431 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
432 }
433}
434
435static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, 372static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
436 struct sta_info *sta, 373 struct sta_info *sta,
437 struct sk_buff *skb) 374 struct sk_buff *skb)
438{ 375{
376 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
377
439 sta->tx_filtered_count++; 378 sta->tx_filtered_count++;
440 379
441 /* 380 /*
@@ -477,16 +416,15 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
477 */ 416 */
478 if (test_sta_flags(sta, WLAN_STA_PS) && 417 if (test_sta_flags(sta, WLAN_STA_PS) &&
479 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 418 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
480 ieee80211_remove_tx_extra(local, sta->key, skb);
481 skb_queue_tail(&sta->tx_filtered, skb); 419 skb_queue_tail(&sta->tx_filtered, skb);
482 return; 420 return;
483 } 421 }
484 422
485 if (!test_sta_flags(sta, WLAN_STA_PS) && !skb->requeue) { 423 if (!test_sta_flags(sta, WLAN_STA_PS) &&
424 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
486 /* Software retry the packet once */ 425 /* Software retry the packet once */
487 skb->requeue = 1; 426 info->flags |= IEEE80211_TX_INTFL_RETRIED;
488 ieee80211_remove_tx_extra(local, sta->key, skb); 427 ieee80211_add_pending_skb(local, skb);
489 dev_queue_xmit(skb);
490 return; 428 return;
491 } 429 }
492 430
@@ -696,6 +634,28 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
696} 634}
697EXPORT_SYMBOL(ieee80211_tx_status); 635EXPORT_SYMBOL(ieee80211_tx_status);
698 636
637static void ieee80211_restart_work(struct work_struct *work)
638{
639 struct ieee80211_local *local =
640 container_of(work, struct ieee80211_local, restart_work);
641
642 rtnl_lock();
643 ieee80211_reconfig(local);
644 rtnl_unlock();
645}
646
647void ieee80211_restart_hw(struct ieee80211_hw *hw)
648{
649 struct ieee80211_local *local = hw_to_local(hw);
650
651 /* use this reason, __ieee80211_resume will unblock it */
652 ieee80211_stop_queues_by_reason(hw,
653 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
654
655 schedule_work(&local->restart_work);
656}
657EXPORT_SYMBOL(ieee80211_restart_hw);
658
699struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 659struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
700 const struct ieee80211_ops *ops) 660 const struct ieee80211_ops *ops)
701{ 661{
@@ -718,9 +678,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
718 * +-------------------------+ 678 * +-------------------------+
719 * 679 *
720 */ 680 */
721 priv_size = ((sizeof(struct ieee80211_local) + 681 priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
722 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
723 priv_data_len;
724 682
725 wiphy = wiphy_new(&mac80211_config_ops, priv_size); 683 wiphy = wiphy_new(&mac80211_config_ops, priv_size);
726 684
@@ -728,17 +686,16 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
728 return NULL; 686 return NULL;
729 687
730 wiphy->privid = mac80211_wiphy_privid; 688 wiphy->privid = mac80211_wiphy_privid;
731 wiphy->max_scan_ssids = 4; 689
732 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 690 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
733 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) - 691 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
734 sizeof(struct cfg80211_bss); 692 sizeof(struct cfg80211_bss);
735 693
736 local = wiphy_priv(wiphy); 694 local = wiphy_priv(wiphy);
695
737 local->hw.wiphy = wiphy; 696 local->hw.wiphy = wiphy;
738 697
739 local->hw.priv = (char *)local + 698 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
740 ((sizeof(struct ieee80211_local) +
741 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
742 699
743 BUG_ON(!ops->tx); 700 BUG_ON(!ops->tx);
744 BUG_ON(!ops->start); 701 BUG_ON(!ops->start);
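
The priv_size and hw.priv hunk above swaps an open-coded round-up for ALIGN(). The two agree whenever the alignment is a power of two and NETDEV_ALIGN_CONST is NETDEV_ALIGN - 1; the values below (32 and 31) match that era's netdevice.h as far as can be told, so treat them as an assumption. A quick standalone check:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t sz;

	for (sz = 0; sz < 4096; sz++) {
		size_t old_way = (sz + NETDEV_ALIGN_CONST) &
				 ~(size_t)NETDEV_ALIGN_CONST;
		size_t new_way = ALIGN(sz, NETDEV_ALIGN);

		assert(old_way == new_way);	/* identical rounding */
	}
	printf("ALIGN(100, 32) = %zu\n", ALIGN((size_t)100, NETDEV_ALIGN));
	return 0;
}

Keeping the driver's private area at an aligned offset preserves the old guarantee while stating the intent once instead of repeating the mask arithmetic in two places.
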
@@ -752,15 +709,14 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
752 /* set up some defaults */ 709 /* set up some defaults */
753 local->hw.queues = 1; 710 local->hw.queues = 1;
754 local->hw.max_rates = 1; 711 local->hw.max_rates = 1;
755 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 712 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
756 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 713 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
757 local->hw.conf.long_frame_max_tx_count = 4;
758 local->hw.conf.short_frame_max_tx_count = 7;
759 local->hw.conf.radio_enabled = true; 714 local->hw.conf.radio_enabled = true;
760 local->user_power_level = -1; 715 local->user_power_level = -1;
761 716
762 INIT_LIST_HEAD(&local->interfaces); 717 INIT_LIST_HEAD(&local->interfaces);
763 mutex_init(&local->iflist_mtx); 718 mutex_init(&local->iflist_mtx);
719 mutex_init(&local->scan_mtx);
764 720
765 spin_lock_init(&local->key_lock); 721 spin_lock_init(&local->key_lock);
766 722
@@ -768,6 +724,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
768 724
769 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 725 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
770 726
727 INIT_WORK(&local->restart_work, ieee80211_restart_work);
728
771 INIT_WORK(&local->dynamic_ps_enable_work, 729 INIT_WORK(&local->dynamic_ps_enable_work,
772 ieee80211_dynamic_ps_enable_work); 730 ieee80211_dynamic_ps_enable_work);
773 INIT_WORK(&local->dynamic_ps_disable_work, 731 INIT_WORK(&local->dynamic_ps_disable_work,
@@ -821,7 +779,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
821 enum ieee80211_band band; 779 enum ieee80211_band band;
822 struct net_device *mdev; 780 struct net_device *mdev;
823 struct ieee80211_master_priv *mpriv; 781 struct ieee80211_master_priv *mpriv;
824 int channels, i, j; 782 int channels, i, j, max_bitrates;
783 bool supp_ht;
784 static const u32 cipher_suites[] = {
785 WLAN_CIPHER_SUITE_WEP40,
786 WLAN_CIPHER_SUITE_WEP104,
787 WLAN_CIPHER_SUITE_TKIP,
788 WLAN_CIPHER_SUITE_CCMP,
789
790 /* keep last -- depends on hw flags! */
791 WLAN_CIPHER_SUITE_AES_CMAC
792 };
825 793
826 /* 794 /*
827 * generic code guarantees at least one band, 795 * generic code guarantees at least one band,
@@ -829,18 +797,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
829 * that hw.conf.channel is assigned 797 * that hw.conf.channel is assigned
830 */ 798 */
831 channels = 0; 799 channels = 0;
800 max_bitrates = 0;
801 supp_ht = false;
832 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 802 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
833 struct ieee80211_supported_band *sband; 803 struct ieee80211_supported_band *sband;
834 804
835 sband = local->hw.wiphy->bands[band]; 805 sband = local->hw.wiphy->bands[band];
836 if (sband && !local->oper_channel) { 806 if (!sband)
807 continue;
808 if (!local->oper_channel) {
837 /* init channel we're on */ 809 /* init channel we're on */
838 local->hw.conf.channel = 810 local->hw.conf.channel =
839 local->oper_channel = 811 local->oper_channel = &sband->channels[0];
840 local->scan_channel = &sband->channels[0]; 812 local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
841 } 813 }
842 if (sband) 814 channels += sband->n_channels;
843 channels += sband->n_channels; 815
816 if (max_bitrates < sband->n_bitrates)
817 max_bitrates = sband->n_bitrates;
818 supp_ht = supp_ht || sband->ht_cap.ht_supported;
844 } 819 }
845 820
846 local->int_scan_req.n_channels = channels; 821 local->int_scan_req.n_channels = channels;
@@ -860,6 +835,37 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
860 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 835 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
861 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 836 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
862 837
838 /*
839 * Calculate scan IE length -- we need this to alloc
840 * memory and to subtract from the driver limit. It
841 * includes the (extended) supported rates and HT
842 * information -- SSID is the driver's responsibility.
843 */
844 local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */
845 if (supp_ht)
846 local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
847
848 if (!local->ops->hw_scan) {
849 /* For hw_scan, driver needs to set these up. */
850 local->hw.wiphy->max_scan_ssids = 4;
851 local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
852 }
853
854 /*
855 * If the driver supports any scan IEs, then assume the
856 * limit includes the IEs mac80211 will add, otherwise
857 * leave it at zero and let the driver sort it out; we
858 * still pass our IEs to the driver but userspace will
859 * not be allowed to in that case.
860 */
861 if (local->hw.wiphy->max_scan_ie_len)
862 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
863
864 local->hw.wiphy->cipher_suites = cipher_suites;
865 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
866 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
867 local->hw.wiphy->n_cipher_suites--;
868
863 result = wiphy_register(local->hw.wiphy); 869 result = wiphy_register(local->hw.wiphy);
864 if (result < 0) 870 if (result < 0)
865 goto fail_wiphy_register; 871 goto fail_wiphy_register;
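
The scan_ies_len accounting above reserves a 2-byte element header plus one byte per rate for each of the supported-rates and extended-supported-rates elements (hence 4 + max_bitrates), plus a 2-byte header and the fixed HT capability body when any band supports HT. Assuming the 26-byte HT body of the contemporary struct ieee80211_ht_cap, the arithmetic works out as:

#include <stdbool.h>
#include <stdio.h>

#define HT_CAP_IE_BODY 26	/* assumed sizeof(struct ieee80211_ht_cap) */

static int scan_ies_len(int max_bitrates, bool supp_ht)
{
	int len = 4 + max_bitrates;	/* supp rates + ext supp rates */

	if (supp_ht)
		len += 2 + HT_CAP_IE_BODY;
	return len;
}

int main(void)
{
	/* e.g. 12 bitrates with HT: 4 + 12 + 28 = 44 bytes reserved,
	 * which register_hw() then subtracts from the driver's IE limit */
	printf("%d\n", scan_ies_len(12, true));
	return 0;
}
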
@@ -898,9 +904,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
898 904
899 debugfs_hw_add(local); 905 debugfs_hw_add(local);
900 906
901 if (local->hw.conf.beacon_int < 10)
902 local->hw.conf.beacon_int = 100;
903
904 if (local->hw.max_listen_interval == 0) 907 if (local->hw.max_listen_interval == 0)
905 local->hw.max_listen_interval = 1; 908 local->hw.max_listen_interval = 1;
906 909
@@ -965,25 +968,38 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
965 } 968 }
966 } 969 }
967 970
971 local->network_latency_notifier.notifier_call =
972 ieee80211_max_network_latency;
973 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
974 &local->network_latency_notifier);
975
976 if (result) {
977 rtnl_lock();
978 goto fail_pm_qos;
979 }
980
968 return 0; 981 return 0;
969 982
970fail_rate: 983 fail_pm_qos:
984 ieee80211_led_exit(local);
985 ieee80211_remove_interfaces(local);
986 fail_rate:
971 unregister_netdevice(local->mdev); 987 unregister_netdevice(local->mdev);
972 local->mdev = NULL; 988 local->mdev = NULL;
973fail_dev: 989 fail_dev:
974 rtnl_unlock(); 990 rtnl_unlock();
975 ieee80211_wep_free(local); 991 ieee80211_wep_free(local);
976fail_wep: 992 fail_wep:
977 sta_info_stop(local); 993 sta_info_stop(local);
978fail_sta_info: 994 fail_sta_info:
979 debugfs_hw_del(local); 995 debugfs_hw_del(local);
980 destroy_workqueue(local->hw.workqueue); 996 destroy_workqueue(local->hw.workqueue);
981fail_workqueue: 997 fail_workqueue:
982 if (local->mdev) 998 if (local->mdev)
983 free_netdev(local->mdev); 999 free_netdev(local->mdev);
984fail_mdev_alloc: 1000 fail_mdev_alloc:
985 wiphy_unregister(local->hw.wiphy); 1001 wiphy_unregister(local->hw.wiphy);
986fail_wiphy_register: 1002 fail_wiphy_register:
987 kfree(local->int_scan_req.channels); 1003 kfree(local->int_scan_req.channels);
988 return result; 1004 return result;
989} 1005}
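
The reworked failure path above keeps the standard kernel unwind shape: one label per completed setup step, entered in reverse order, so a pm_qos registration failure tears down only what was already brought up. A generic sketch of the pattern, with mallocs standing in for the real setup steps:

#include <stdio.h>
#include <stdlib.h>

static int register_everything(int fail_late)
{
	char *a, *b;

	a = malloc(16);				/* step 1 */
	if (!a)
		goto fail_step1;
	b = malloc(16);				/* step 2 */
	if (!b)
		goto fail_step2;
	if (fail_late)				/* some later step failed */
		goto fail_late;

	free(b);				/* demo only: normally kept */
	free(a);
	return 0;

 fail_late:
	free(b);				/* undo step 2 */
 fail_step2:
	free(a);				/* undo step 1 */
 fail_step1:
	return -1;
}

int main(void)
{
	printf("%d %d\n", register_everything(0), register_everything(1));
	return 0;
}
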
@@ -996,6 +1012,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
996 tasklet_kill(&local->tx_pending_tasklet); 1012 tasklet_kill(&local->tx_pending_tasklet);
997 tasklet_kill(&local->tasklet); 1013 tasklet_kill(&local->tasklet);
998 1014
1015 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
1016 &local->network_latency_notifier);
1017
999 rtnl_lock(); 1018 rtnl_lock();
1000 1019
1001 /* 1020 /*
@@ -1038,6 +1057,7 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
1038 struct ieee80211_local *local = hw_to_local(hw); 1057 struct ieee80211_local *local = hw_to_local(hw);
1039 1058
1040 mutex_destroy(&local->iflist_mtx); 1059 mutex_destroy(&local->iflist_mtx);
1060 mutex_destroy(&local->scan_mtx);
1041 1061
1042 wiphy_free(local->hw.wiphy); 1062 wiphy_free(local->hw.wiphy);
1043} 1063}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9a3e5de0410a..fc712e60705d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -21,6 +21,9 @@
21#define CAPAB_OFFSET 17 21#define CAPAB_OFFSET 17
22#define ACCEPT_PLINKS 0x80 22#define ACCEPT_PLINKS 0x80
23 23
24#define TMR_RUNNING_HK 0
25#define TMR_RUNNING_MP 1
26
24int mesh_allocated; 27int mesh_allocated;
25static struct kmem_cache *rm_cache; 28static struct kmem_cache *rm_cache;
26 29
@@ -45,6 +48,12 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
45 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 48 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
46 49
47 ifmsh->housekeeping = true; 50 ifmsh->housekeeping = true;
51
52 if (local->quiescing) {
53 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
54 return;
55 }
56
48 queue_work(local->hw.workqueue, &ifmsh->work); 57 queue_work(local->hw.workqueue, &ifmsh->work);
49} 58}
50 59
@@ -343,6 +352,11 @@ static void ieee80211_mesh_path_timer(unsigned long data)
343 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 352 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
344 struct ieee80211_local *local = sdata->local; 353 struct ieee80211_local *local = sdata->local;
345 354
355 if (local->quiescing) {
356 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
357 return;
358 }
359
346 queue_work(local->hw.workqueue, &ifmsh->work); 360 queue_work(local->hw.workqueue, &ifmsh->work);
347} 361}
348 362
@@ -417,13 +431,39 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
417 431
418 free_plinks = mesh_plink_availables(sdata); 432 free_plinks = mesh_plink_availables(sdata);
419 if (free_plinks != sdata->u.mesh.accepting_plinks) 433 if (free_plinks != sdata->u.mesh.accepting_plinks)
420 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 434 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
421 435
422 ifmsh->housekeeping = false; 436 ifmsh->housekeeping = false;
423 mod_timer(&ifmsh->housekeeping_timer, 437 mod_timer(&ifmsh->housekeeping_timer,
424 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 438 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
425} 439}
426 440
441#ifdef CONFIG_PM
442void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
443{
444 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
445
446 /* might restart the timer but that doesn't matter */
447 cancel_work_sync(&ifmsh->work);
448
449 /* use atomic bitops in case both timers fire at the same time */
450
451 if (del_timer_sync(&ifmsh->housekeeping_timer))
452 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
453 if (del_timer_sync(&ifmsh->mesh_path_timer))
454 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
455}
456
457void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
458{
459 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
460
461 if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
462 add_timer(&ifmsh->housekeeping_timer);
463 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
464 add_timer(&ifmsh->mesh_path_timer);
465}
466#endif
427 467
428void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) 468void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
429{ 469{
@@ -432,8 +472,8 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
432 472
433 ifmsh->housekeeping = true; 473 ifmsh->housekeeping = true;
434 queue_work(local->hw.workqueue, &ifmsh->work); 474 queue_work(local->hw.workqueue, &ifmsh->work);
435 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON | 475 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
436 IEEE80211_IFCC_BEACON_ENABLED); 476 BSS_CHANGED_BEACON_ENABLED);
437} 477}
438 478
439void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 479void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
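
The quiesce/restart pair added above rests on one contract: del_timer_sync() reports whether it cancelled a pending timer, that fact is parked in a flags word across suspend, and restart re-arms only what was actually running. A userspace model of the bookkeeping (plain flag ops stand in for the atomic bitops):

#include <stdbool.h>
#include <stdio.h>

#define TMR_RUNNING_HK 0
#define TMR_RUNNING_MP 1

struct model_timer { bool pending; };

static bool del_timer_sync_model(struct model_timer *t)
{
	bool was = t->pending;

	t->pending = false;
	return was;		/* true if a pending timer was cancelled */
}

int main(void)
{
	struct model_timer hk = { .pending = true }, mp = { .pending = false };
	unsigned long running = 0;

	/* quiesce: remember which timers were live */
	if (del_timer_sync_model(&hk))
		running |= 1UL << TMR_RUNNING_HK;
	if (del_timer_sync_model(&mp))
		running |= 1UL << TMR_RUNNING_MP;

	/* restart: only the housekeeping timer comes back */
	if (running & (1UL << TMR_RUNNING_HK))
		hk.pending = true;
	if (running & (1UL << TMR_RUNNING_MP))
		mp.pending = true;

	printf("hk=%d mp=%d\n", hk.pending, mp.pending);
	return 0;
}

The local->quiescing checks added to the timer and work handlers close the race the flags alone cannot: a timer that fires during suspend records itself in timers_running instead of queuing work.
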
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index d891d7ddccd7..c7d72819cdd2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -191,12 +191,8 @@ struct mesh_rmc {
191#define PLINK_CATEGORY 30 191#define PLINK_CATEGORY 30
192#define MESH_PATH_SEL_CATEGORY 32 192#define MESH_PATH_SEL_CATEGORY 32
193 193
194/* Mesh Header Flags */
195#define IEEE80211S_FLAGS_AE 0x3
196
197/* Public interfaces */ 194/* Public interfaces */
198/* Various */ 195/* Various */
199int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
200int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 196int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
201 struct ieee80211_sub_if_data *sdata); 197 struct ieee80211_sub_if_data *sdata);
202int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 198int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
@@ -267,6 +263,8 @@ void mesh_path_timer(unsigned long data);
267void mesh_path_flush_by_nexthop(struct sta_info *sta); 263void mesh_path_flush_by_nexthop(struct sta_info *sta);
268void mesh_path_discard_frame(struct sk_buff *skb, 264void mesh_path_discard_frame(struct sk_buff *skb,
269 struct ieee80211_sub_if_data *sdata); 265 struct ieee80211_sub_if_data *sdata);
266void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
267void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
270 268
271#ifdef CONFIG_MAC80211_MESH 269#ifdef CONFIG_MAC80211_MESH
272extern int mesh_allocated; 270extern int mesh_allocated;
@@ -294,10 +292,20 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
294 292
295void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 293void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
296 294
295void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
296void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
297void mesh_plink_quiesce(struct sta_info *sta);
298void mesh_plink_restart(struct sta_info *sta);
297#else 299#else
298#define mesh_allocated 0 300#define mesh_allocated 0
299static inline void 301static inline void
300ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} 302ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
303static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
304{}
305static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
306{}
307static inline void mesh_plink_quiesce(struct sta_info *sta) {}
308static inline void mesh_plink_restart(struct sta_info *sta) {}
301#endif 309#endif
302 310
303#endif /* IEEE80211S_H */ 311#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 60b35accda91..003cb470ac84 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -836,8 +836,14 @@ void mesh_path_timer(unsigned long data)
836 mpath = rcu_dereference(mpath); 836 mpath = rcu_dereference(mpath);
837 if (!mpath) 837 if (!mpath)
838 goto endmpathtimer; 838 goto endmpathtimer;
839 spin_lock_bh(&mpath->state_lock);
840 sdata = mpath->sdata; 839 sdata = mpath->sdata;
840
841 if (sdata->local->quiescing) {
842 rcu_read_unlock();
843 return;
844 }
845
846 spin_lock_bh(&mpath->state_lock);
841 if (mpath->flags & MESH_PATH_RESOLVED || 847 if (mpath->flags & MESH_PATH_RESOLVED ||
842 (!(mpath->flags & MESH_PATH_RESOLVING))) 848 (!(mpath->flags & MESH_PATH_RESOLVING)))
843 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 849 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a8bbdeca013a..cb14253587f1 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -266,6 +266,11 @@ static void mesh_plink_timer(unsigned long data)
266 */ 266 */
267 sta = (struct sta_info *) data; 267 sta = (struct sta_info *) data;
268 268
269 if (sta->sdata->local->quiescing) {
270 sta->plink_timer_was_running = true;
271 return;
272 }
273
269 spin_lock_bh(&sta->lock); 274 spin_lock_bh(&sta->lock);
270 if (sta->ignore_plink_timer) { 275 if (sta->ignore_plink_timer) {
271 sta->ignore_plink_timer = false; 276 sta->ignore_plink_timer = false;
@@ -322,6 +327,22 @@ static void mesh_plink_timer(unsigned long data)
322 } 327 }
323} 328}
324 329
330#ifdef CONFIG_PM
331void mesh_plink_quiesce(struct sta_info *sta)
332{
333 if (del_timer_sync(&sta->plink_timer))
334 sta->plink_timer_was_running = true;
335}
336
337void mesh_plink_restart(struct sta_info *sta)
338{
339 if (sta->plink_timer_was_running) {
340 add_timer(&sta->plink_timer);
341 sta->plink_timer_was_running = false;
342 }
343}
344#endif
345
325static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) 346static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
326{ 347{
327 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); 348 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 132938b073dc..d779c57a8220 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -17,10 +17,13 @@
17#include <linux/if_arp.h> 17#include <linux/if_arp.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h>
21#include <linux/crc32.h>
20#include <net/mac80211.h> 22#include <net/mac80211.h>
21#include <asm/unaligned.h> 23#include <asm/unaligned.h>
22 24
23#include "ieee80211_i.h" 25#include "ieee80211_i.h"
26#include "driver-ops.h"
24#include "rate.h" 27#include "rate.h"
25#include "led.h" 28#include "led.h"
26 29
@@ -30,9 +33,13 @@
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 33#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3 34#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 35#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
36#define IEEE80211_PROBE_WAIT (HZ / 5)
33#define IEEE80211_PROBE_IDLE_TIME (60 * HZ) 37#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
35 39
40#define TMR_RUNNING_TIMER 0
41#define TMR_RUNNING_CHANSW 1
42
36/* utils */ 43/* utils */
37static int ecw2cw(int ecw) 44static int ecw2cw(int ecw)
38{ 45{
@@ -80,6 +87,92 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
80 return count; 87 return count;
81} 88}
82 89
90/*
91 * ieee80211_enable_ht should be called only after the operating band
92 * has been determined as ht configuration depends on the hw's
93 * HT abilities for a specific band.
94 */
95static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
96 struct ieee80211_ht_info *hti,
97 u16 ap_ht_cap_flags)
98{
99 struct ieee80211_local *local = sdata->local;
100 struct ieee80211_supported_band *sband;
101 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
102 struct sta_info *sta;
103 u32 changed = 0;
104 u16 ht_opmode;
105 bool enable_ht = true, ht_changed;
106 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
107
108 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
109
110 /* HT is not supported */
111 if (!sband->ht_cap.ht_supported)
112 enable_ht = false;
113
114 /* check that channel matches the right operating channel */
115 if (local->hw.conf.channel->center_freq !=
116 ieee80211_channel_to_frequency(hti->control_chan))
117 enable_ht = false;
118
119 if (enable_ht) {
120 channel_type = NL80211_CHAN_HT20;
121
122 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
123 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
124 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
125 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
126 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
127 if (!(local->hw.conf.channel->flags &
128 IEEE80211_CHAN_NO_HT40PLUS))
129 channel_type = NL80211_CHAN_HT40PLUS;
130 break;
131 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
132 if (!(local->hw.conf.channel->flags &
133 IEEE80211_CHAN_NO_HT40MINUS))
134 channel_type = NL80211_CHAN_HT40MINUS;
135 break;
136 }
137 }
138 }
139
140 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
141 channel_type != local->hw.conf.channel_type;
142
143 local->oper_channel_type = channel_type;
144
145 if (ht_changed) {
146 /* channel_type change automatically detected */
147 ieee80211_hw_config(local, 0);
148
149 rcu_read_lock();
150
151 sta = sta_info_get(local, ifmgd->bssid);
152 if (sta)
153 rate_control_rate_update(local, sband, sta,
154 IEEE80211_RC_HT_CHANGED);
155
156 rcu_read_unlock();
157 }
158
159 /* disable HT */
160 if (!enable_ht)
161 return 0;
162
163 ht_opmode = le16_to_cpu(hti->operation_mode);
164
165 /* if bss configuration changed store the new one */
166 if (!sdata->ht_opmode_valid ||
167 sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
168 changed |= BSS_CHANGED_HT;
169 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
170 sdata->ht_opmode_valid = true;
171 }
172
173 return changed;
174}
175
83/* frame sending functions */ 176/* frame sending functions */
84 177
85static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 178static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
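
The channel-type selection in ieee80211_enable_ht() above can be read as a small decision table. The sketch below condenses it to the secondary-channel offset and the local channel flags, deliberately omitting the 40 MHz intolerance and capability tests the full function applies; the constants are stand-ins with the same meaning as the nl80211 ones:

#include <stdbool.h>
#include <stdio.h>

enum chan_type { NO_HT, HT20, HT40PLUS, HT40MINUS };

#define SEC_NONE	0	/* no secondary channel advertised */
#define SEC_ABOVE	1
#define SEC_BELOW	3
#define FLAG_NO_HT40PLUS	(1 << 0)
#define FLAG_NO_HT40MINUS	(1 << 1)

static enum chan_type pick(bool ht_supported, int sec_offset,
			   unsigned int chan_flags)
{
	if (!ht_supported)
		return NO_HT;
	if (sec_offset == SEC_ABOVE && !(chan_flags & FLAG_NO_HT40PLUS))
		return HT40PLUS;
	if (sec_offset == SEC_BELOW && !(chan_flags & FLAG_NO_HT40MINUS))
		return HT40MINUS;
	return HT20;		/* HT, but 20 MHz only */
}

int main(void)
{
	printf("%d\n", pick(true, SEC_ABOVE, 0));		  /* HT40+ */
	printf("%d\n", pick(true, SEC_ABOVE, FLAG_NO_HT40PLUS)); /* HT20 */
	printf("%d\n", pick(true, SEC_NONE, 0));		  /* HT20 */
	printf("%d\n", pick(false, SEC_ABOVE, 0));		  /* no HT */
	return 0;
}
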
@@ -263,13 +356,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
263 356
264 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 357 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
265 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 358 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
266 if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) { 359 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
267 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 360 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
268 cap &= ~IEEE80211_HT_CAP_SGI_40; 361 cap &= ~IEEE80211_HT_CAP_SGI_40;
269 } 362 }
270 break; 363 break;
271 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 364 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
272 if (flags & IEEE80211_CHAN_NO_FAT_BELOW) { 365 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
273 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 366 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
274 cap &= ~IEEE80211_HT_CAP_SGI_40; 367 cap &= ~IEEE80211_HT_CAP_SGI_40;
275 } 368 }
@@ -325,6 +418,10 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
325 /* u.deauth.reason_code == u.disassoc.reason_code */ 418 /* u.deauth.reason_code == u.disassoc.reason_code */
326 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 419 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
327 420
421 if (stype == IEEE80211_STYPE_DEAUTH)
422 cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, skb->len);
423 else
424 cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, skb->len);
328 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); 425 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
329} 426}
330 427
@@ -359,6 +456,277 @@ void ieee80211_send_pspoll(struct ieee80211_local *local,
359 ieee80211_tx_skb(sdata, skb, 0); 456 ieee80211_tx_skb(sdata, skb, 0);
360} 457}
361 458
459void ieee80211_send_nullfunc(struct ieee80211_local *local,
460 struct ieee80211_sub_if_data *sdata,
461 int powersave)
462{
463 struct sk_buff *skb;
464 struct ieee80211_hdr *nullfunc;
465 __le16 fc;
466
467 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
468 return;
469
470 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
471 if (!skb) {
472 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
473 "frame\n", sdata->dev->name);
474 return;
475 }
476 skb_reserve(skb, local->hw.extra_tx_headroom);
477
478 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
479 memset(nullfunc, 0, 24);
480 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
481 IEEE80211_FCTL_TODS);
482 if (powersave)
483 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
484 nullfunc->frame_control = fc;
485 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
486 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
487 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
488
489 ieee80211_tx_skb(sdata, skb, 0);
490}
491
492/* spectrum management related things */
493static void ieee80211_chswitch_work(struct work_struct *work)
494{
495 struct ieee80211_sub_if_data *sdata =
496 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
497 struct ieee80211_bss *bss;
498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
499
500 if (!netif_running(sdata->dev))
501 return;
502
503 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
504 sdata->local->hw.conf.channel->center_freq,
505 ifmgd->ssid, ifmgd->ssid_len);
506 if (!bss)
507 goto exit;
508
509 sdata->local->oper_channel = sdata->local->csa_channel;
510 /* XXX: shouldn't really modify cfg80211-owned data! */
511 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
512 bss->cbss.channel = sdata->local->oper_channel;
513
514 ieee80211_rx_bss_put(sdata->local, bss);
515exit:
516 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
517 ieee80211_wake_queues_by_reason(&sdata->local->hw,
518 IEEE80211_QUEUE_STOP_REASON_CSA);
519}
520
521static void ieee80211_chswitch_timer(unsigned long data)
522{
523 struct ieee80211_sub_if_data *sdata =
524 (struct ieee80211_sub_if_data *) data;
525 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
526
527 if (sdata->local->quiescing) {
528 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
529 return;
530 }
531
532 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
533}
534
535void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
536 struct ieee80211_channel_sw_ie *sw_elem,
537 struct ieee80211_bss *bss)
538{
539 struct ieee80211_channel *new_ch;
540 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
541 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
542
543 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
544 return;
545
546 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
547 return;
548
549 /* Disregard subsequent beacons if we are already running a timer
550 processing a CSA */
551
552 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
553 return;
554
555 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
556 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
557 return;
558
559 sdata->local->csa_channel = new_ch;
560
561 if (sw_elem->count <= 1) {
562 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
563 } else {
564 ieee80211_stop_queues_by_reason(&sdata->local->hw,
565 IEEE80211_QUEUE_STOP_REASON_CSA);
566 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
567 mod_timer(&ifmgd->chswitch_timer,
568 jiffies +
569 msecs_to_jiffies(sw_elem->count *
570 bss->cbss.beacon_interval));
571 }
572}
573
574static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
575 u16 capab_info, u8 *pwr_constr_elem,
576 u8 pwr_constr_elem_len)
577{
578 struct ieee80211_conf *conf = &sdata->local->hw.conf;
579
580 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
581 return;
582
583 /* Power constraint IE length should be 1 octet */
584 if (pwr_constr_elem_len != 1)
585 return;
586
587 if ((*pwr_constr_elem <= conf->channel->max_power) &&
588 (*pwr_constr_elem != sdata->local->power_constr_level)) {
589 sdata->local->power_constr_level = *pwr_constr_elem;
590 ieee80211_hw_config(sdata->local, 0);
591 }
592}
593
594/* powersave */
595static void ieee80211_enable_ps(struct ieee80211_local *local,
596 struct ieee80211_sub_if_data *sdata)
597{
598 struct ieee80211_conf *conf = &local->hw.conf;
599
600 /*
601 * If we are scanning right now then the parameters will
602 * take effect when scan finishes.
603 */
604 if (local->hw_scanning || local->sw_scanning)
605 return;
606
607 if (conf->dynamic_ps_timeout > 0 &&
608 !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
609 mod_timer(&local->dynamic_ps_timer, jiffies +
610 msecs_to_jiffies(conf->dynamic_ps_timeout));
611 } else {
612 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
613 ieee80211_send_nullfunc(local, sdata, 1);
614 conf->flags |= IEEE80211_CONF_PS;
615 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
616 }
617}
618
619static void ieee80211_change_ps(struct ieee80211_local *local)
620{
621 struct ieee80211_conf *conf = &local->hw.conf;
622
623 if (local->ps_sdata) {
624 ieee80211_enable_ps(local, local->ps_sdata);
625 } else if (conf->flags & IEEE80211_CONF_PS) {
626 conf->flags &= ~IEEE80211_CONF_PS;
627 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
628 del_timer_sync(&local->dynamic_ps_timer);
629 cancel_work_sync(&local->dynamic_ps_enable_work);
630 }
631}
632
633/* need to hold RTNL or interface lock */
634void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
635{
636 struct ieee80211_sub_if_data *sdata, *found = NULL;
637 int count = 0;
638
639 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
640 local->ps_sdata = NULL;
641 return;
642 }
643
644 list_for_each_entry(sdata, &local->interfaces, list) {
645 if (!netif_running(sdata->dev))
646 continue;
647 if (sdata->vif.type != NL80211_IFTYPE_STATION)
648 continue;
649 found = sdata;
650 count++;
651 }
652
653 if (count == 1 && found->u.mgd.powersave &&
654 (found->u.mgd.flags & IEEE80211_STA_ASSOCIATED) &&
655 !(found->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL)) {
656 s32 beaconint_us;
657
658 if (latency < 0)
659 latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY);
660
661 beaconint_us = ieee80211_tu_to_usec(
662 found->vif.bss_conf.beacon_int);
663
664 if (beaconint_us > latency) {
665 local->ps_sdata = NULL;
666 } else {
667 u8 dtimper = found->vif.bss_conf.dtim_period;
668 int maxslp = 1;
669
670 if (dtimper > 1)
671 maxslp = min_t(int, dtimper,
672 latency / beaconint_us);
673
674 local->hw.conf.max_sleep_period = maxslp;
675 local->ps_sdata = found;
676 }
677 } else {
678 local->ps_sdata = NULL;
679 }
680
681 ieee80211_change_ps(local);
682}
683
684void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
685{
686 struct ieee80211_local *local =
687 container_of(work, struct ieee80211_local,
688 dynamic_ps_disable_work);
689
690 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
691 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
692 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
693 }
694
695 ieee80211_wake_queues_by_reason(&local->hw,
696 IEEE80211_QUEUE_STOP_REASON_PS);
697}
698
699void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
700{
701 struct ieee80211_local *local =
702 container_of(work, struct ieee80211_local,
703 dynamic_ps_enable_work);
704 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
705
706 /* can only happen when PS was just disabled anyway */
707 if (!sdata)
708 return;
709
710 if (local->hw.conf.flags & IEEE80211_CONF_PS)
711 return;
712
713 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
714 ieee80211_send_nullfunc(local, sdata, 1);
715
716 local->hw.conf.flags |= IEEE80211_CONF_PS;
717 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
718}
719
720void ieee80211_dynamic_ps_timer(unsigned long data)
721{
722 struct ieee80211_local *local = (void *) data;
723
724 if (local->quiescing)
725 return;
726
727 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
728}
729
362/* MLME */ 730/* MLME */
363static void ieee80211_sta_wmm_params(struct ieee80211_local *local, 731static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
364 struct ieee80211_if_managed *ifmgd, 732 struct ieee80211_if_managed *ifmgd,
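
ieee80211_recalc_ps() above gates power save on a single comparison: the beacon interval converted to microseconds (1 TU = 1024 usec) must fit within the PM-QoS network latency budget, and when it does the DTIM period caps how many beacon intervals the hardware may sleep through. The same arithmetic, worked with concrete numbers:

#include <stdio.h>

static int tu_to_usec(int tu)
{
	return tu * 1024;	/* one TU is 1024 microseconds */
}

static int max_sleep_period(int beacon_int_tu, int dtim_period, int latency_us)
{
	int beaconint_us = tu_to_usec(beacon_int_tu);
	int maxslp = 1;

	if (beaconint_us > latency_us)
		return 0;			/* budget too tight: no PS */
	if (dtim_period > 1) {
		maxslp = latency_us / beaconint_us;
		if (maxslp > dtim_period)
			maxslp = dtim_period;	/* min(dtim, latency/beacon) */
	}
	return maxslp;
}

int main(void)
{
	/* 100 TU beacons = 102400 us; DTIM 2; 500 ms budget:
	 * 500000 / 102400 = 4, clamped to the DTIM period, so 2 */
	printf("%d\n", max_sleep_period(100, 2, 500000));
	/* a 50 us budget is below one beacon interval: PS stays off */
	printf("%d\n", max_sleep_period(100, 2, 50));
	return 0;
}
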
@@ -424,41 +792,16 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
424#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 792#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
425 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 793 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
426 "cWmin=%d cWmax=%d txop=%d\n", 794 "cWmin=%d cWmax=%d txop=%d\n",
427 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, 795 wiphy_name(local->hw.wiphy), queue, aci, acm,
428 params.cw_max, params.txop); 796 params.aifs, params.cw_min, params.cw_max, params.txop);
429#endif 797#endif
430 if (local->ops->conf_tx && 798 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
431 local->ops->conf_tx(local_to_hw(local), queue, &params)) {
432 printk(KERN_DEBUG "%s: failed to set TX queue " 799 printk(KERN_DEBUG "%s: failed to set TX queue "
433 "parameters for queue %d\n", local->mdev->name, queue); 800 "parameters for queue %d\n",
434 } 801 wiphy_name(local->hw.wiphy), queue);
435 } 802 }
436} 803}
437 804
438static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
439{
440 u8 mask;
441 u8 index, indexn1, indexn2;
442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
443
444 if (unlikely(!tim || elems->tim_len < 4))
445 return false;
446
447 aid &= 0x3fff;
448 index = aid / 8;
449 mask = 1 << (aid & 7);
450
451 indexn1 = tim->bitmap_ctrl & 0xfe;
452 indexn2 = elems->tim_len + indexn1 - 4;
453
454 if (index < indexn1 || index > indexn2)
455 return false;
456
457 index -= indexn1;
458
459 return !!(tim->virtual_map[index] & mask);
460}
461
462static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 805static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
463 u16 capab, bool erp_valid, u8 erp) 806 u16 capab, bool erp_valid, u8 erp)
464{ 807{
@@ -610,6 +953,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
610 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 953 sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
611 sdata->vif.bss_conf.dtim_period = bss->dtim_period; 954 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
612 955
956 bss_info_changed |= BSS_CHANGED_BEACON_INT;
613 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 957 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
614 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 958 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
615 959
@@ -632,20 +976,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
632 * changed or not. 976 * changed or not.
633 */ 977 */
634 bss_info_changed |= BSS_CHANGED_BASIC_RATES; 978 bss_info_changed |= BSS_CHANGED_BASIC_RATES;
979
980 /* And the BSSID changed - we're associated now */
981 bss_info_changed |= BSS_CHANGED_BSSID;
982
635 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 983 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
636 984
637 if (local->powersave) { 985 /* will be same as sdata */
638 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) && 986 if (local->ps_sdata) {
639 local->hw.conf.dynamic_ps_timeout > 0) { 987 mutex_lock(&local->iflist_mtx);
640 mod_timer(&local->dynamic_ps_timer, jiffies + 988 ieee80211_recalc_ps(local, -1);
641 msecs_to_jiffies( 989 mutex_unlock(&local->iflist_mtx);
642 local->hw.conf.dynamic_ps_timeout));
643 } else {
644 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
645 ieee80211_send_nullfunc(local, sdata, 1);
646 conf->flags |= IEEE80211_CONF_PS;
647 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
648 }
649 } 990 }
650 991
651 netif_tx_start_all_queues(sdata->dev); 992 netif_tx_start_all_queues(sdata->dev);
@@ -664,7 +1005,8 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
664 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n", 1005 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
665 sdata->dev->name, ifmgd->bssid); 1006 sdata->dev->name, ifmgd->bssid);
666 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1007 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
667 ieee80211_sta_send_apinfo(sdata); 1008 ieee80211_recalc_idle(local);
1009 cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
668 1010
669 /* 1011 /*
670 * Most likely AP is not in the range so remove the 1012 * Most likely AP is not in the range so remove the
@@ -689,8 +1031,6 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
689 1031
690 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; 1032 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
691 1033
692 set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifmgd->request);
693
694 /* Direct probe is sent to broadcast address as some APs 1034 /* Direct probe is sent to broadcast address as some APs
695 * will not answer to direct packet in unassociated state. 1035 * will not answer to direct packet in unassociated state.
696 */ 1036 */
@@ -714,7 +1054,8 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
714 " timed out\n", 1054 " timed out\n",
715 sdata->dev->name, ifmgd->bssid); 1055 sdata->dev->name, ifmgd->bssid);
716 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1056 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
717 ieee80211_sta_send_apinfo(sdata); 1057 ieee80211_recalc_idle(local);
1058 cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
718 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 1059 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
719 sdata->local->hw.conf.channel->center_freq, 1060 sdata->local->hw.conf.channel->center_freq,
720 ifmgd->ssid, ifmgd->ssid_len); 1061 ifmgd->ssid, ifmgd->ssid_len);
@@ -817,9 +1158,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
817 1158
818 rcu_read_unlock(); 1159 rcu_read_unlock();
819 1160
1161 ieee80211_set_wmm_default(sdata);
1162
1163 ieee80211_recalc_idle(local);
1164
820 /* channel(_type) changes are handled by ieee80211_hw_config */ 1165 /* channel(_type) changes are handled by ieee80211_hw_config */
821 local->oper_channel_type = NL80211_CHAN_NO_HT; 1166 local->oper_channel_type = NL80211_CHAN_NO_HT;
822 1167
1168 /* on the next assoc, re-program HT parameters */
1169 sdata->ht_opmode_valid = false;
1170
823 local->power_constr_level = 0; 1171 local->power_constr_level = 0;
824 1172
825 del_timer_sync(&local->dynamic_ps_timer); 1173 del_timer_sync(&local->dynamic_ps_timer);
@@ -831,6 +1179,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
831 } 1179 }
832 1180
833 ieee80211_hw_config(local, config_changed); 1181 ieee80211_hw_config(local, config_changed);
1182
1183 /* And the BSSID changed -- not very interesting here */
1184 changed |= BSS_CHANGED_BSSID;
834 ieee80211_bss_info_change_notify(sdata, changed); 1185 ieee80211_bss_info_change_notify(sdata, changed);
835 1186
836 rcu_read_lock(); 1187 rcu_read_lock();
@@ -897,7 +1248,8 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
897 " timed out\n", 1248 " timed out\n",
898 sdata->dev->name, ifmgd->bssid); 1249 sdata->dev->name, ifmgd->bssid);
899 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1250 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
900 ieee80211_sta_send_apinfo(sdata); 1251 ieee80211_recalc_idle(local);
1252 cfg80211_send_assoc_timeout(sdata->dev, ifmgd->bssid);
901 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 1253 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
902 sdata->local->hw.conf.channel->center_freq, 1254 sdata->local->hw.conf.channel->center_freq,
903 ifmgd->ssid, ifmgd->ssid_len); 1255 ifmgd->ssid, ifmgd->ssid_len);
@@ -917,6 +1269,7 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
917 printk(KERN_DEBUG "%s: mismatch in privacy configuration and " 1269 printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
918 "mixed-cell disabled - abort association\n", sdata->dev->name); 1270 "mixed-cell disabled - abort association\n", sdata->dev->name);
919 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1271 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1272 ieee80211_recalc_idle(local);
920 return; 1273 return;
921 } 1274 }
922 1275
@@ -948,6 +1301,17 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
948 u.mgd.beacon_loss_work); 1301 u.mgd.beacon_loss_work);
949 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1302 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
950 1303
1304 /*
1305 * The driver has already reported this event and we have
1306 * already sent a probe request. Maybe the AP died and the
1307 * driver keeps reporting until we disassociate... We have
1308 * to ignore that because otherwise we would continually
1309 * reset the timer and never check whether we received a
1310 * probe response!
1311 */
1312 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
1313 return;
1314
951#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1315#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
952 if (net_ratelimit()) { 1316 if (net_ratelimit()) {
953 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " 1317 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
@@ -957,10 +1321,15 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
957#endif 1321#endif
958 1322
959 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1323 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1324
1325 mutex_lock(&sdata->local->iflist_mtx);
1326 ieee80211_recalc_ps(sdata->local, -1);
1327 mutex_unlock(&sdata->local->iflist_mtx);
1328
960 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1329 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
961 ifmgd->ssid_len, NULL, 0); 1330 ifmgd->ssid_len, NULL, 0);
962 1331
963 mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL); 1332 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
964} 1333}
965 1334
966void ieee80211_beacon_loss(struct ieee80211_vif *vif) 1335void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -977,6 +1346,7 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
977 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1346 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
978 struct ieee80211_local *local = sdata->local; 1347 struct ieee80211_local *local = sdata->local;
979 struct sta_info *sta; 1348 struct sta_info *sta;
1349 unsigned long last_rx;
980 bool disassoc = false; 1350 bool disassoc = false;
981 1351
982 /* TODO: start monitoring current AP signal quality and number of 1352 /* TODO: start monitoring current AP signal quality and number of
@@ -993,17 +1363,21 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
993 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", 1363 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
994 sdata->dev->name, ifmgd->bssid); 1364 sdata->dev->name, ifmgd->bssid);
995 disassoc = true; 1365 disassoc = true;
996 goto unlock; 1366 rcu_read_unlock();
1367 goto out;
997 } 1368 }
998 1369
1370 last_rx = sta->last_rx;
1371 rcu_read_unlock();
1372
999 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && 1373 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
1000 time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { 1374 time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT)) {
1001 printk(KERN_DEBUG "%s: no probe response from AP %pM " 1375 printk(KERN_DEBUG "%s: no probe response from AP %pM "
1002 "- disassociating\n", 1376 "- disassociating\n",
1003 sdata->dev->name, ifmgd->bssid); 1377 sdata->dev->name, ifmgd->bssid);
1004 disassoc = true; 1378 disassoc = true;
1005 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1379 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1006 goto unlock; 1380 goto out;
1007 } 1381 }
1008 1382
1009 /* 1383 /*
@@ -1022,27 +1396,31 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1022 } 1396 }
1023#endif 1397#endif
1024 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1398 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1399 mutex_lock(&local->iflist_mtx);
1400 ieee80211_recalc_ps(local, -1);
1401 mutex_unlock(&local->iflist_mtx);
1025 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1402 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1026 ifmgd->ssid_len, NULL, 0); 1403 ifmgd->ssid_len, NULL, 0);
1027 goto unlock; 1404 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
1028 1405 goto out;
1029 } 1406 }
1030 1407
1031 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { 1408 if (time_after(jiffies, last_rx + IEEE80211_PROBE_IDLE_TIME)) {
1032 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1409 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1410 mutex_lock(&local->iflist_mtx);
1411 ieee80211_recalc_ps(local, -1);
1412 mutex_unlock(&local->iflist_mtx);
1033 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1413 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1034 ifmgd->ssid_len, NULL, 0); 1414 ifmgd->ssid_len, NULL, 0);
1035 } 1415 }
1036 1416
1037 unlock: 1417 out:
1038 rcu_read_unlock(); 1418 if (!disassoc)
1039 1419 mod_timer(&ifmgd->timer,
1040 if (disassoc) 1420 jiffies + IEEE80211_MONITORING_INTERVAL);
1421 else
1041 ieee80211_set_disassoc(sdata, true, true, 1422 ieee80211_set_disassoc(sdata, true, true,
1042 WLAN_REASON_PREV_AUTH_NOT_VALID); 1423 WLAN_REASON_PREV_AUTH_NOT_VALID);
1043 else
1044 mod_timer(&ifmgd->timer, jiffies +
1045 IEEE80211_MONITORING_INTERVAL);
1046} 1424}
1047 1425
1048 1426
@@ -1055,6 +1433,7 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
1055 if (ifmgd->flags & IEEE80211_STA_EXT_SME) { 1433 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1056 /* Wait for SME to request association */ 1434 /* Wait for SME to request association */
1057 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1435 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1436 ieee80211_recalc_idle(sdata->local);
1058 } else 1437 } else
1059 ieee80211_associate(sdata); 1438 ieee80211_associate(sdata);
1060} 1439}
@@ -1187,7 +1566,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1187 1566
1188 ieee80211_set_disassoc(sdata, true, false, 0); 1567 ieee80211_set_disassoc(sdata, true, false, 0);
1189 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; 1568 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
1190 cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len); 1569 cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, len);
1191} 1570}
1192 1571
1193 1572
@@ -1218,7 +1597,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1218 } 1597 }
1219 1598
1220 ieee80211_set_disassoc(sdata, false, false, reason_code); 1599 ieee80211_set_disassoc(sdata, false, false, reason_code);
1221 cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len); 1600 cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, len);
1222} 1601}
1223 1602
1224 1603
@@ -1287,6 +1666,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1287 * association next time. This works around some broken APs 1666 * association next time. This works around some broken APs
1288 * which do not correctly reject reassociation requests. */ 1667 * which do not correctly reject reassociation requests. */
1289 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; 1668 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1669 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
1670 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1671 /* Wait for SME to decide what to do next */
1672 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1673 ieee80211_recalc_idle(local);
1674 }
1290 return; 1675 return;
1291 } 1676 }
1292 1677
@@ -1340,8 +1725,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1340 * to between the sta_info_alloc() and sta_info_insert() above. 1725 * to between the sta_info_alloc() and sta_info_insert() above.
1341 */ 1726 */
1342 1727
1343 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | 1728 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP);
1344 WLAN_STA_AUTHORIZED); 1729 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1730 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1345 1731
1346 rates = 0; 1732 rates = 0;
1347 basic_rates = 0; 1733 basic_rates = 0;
@@ -1421,6 +1807,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1421 if (elems.wmm_param) 1807 if (elems.wmm_param)
1422 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1808 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1423 elems.wmm_param_len); 1809 elems.wmm_param_len);
1810 else
1811 ieee80211_set_wmm_default(sdata);
1424 1812
1425 if (elems.ht_info_elem && elems.wmm_param && 1813 if (elems.ht_info_elem && elems.wmm_param &&
1426 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1814 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
@@ -1476,7 +1864,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1476 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) { 1864 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) {
1477 struct ieee80211_channel_sw_ie *sw_elem = 1865 struct ieee80211_channel_sw_ie *sw_elem =
1478 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1866 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
1479 ieee80211_process_chanswitch(sdata, sw_elem, bss); 1867 ieee80211_sta_process_chanswitch(sdata, sw_elem, bss);
1480 } 1868 }
1481 1869
1482 ieee80211_rx_bss_put(local, bss); 1870 ieee80211_rx_bss_put(local, bss);
@@ -1507,57 +1895,98 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1507 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1895 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1508 1896
1509 /* direct probe may be part of the association flow */ 1897 /* direct probe may be part of the association flow */
1510 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, 1898 if (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE) {
1511 &ifmgd->request)) {
1512 printk(KERN_DEBUG "%s direct probe responded\n", 1899 printk(KERN_DEBUG "%s direct probe responded\n",
1513 sdata->dev->name); 1900 sdata->dev->name);
1514 ieee80211_authenticate(sdata); 1901 ieee80211_authenticate(sdata);
1515 } 1902 }
1516 1903
1517 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) 1904 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
1518 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1905 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1906 mutex_lock(&sdata->local->iflist_mtx);
1907 ieee80211_recalc_ps(sdata->local, -1);
1908 mutex_unlock(&sdata->local->iflist_mtx);
1909 }
1519} 1910}
1520 1911
1912/*
1913 * This is the canonical list of information elements we care about,
1914 * the filter code also gives us all changes to the Microsoft OUI
1915 * (00:50:F2) vendor IE which is used for WMM which we need to track.
1916 *
1917 * We implement beacon filtering in software since that means we can
1918 * avoid processing the frame here and in cfg80211, and userspace
1919 * will not be able to tell whether the hardware supports it or not.
1920 *
1921 * XXX: This list needs to be dynamic -- userspace needs to be able to
1922 * add items it requires. It also needs to be able to tell us to
1923 * look out for other vendor IEs.
1924 */
1925static const u64 care_about_ies =
1926 (1ULL << WLAN_EID_COUNTRY) |
1927 (1ULL << WLAN_EID_ERP_INFO) |
1928 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
1929 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
1930 (1ULL << WLAN_EID_HT_CAPABILITY) |
1931 (1ULL << WLAN_EID_HT_INFORMATION);
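Packing element IDs into a u64 only works because every EID listed above is below 64; the parser can then mask a candidate IE against the set in O(1). A hedged sketch of the membership test this bitmap implies (hypothetical helper, not the parser's actual code; u64/bool come from linux/types.h):

/* Illustrative only: is element id 'eid' in a 64-entry EID set? */
static inline bool eid_in_set(u64 set, unsigned int eid)
{
	return eid < 64 && (set & (1ULL << eid));
}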
+
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_mgmt *mgmt,
				     size_t len,
				     struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_if_managed *ifmgd;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	size_t baselen;
 	struct ieee802_11_elems elems;
 	struct ieee80211_local *local = sdata->local;
 	u32 changed = 0;
-	bool erp_valid, directed_tim;
+	bool erp_valid, directed_tim = false;
 	u8 erp_value = 0;
+	u32 ncrc;
 
 	/* Process beacon from the current BSS */
 	baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
 	if (baselen > len)
 		return;
 
-	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
-
-	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
-
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+	if (rx_status->freq != local->hw.conf.channel->center_freq)
 		return;
 
-	ifmgd = &sdata->u.mgd;
-
 	if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED) ||
 	    memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0)
 		return;
 
-	if (rx_status->freq != local->hw.conf.channel->center_freq)
-		return;
+	if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: cancelling probereq poll due "
+			       "to a received beacon\n", sdata->dev->name);
+		}
+#endif
+		ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
+		mutex_lock(&local->iflist_mtx);
+		ieee80211_recalc_ps(local, -1);
+		mutex_unlock(&local->iflist_mtx);
+	}
 
-	ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
-				 elems.wmm_param_len);
+	ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
+	ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
					  len - baselen, &elems,
					  care_about_ies, ncrc);
 
-	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
-		directed_tim = ieee80211_check_tim(&elems, ifmgd->aid);
+	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+		directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
						   ifmgd->aid);
 
+	if (ncrc != ifmgd->beacon_crc) {
+		ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
				      true);
+
+		ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
					 elems.wmm_param_len);
+	}
+
+	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
 		if (directed_tim) {
 			if (local->hw.conf.dynamic_ps_timeout > 0) {
 				local->hw.conf.flags &= ~IEEE80211_CONF_PS;
@@ -1580,6 +2009,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 		}
 	}
 
+	if (ncrc == ifmgd->beacon_crc)
+		return;
+	ifmgd->beacon_crc = ncrc;
+
 	if (elems.erp_info && elems.erp_info_len >= 1) {
 		erp_valid = true;
 		erp_value = elems.erp_info[0];
@@ -1714,6 +2147,11 @@ static void ieee80211_sta_timer(unsigned long data)
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
 
+	if (local->quiescing) {
+		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
+		return;
+	}
+
 	set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
 	queue_work(local->hw.workqueue, &ifmgd->work);
 }
@@ -1723,10 +2161,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
 
-	if (local->ops->reset_tsf) {
-		/* Reset own TSF to allow time synchronization work. */
-		local->ops->reset_tsf(local_to_hw(local));
-	}
+	/* Reset own TSF to allow time synchronization work. */
+	drv_reset_tsf(local);
 
 	ifmgd->wmm_last_param_set = -1; /* allow any WMM update */
 
@@ -1814,25 +2250,18 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
 		return 0;
 	} else {
 		if (ifmgd->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
+
 			ifmgd->assoc_scan_tries++;
-			/* XXX maybe racy? */
-			if (local->scan_req)
-				return -1;
-			memcpy(local->int_scan_req.ssids[0].ssid,
-			       ifmgd->ssid, IEEE80211_MAX_SSID_LEN);
-			if (ifmgd->flags & IEEE80211_STA_AUTO_SSID_SEL)
-				local->int_scan_req.ssids[0].ssid_len = 0;
-			else
-				local->int_scan_req.ssids[0].ssid_len = ifmgd->ssid_len;
 
-			if (ieee80211_start_scan(sdata, &local->int_scan_req))
-				ieee80211_scan_failed(local);
+			ieee80211_request_internal_scan(sdata, ifmgd->ssid,
							ifmgd->ssid_len);
 
 			ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE;
 			set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
 		} else {
 			ifmgd->assoc_scan_tries = 0;
 			ifmgd->state = IEEE80211_STA_MLME_DISABLED;
+			ieee80211_recalc_idle(local);
 		}
 	}
 	return -1;
@@ -1855,6 +2284,17 @@ static void ieee80211_sta_work(struct work_struct *work)
 
 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
 		return;
+
+	/*
+	 * Nothing should have been stuffed into the workqueue during
+	 * the suspend->resume cycle. If this WARN is seen then there
+	 * is a bug with either the driver suspend or something in
+	 * mac80211 stuffing into the workqueue which we haven't yet
+	 * cleared during mac80211's suspend cycle.
+	 */
+	if (WARN_ON(local->suspended))
+		return;
+
 	ifmgd = &sdata->u.mgd;
 
 	while ((skb = skb_dequeue(&ifmgd->skb_queue)))
@@ -1864,14 +2304,8 @@ static void ieee80211_sta_work(struct work_struct *work)
 	    ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE &&
 	    ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE &&
 	    test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) {
-		/*
-		 * The call to ieee80211_start_scan can fail but ieee80211_request_scan
-		 * (which queued ieee80211_sta_work) did not return an error. Thus, call
-		 * ieee80211_scan_failed here if ieee80211_start_scan fails in order to
-		 * notify the scan requester.
-		 */
-		if (ieee80211_start_scan(sdata, local->scan_req))
-			ieee80211_scan_failed(local);
+		queue_delayed_work(local->hw.workqueue, &local->scan_work,
				   round_jiffies_relative(0));
 		return;
 	}
 
@@ -1882,6 +2316,8 @@ static void ieee80211_sta_work(struct work_struct *work)
 	} else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request))
 		return;
 
+	ieee80211_recalc_idle(local);
+
 	switch (ifmgd->state) {
 	case IEEE80211_STA_MLME_DISABLED:
 		break;
@@ -1926,10 +2362,43 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
 	}
 }
 
+#ifdef CONFIG_PM
+void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+	/*
+	 * we need to use atomic bitops for the running bits
+	 * only because both timers might fire at the same
+	 * time -- the code here is properly synchronised.
+	 */
+
+	cancel_work_sync(&ifmgd->work);
+	cancel_work_sync(&ifmgd->beacon_loss_work);
+	if (del_timer_sync(&ifmgd->timer))
+		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
+
+	cancel_work_sync(&ifmgd->chswitch_work);
+	if (del_timer_sync(&ifmgd->chswitch_timer))
+		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
+}
+
+void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+	if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
+		add_timer(&ifmgd->timer);
+	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
+		add_timer(&ifmgd->chswitch_timer);
+}
+#endif
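The quiesce/restart pair relies on del_timer_sync() returning nonzero exactly when the timer was still pending, so only timers that were actually armed get re-added on resume. A minimal sketch of that suspend/resume idiom for one timer (hypothetical names; the flag word would live in per-device state):

#include <linux/timer.h>
#include <linux/bitops.h>

#define MY_TMR_WAS_RUNNING	0

static struct timer_list my_timer;	/* assumed initialized elsewhere */
static unsigned long my_tmr_flags;

static void my_quiesce(void)
{
	/* nonzero return means the timer was pending; remember that */
	if (del_timer_sync(&my_timer))
		set_bit(MY_TMR_WAS_RUNNING, &my_tmr_flags);
}

static void my_restart(void)
{
	if (test_and_clear_bit(MY_TMR_WAS_RUNNING, &my_tmr_flags))
		add_timer(&my_timer);	/* re-arms with the old expiry */
}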
+
 /* interface setup */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd;
+	u32 hw_flags;
 
 	ifmgd = &sdata->u.mgd;
 	INIT_WORK(&ifmgd->work, ieee80211_sta_work);
@@ -1949,6 +2418,13 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
		IEEE80211_STA_AUTO_CHANNEL_SEL;
 	if (sdata->local->hw.queues >= 4)
 		ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
+
+	hw_flags = sdata->local->hw.flags;
+
+	if (hw_flags & IEEE80211_HW_SUPPORTS_PS) {
+		ifmgd->powersave = CONFIG_MAC80211_DEFAULT_PS_VALUE;
+		sdata->local->hw.conf.dynamic_ps_timeout = 500;
+	}
 }
 
 /* configuration hooks */
@@ -2032,13 +2508,6 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
 		ifmgd->flags &= ~IEEE80211_STA_BSSID_SET;
 	}
 
-	if (netif_running(sdata->dev)) {
-		if (ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID)) {
-			printk(KERN_DEBUG "%s: Failed to config new BSSID to "
-			       "the low-level driver\n", sdata->dev->name);
-		}
-	}
-
 	return ieee80211_sta_commit(sdata);
 }
 
@@ -2047,6 +2516,13 @@ int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+	if (len == 0 && ifmgd->extra_ie_len == 0)
+		return -EALREADY;
+
+	if (len == ifmgd->extra_ie_len && ifmgd->extra_ie &&
+	    memcmp(ifmgd->extra_ie, ie, len) == 0)
+		return -EALREADY;
+
 	kfree(ifmgd->extra_ie);
 	if (len == 0) {
 		ifmgd->extra_ie = NULL;
@@ -2068,9 +2544,6 @@ int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason
 	printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
	       sdata->dev->name, reason);
 
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
-		return -EINVAL;
-
 	ieee80211_set_disassoc(sdata, true, true, reason);
 	return 0;
 }
@@ -2082,9 +2555,6 @@ int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
 	printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
	       sdata->dev->name, reason);
 
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
-		return -EINVAL;
-
 	if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED))
 		return -ENOLINK;
 
@@ -2104,75 +2574,17 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
 	rcu_read_unlock();
 }
 
-void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
+int ieee80211_max_network_latency(struct notifier_block *nb,
+				  unsigned long data, void *dummy)
 {
+	s32 latency_usec = (s32) data;
 	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local,
-			     dynamic_ps_disable_work);
-
-	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
-		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-	}
-
-	ieee80211_wake_queues_by_reason(&local->hw,
-					IEEE80211_QUEUE_STOP_REASON_PS);
-}
-
-void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
-{
-	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local,
-			     dynamic_ps_enable_work);
-	/* XXX: using scan_sdata is completely broken! */
-	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
-
-	if (local->hw.conf.flags & IEEE80211_CONF_PS)
-		return;
-
-	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata)
-		ieee80211_send_nullfunc(local, sdata, 1);
-
-	local->hw.conf.flags |= IEEE80211_CONF_PS;
-	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-}
-
-void ieee80211_dynamic_ps_timer(unsigned long data)
-{
-	struct ieee80211_local *local = (void *) data;
-
-	queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
-}
-
-void ieee80211_send_nullfunc(struct ieee80211_local *local,
-			     struct ieee80211_sub_if_data *sdata,
-			     int powersave)
-{
-	struct sk_buff *skb;
-	struct ieee80211_hdr *nullfunc;
-	__le16 fc;
+		container_of(nb, struct ieee80211_local,
			     network_latency_notifier);
 
-	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
-		return;
+	mutex_lock(&local->iflist_mtx);
+	ieee80211_recalc_ps(local, latency_usec);
+	mutex_unlock(&local->iflist_mtx);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
-		       "frame\n", sdata->dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
-	memset(nullfunc, 0, 24);
-	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
-			 IEEE80211_FCTL_TODS);
-	if (powersave)
-		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
-	nullfunc->frame_control = fc;
-	memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
-	memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
-	memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
-
-	ieee80211_tx_skb(sdata, skb, 0);
+	return 0;
 }
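ieee80211_max_network_latency() is shaped as a notifier callback: instead of polling pm_qos_requirement(), mac80211 gets called whenever the PM-QoS network-latency target moves and re-runs the PS calculation with the new value. A hedged sketch of how such a callback is typically wired up in this kernel generation (the header and registration call are assumptions, not shown in this hunk):

#include <linux/notifier.h>
#include <linux/pm_qos_params.h>	/* assumed: pm_qos API of this era */

static int my_latency_cb(struct notifier_block *nb,
			 unsigned long data, void *dummy)
{
	s32 latency_usec = (s32) data;	/* new latency target in usec */

	/* re-evaluate power saving against latency_usec here */
	return 0;
}

static struct notifier_block my_latency_nb = {
	.notifier_call = my_latency_cb,
};

/* assumed registration point, e.g. at init:
 *	pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, &my_latency_nb);
 */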
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 81985d27cbda..7a549f9deb96 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -2,6 +2,8 @@
 #include <net/rtnetlink.h>
 
 #include "ieee80211_i.h"
+#include "mesh.h"
+#include "driver-ops.h"
 #include "led.h"
 
 int __ieee80211_suspend(struct ieee80211_hw *hw)
@@ -12,11 +14,30 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
 	struct sta_info *sta;
 	unsigned long flags;
 
+	ieee80211_scan_cancel(local);
+
 	ieee80211_stop_queues_by_reason(hw,
			IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
+	/* flush out all packets */
+	synchronize_net();
+
+	local->quiescing = true;
+	/* make quiescing visible to timers everywhere */
+	mb();
+
 	flush_workqueue(local->hw.workqueue);
 
+	/* Don't try to run timers while suspended. */
+	del_timer_sync(&local->sta_cleanup);
+
+	/*
+	 * Note that this particular timer doesn't need to be
+	 * restarted at resume.
+	 */
+	cancel_work_sync(&local->dynamic_ps_enable_work);
+	del_timer_sync(&local->dynamic_ps_timer);
+
 	/* disable keys */
 	list_for_each_entry(sdata, &local->interfaces, list)
 		ieee80211_disable_keys(sdata);
@@ -34,157 +55,70 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
 
 	rcu_read_unlock();
 
-	/* remove STAs */
-	if (local->ops->sta_notify) {
-		spin_lock_irqsave(&local->sta_lock, flags);
-		list_for_each_entry(sta, &local->sta_list, list) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-				sdata = container_of(sdata->bss,
-					struct ieee80211_sub_if_data,
-					u.ap);
-
-			local->ops->sta_notify(hw, &sdata->vif,
-				STA_NOTIFY_REMOVE, &sta->sta);
-		}
-		spin_unlock_irqrestore(&local->sta_lock, flags);
-	}
-
-	/* remove all interfaces */
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
-		    netif_running(sdata->dev)) {
-			conf.vif = &sdata->vif;
-			conf.type = sdata->vif.type;
-			conf.mac_addr = sdata->dev->dev_addr;
-			local->ops->remove_interface(hw, &conf);
-		}
-	}
-
 	/* flush again, in case driver queued work */
 	flush_workqueue(local->hw.workqueue);
 
-	/* stop hardware */
+	/* stop hardware - this must stop RX */
 	if (local->open_count) {
 		ieee80211_led_radio(local, false);
-		local->ops->stop(hw);
-	}
-	return 0;
-}
-
-int __ieee80211_resume(struct ieee80211_hw *hw)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata;
-	struct ieee80211_if_init_conf conf;
-	struct sta_info *sta;
-	unsigned long flags;
-	int res;
-
-	/* restart hardware */
-	if (local->open_count) {
-		res = local->ops->start(hw);
-
-		ieee80211_led_radio(local, hw->conf.radio_enabled);
+		drv_stop(local);
 	}
 
-	/* add interfaces */
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
-		    netif_running(sdata->dev)) {
-			conf.vif = &sdata->vif;
-			conf.type = sdata->vif.type;
-			conf.mac_addr = sdata->dev->dev_addr;
-			res = local->ops->add_interface(hw, &conf);
-		}
-	}
-
-	/* add STAs back */
-	if (local->ops->sta_notify) {
-		spin_lock_irqsave(&local->sta_lock, flags);
-		list_for_each_entry(sta, &local->sta_list, list) {
+	/* remove STAs */
+	spin_lock_irqsave(&local->sta_lock, flags);
+	list_for_each_entry(sta, &local->sta_list, list) {
+		if (local->ops->sta_notify) {
+			sdata = sta->sdata;
 			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 				sdata = container_of(sdata->bss,
					struct ieee80211_sub_if_data,
					u.ap);
 
-			local->ops->sta_notify(hw, &sdata->vif,
-					STA_NOTIFY_ADD, &sta->sta);
+			drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
				       &sta->sta);
 		}
-		spin_unlock_irqrestore(&local->sta_lock, flags);
-	}
-
-	/* Clear Suspend state so that ADDBA requests can be processed */
-
-	rcu_read_lock();
 
-	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
-		list_for_each_entry_rcu(sta, &local->sta_list, list) {
-			clear_sta_flags(sta, WLAN_STA_SUSPEND);
-		}
+		mesh_plink_quiesce(sta);
 	}
+	spin_unlock_irqrestore(&local->sta_lock, flags);
 
-	rcu_read_unlock();
-
-	/* add back keys */
-	list_for_each_entry(sdata, &local->interfaces, list)
-		if (netif_running(sdata->dev))
-			ieee80211_enable_keys(sdata);
-
-	/* setup RTS threshold */
-	if (local->ops->set_rts_threshold)
-		local->ops->set_rts_threshold(hw, local->rts_threshold);
-
-	/* reconfigure hardware */
-	ieee80211_hw_config(local, ~0);
-
-	netif_addr_lock_bh(local->mdev);
-	ieee80211_configure_filter(local);
-	netif_addr_unlock_bh(local->mdev);
-
-	/* Finally also reconfigure all the BSS information */
+	/* remove all interfaces */
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		u32 changed = ~0;
-		if (!netif_running(sdata->dev))
-			continue;
-		switch (sdata->vif.type) {
+		switch(sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
-			/* disable beacon change bits */
-			changed &= ~IEEE80211_IFCC_BEACON;
-			/* fall through */
+			ieee80211_sta_quiesce(sdata);
+			break;
 		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_AP:
-		case NL80211_IFTYPE_MESH_POINT:
-			/*
-			 * Driver's config_interface can fail if rfkill is
-			 * enabled. Accommodate this return code.
-			 * FIXME: When mac80211 has knowledge of rfkill
-			 * state the code below can change back to:
-			 * WARN(ieee80211_if_config(sdata, changed));
-			 * ieee80211_bss_info_change_notify(sdata, ~0);
-			 */
-			if (ieee80211_if_config(sdata, changed))
-				printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
-				       sdata->dev->name);
-			else
-				ieee80211_bss_info_change_notify(sdata, ~0);
+			ieee80211_ibss_quiesce(sdata);
 			break;
-		case NL80211_IFTYPE_WDS:
+		case NL80211_IFTYPE_MESH_POINT:
+			ieee80211_mesh_quiesce(sdata);
 			break;
 		case NL80211_IFTYPE_AP_VLAN:
 		case NL80211_IFTYPE_MONITOR:
-			/* ignore virtual */
-			break;
-		case NL80211_IFTYPE_UNSPECIFIED:
-		case __NL80211_IFTYPE_AFTER_LAST:
-			WARN_ON(1);
+			/* don't tell driver about this */
+			continue;
+		default:
 			break;
 		}
+
+		if (!netif_running(sdata->dev))
+			continue;
+
+		conf.vif = &sdata->vif;
+		conf.type = sdata->vif.type;
+		conf.mac_addr = sdata->dev->dev_addr;
+		drv_remove_interface(local, &conf);
 	}
 
-	ieee80211_wake_queues_by_reason(hw,
-			IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+	local->suspended = true;
+	local->quiescing = false;
 
 	return 0;
 }
+
+/*
+ * __ieee80211_resume() is a static inline which just calls
+ * ieee80211_reconfig(), which is also needed for hardware
+ * hang/firmware failure/etc. recovery.
+ */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index d9233ec50610..b218b98fba7f 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -80,8 +80,7 @@ use_low_rate(struct sk_buff *skb)
 	fc = le16_to_cpu(hdr->frame_control);
 
 	return ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
-		(fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
-		is_multicast_ether_addr(hdr->addr1));
+		(fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA);
 }
 
 
@@ -216,7 +215,7 @@ minstrel_get_next_sample(struct minstrel_sta_info *mi)
 	unsigned int sample_ndx;
 	sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
 	mi->sample_idx++;
-	if (mi->sample_idx > (mi->n_rates - 2)) {
+	if ((int) mi->sample_idx > (mi->n_rates - 2)) {
 		mi->sample_idx = 0;
 		mi->sample_column++;
 		if (mi->sample_column >= SAMPLE_COLUMNS)
@@ -245,7 +244,10 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
 	if (!sta || !mi || use_low_rate(skb)) {
 		ar[0].idx = rate_lowest_index(sband, sta);
-		ar[0].count = mp->max_retry;
+		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+			ar[0].count = 1;
+		else
+			ar[0].count = mp->max_retry;
 		return;
 	}
 
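Both rate-control algorithms in this patch special-case frames flagged IEEE80211_TX_CTL_NO_ACK: with no ACK there is no loss feedback, so retransmissions only burn airtime and the try count is pinned to 1. A hedged one-liner restating that rule (hypothetical helper, not part of the patch):

/* Illustrative only: retry budget for a rate-control decision. */
static inline int pick_retry_count(bool no_ack, int max_retry)
{
	return no_ack ? 1 : max_retry;	/* no ACK means retries are pointless */
}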
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 8bef9a1262ff..a0bef767ceb5 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -289,13 +289,15 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
 	info->control.rates[0].count =
		txrc->hw->conf.short_frame_max_tx_count;
 
-	/* Send management frames and broadcast/multicast data using lowest
-	 * rate. */
+	/* Send management frames and NO_ACK data using lowest rate. */
 	fc = le16_to_cpu(hdr->frame_control);
 	if (!sta || !spinfo ||
	    (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
-	    is_multicast_ether_addr(hdr->addr1)) {
+	    info->flags & IEEE80211_TX_CTL_NO_ACK) {
 		info->control.rates[0].idx = rate_lowest_index(sband, sta);
+		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+			info->control.rates[0].count = 1;
+
 		return;
 	}
 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9776f73c51ad..de5bba7f910a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -19,6 +19,7 @@
 #include <net/ieee80211_radiotap.h>
 
 #include "ieee80211_i.h"
+#include "driver-ops.h"
 #include "led.h"
 #include "mesh.h"
 #include "wep.h"
@@ -629,15 +630,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
	 * possible.
	 */
 
-	if (!ieee80211_has_protected(hdr->frame_control)) {
-		if (!ieee80211_is_mgmt(hdr->frame_control) ||
-		    rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
-			return RX_CONTINUE;
-		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
-		if (mmie_keyidx < 0)
-			return RX_CONTINUE;
-	}
-
	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
@@ -648,8 +640,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 	if (rx->sta)
 		stakey = rcu_dereference(rx->sta->key);
 
+	if (!ieee80211_has_protected(hdr->frame_control))
+		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
+
 	if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
 		rx->key = stakey;
+		/* Skip decryption if the frame is not protected. */
+		if (!ieee80211_has_protected(hdr->frame_control))
+			return RX_CONTINUE;
 	} else if (mmie_keyidx >= 0) {
 		/* Broadcast/multicast robust management frame / BIP */
 		if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
@@ -660,6 +658,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
 		rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
+	} else if (!ieee80211_has_protected(hdr->frame_control)) {
+		/*
+		 * The frame was not protected, so skip decryption. However, we
+		 * need to set rx->key if there is a key that could have been
+		 * used so that the frame may be dropped if encryption would
+		 * have been expected.
+		 */
+		struct ieee80211_key *key = NULL;
+		if (ieee80211_is_mgmt(hdr->frame_control) &&
+		    is_multicast_ether_addr(hdr->addr1) &&
+		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
+			rx->key = key;
+		else if ((key = rcu_dereference(rx->sdata->default_key)))
+			rx->key = key;
+		return RX_CONTINUE;
 	} else {
		/*
		 * The device doesn't give us the IV so we won't be
@@ -773,9 +786,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
 
 	atomic_inc(&sdata->bss->num_sta_ps);
 	set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
-	if (local->ops->sta_notify)
-		local->ops->sta_notify(local_to_hw(local), &sdata->vif,
-				       STA_NOTIFY_SLEEP, &sta->sta);
+	drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
	       sdata->dev->name, sta->sta.addr, sta->sta.aid);
@@ -786,15 +797,12 @@ static int ap_sta_ps_end(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	struct sk_buff *skb;
-	int sent = 0;
+	int sent, buffered;
 
 	atomic_dec(&sdata->bss->num_sta_ps);
 
 	clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
-	if (local->ops->sta_notify)
-		local->ops->sta_notify(local_to_hw(local), &sdata->vif,
-				       STA_NOTIFY_AWAKE, &sta->sta);
+	drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
 
 	if (!skb_queue_empty(&sta->ps_tx_buf))
 		sta_info_clear_tim_bit(sta);
@@ -805,22 +813,16 @@ static int ap_sta_ps_end(struct sta_info *sta)
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 
 	/* Send all buffered frames to the station */
-	while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
-		sent++;
-		skb->requeue = 1;
-		dev_queue_xmit(skb);
-	}
-	while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
-		local->total_ps_buffered--;
-		sent++;
+	sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
+	buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
+	sent += buffered;
+	local->total_ps_buffered -= buffered;
+
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-		printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame "
-		       "since STA not sleeping anymore\n", sdata->dev->name,
-		       sta->sta.addr, sta->sta.aid);
+	printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
+	       "since STA not sleeping anymore\n", sdata->dev->name,
+	       sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
-		skb->requeue = 1;
-		dev_queue_xmit(skb);
-	}
 
 	return sent;
 }
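ap_sta_ps_end() now splices a station's buffered frames onto mac80211's internal pending queues in bulk via ieee80211_add_pending_skbs() rather than re-injecting each skb through dev_queue_xmit(), and it counts what it moved so total_ps_buffered stays balanced. A hedged sketch of that count-while-draining shape (hypothetical helper; real locking omitted):

#include <linux/skbuff.h>

/* Illustrative only: drain 'from' into 'to', returning how many
 * skbs moved, which is the counting contract used above. */
static int move_all_skbs(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *skb;
	int n = 0;

	while ((skb = skb_dequeue(from)) != NULL) {
		skb_queue_tail(to, skb);
		n++;
	}
	return n;
}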
@@ -1212,109 +1214,38 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1212 /* Drop unencrypted frames if key is set. */ 1214 /* Drop unencrypted frames if key is set. */
1213 if (unlikely(!ieee80211_has_protected(fc) && 1215 if (unlikely(!ieee80211_has_protected(fc) &&
1214 !ieee80211_is_nullfunc(fc) && 1216 !ieee80211_is_nullfunc(fc) &&
1215 (!ieee80211_is_mgmt(fc) || 1217 ieee80211_is_data(fc) &&
1216 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1217 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1218 (rx->key || rx->sdata->drop_unencrypted)))
1219 return -EACCES;
1220 /* BIP does not use Protected field, so need to check MMIE */
1221 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1222 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1223 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1224 (rx->key || rx->sdata->drop_unencrypted))) 1218 (rx->key || rx->sdata->drop_unencrypted)))
1225 return -EACCES; 1219 return -EACCES;
1220 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1221 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1222 rx->key))
1223 return -EACCES;
1224 /* BIP does not use Protected field, so need to check MMIE */
1225 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1226 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1227 rx->key))
1228 return -EACCES;
1229 /*
1230 * When using MFP, Action frames are not allowed prior to
1231 * having configured keys.
1232 */
1233 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1234 ieee80211_is_robust_mgmt_frame(
1235 (struct ieee80211_hdr *) rx->skb->data)))
1236 return -EACCES;
1237 }
1226 1238
1227 return 0; 1239 return 0;
1228} 1240}
1229 1241
1230static int 1242static int
1231ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1243__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1232{ 1244{
1233 struct net_device *dev = rx->dev; 1245 struct net_device *dev = rx->dev;
1234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1235 u16 hdrlen, ethertype;
1236 u8 *payload;
1237 u8 dst[ETH_ALEN];
1238 u8 src[ETH_ALEN] __aligned(2);
1239 struct sk_buff *skb = rx->skb;
1240 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1241 1247
1242 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1248 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1243 return -1;
1244
1245 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1246
1247 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1248 * header
1249 * IEEE 802.11 address fields:
1250 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1251 * 0 0 DA SA BSSID n/a
1252 * 0 1 DA BSSID SA n/a
1253 * 1 0 BSSID SA DA n/a
1254 * 1 1 RA TA DA SA
1255 */
1256 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1257 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1258
1259 switch (hdr->frame_control &
1260 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1261 case cpu_to_le16(IEEE80211_FCTL_TODS):
1262 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1263 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1264 return -1;
1265 break;
1266 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1267 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1268 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1269 return -1;
1270 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1271 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1272 (skb->data + hdrlen);
1273 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1274 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1275 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1276 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1277 }
1278 }
1279 break;
1280 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1281 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1282 (is_multicast_ether_addr(dst) &&
1283 !compare_ether_addr(src, dev->dev_addr)))
1284 return -1;
1285 break;
1286 case cpu_to_le16(0):
1287 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1288 return -1;
1289 break;
1290 }
1291
1292 if (unlikely(skb->len - hdrlen < 8))
1293 return -1;
1294
1295 payload = skb->data + hdrlen;
1296 ethertype = (payload[6] << 8) | payload[7];
1297
1298 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1299 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1300 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1301 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1302 * replace EtherType */
1303 skb_pull(skb, hdrlen + 6);
1304 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1305 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1306 } else {
1307 struct ethhdr *ehdr;
1308 __be16 len;
1309
1310 skb_pull(skb, hdrlen);
1311 len = htons(skb->len);
1312 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1313 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1314 memcpy(ehdr->h_source, src, ETH_ALEN);
1315 ehdr->h_proto = len;
1316 }
1317 return 0;
1318} 1249}
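Reviewer note: the address-field table in the comment above maps directly onto code. A minimal sketch of the DA/SA selection it implies (assuming linux/ieee80211.h; assumed to mirror what ieee80211_get_DA()/ieee80211_get_SA() do):

static u8 *get_da(struct ieee80211_hdr *hdr)
{
	/* ToDS=1: DA is carried in Addr3, otherwise in Addr1 */
	if (ieee80211_has_tods(hdr->frame_control))
		return hdr->addr3;
	return hdr->addr1;
}

static u8 *get_sa(struct ieee80211_hdr *hdr)
{
	/* ToDS=1,FromDS=1 (4-address frame): SA is in Addr4 */
	if (ieee80211_has_a4(hdr->frame_control))
		return hdr->addr4;
	/* FromDS=1: SA is in Addr3 */
	if (ieee80211_has_fromds(hdr->frame_control))
		return hdr->addr3;
	/* otherwise: SA is in Addr2 */
	return hdr->addr2;
}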
1319 1250
1320/* 1251/*
@@ -1397,7 +1328,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1397 * mac80211. That also explains the __skb_push() 1328 * mac80211. That also explains the __skb_push()
1398 * below. 1329 * below.
1399 */ 1330 */
1400 align = (unsigned long)skb->data & 3; 1331 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1401 if (align) { 1332 if (align) {
1402 if (WARN_ON(skb_headroom(skb) < 3)) { 1333 if (WARN_ON(skb_headroom(skb) < 3)) {
1403 dev_kfree_skb(skb); 1334 dev_kfree_skb(skb);
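Reviewer note: the point of checking skb->data + sizeof(struct ethhdr) rather than skb->data is that the network header, not the ethernet header, is what must be 32-bit aligned. A worked example (illustration only, assuming the code below this hunk shifts the frame by 'align' bytes):

/*
 * skb->data           = 0x1000   (32-bit aligned)
 * data + ethhdr (14)  = 0x100e   -> 0x100e & 3 == 2, so align == 2
 * After moving the frame down by 2 bytes the ethernet header starts
 * at 0x0ffe and the IP header at 0x100c, which is 32-bit aligned.
 */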
@@ -1453,7 +1384,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1453 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1384 if (!(rx->flags & IEEE80211_RX_AMSDU))
1454 return RX_CONTINUE; 1385 return RX_CONTINUE;
1455 1386
1456 err = ieee80211_data_to_8023(rx); 1387 err = __ieee80211_data_to_8023(rx);
1457 if (unlikely(err)) 1388 if (unlikely(err))
1458 return RX_DROP_UNUSABLE; 1389 return RX_DROP_UNUSABLE;
1459 1390
@@ -1639,7 +1570,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1639 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1570 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1640 return RX_DROP_MONITOR; 1571 return RX_DROP_MONITOR;
1641 1572
1642 err = ieee80211_data_to_8023(rx); 1573 err = __ieee80211_data_to_8023(rx);
1643 if (unlikely(err)) 1574 if (unlikely(err))
1644 return RX_DROP_UNUSABLE; 1575 return RX_DROP_UNUSABLE;
1645 1576
@@ -1827,6 +1758,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1827 sizeof(mgmt->u.action.u.chan_switch))) 1758 sizeof(mgmt->u.action.u.chan_switch)))
1828 return RX_DROP_MONITOR; 1759 return RX_DROP_MONITOR;
1829 1760
1761 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1762 return RX_DROP_MONITOR;
1763
1830 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 1764 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1831 return RX_DROP_MONITOR; 1765 return RX_DROP_MONITOR;
1832 1766
@@ -1837,7 +1771,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1837 if (!bss) 1771 if (!bss)
1838 return RX_DROP_MONITOR; 1772 return RX_DROP_MONITOR;
1839 1773
1840 ieee80211_process_chanswitch(sdata, 1774 ieee80211_sta_process_chanswitch(sdata,
1841 &mgmt->u.action.u.chan_switch.sw_elem, bss); 1775 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1842 ieee80211_rx_bss_put(local, bss); 1776 ieee80211_rx_bss_put(local, bss);
1843 break; 1777 break;
@@ -1932,7 +1866,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1932 !ieee80211_is_auth(hdr->frame_control)) 1866 !ieee80211_is_auth(hdr->frame_control))
1933 goto ignore; 1867 goto ignore;
1934 1868
1935 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr); 1869 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL);
1936 ignore: 1870 ignore:
1937 dev_kfree_skb(rx->skb); 1871 dev_kfree_skb(rx->skb);
1938 rx->skb = NULL; 1872 rx->skb = NULL;
@@ -2287,6 +2221,43 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
2287} 2221}
2288 2222
2289 2223
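Reviewer note: the sequence-number helpers used throughout the reorder code operate modulo the 12-bit 802.11 sequence space. A sketch of their assumed definitions (with SEQ_MASK covering the sequence-number bits):

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}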
2224static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2225 struct tid_ampdu_rx *tid_agg_rx,
2226 int index)
2227{
2228 struct ieee80211_supported_band *sband;
2229 struct ieee80211_rate *rate;
2230 struct ieee80211_rx_status status;
2231
2232 if (!tid_agg_rx->reorder_buf[index])
2233 goto no_frame;
2234
2235 /* release the reordered frames to stack */
2236 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status));
2237 sband = hw->wiphy->bands[status.band];
2238 if (status.flag & RX_FLAG_HT)
2239 rate = sband->bitrates; /* TODO: HT rates */
2240 else
2241 rate = &sband->bitrates[status.rate_idx];
2242 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2243 &status, rate);
2244 tid_agg_rx->stored_mpdu_num--;
2245 tid_agg_rx->reorder_buf[index] = NULL;
2246
2247no_frame:
2248 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2249}
2250
2251
2252/*
2253 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
2254 * the skb was added to the buffer longer than this time ago, the earlier
2255 * frames that have not yet been received are assumed to be lost and the skb
2256 * can be released for processing. This may also release other skb's from the
2257 * reorder buffer if there are no additional gaps between the frames.
2258 */
2259#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2260
2290/* 2261/*
2291 * As this function belongs to the Rx path it must be called with 2262 * As this function belongs to the Rx path it must be called with
2292 * the proper rcu_read_lock protection for its flow. 2263 * the proper rcu_read_lock protection for its flow.
@@ -2298,12 +2269,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2298 u16 mpdu_seq_num, 2269 u16 mpdu_seq_num,
2299 int bar_req) 2270 int bar_req)
2300{ 2271{
2301 struct ieee80211_local *local = hw_to_local(hw);
2302 struct ieee80211_rx_status status;
2303 u16 head_seq_num, buf_size; 2272 u16 head_seq_num, buf_size;
2304 int index; 2273 int index;
2305 struct ieee80211_supported_band *sband;
2306 struct ieee80211_rate *rate;
2307 2274
2308 buf_size = tid_agg_rx->buf_size; 2275 buf_size = tid_agg_rx->buf_size;
2309 head_seq_num = tid_agg_rx->head_seq_num; 2276 head_seq_num = tid_agg_rx->head_seq_num;
@@ -2328,28 +2295,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2328 index = seq_sub(tid_agg_rx->head_seq_num, 2295 index = seq_sub(tid_agg_rx->head_seq_num,
2329 tid_agg_rx->ssn) 2296 tid_agg_rx->ssn)
2330 % tid_agg_rx->buf_size; 2297 % tid_agg_rx->buf_size;
2331 2298 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2332 if (tid_agg_rx->reorder_buf[index]) { 2299 index);
2333 /* release the reordered frames to stack */
2334 memcpy(&status,
2335 tid_agg_rx->reorder_buf[index]->cb,
2336 sizeof(status));
2337 sband = local->hw.wiphy->bands[status.band];
2338 if (status.flag & RX_FLAG_HT) {
2339 /* TODO: HT rates */
2340 rate = sband->bitrates;
2341 } else {
2342 rate = &sband->bitrates
2343 [status.rate_idx];
2344 }
2345 __ieee80211_rx_handle_packet(hw,
2346 tid_agg_rx->reorder_buf[index],
2347 &status, rate);
2348 tid_agg_rx->stored_mpdu_num--;
2349 tid_agg_rx->reorder_buf[index] = NULL;
2350 }
2351 tid_agg_rx->head_seq_num =
2352 seq_inc(tid_agg_rx->head_seq_num);
2353 } 2300 }
2354 if (bar_req) 2301 if (bar_req)
2355 return 1; 2302 return 1;
@@ -2376,26 +2323,50 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2376 2323
2377 /* put the frame in the reordering buffer */ 2324 /* put the frame in the reordering buffer */
2378 tid_agg_rx->reorder_buf[index] = skb; 2325 tid_agg_rx->reorder_buf[index] = skb;
2326 tid_agg_rx->reorder_time[index] = jiffies;
2379 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus, 2327 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2380 sizeof(*rxstatus)); 2328 sizeof(*rxstatus));
2381 tid_agg_rx->stored_mpdu_num++; 2329 tid_agg_rx->stored_mpdu_num++;
2382 /* release the buffer until next missing frame */ 2330 /* release the buffer until next missing frame */
2383 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) 2331 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2384 % tid_agg_rx->buf_size; 2332 % tid_agg_rx->buf_size;
2385 while (tid_agg_rx->reorder_buf[index]) { 2333 if (!tid_agg_rx->reorder_buf[index] &&
2386 /* release the reordered frame back to stack */ 2334 tid_agg_rx->stored_mpdu_num > 1) {
2387 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, 2335 /*
2388 sizeof(status)); 2336 * No buffers ready to be released, but check whether any
2389 sband = local->hw.wiphy->bands[status.band]; 2337 * frames in the reorder buffer have timed out.
2390 if (status.flag & RX_FLAG_HT) 2338 */
2391 rate = sband->bitrates; /* TODO: HT rates */ 2339 int j;
2392 else 2340 int skipped = 1;
2393 rate = &sband->bitrates[status.rate_idx]; 2341 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2394 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2342 j = (j + 1) % tid_agg_rx->buf_size) {
2395 &status, rate); 2343 if (tid_agg_rx->reorder_buf[j] == NULL) {
2396 tid_agg_rx->stored_mpdu_num--; 2344 skipped++;
2397 tid_agg_rx->reorder_buf[index] = NULL; 2345 continue;
2398 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2346 }
2347 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2348 HZ / 10))
2349 break;
2350
2351#ifdef CONFIG_MAC80211_HT_DEBUG
2352 if (net_ratelimit())
2353 printk(KERN_DEBUG "%s: release an RX reorder "
2354 "frame due to timeout on earlier "
2355 "frames\n",
2356 wiphy_name(hw->wiphy));
2357#endif
2358 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2359
2360 /*
2361 * Increment the head seq# also for the skipped slots.
2362 */
2363 tid_agg_rx->head_seq_num =
2364 (tid_agg_rx->head_seq_num + skipped) &
2365 SEQ_MASK;
2366 skipped = 0;
2367 }
2368 } else while (tid_agg_rx->reorder_buf[index]) {
2369 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2399 index = seq_sub(tid_agg_rx->head_seq_num, 2370 index = seq_sub(tid_agg_rx->head_seq_num,
2400 tid_agg_rx->ssn) % tid_agg_rx->buf_size; 2371 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2401 } 2372 }
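Reviewer note: the timeout branch above walks the ring from the slot after the head, skipping holes, and flushes any frame older than HT_RX_REORDER_BUF_TIMEOUT together with the holes in front of it. A simplified standalone model of the same walk (hypothetical reduced types, illustration only):

struct reorder_ring {
	void *buf[64];			/* queued frames, NULL == hole */
	unsigned long time[64];		/* jiffies when the slot was filled */
	int size;			/* number of slots in use (<= 64) */
	int head;			/* slot of the head sequence number */
};

static void release_timed_out(struct reorder_ring *r, unsigned long now)
{
	int j, skipped = 1;		/* head slot itself is a known hole */

	for (j = (r->head + 1) % r->size; j != r->head;
	     j = (j + 1) % r->size) {
		if (!r->buf[j]) {	/* another hole: remember it, go on */
			skipped++;
			continue;
		}
		if (now - r->time[j] < HZ / 10)
			break;		/* newest releasable frame reached */
		r->buf[j] = NULL;	/* release this frame to the stack */
		/* head moves past the skipped holes and the frame itself */
		r->head = (r->head + skipped + 1) % r->size;
		skipped = 0;
	}
}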
@@ -2517,6 +2488,18 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2517 return; 2488 return;
2518 } 2489 }
2519 2490
2491 /*
2492 * In theory, the block ack reordering should happen after duplicate
2493 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2494 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2495 * happen as a new RX handler between ieee80211_rx_h_check and
2496 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2497 * the time being, the call can be here since RX reorder buf processing
2498 * will implicitly skip duplicates. We could, in theory at least,
2499 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2500 * frames from a channel other than the operational channel), but that should not
2501 * happen in normal networks.
2502 */
2520 if (!ieee80211_rx_reorder_ampdu(local, skb, status)) 2503 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2521 __ieee80211_rx_handle_packet(hw, skb, status, rate); 2504 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2522 2505
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3bf9839f5916..2a8d09ad17ff 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -21,6 +21,7 @@
21#include <net/iw_handler.h> 21#include <net/iw_handler.h>
22 22
23#include "ieee80211_i.h" 23#include "ieee80211_i.h"
24#include "driver-ops.h"
24#include "mesh.h" 25#include "mesh.h"
25 26
26#define IEEE80211_PROBE_DELAY (HZ / 33) 27#define IEEE80211_PROBE_DELAY (HZ / 33)
@@ -202,18 +203,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
202 return RX_QUEUED; 203 return RX_QUEUED;
203} 204}
204 205
205void ieee80211_scan_failed(struct ieee80211_local *local)
206{
207 if (WARN_ON(!local->scan_req))
208 return;
209
210 /* notify cfg80211 about the failed scan */
211 if (local->scan_req != &local->int_scan_req)
212 cfg80211_scan_done(local->scan_req, true);
213
214 local->scan_req = NULL;
215}
216
217/* 206/*
218 * inform AP that we will go to sleep so that it will buffer the frames 207 * inform AP that we will go to sleep so that it will buffer the frames
219 * while we scan 208 * while we scan
@@ -253,7 +242,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
253{ 242{
254 struct ieee80211_local *local = sdata->local; 243 struct ieee80211_local *local = sdata->local;
255 244
256 if (!local->powersave) 245 if (!local->ps_sdata)
257 ieee80211_send_nullfunc(local, sdata, 0); 246 ieee80211_send_nullfunc(local, sdata, 0);
258 else { 247 else {
259 /* 248 /*
@@ -274,51 +263,62 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
274 } 263 }
275} 264}
276 265
266static void ieee80211_restore_scan_ies(struct ieee80211_local *local)
267{
268 kfree(local->scan_req->ie);
269 local->scan_req->ie = local->orig_ies;
270 local->scan_req->ie_len = local->orig_ies_len;
271}
272
277void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 273void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
278{ 274{
279 struct ieee80211_local *local = hw_to_local(hw); 275 struct ieee80211_local *local = hw_to_local(hw);
280 struct ieee80211_sub_if_data *sdata; 276 struct ieee80211_sub_if_data *sdata;
277 bool was_hw_scan;
281 278
282 if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) 279 mutex_lock(&local->scan_mtx);
280
281 if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) {
282 mutex_unlock(&local->scan_mtx);
283 return; 283 return;
284 }
284 285
285 if (WARN_ON(!local->scan_req)) 286 if (WARN_ON(!local->scan_req)) {
287 mutex_unlock(&local->scan_mtx);
286 return; 288 return;
289 }
290
291 if (local->hw_scanning)
292 ieee80211_restore_scan_ies(local);
287 293
288 if (local->scan_req != &local->int_scan_req) 294 if (local->scan_req != &local->int_scan_req)
289 cfg80211_scan_done(local->scan_req, aborted); 295 cfg80211_scan_done(local->scan_req, aborted);
290 local->scan_req = NULL; 296 local->scan_req = NULL;
291 297
292 local->last_scan_completed = jiffies; 298 was_hw_scan = local->hw_scanning;
299 local->hw_scanning = false;
300 local->sw_scanning = false;
301 local->scan_channel = NULL;
293 302
294 if (local->hw_scanning) { 303 /* we only have to protect scan_req and hw/sw scan */
295 local->hw_scanning = false; 304 mutex_unlock(&local->scan_mtx);
296 /*
297 * Somebody might have requested channel change during scan
298 * that we won't have acted upon, try now. ieee80211_hw_config
299 * will set the flag based on actual changes.
300 */
301 ieee80211_hw_config(local, 0);
302 goto done;
303 }
304 305
305 local->sw_scanning = false;
306 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 306 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
307 if (was_hw_scan)
308 goto done;
307 309
308 netif_tx_lock_bh(local->mdev); 310 netif_tx_lock_bh(local->mdev);
309 netif_addr_lock(local->mdev); 311 netif_addr_lock(local->mdev);
310 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; 312 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
311 local->ops->configure_filter(local_to_hw(local), 313 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
312 FIF_BCN_PRBRESP_PROMISC, 314 &local->filter_flags,
313 &local->filter_flags, 315 local->mdev->mc_count,
314 local->mdev->mc_count, 316 local->mdev->mc_list);
315 local->mdev->mc_list);
316 317
317 netif_addr_unlock(local->mdev); 318 netif_addr_unlock(local->mdev);
318 netif_tx_unlock_bh(local->mdev); 319 netif_tx_unlock_bh(local->mdev);
319 320
320 if (local->ops->sw_scan_complete) 321 drv_sw_scan_complete(local);
321 local->ops->sw_scan_complete(local_to_hw(local));
322 322
323 mutex_lock(&local->iflist_mtx); 323 mutex_lock(&local->iflist_mtx);
324 list_for_each_entry(sdata, &local->interfaces, list) { 324 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -338,18 +338,160 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
338 if (sdata->vif.type == NL80211_IFTYPE_AP || 338 if (sdata->vif.type == NL80211_IFTYPE_AP ||
339 sdata->vif.type == NL80211_IFTYPE_ADHOC || 339 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
340 sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 340 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
341 ieee80211_if_config(sdata, 341 ieee80211_bss_info_change_notify(
342 IEEE80211_IFCC_BEACON_ENABLED); 342 sdata, BSS_CHANGED_BEACON_ENABLED);
343 } 343 }
344 mutex_unlock(&local->iflist_mtx); 344 mutex_unlock(&local->iflist_mtx);
345 345
346 done: 346 done:
347 ieee80211_recalc_idle(local);
347 ieee80211_mlme_notify_scan_completed(local); 348 ieee80211_mlme_notify_scan_completed(local);
348 ieee80211_ibss_notify_scan_completed(local); 349 ieee80211_ibss_notify_scan_completed(local);
349 ieee80211_mesh_notify_scan_completed(local); 350 ieee80211_mesh_notify_scan_completed(local);
350} 351}
351EXPORT_SYMBOL(ieee80211_scan_completed); 352EXPORT_SYMBOL(ieee80211_scan_completed);
352 353
354static int ieee80211_start_sw_scan(struct ieee80211_local *local)
355{
356 struct ieee80211_sub_if_data *sdata;
357
358 /*
359 * Hardware/driver doesn't support hw_scan, so use software
360 * scanning instead. First send a nullfunc frame with power save
361 * bit on so that the AP will buffer the frames for us while we are not
362 * listening, then send probe requests to each channel and wait for
363 * the responses. After all channels are scanned, tune back to the
364 * original channel and send a nullfunc frame with power save bit
365 * off to trigger the AP to send us all the buffered frames.
366 *
367 * Note that while local->sw_scanning is true everything else but
368 * nullfunc frames and probe requests will be dropped in
369 * ieee80211_tx_h_check_assoc().
370 */
371 drv_sw_scan_start(local);
372
373 mutex_lock(&local->iflist_mtx);
374 list_for_each_entry(sdata, &local->interfaces, list) {
375 if (!netif_running(sdata->dev))
376 continue;
377
378 /* disable beaconing */
379 if (sdata->vif.type == NL80211_IFTYPE_AP ||
380 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
381 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
382 ieee80211_bss_info_change_notify(
383 sdata, BSS_CHANGED_BEACON_ENABLED);
384
385 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
386 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
387 netif_tx_stop_all_queues(sdata->dev);
388 ieee80211_scan_ps_enable(sdata);
389 }
390 } else
391 netif_tx_stop_all_queues(sdata->dev);
392 }
393 mutex_unlock(&local->iflist_mtx);
394
395 local->scan_state = SCAN_SET_CHANNEL;
396 local->scan_channel_idx = 0;
397
398 netif_addr_lock_bh(local->mdev);
399 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
400 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
401 &local->filter_flags,
402 local->mdev->mc_count,
403 local->mdev->mc_list);
404 netif_addr_unlock_bh(local->mdev);
405
406 /* TODO: start scan as soon as all nullfunc frames are ACKed */
407 queue_delayed_work(local->hw.workqueue, &local->scan_work,
408 IEEE80211_CHANNEL_TIME);
409
410 return 0;
411}
412
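Reviewer note: condensed, the software-scan timeline that the comment at the top of ieee80211_start_sw_scan() describes is:

/*
 * 1. nullfunc with PS bit set      -> the AP buffers frames for us
 * 2. for each channel in the request:
 *        tune; on active channels send probe request(s);
 *        dwell for IEEE80211_CHANNEL_TIME (passive channels:
 *        IEEE80211_PASSIVE_CHANNEL_TIME)
 * 3. tune back to the operating channel
 * 4. nullfunc with PS bit clear    -> the AP flushes the buffered frames
 */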
413
414static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
415 struct cfg80211_scan_request *req)
416{
417 struct ieee80211_local *local = sdata->local;
418 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
419 int rc;
420
421 if (local->scan_req)
422 return -EBUSY;
423
424 if (local->ops->hw_scan) {
425 u8 *ies;
426 int ielen;
427
428 ies = kmalloc(2 + IEEE80211_MAX_SSID_LEN +
429 local->scan_ies_len + req->ie_len, GFP_KERNEL);
430 if (!ies)
431 return -ENOMEM;
432
433 ielen = ieee80211_build_preq_ies(local, ies,
434 req->ie, req->ie_len);
435 local->orig_ies = req->ie;
436 local->orig_ies_len = req->ie_len;
437 req->ie = ies;
438 req->ie_len = ielen;
439 }
440
441 local->scan_req = req;
442 local->scan_sdata = sdata;
443
444 if (req != &local->int_scan_req &&
445 sdata->vif.type == NL80211_IFTYPE_STATION &&
446 (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE ||
447 ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
448 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE)) {
449 /* actually wait for the assoc to finish/time out */
450 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
451 return 0;
452 }
453
454 if (local->ops->hw_scan)
455 local->hw_scanning = true;
456 else
457 local->sw_scanning = true;
458 /*
459 * Kicking off the scan need not be protected;
460 * only the scan state variables do, since now
461 * local->scan_req is assigned and other callers
462 * will abort their scan attempts.
463 *
464 * This avoids getting a scan_mtx -> iflist_mtx
465 * dependency, so that the scan completed calls
466 * have more locking freedom.
467 */
468
469 ieee80211_recalc_idle(local);
470 mutex_unlock(&local->scan_mtx);
471
472 if (local->ops->hw_scan)
473 rc = drv_hw_scan(local, local->scan_req);
474 else
475 rc = ieee80211_start_sw_scan(local);
476
477 mutex_lock(&local->scan_mtx);
478
479 if (rc) {
480 if (local->ops->hw_scan) {
481 local->hw_scanning = false;
482 ieee80211_restore_scan_ies(local);
483 } else
484 local->sw_scanning = false;
485
486 ieee80211_recalc_idle(local);
487
488 local->scan_req = NULL;
489 local->scan_sdata = NULL;
490 }
491
492 return rc;
493}
494
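Reviewer note: the unlock/relock around the drv_hw_scan()/ieee80211_start_sw_scan() call above is a deliberate lock-ordering dodge, as the in-code comment notes. Schematically (kick_off_scan() is a stand-in for the real calls):

mutex_lock(&local->scan_mtx);
local->scan_req = req;		/* published: later callers see it and back off */
mutex_unlock(&local->scan_mtx);

rc = kick_off_scan();		/* may take iflist_mtx internally */

mutex_lock(&local->scan_mtx);
if (rc)
	local->scan_req = NULL;	/* roll the published state back */
mutex_unlock(&local->scan_mtx);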
353void ieee80211_scan_work(struct work_struct *work) 495void ieee80211_scan_work(struct work_struct *work)
354{ 496{
355 struct ieee80211_local *local = 497 struct ieee80211_local *local =
@@ -359,17 +501,41 @@ void ieee80211_scan_work(struct work_struct *work)
359 int skip, i; 501 int skip, i;
360 unsigned long next_delay = 0; 502 unsigned long next_delay = 0;
361 503
504 mutex_lock(&local->scan_mtx);
505 if (!sdata || !local->scan_req) {
506 mutex_unlock(&local->scan_mtx);
507 return;
508 }
509
510 if (local->scan_req && !(local->sw_scanning || local->hw_scanning)) {
511 struct cfg80211_scan_request *req = local->scan_req;
512 int rc;
513
514 local->scan_req = NULL;
515
516 rc = __ieee80211_start_scan(sdata, req);
517 mutex_unlock(&local->scan_mtx);
518
519 if (rc)
520 ieee80211_scan_completed(&local->hw, true);
521 return;
522 }
523
524 mutex_unlock(&local->scan_mtx);
525
362 /* 526 /*
363 * Avoid re-scheduling when the sdata is going away. 527 * Avoid re-scheduling when the sdata is going away.
364 */ 528 */
365 if (!netif_running(sdata->dev)) 529 if (!netif_running(sdata->dev)) {
530 ieee80211_scan_completed(&local->hw, true);
366 return; 531 return;
532 }
367 533
368 switch (local->scan_state) { 534 switch (local->scan_state) {
369 case SCAN_SET_CHANNEL: 535 case SCAN_SET_CHANNEL:
370 /* if no more bands/channels left, complete scan */ 536 /* if no more bands/channels left, complete scan */
371 if (local->scan_channel_idx >= local->scan_req->n_channels) { 537 if (local->scan_channel_idx >= local->scan_req->n_channels) {
372 ieee80211_scan_completed(local_to_hw(local), false); 538 ieee80211_scan_completed(&local->hw, false);
373 return; 539 return;
374 } 540 }
375 skip = 0; 541 skip = 0;
@@ -393,24 +559,39 @@ void ieee80211_scan_work(struct work_struct *work)
393 if (skip) 559 if (skip)
394 break; 560 break;
395 561
396 next_delay = IEEE80211_PROBE_DELAY + 562 /*
397 usecs_to_jiffies(local->hw.channel_change_time); 563 * Probe delay is used to update the NAV, cf. 11.1.3.2.2
564 * (which unfortunately doesn't say _why_ step a) is done,
565 * but it waits for the probe delay or until a frame is
566 * received - and the received frame would update the NAV).
567 * For now, we do not support waiting until a frame is
568 * received.
569 *
570 * In any case, it is not necessary for a passive scan.
571 */
572 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
573 !local->scan_req->n_ssids) {
574 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
575 break;
576 }
577
578 next_delay = IEEE80211_PROBE_DELAY;
398 local->scan_state = SCAN_SEND_PROBE; 579 local->scan_state = SCAN_SEND_PROBE;
399 break; 580 break;
400 case SCAN_SEND_PROBE: 581 case SCAN_SEND_PROBE:
401 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
402 local->scan_state = SCAN_SET_CHANNEL;
403
404 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
405 !local->scan_req->n_ssids)
406 break;
407 for (i = 0; i < local->scan_req->n_ssids; i++) 582 for (i = 0; i < local->scan_req->n_ssids; i++)
408 ieee80211_send_probe_req( 583 ieee80211_send_probe_req(
409 sdata, NULL, 584 sdata, NULL,
410 local->scan_req->ssids[i].ssid, 585 local->scan_req->ssids[i].ssid,
411 local->scan_req->ssids[i].ssid_len, 586 local->scan_req->ssids[i].ssid_len,
412 local->scan_req->ie, local->scan_req->ie_len); 587 local->scan_req->ie, local->scan_req->ie_len);
588
589 /*
590 * After sending probe requests, wait for probe responses
591 * on the channel.
592 */
413 next_delay = IEEE80211_CHANNEL_TIME; 593 next_delay = IEEE80211_CHANNEL_TIME;
594 local->scan_state = SCAN_SET_CHANNEL;
414 break; 595 break;
415 } 596 }
416 597
@@ -418,150 +599,53 @@ void ieee80211_scan_work(struct work_struct *work)
418 next_delay); 599 next_delay);
419} 600}
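Reviewer note: after the rework, the per-channel dwell choice reduces to a small decision. A sketch (hypothetical helper summarizing the state machine above):

static unsigned long scan_dwell(struct ieee80211_channel *chan, int n_ssids)
{
	/* passive channel, or nothing to probe for: just listen */
	if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) || !n_ssids)
		return IEEE80211_PASSIVE_CHANNEL_TIME;
	/* active scan: NAV/settle delay first; the probes and
	 * IEEE80211_CHANNEL_TIME of listening follow */
	return IEEE80211_PROBE_DELAY;
}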
420 601
421 602int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
422int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, 603 struct cfg80211_scan_request *req)
423 struct cfg80211_scan_request *req)
424{ 604{
425 struct ieee80211_local *local = scan_sdata->local; 605 int res;
426 struct ieee80211_sub_if_data *sdata;
427
428 if (!req)
429 return -EINVAL;
430
431 if (local->scan_req && local->scan_req != req)
432 return -EBUSY;
433
434 local->scan_req = req;
435
436 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1)
437 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
438 * BSSID: MACAddress
439 * SSID
440 * ScanType: ACTIVE, PASSIVE
441 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
442 * a Probe frame during active scanning
443 * ChannelList
444 * MinChannelTime (>= ProbeDelay), in TU
445 * MaxChannelTime: (>= MinChannelTime), in TU
446 */
447
448 /* MLME-SCAN.confirm
449 * BSSDescriptionSet
450 * ResultCode: SUCCESS, INVALID_PARAMETERS
451 */
452
453 if (local->sw_scanning || local->hw_scanning) {
454 if (local->scan_sdata == scan_sdata)
455 return 0;
456 return -EBUSY;
457 }
458
459 if (local->ops->hw_scan) {
460 int rc;
461
462 local->hw_scanning = true;
463 rc = local->ops->hw_scan(local_to_hw(local), req);
464 if (rc) {
465 local->hw_scanning = false;
466 return rc;
467 }
468 local->scan_sdata = scan_sdata;
469 return 0;
470 }
471
472 /*
473 * Hardware/driver doesn't support hw_scan, so use software
474 * scanning instead. First send a nullfunc frame with power save
475 * bit on so that AP will buffer the frames for us while we are not
476 * listening, then send probe requests to each channel and wait for
477 * the responses. After all channels are scanned, tune back to the
478 * original channel and send a nullfunc frame with power save bit
479 * off to trigger the AP to send us all the buffered frames.
480 *
481 * Note that while local->sw_scanning is true everything else but
482 * nullfunc frames and probe requests will be dropped in
483 * ieee80211_tx_h_check_assoc().
484 */
485 local->sw_scanning = true;
486 if (local->ops->sw_scan_start)
487 local->ops->sw_scan_start(local_to_hw(local));
488 606
489 mutex_lock(&local->iflist_mtx); 607 mutex_lock(&sdata->local->scan_mtx);
490 list_for_each_entry(sdata, &local->interfaces, list) { 608 res = __ieee80211_start_scan(sdata, req);
491 if (!netif_running(sdata->dev)) 609 mutex_unlock(&sdata->local->scan_mtx);
492 continue;
493 610
494 /* disable beaconing */ 611 return res;
495 if (sdata->vif.type == NL80211_IFTYPE_AP || 612}
496 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
497 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
498 ieee80211_if_config(sdata,
499 IEEE80211_IFCC_BEACON_ENABLED);
500 613
501 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 614int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
502 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 615 const u8 *ssid, u8 ssid_len)
503 netif_tx_stop_all_queues(sdata->dev); 616{
504 ieee80211_scan_ps_enable(sdata); 617 struct ieee80211_local *local = sdata->local;
505 } 618 int ret = -EBUSY;
506 } else
507 netif_tx_stop_all_queues(sdata->dev);
508 }
509 mutex_unlock(&local->iflist_mtx);
510 619
511 local->scan_state = SCAN_SET_CHANNEL; 620 mutex_lock(&local->scan_mtx);
512 local->scan_channel_idx = 0;
513 local->scan_sdata = scan_sdata;
514 local->scan_req = req;
515 621
516 netif_addr_lock_bh(local->mdev); 622 /* busy scanning */
517 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; 623 if (local->scan_req)
518 local->ops->configure_filter(local_to_hw(local), 624 goto unlock;
519 FIF_BCN_PRBRESP_PROMISC,
520 &local->filter_flags,
521 local->mdev->mc_count,
522 local->mdev->mc_list);
523 netif_addr_unlock_bh(local->mdev);
524 625
525 /* TODO: start scan as soon as all nullfunc frames are ACKed */ 626 memcpy(local->int_scan_req.ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
526 queue_delayed_work(local->hw.workqueue, &local->scan_work, 627 local->int_scan_req.ssids[0].ssid_len = ssid_len;
527 IEEE80211_CHANNEL_TIME);
528 628
529 return 0; 629 ret = __ieee80211_start_scan(sdata, &sdata->local->int_scan_req);
630 unlock:
631 mutex_unlock(&local->scan_mtx);
632 return ret;
530} 633}
531 634
532 635void ieee80211_scan_cancel(struct ieee80211_local *local)
533int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
534 struct cfg80211_scan_request *req)
535{ 636{
536 struct ieee80211_local *local = sdata->local; 637 bool swscan;
537 struct ieee80211_if_managed *ifmgd;
538
539 if (!req)
540 return -EINVAL;
541 638
542 if (local->scan_req && local->scan_req != req) 639 cancel_delayed_work_sync(&local->scan_work);
543 return -EBUSY;
544
545 local->scan_req = req;
546
547 if (sdata->vif.type != NL80211_IFTYPE_STATION)
548 return ieee80211_start_scan(sdata, req);
549 640
550 /* 641 /*
551 * STA has a state machine that might need to defer scanning 642 * Only call this function when a scan can't be
552 * while it's trying to associate/authenticate, therefore we 643 * queued -- mostly at suspend under RTNL.
553 * queue it up to the state machine in that case.
554 */ 644 */
645 mutex_lock(&local->scan_mtx);
646 swscan = local->sw_scanning;
647 mutex_unlock(&local->scan_mtx);
555 648
556 if (local->sw_scanning || local->hw_scanning) { 649 if (swscan)
557 if (local->scan_sdata == sdata) 650 ieee80211_scan_completed(&local->hw, true);
558 return 0;
559 return -EBUSY;
560 }
561
562 ifmgd = &sdata->u.mgd;
563 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
564 queue_work(local->hw.workqueue, &ifmgd->work);
565
566 return 0;
567} 651}
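Reviewer note: a minimal sketch of the intended call site, going by the comment above (assumed suspend-type path that already holds the RTNL):

/* hypothetical caller */
rtnl_lock();
ieee80211_scan_cancel(local);	/* no new scan can be queued at this point */
rtnl_unlock();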
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 5f7a2624ed74..68953033403d 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -15,7 +15,7 @@
15 */ 15 */
16 16
17#include <linux/ieee80211.h> 17#include <linux/ieee80211.h>
18#include <net/wireless.h> 18#include <net/cfg80211.h>
19#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include "ieee80211_i.h" 20#include "ieee80211_i.h"
21#include "sta_info.h" 21#include "sta_info.h"
@@ -84,104 +84,3 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
84 mgmt->sa, mgmt->bssid, 84 mgmt->sa, mgmt->bssid,
85 mgmt->u.action.u.measurement.dialog_token); 85 mgmt->u.action.u.measurement.dialog_token);
86} 86}
87
88void ieee80211_chswitch_work(struct work_struct *work)
89{
90 struct ieee80211_sub_if_data *sdata =
91 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
92 struct ieee80211_bss *bss;
93 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
94
95 if (!netif_running(sdata->dev))
96 return;
97
98 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
99 sdata->local->hw.conf.channel->center_freq,
100 ifmgd->ssid, ifmgd->ssid_len);
101 if (!bss)
102 goto exit;
103
104 sdata->local->oper_channel = sdata->local->csa_channel;
105 /* XXX: shouldn't really modify cfg80211-owned data! */
106 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
107 bss->cbss.channel = sdata->local->oper_channel;
108
109 ieee80211_rx_bss_put(sdata->local, bss);
110exit:
111 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
112 ieee80211_wake_queues_by_reason(&sdata->local->hw,
113 IEEE80211_QUEUE_STOP_REASON_CSA);
114}
115
116void ieee80211_chswitch_timer(unsigned long data)
117{
118 struct ieee80211_sub_if_data *sdata =
119 (struct ieee80211_sub_if_data *) data;
120 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
121
122 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
123}
124
125void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
126 struct ieee80211_channel_sw_ie *sw_elem,
127 struct ieee80211_bss *bss)
128{
129 struct ieee80211_channel *new_ch;
130 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
131 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
132
133 /* FIXME: Handle ADHOC later */
134 if (sdata->vif.type != NL80211_IFTYPE_STATION)
135 return;
136
137 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
138 return;
139
140 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
141 return;
142
143 /* Disregard subsequent beacons if we are already running a timer
144 processing a CSA */
145
146 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
147 return;
148
149 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
150 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
151 return;
152
153 sdata->local->csa_channel = new_ch;
154
155 if (sw_elem->count <= 1) {
156 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
157 } else {
158 ieee80211_stop_queues_by_reason(&sdata->local->hw,
159 IEEE80211_QUEUE_STOP_REASON_CSA);
160 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
161 mod_timer(&ifmgd->chswitch_timer,
162 jiffies +
163 msecs_to_jiffies(sw_elem->count *
164 bss->cbss.beacon_interval));
165 }
166}
167
168void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
169 u16 capab_info, u8 *pwr_constr_elem,
170 u8 pwr_constr_elem_len)
171{
172 struct ieee80211_conf *conf = &sdata->local->hw.conf;
173
174 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
175 return;
176
177 /* Power constraint IE length should be 1 octet */
178 if (pwr_constr_elem_len != 1)
179 return;
180
181 if ((*pwr_constr_elem <= conf->channel->max_power) &&
182 (*pwr_constr_elem != sdata->local->power_constr_level)) {
183 sdata->local->power_constr_level = *pwr_constr_elem;
184 ieee80211_hw_config(sdata->local, 0);
185 }
186}
187
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c5f14e6bbde2..a360bceeba59 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -19,6 +19,7 @@
19 19
20#include <net/mac80211.h> 20#include <net/mac80211.h>
21#include "ieee80211_i.h" 21#include "ieee80211_i.h"
22#include "driver-ops.h"
22#include "rate.h" 23#include "rate.h"
23#include "sta_info.h" 24#include "sta_info.h"
24#include "debugfs_sta.h" 25#include "debugfs_sta.h"
@@ -43,6 +44,15 @@
43 * When the insertion fails (sta_info_insert()) returns non-zero), the 44 * When the insertion fails (sta_info_insert()) returns non-zero), the
44 * structure will have been freed by sta_info_insert()! 45 * structure will have been freed by sta_info_insert()!
45 * 46 *
47 * sta entries are added by mac80211 when you establish a link with a
48 * peer. This means different things for the different types of interfaces
49 * we support. For a regular station this means we add the AP sta when we
50 * receive an association response from the AP. For IBSS this occurs when
51 * we receive a probe response or a beacon from the target IBSS network. For
52 * WDS we add the sta for the peer immediately upon device open. When using
53 * AP mode we add stations for each respective station upon request from
54 * userspace through nl80211.
55 *
46 * Because there are debugfs entries for each station, and adding those 56 * Because there are debugfs entries for each station, and adding those
47 * must be able to sleep, it is also possible to "pin" a station entry, 57 * must be able to sleep, it is also possible to "pin" a station entry,
48 * that means it can be removed from the hash table but not be freed. 58 * that means it can be removed from the hash table but not be freed.
@@ -292,6 +302,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
292 skb_queue_head_init(&sta->ps_tx_buf); 302 skb_queue_head_init(&sta->ps_tx_buf);
293 skb_queue_head_init(&sta->tx_filtered); 303 skb_queue_head_init(&sta->tx_filtered);
294 304
305 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
306 sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
307
295#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 308#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
296 printk(KERN_DEBUG "%s: Allocated STA %pM\n", 309 printk(KERN_DEBUG "%s: Allocated STA %pM\n",
297 wiphy_name(local->hw.wiphy), sta->sta.addr); 310 wiphy_name(local->hw.wiphy), sta->sta.addr);
@@ -346,8 +359,7 @@ int sta_info_insert(struct sta_info *sta)
346 struct ieee80211_sub_if_data, 359 struct ieee80211_sub_if_data,
347 u.ap); 360 u.ap);
348 361
349 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 362 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta);
350 STA_NOTIFY_ADD, &sta->sta);
351 } 363 }
352 364
353#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 365#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
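Reviewer note: this hunk is part of a wider conversion of direct local->ops-> calls into drv_*() inlines from the new driver-ops.h. A sketch of the assumed shape of such a wrapper (illustrative, not the file's verbatim contents):

static inline void drv_sta_notify(struct ieee80211_local *local,
				  struct ieee80211_vif *vif,
				  enum sta_notify_cmd cmd,
				  struct ieee80211_sta *sta)
{
	if (local->ops->sta_notify)
		local->ops->sta_notify(local_to_hw(local), vif, cmd, sta);
}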
@@ -405,8 +417,7 @@ static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
405 417
406 if (sta->local->ops->set_tim) { 418 if (sta->local->ops->set_tim) {
407 sta->local->tim_in_locked_section = true; 419 sta->local->tim_in_locked_section = true;
408 sta->local->ops->set_tim(local_to_hw(sta->local), 420 drv_set_tim(sta->local, &sta->sta, true);
409 &sta->sta, true);
410 sta->local->tim_in_locked_section = false; 421 sta->local->tim_in_locked_section = false;
411 } 422 }
412} 423}
@@ -431,8 +442,7 @@ static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
431 442
432 if (sta->local->ops->set_tim) { 443 if (sta->local->ops->set_tim) {
433 sta->local->tim_in_locked_section = true; 444 sta->local->tim_in_locked_section = true;
434 sta->local->ops->set_tim(local_to_hw(sta->local), 445 drv_set_tim(sta->local, &sta->sta, false);
435 &sta->sta, false);
436 sta->local->tim_in_locked_section = false; 446 sta->local->tim_in_locked_section = false;
437 } 447 }
438} 448}
@@ -482,8 +492,8 @@ static void __sta_info_unlink(struct sta_info **sta)
482 struct ieee80211_sub_if_data, 492 struct ieee80211_sub_if_data,
483 u.ap); 493 u.ap);
484 494
485 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 495 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
486 STA_NOTIFY_REMOVE, &(*sta)->sta); 496 &(*sta)->sta);
487 } 497 }
488 498
489 if (ieee80211_vif_is_mesh(&sdata->vif)) { 499 if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -543,9 +553,8 @@ void sta_info_unlink(struct sta_info **sta)
543 spin_unlock_irqrestore(&local->sta_lock, flags); 553 spin_unlock_irqrestore(&local->sta_lock, flags);
544} 554}
545 555
546static inline int sta_info_buffer_expired(struct ieee80211_local *local, 556static int sta_info_buffer_expired(struct sta_info *sta,
547 struct sta_info *sta, 557 struct sk_buff *skb)
548 struct sk_buff *skb)
549{ 558{
550 struct ieee80211_tx_info *info; 559 struct ieee80211_tx_info *info;
551 int timeout; 560 int timeout;
@@ -556,8 +565,9 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local,
556 info = IEEE80211_SKB_CB(skb); 565 info = IEEE80211_SKB_CB(skb);
557 566
558 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 567 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
559 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / 568 timeout = (sta->listen_interval *
560 15625) * HZ; 569 sta->sdata->vif.bss_conf.beacon_int *
570 32 / 15625) * HZ;
561 if (timeout < STA_TX_BUFFER_EXPIRE) 571 if (timeout < STA_TX_BUFFER_EXPIRE)
562 timeout = STA_TX_BUFFER_EXPIRE; 572 timeout = STA_TX_BUFFER_EXPIRE;
563 return time_after(jiffies, info->control.jiffies + timeout); 573 return time_after(jiffies, info->control.jiffies + timeout);
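Reviewer note: the 32/15625 factor is the comment's formula with the constants folded; checking the arithmetic:

/*
 * timeout = 2 * listen_interval * beacon_int * 1024 / 10^6 seconds
 * (a TU is 1024 us), and 2 * 1024 / 1000000 == 2048 / 1000000
 * == 32 / 15625, hence
 * timeout = listen_interval * beacon_int * 32 / 15625 seconds,
 * multiplied by HZ to get jiffies.
 */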
@@ -577,7 +587,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
577 for (;;) { 587 for (;;) {
578 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 588 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
579 skb = skb_peek(&sta->ps_tx_buf); 589 skb = skb_peek(&sta->ps_tx_buf);
580 if (sta_info_buffer_expired(local, sta, skb)) 590 if (sta_info_buffer_expired(sta, skb))
581 skb = __skb_dequeue(&sta->ps_tx_buf); 591 skb = __skb_dequeue(&sta->ps_tx_buf);
582 else 592 else
583 skb = NULL; 593 skb = NULL;
@@ -610,6 +620,9 @@ static void sta_info_cleanup(unsigned long data)
610 sta_info_cleanup_expire_buffered(local, sta); 620 sta_info_cleanup_expire_buffered(local, sta);
611 rcu_read_unlock(); 621 rcu_read_unlock();
612 622
623 if (local->quiescing)
624 return;
625
613 local->sta_cleanup.expires = 626 local->sta_cleanup.expires =
614 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 627 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
615 add_timer(&local->sta_cleanup); 628 add_timer(&local->sta_cleanup);
@@ -686,41 +699,10 @@ static void sta_info_debugfs_add_work(struct work_struct *work)
686} 699}
687#endif 700#endif
688 701
689static void __ieee80211_run_pending_flush(struct ieee80211_local *local)
690{
691 struct sta_info *sta;
692 unsigned long flags;
693
694 ASSERT_RTNL();
695
696 spin_lock_irqsave(&local->sta_lock, flags);
697 while (!list_empty(&local->sta_flush_list)) {
698 sta = list_first_entry(&local->sta_flush_list,
699 struct sta_info, list);
700 list_del(&sta->list);
701 spin_unlock_irqrestore(&local->sta_lock, flags);
702 sta_info_destroy(sta);
703 spin_lock_irqsave(&local->sta_lock, flags);
704 }
705 spin_unlock_irqrestore(&local->sta_lock, flags);
706}
707
708static void ieee80211_sta_flush_work(struct work_struct *work)
709{
710 struct ieee80211_local *local =
711 container_of(work, struct ieee80211_local, sta_flush_work);
712
713 rtnl_lock();
714 __ieee80211_run_pending_flush(local);
715 rtnl_unlock();
716}
717
718void sta_info_init(struct ieee80211_local *local) 702void sta_info_init(struct ieee80211_local *local)
719{ 703{
720 spin_lock_init(&local->sta_lock); 704 spin_lock_init(&local->sta_lock);
721 INIT_LIST_HEAD(&local->sta_list); 705 INIT_LIST_HEAD(&local->sta_list);
722 INIT_LIST_HEAD(&local->sta_flush_list);
723 INIT_WORK(&local->sta_flush_work, ieee80211_sta_flush_work);
724 706
725 setup_timer(&local->sta_cleanup, sta_info_cleanup, 707 setup_timer(&local->sta_cleanup, sta_info_cleanup,
726 (unsigned long)local); 708 (unsigned long)local);
@@ -741,7 +723,6 @@ int sta_info_start(struct ieee80211_local *local)
741void sta_info_stop(struct ieee80211_local *local) 723void sta_info_stop(struct ieee80211_local *local)
742{ 724{
743 del_timer(&local->sta_cleanup); 725 del_timer(&local->sta_cleanup);
744 cancel_work_sync(&local->sta_flush_work);
745#ifdef CONFIG_MAC80211_DEBUGFS 726#ifdef CONFIG_MAC80211_DEBUGFS
746 /* 727 /*
747 * Make sure the debugfs adding work isn't pending after this 728 * Make sure the debugfs adding work isn't pending after this
@@ -752,10 +733,7 @@ void sta_info_stop(struct ieee80211_local *local)
752 cancel_work_sync(&local->sta_debugfs_add); 733 cancel_work_sync(&local->sta_debugfs_add);
753#endif 734#endif
754 735
755 rtnl_lock();
756 sta_info_flush(local, NULL); 736 sta_info_flush(local, NULL);
757 __ieee80211_run_pending_flush(local);
758 rtnl_unlock();
759} 737}
760 738
761/** 739/**
@@ -767,7 +745,7 @@ void sta_info_stop(struct ieee80211_local *local)
767 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs 745 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs
768 */ 746 */
769int sta_info_flush(struct ieee80211_local *local, 747int sta_info_flush(struct ieee80211_local *local,
770 struct ieee80211_sub_if_data *sdata) 748 struct ieee80211_sub_if_data *sdata)
771{ 749{
772 struct sta_info *sta, *tmp; 750 struct sta_info *sta, *tmp;
773 LIST_HEAD(tmp_list); 751 LIST_HEAD(tmp_list);
@@ -775,7 +753,6 @@ int sta_info_flush(struct ieee80211_local *local,
775 unsigned long flags; 753 unsigned long flags;
776 754
777 might_sleep(); 755 might_sleep();
778 ASSERT_RTNL();
779 756
780 spin_lock_irqsave(&local->sta_lock, flags); 757 spin_lock_irqsave(&local->sta_lock, flags);
781 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 758 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
@@ -795,39 +772,6 @@ int sta_info_flush(struct ieee80211_local *local,
795 return ret; 772 return ret;
796} 773}
797 774
798/**
799 * sta_info_flush_delayed - flush matching STA entries from the STA table
800 *
801 * This function unlinks all stations for a given interface and queues
802 * them for freeing. Note that the workqueue function scheduled here has
803 * to run before any new keys can be added to the system to avoid set_key()
804 * callback ordering issues.
805 *
806 * @sdata: the interface
807 */
808void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
809{
810 struct ieee80211_local *local = sdata->local;
811 struct sta_info *sta, *tmp;
812 unsigned long flags;
813 bool work = false;
814
815 spin_lock_irqsave(&local->sta_lock, flags);
816 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
817 if (sdata == sta->sdata) {
818 __sta_info_unlink(&sta);
819 if (sta) {
820 list_add_tail(&sta->list,
821 &local->sta_flush_list);
822 work = true;
823 }
824 }
825 }
826 if (work)
827 schedule_work(&local->sta_flush_work);
828 spin_unlock_irqrestore(&local->sta_lock, flags);
829}
830
831void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 775void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
832 unsigned long exp_time) 776 unsigned long exp_time)
833{ 777{
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5534d489f506..49a1a1f76511 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -88,6 +88,7 @@ struct tid_ampdu_tx {
88 * struct tid_ampdu_rx - TID aggregation information (Rx). 88 * struct tid_ampdu_rx - TID aggregation information (Rx).
89 * 89 *
90 * @reorder_buf: buffer to reorder incoming aggregated MPDUs 90 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
91 * @reorder_time: jiffies when skb was added
91 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) 92 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
92 * @head_seq_num: head sequence number in reordering buffer. 93 * @head_seq_num: head sequence number in reordering buffer.
93 * @stored_mpdu_num: number of MPDUs in reordering buffer 94 * @stored_mpdu_num: number of MPDUs in reordering buffer
@@ -99,6 +100,7 @@ struct tid_ampdu_tx {
99 */ 100 */
100struct tid_ampdu_rx { 101struct tid_ampdu_rx {
101 struct sk_buff **reorder_buf; 102 struct sk_buff **reorder_buf;
103 unsigned long *reorder_time;
102 struct timer_list session_timer; 104 struct timer_list session_timer;
103 u16 head_seq_num; 105 u16 head_seq_num;
104 u16 stored_mpdu_num; 106 u16 stored_mpdu_num;
@@ -214,6 +216,7 @@ struct sta_ampdu_mlme {
214 * @plink_state: peer link state 216 * @plink_state: peer link state
215 * @plink_timeout: timeout of peer link 217 * @plink_timeout: timeout of peer link
216 * @plink_timer: peer link watch timer 218 * @plink_timer: peer link watch timer
219 * @plink_timer_was_running: used by suspend/resume to restore timers
217 * @debugfs: debug filesystem info 220 * @debugfs: debug filesystem info
218 * @sta: station information we share with the driver 221 * @sta: station information we share with the driver
219 */ 222 */
@@ -291,6 +294,7 @@ struct sta_info {
291 __le16 reason; 294 __le16 reason;
292 u8 plink_retries; 295 u8 plink_retries;
293 bool ignore_plink_timer; 296 bool ignore_plink_timer;
297 bool plink_timer_was_running;
294 enum plink_state plink_state; 298 enum plink_state plink_state;
295 u32 plink_timeout; 299 u32 plink_timeout;
296 struct timer_list plink_timer; 300 struct timer_list plink_timer;
@@ -442,8 +446,7 @@ void sta_info_init(struct ieee80211_local *local);
442int sta_info_start(struct ieee80211_local *local); 446int sta_info_start(struct ieee80211_local *local);
443void sta_info_stop(struct ieee80211_local *local); 447void sta_info_stop(struct ieee80211_local *local);
444int sta_info_flush(struct ieee80211_local *local, 448int sta_info_flush(struct ieee80211_local *local,
445 struct ieee80211_sub_if_data *sdata); 449 struct ieee80211_sub_if_data *sdata);
446void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
447void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 450void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
448 unsigned long exp_time); 451 unsigned long exp_time);
449 452
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 38fa111d2dc6..964b7faa7f17 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -13,6 +13,7 @@
13#include <asm/unaligned.h> 13#include <asm/unaligned.h>
14 14
15#include <net/mac80211.h> 15#include <net/mac80211.h>
16#include "driver-ops.h"
16#include "key.h" 17#include "key.h"
17#include "tkip.h" 18#include "tkip.h"
18#include "wep.h" 19#include "wep.h"
@@ -307,9 +308,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
307 if (is_multicast_ether_addr(ra)) 308 if (is_multicast_ether_addr(ra))
308 sta_addr = bcast; 309 sta_addr = bcast;
309 310
310 key->local->ops->update_tkip_key( 311 drv_update_tkip_key(key->local, &key->conf, sta_addr,
311 local_to_hw(key->local), &key->conf, 312 iv32, key->u.tkip.rx[queue].p1k);
312 sta_addr, iv32, key->u.tkip.rx[queue].p1k);
313 } 313 }
314 } 314 }
315 315
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 63656266d567..d238a8939a09 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -25,6 +25,7 @@
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26 26
27#include "ieee80211_i.h" 27#include "ieee80211_i.h"
28#include "driver-ops.h"
28#include "led.h" 29#include "led.h"
29#include "mesh.h" 30#include "mesh.h"
30#include "wep.h" 31#include "wep.h"
@@ -399,6 +400,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
399 sta_info_set_tim_bit(sta); 400 sta_info_set_tim_bit(sta);
400 401
401 info->control.jiffies = jiffies; 402 info->control.jiffies = jiffies;
403 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
402 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 404 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
403 return TX_QUEUED; 405 return TX_QUEUED;
404 } 406 }
@@ -409,8 +411,24 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
409 sta->sta.addr); 411 sta->sta.addr);
410 } 412 }
411#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 413#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
412 clear_sta_flags(sta, WLAN_STA_PSPOLL); 414 if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
415 /*
416 * The sleeping station with pending data is now snoozing.
417 * It queried us for its buffered frames and will go back
418 * to deep sleep once it has everything.
419 *
420 * Inform the driver, in case the hardware does powersave
421 * frame filtering and keeps a station blacklist of its own
422 * (e.g., p54), so that frames can be delivered unimpeded.
423 *
424 * Note: it should be safe to disable the filter now,
425 * as it is really unlikely that we still have any pending
426 * frames for this station left in the hw's buffers/fifos
427 * that have not yet been rejected with an unsuccessful tx_status.
428 */
413 429
430 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
431 }
414 return TX_CONTINUE; 432 return TX_CONTINUE;
415} 433}
416 434
@@ -429,7 +447,7 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
429static ieee80211_tx_result debug_noinline 447static ieee80211_tx_result debug_noinline
430ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 448ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
431{ 449{
432 struct ieee80211_key *key; 450 struct ieee80211_key *key = NULL;
433 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 451 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
434 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 452 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
435 453
@@ -500,7 +518,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
500 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 518 sband = tx->local->hw.wiphy->bands[tx->channel->band];
501 519
502 len = min_t(int, tx->skb->len + FCS_LEN, 520 len = min_t(int, tx->skb->len + FCS_LEN,
503 tx->local->fragmentation_threshold); 521 tx->local->hw.wiphy->frag_threshold);
504 522
505 /* set up the tx rate control struct we give the RC algo */ 523 /* set up the tx rate control struct we give the RC algo */
506 txrc.hw = local_to_hw(tx->local); 524 txrc.hw = local_to_hw(tx->local);
@@ -511,8 +529,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
511 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 529 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx;
512 530
513 /* set up RTS protection if desired */ 531 /* set up RTS protection if desired */
514 if (tx->local->rts_threshold < IEEE80211_MAX_RTS_THRESHOLD && 532 if (len > tx->local->hw.wiphy->rts_threshold) {
515 len > tx->local->rts_threshold) {
516 txrc.rts = rts = true; 533 txrc.rts = rts = true;
517 } 534 }
518 535
@@ -542,6 +559,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
542 if (unlikely(!info->control.rates[0].count)) 559 if (unlikely(!info->control.rates[0].count))
543 info->control.rates[0].count = 1; 560 info->control.rates[0].count = 1;
544 561
562 if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
563 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
564 info->control.rates[0].count = 1;
565
545 if (is_multicast_ether_addr(hdr->addr1)) { 566 if (is_multicast_ether_addr(hdr->addr1)) {
546 /* 567 /*
547 * XXX: verify the rate is in the basic rateset 568 * XXX: verify the rate is in the basic rateset
@@ -754,7 +775,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
754 struct sk_buff *skb = tx->skb; 775 struct sk_buff *skb = tx->skb;
755 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 776 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
756 struct ieee80211_hdr *hdr = (void *)skb->data; 777 struct ieee80211_hdr *hdr = (void *)skb->data;
757 int frag_threshold = tx->local->fragmentation_threshold; 778 int frag_threshold = tx->local->hw.wiphy->frag_threshold;
758 int hdrlen; 779 int hdrlen;
759 int fragnum; 780 int fragnum;
760 781
@@ -852,6 +873,8 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
852 873
853 do { 874 do {
854 hdr = (void *) skb->data; 875 hdr = (void *) skb->data;
876 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
877 break; /* must not overwrite AID */
855 next_len = skb->next ? skb->next->len : 0; 878 next_len = skb->next ? skb->next->len : 0;
856 group_addr = is_multicast_ether_addr(hdr->addr1); 879 group_addr = is_multicast_ether_addr(hdr->addr1);
857 880
@@ -885,9 +908,8 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
885 * deal with packet injection down monitor interface 908 * deal with packet injection down monitor interface
886 * with Radiotap Header -- only called for monitor mode interface 909 * with Radiotap Header -- only called for monitor mode interface
887 */ 910 */
888static ieee80211_tx_result 911static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
889__ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, 912 struct sk_buff *skb)
890 struct sk_buff *skb)
891{ 913{
892 /* 914 /*
893 * this is the moment to interpret and discard the radiotap header that 915 * this is the moment to interpret and discard the radiotap header that
@@ -938,7 +960,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
938 * on transmission 960 * on transmission
939 */ 961 */
940 if (skb->len < (iterator.max_length + FCS_LEN)) 962 if (skb->len < (iterator.max_length + FCS_LEN))
941 return TX_DROP; 963 return false;
942 964
943 skb_trim(skb, skb->len - FCS_LEN); 965 skb_trim(skb, skb->len - FCS_LEN);
944 } 966 }
@@ -960,7 +982,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
960 } 982 }
961 983
962 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ 984 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
963 return TX_DROP; 985 return false;
964 986
965 /* 987 /*
966 * remove the radiotap header 988 * remove the radiotap header
@@ -969,7 +991,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
969 */ 991 */
970 skb_pull(skb, iterator.max_length); 992 skb_pull(skb, iterator.max_length);
971 993
972 return TX_CONTINUE; 994 return true;
973} 995}
974 996
975/* 997/*
@@ -1003,7 +1025,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1003 /* process and remove the injection radiotap header */ 1025 /* process and remove the injection radiotap header */
1004 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1026 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1005 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { 1027 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
1006 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) 1028 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1007 return TX_DROP; 1029 return TX_DROP;
1008 1030
1009 /* 1031 /*
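Reviewer note: returning bool is purely cosmetic for this caller, but the parse loop the function wraps is worth spelling out. A minimal consumer of the radiotap iterator of this era looks roughly like the sketch below (3-argument iterator API; the real function additionally acts on the rate/flags fields it finds):

	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr = (void *)skb->data;
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);
		if (ret)
			continue;
		/* iterator.this_arg_index says which field this is,
		 * iterator.this_arg points at its (aligned) payload */
	}

	if (ret != -ENOENT)	/* anything but "ran out of fields" is malformed */
		return false;

	skb_pull(skb, iterator.max_length);	/* strip the whole radiotap header */
	return true;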
@@ -1067,12 +1089,15 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1067 info->flags |= IEEE80211_TX_CTL_NO_ACK; 1089 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1068 } else { 1090 } else {
1069 tx->flags |= IEEE80211_TX_UNICAST; 1091 tx->flags |= IEEE80211_TX_UNICAST;
1070 info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 1092 if (unlikely(local->wifi_wme_noack_test))
1093 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1094 else
1095 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1071 } 1096 }
1072 1097
1073 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 1098 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1074 if ((tx->flags & IEEE80211_TX_UNICAST) && 1099 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1075 skb->len + FCS_LEN > local->fragmentation_threshold && 1100 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
1076 !(info->flags & IEEE80211_TX_CTL_AMPDU)) 1101 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1077 tx->flags |= IEEE80211_TX_FRAGMENTED; 1102 tx->flags |= IEEE80211_TX_FRAGMENTED;
1078 else 1103 else
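Reviewer note: with the threshold moved into struct wiphy, both call sites in this file read the same cfg80211-owned value, which is what lets the mac80211 wext fragmentation handlers further down be replaced by cfg80211_wext_siwfrag/giwfrag. Consolidating the two hunks, the (unchanged) decision is:

	u32 thr = local->hw.wiphy->frag_threshold;

	if ((tx->flags & IEEE80211_TX_UNICAST) &&
	    skb->len + FCS_LEN > thr &&
	    !(info->flags & IEEE80211_TX_CTL_AMPDU))
		tx->flags |= IEEE80211_TX_FRAGMENTED;
	/* the else branch, cut off by the hunk boundary, presumably
	 * clears IEEE80211_TX_FRAGMENTED */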
@@ -1147,7 +1172,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1147 1172
1148 next = skb->next; 1173 next = skb->next;
1149 len = skb->len; 1174 len = skb->len;
1150 ret = local->ops->tx(local_to_hw(local), skb); 1175 ret = drv_tx(local, skb);
1151 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { 1176 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1152 dev_kfree_skb(skb); 1177 dev_kfree_skb(skb);
1153 ret = NETDEV_TX_OK; 1178 ret = NETDEV_TX_OK;
@@ -1213,7 +1238,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1213 bool txpending) 1238 bool txpending)
1214{ 1239{
1215 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1240 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1216 struct sta_info *sta;
1217 struct ieee80211_tx_data tx; 1241 struct ieee80211_tx_data tx;
1218 ieee80211_tx_result res_prepare; 1242 ieee80211_tx_result res_prepare;
1219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1243 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1245,7 +1269,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1245 return; 1269 return;
1246 } 1270 }
1247 1271
1248 sta = tx.sta;
1249 tx.channel = local->hw.conf.channel; 1272 tx.channel = local->hw.conf.channel;
1250 info->band = tx.channel->band; 1273 info->band = tx.channel->band;
1251 1274
@@ -1392,7 +1415,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1392 } 1415 }
1393 1416
1394 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 1417 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
1395 local->hw.conf.dynamic_ps_timeout > 0) { 1418 local->hw.conf.dynamic_ps_timeout > 0 &&
1419 !local->sw_scanning && !local->hw_scanning && local->ps_sdata) {
1396 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 1420 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1397 ieee80211_stop_queues_by_reason(&local->hw, 1421 ieee80211_stop_queues_by_reason(&local->hw,
1398 IEEE80211_QUEUE_STOP_REASON_PS); 1422 IEEE80211_QUEUE_STOP_REASON_PS);
@@ -1591,7 +1615,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1591{ 1615{
1592 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1616 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1593 struct ieee80211_local *local = sdata->local; 1617 struct ieee80211_local *local = sdata->local;
1594 int ret = 1, head_need; 1618 int ret = NETDEV_TX_BUSY, head_need;
1595 u16 ethertype, hdrlen, meshhdrlen = 0; 1619 u16 ethertype, hdrlen, meshhdrlen = 0;
1596 __le16 fc; 1620 __le16 fc;
1597 struct ieee80211_hdr hdr; 1621 struct ieee80211_hdr hdr;
@@ -2086,18 +2110,18 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2086 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 2110 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2087 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 2111 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2088 struct ieee80211_hdr *hdr; 2112 struct ieee80211_hdr *hdr;
2113 struct sk_buff *presp = rcu_dereference(ifibss->presp);
2089 2114
2090 if (!ifibss->probe_resp) 2115 if (!presp)
2091 goto out; 2116 goto out;
2092 2117
2093 skb = skb_copy(ifibss->probe_resp, GFP_ATOMIC); 2118 skb = skb_copy(presp, GFP_ATOMIC);
2094 if (!skb) 2119 if (!skb)
2095 goto out; 2120 goto out;
2096 2121
2097 hdr = (struct ieee80211_hdr *) skb->data; 2122 hdr = (struct ieee80211_hdr *) skb->data;
2098 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2123 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2099 IEEE80211_STYPE_BEACON); 2124 IEEE80211_STYPE_BEACON);
2100
2101 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 2125 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2102 struct ieee80211_mgmt *mgmt; 2126 struct ieee80211_mgmt *mgmt;
2103 u8 *pos; 2127 u8 *pos;
@@ -2117,7 +2141,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2117 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2141 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2118 /* BSSID is left zeroed, wildcard value */ 2142 /* BSSID is left zeroed, wildcard value */
2119 mgmt->u.beacon.beacon_int = 2143 mgmt->u.beacon.beacon_int =
2120 cpu_to_le16(local->hw.conf.beacon_int); 2144 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2121 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2145 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
2122 2146
2123 pos = skb_put(skb, 2); 2147 pos = skb_put(skb, 2);
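Reviewer note on the last two tx.c hunks: the IBSS probe-response template is now fetched with rcu_dereference(), so the writer side must publish it RCU-style; and the mesh beacon interval now comes from the per-BSS configuration (sdata->vif.bss_conf) instead of the global hw conf. A sketch of the publisher side implied by the new reader (the update path is assumed, not shown in this diff):

	struct sk_buff *old = ifibss->presp;

	rcu_assign_pointer(ifibss->presp, new_presp);
	synchronize_rcu();	/* let readers such as ieee80211_beacon_get() drain */
	kfree_skb(old);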
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index fdf432f14554..66ce96a69f31 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -20,27 +20,21 @@
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <linux/bitmap.h> 22#include <linux/bitmap.h>
23#include <linux/crc32.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/cfg80211.h> 25#include <net/cfg80211.h>
25#include <net/rtnetlink.h> 26#include <net/rtnetlink.h>
26 27
27#include "ieee80211_i.h" 28#include "ieee80211_i.h"
29#include "driver-ops.h"
28#include "rate.h" 30#include "rate.h"
29#include "mesh.h" 31#include "mesh.h"
30#include "wme.h" 32#include "wme.h"
33#include "led.h"
31 34
32/* privid for wiphys to determine whether they belong to us or not */ 35/* privid for wiphys to determine whether they belong to us or not */
33void *mac80211_wiphy_privid = &mac80211_wiphy_privid; 36void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
34 37
35/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
36/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
37const unsigned char rfc1042_header[] __aligned(2) =
38 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
39
40/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
41const unsigned char bridge_tunnel_header[] __aligned(2) =
42 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
43
44struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) 38struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
45{ 39{
46 struct ieee80211_local *local; 40 struct ieee80211_local *local;
@@ -100,70 +94,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
100 return NULL; 94 return NULL;
101} 95}
102 96
103unsigned int ieee80211_hdrlen(__le16 fc)
104{
105 unsigned int hdrlen = 24;
106
107 if (ieee80211_is_data(fc)) {
108 if (ieee80211_has_a4(fc))
109 hdrlen = 30;
110 if (ieee80211_is_data_qos(fc))
111 hdrlen += IEEE80211_QOS_CTL_LEN;
112 goto out;
113 }
114
115 if (ieee80211_is_ctl(fc)) {
116 /*
117 * ACK and CTS are 10 bytes, all others 16. To see how
118 * to get this condition consider
119 * subtype mask: 0b0000000011110000 (0x00F0)
120 * ACK subtype: 0b0000000011010000 (0x00D0)
121 * CTS subtype: 0b0000000011000000 (0x00C0)
122 * bits that matter: ^^^ (0x00E0)
123 * value of those: 0b0000000011000000 (0x00C0)
124 */
125 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
126 hdrlen = 10;
127 else
128 hdrlen = 16;
129 }
130out:
131 return hdrlen;
132}
133EXPORT_SYMBOL(ieee80211_hdrlen);
134
135unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
136{
137 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data;
138 unsigned int hdrlen;
139
140 if (unlikely(skb->len < 10))
141 return 0;
142 hdrlen = ieee80211_hdrlen(hdr->frame_control);
143 if (unlikely(hdrlen > skb->len))
144 return 0;
145 return hdrlen;
146}
147EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
148
149int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
150{
151 int ae = meshhdr->flags & IEEE80211S_FLAGS_AE;
152 /* 7.1.3.5a.2 */
153 switch (ae) {
154 case 0:
155 return 6;
156 case 1:
157 return 12;
158 case 2:
159 return 18;
160 case 3:
161 return 24;
162 default:
163 return 6;
164 }
165}
166
167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 97void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
168{ 98{
169 struct sk_buff *skb = tx->skb; 99 struct sk_buff *skb = tx->skb;
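Reviewer note: the header-length helpers are dropped from mac80211 here; given the new cfg80211 includes, they presumably live on as generic 802.11 utilities in cfg80211 (their destination is not part of this hunk). The subtype-mask trick in the removed comment checks out; a few worked values:

	__le16 fc;

	fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);	/* 0x00d4 */
	/* 0x00d4 & 0x00e0 == 0x00c0  ->  ieee80211_hdrlen(fc) == 10 */

	fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);	/* 0x00b4 */
	/* 0x00b4 & 0x00e0 == 0x00a0  ->  ieee80211_hdrlen(fc) == 16 */

	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA);
	/* data + QoS control: 24 + IEEE80211_QOS_CTL_LEN == 26 */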
@@ -411,6 +341,52 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
411} 341}
412EXPORT_SYMBOL(ieee80211_stop_queue); 342EXPORT_SYMBOL(ieee80211_stop_queue);
413 343
344void ieee80211_add_pending_skb(struct ieee80211_local *local,
345 struct sk_buff *skb)
346{
347 struct ieee80211_hw *hw = &local->hw;
348 unsigned long flags;
349 int queue = skb_get_queue_mapping(skb);
350
351 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
352 __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
353 __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_PENDING);
354 skb_queue_tail(&local->pending[queue], skb);
355 __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
356 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
357}
358
359int ieee80211_add_pending_skbs(struct ieee80211_local *local,
360 struct sk_buff_head *skbs)
361{
362 struct ieee80211_hw *hw = &local->hw;
363 struct sk_buff *skb;
364 unsigned long flags;
365 int queue, ret = 0, i;
366
367 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
368 for (i = 0; i < hw->queues; i++)
369 __ieee80211_stop_queue(hw, i,
370 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
371
372 while ((skb = skb_dequeue(skbs))) {
373 ret++;
374 queue = skb_get_queue_mapping(skb);
375 skb_queue_tail(&local->pending[queue], skb);
376 }
377
378 for (i = 0; i < hw->queues; i++) {
379 if (ret)
380 __ieee80211_stop_queue(hw, i,
381 IEEE80211_QUEUE_STOP_REASON_PENDING);
382 __ieee80211_wake_queue(hw, i,
383 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
384 }
385 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
386
387 return ret;
388}
389
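Reviewer note: both helpers lean on the stop-reason machinery, where a queue stays stopped as long as any reason bit is set. Stopping for both SKB_ADD and PENDING, queueing, then waking only SKB_ADD leaves the queue held by PENDING alone until the pending tasklet drains it, so no frame can race past the ones just queued. Schematically (a sketch of the semantics, not the exact helper bodies):

	/* __ieee80211_stop_queue(hw, q, r): */
	local->queue_stop_reasons[q] |= BIT(r);

	/* __ieee80211_wake_queue(hw, q, r): */
	local->queue_stop_reasons[q] &= ~BIT(r);
	if (!local->queue_stop_reasons[q])
		/* only now does the netdev subqueue actually wake */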
414void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 390void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
415 enum queue_stop_reason reason) 391 enum queue_stop_reason reason)
416{ 392{
@@ -536,8 +512,16 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
536void ieee802_11_parse_elems(u8 *start, size_t len, 512void ieee802_11_parse_elems(u8 *start, size_t len,
537 struct ieee802_11_elems *elems) 513 struct ieee802_11_elems *elems)
538{ 514{
515 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
516}
517
518u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
519 struct ieee802_11_elems *elems,
520 u64 filter, u32 crc)
521{
539 size_t left = len; 522 size_t left = len;
540 u8 *pos = start; 523 u8 *pos = start;
524 bool calc_crc = filter != 0;
541 525
542 memset(elems, 0, sizeof(*elems)); 526 memset(elems, 0, sizeof(*elems));
543 elems->ie_start = start; 527 elems->ie_start = start;
@@ -551,7 +535,10 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
551 left -= 2; 535 left -= 2;
552 536
553 if (elen > left) 537 if (elen > left)
554 return; 538 break;
539
540 if (calc_crc && id < 64 && (filter & BIT(id)))
541 crc = crc32_be(crc, pos - 2, elen + 2);
555 542
556 switch (id) { 543 switch (id) {
557 case WLAN_EID_SSID: 544 case WLAN_EID_SSID:
@@ -575,8 +562,10 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
575 elems->cf_params_len = elen; 562 elems->cf_params_len = elen;
576 break; 563 break;
577 case WLAN_EID_TIM: 564 case WLAN_EID_TIM:
578 elems->tim = pos; 565 if (elen >= sizeof(struct ieee80211_tim_ie)) {
579 elems->tim_len = elen; 566 elems->tim = (void *)pos;
567 elems->tim_len = elen;
568 }
580 break; 569 break;
581 case WLAN_EID_IBSS_PARAMS: 570 case WLAN_EID_IBSS_PARAMS:
582 elems->ibss_params = pos; 571 elems->ibss_params = pos;
@@ -586,15 +575,20 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
586 elems->challenge = pos; 575 elems->challenge = pos;
587 elems->challenge_len = elen; 576 elems->challenge_len = elen;
588 break; 577 break;
589 case WLAN_EID_WPA: 578 case WLAN_EID_VENDOR_SPECIFIC:
590 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && 579 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
591 pos[2] == 0xf2) { 580 pos[2] == 0xf2) {
592 /* Microsoft OUI (00:50:F2) */ 581 /* Microsoft OUI (00:50:F2) */
582
583 if (calc_crc)
584 crc = crc32_be(crc, pos - 2, elen + 2);
585
593 if (pos[3] == 1) { 586 if (pos[3] == 1) {
594 /* OUI Type 1 - WPA IE */ 587 /* OUI Type 1 - WPA IE */
595 elems->wpa = pos; 588 elems->wpa = pos;
596 elems->wpa_len = elen; 589 elems->wpa_len = elen;
597 } else if (elen >= 5 && pos[3] == 2) { 590 } else if (elen >= 5 && pos[3] == 2) {
591 /* OUI Type 2 - WMM IE */
598 if (pos[4] == 0) { 592 if (pos[4] == 0) {
599 elems->wmm_info = pos; 593 elems->wmm_info = pos;
600 elems->wmm_info_len = elen; 594 elems->wmm_info_len = elen;
@@ -679,32 +673,70 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
679 left -= elen; 673 left -= elen;
680 pos += elen; 674 pos += elen;
681 } 675 }
676
677 return crc;
682} 678}
683 679
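Reviewer note: the CRC variant exists so MLME can cheaply detect whether any element it acts on changed between beacons. Only elements whose EID is below 64 and set in the filter bitmap (plus the specially-handled Microsoft-OUI vendor elements) are folded into the CRC, element header included (pos - 2, elen + 2). A plausible caller, with the stored-CRC field invented for illustration:

	u64 filter = BIT(WLAN_EID_ERP_INFO) | BIT(WLAN_EID_HT_INFORMATION);
	u32 crc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, len,
					     &elems, filter, 0);

	if (crc != ifmgd->last_beacon_crc) {	/* hypothetical field */
		ifmgd->last_beacon_crc = crc;
		/* re-evaluate ERP/HT state */
	}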
684void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 680void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
685{ 681{
686 struct ieee80211_local *local = sdata->local; 682 struct ieee80211_local *local = sdata->local;
687 struct ieee80211_tx_queue_params qparam; 683 struct ieee80211_tx_queue_params qparam;
688 int i; 684 int queue;
685 bool use_11b;
686 int aCWmin, aCWmax;
689 687
690 if (!local->ops->conf_tx) 688 if (!local->ops->conf_tx)
691 return; 689 return;
692 690
693 memset(&qparam, 0, sizeof(qparam)); 691 memset(&qparam, 0, sizeof(qparam));
694 692
695 qparam.aifs = 2; 693 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
694 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
696 695
697 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 696 for (queue = 0; queue < local_to_hw(local)->queues; queue++) {
698 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) 697 /* Set defaults according to 802.11-2007 Table 7-37 */
699 qparam.cw_min = 31; 698 aCWmax = 1023;
700 else 699 if (use_11b)
701 qparam.cw_min = 15; 700 aCWmin = 31;
702 701 else
703 qparam.cw_max = 1023; 702 aCWmin = 15;
704 qparam.txop = 0; 703
704 switch (queue) {
705 case 3: /* AC_BK */
706 qparam.cw_max = aCWmax;
707 qparam.cw_min = aCWmin;
708 qparam.txop = 0;
709 qparam.aifs = 7;
710 break;
711 default: /* never happens but let's not leave undefined */
712 case 2: /* AC_BE */
713 qparam.cw_max = aCWmax;
714 qparam.cw_min = aCWmin;
715 qparam.txop = 0;
716 qparam.aifs = 3;
717 break;
718 case 1: /* AC_VI */
719 qparam.cw_max = aCWmin;
720 qparam.cw_min = (aCWmin + 1) / 2 - 1;
721 if (use_11b)
722 qparam.txop = 6016/32;
723 else
724 qparam.txop = 3008/32;
725 qparam.aifs = 2;
726 break;
727 case 0: /* AC_VO */
728 qparam.cw_max = (aCWmin + 1) / 2 - 1;
729 qparam.cw_min = (aCWmin + 1) / 4 - 1;
730 if (use_11b)
731 qparam.txop = 3264/32;
732 else
733 qparam.txop = 1504/32;
734 qparam.aifs = 2;
735 break;
736 }
705 737
706 for (i = 0; i < local_to_hw(local)->queues; i++) 738 drv_conf_tx(local, queue, &qparam);
707 local->ops->conf_tx(local_to_hw(local), i, &qparam); 739 }
708} 740}
709 741
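Reviewer note: plugging the constants in makes the new per-AC defaults easy to eyeball. For a non-11b channel (aCWmin = 15, aCWmax = 1023), with TXOP expressed in 32 µs units:

	AC_VO (queue 0): cw_min = 16/4 - 1 = 3,  cw_max = 16/2 - 1 = 7,  txop = 1504/32 = 47,  aifs = 2
	AC_VI (queue 1): cw_min = 16/2 - 1 = 7,  cw_max = 15,            txop = 3008/32 = 94,  aifs = 2
	AC_BE (queue 2): cw_min = 15,            cw_max = 1023,          txop = 0,             aifs = 3
	AC_BK (queue 3): cw_min = 15,            cw_max = 1023,          txop = 0,             aifs = 7

For 11b, aCWmin doubles to 31 and the VO/VI TXOPs grow to 3264/32 = 102 and 6016/32 = 188, matching 802.11-2007 Table 7-37.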
710void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 742void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -831,16 +863,73 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
831 ieee80211_tx_skb(sdata, skb, encrypt); 863 ieee80211_tx_skb(sdata, skb, encrypt);
832} 864}
833 865
866int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
867 const u8 *ie, size_t ie_len)
868{
869 struct ieee80211_supported_band *sband;
870 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
871 int i;
872
873 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
874
875 pos = buffer;
876
877 *pos++ = WLAN_EID_SUPP_RATES;
878 supp_rates_len = pos;
879 *pos++ = 0;
880
881 for (i = 0; i < sband->n_bitrates; i++) {
882 struct ieee80211_rate *rate = &sband->bitrates[i];
883
884 if (esupp_rates_len) {
885 *esupp_rates_len += 1;
886 } else if (*supp_rates_len == 8) {
887 *pos++ = WLAN_EID_EXT_SUPP_RATES;
888 esupp_rates_len = pos;
889 *pos++ = 1;
890 } else
891 *supp_rates_len += 1;
892
893 *pos++ = rate->bitrate / 5;
894 }
895
896 if (sband->ht_cap.ht_supported) {
897 __le16 tmp = cpu_to_le16(sband->ht_cap.cap);
898
899 *pos++ = WLAN_EID_HT_CAPABILITY;
900 *pos++ = sizeof(struct ieee80211_ht_cap);
901 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
902 memcpy(pos, &tmp, sizeof(u16));
903 pos += sizeof(u16);
904 /* TODO: needs a define here for << 2 */
905 *pos++ = sband->ht_cap.ampdu_factor |
906 (sband->ht_cap.ampdu_density << 2);
907 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
908 pos += sizeof(sband->ht_cap.mcs);
909 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
910 }
911
912 /*
913 * If adding more here, adjust code in main.c
914 * that calculates local->scan_ies_len.
915 */
916
917 if (ie) {
918 memcpy(pos, ie, ie_len);
919 pos += ie_len;
920 }
921
922 return pos - buffer;
923}
924
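Reviewer note: factoring the IE construction out lets the scan code reuse it. It relies on two encoding conventions: struct ieee80211_rate keeps bitrate in 100 kbps units while the Supported Rates element wants 500 kbps units (hence the / 5), and only eight rates fit the first element before spilling into Extended Supported Rates. For example:

	/* 54 Mbps: rate->bitrate == 540 -> octet 540 / 5 == 108 (0x6c) */
	/*  1 Mbps: rate->bitrate ==  10 -> octet 2                    */
	/* a 12-rate 2.4 GHz band: 8 octets in WLAN_EID_SUPP_RATES,
	 * the remaining 4 in WLAN_EID_EXT_SUPP_RATES                  */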
834void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 925void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
835 u8 *ssid, size_t ssid_len, 926 const u8 *ssid, size_t ssid_len,
836 u8 *ie, size_t ie_len) 927 const u8 *ie, size_t ie_len)
837{ 928{
838 struct ieee80211_local *local = sdata->local; 929 struct ieee80211_local *local = sdata->local;
839 struct ieee80211_supported_band *sband;
840 struct sk_buff *skb; 930 struct sk_buff *skb;
841 struct ieee80211_mgmt *mgmt; 931 struct ieee80211_mgmt *mgmt;
842 u8 *pos, *supp_rates, *esupp_rates = NULL; 932 u8 *pos;
843 int i;
844 933
845 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 934 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
846 ie_len); 935 ie_len);
@@ -867,31 +956,9 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
867 *pos++ = WLAN_EID_SSID; 956 *pos++ = WLAN_EID_SSID;
868 *pos++ = ssid_len; 957 *pos++ = ssid_len;
869 memcpy(pos, ssid, ssid_len); 958 memcpy(pos, ssid, ssid_len);
959 pos += ssid_len;
870 960
871 supp_rates = skb_put(skb, 2); 961 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len));
872 supp_rates[0] = WLAN_EID_SUPP_RATES;
873 supp_rates[1] = 0;
874 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
875
876 for (i = 0; i < sband->n_bitrates; i++) {
877 struct ieee80211_rate *rate = &sband->bitrates[i];
878 if (esupp_rates) {
879 pos = skb_put(skb, 1);
880 esupp_rates[1]++;
881 } else if (supp_rates[1] == 8) {
882 esupp_rates = skb_put(skb, 3);
883 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
884 esupp_rates[1] = 1;
885 pos = &esupp_rates[2];
886 } else {
887 pos = skb_put(skb, 1);
888 supp_rates[1]++;
889 }
890 *pos = rate->bitrate / 5;
891 }
892
893 if (ie)
894 memcpy(skb_put(skb, ie_len), ie, ie_len);
895 962
896 ieee80211_tx_skb(sdata, skb, 0); 963 ieee80211_tx_skb(sdata, skb, 0);
897} 964}
@@ -931,3 +998,151 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
931 } 998 }
932 return supp_rates; 999 return supp_rates;
933} 1000}
1001
1002int ieee80211_reconfig(struct ieee80211_local *local)
1003{
1004 struct ieee80211_hw *hw = &local->hw;
1005 struct ieee80211_sub_if_data *sdata;
1006 struct ieee80211_if_init_conf conf;
1007 struct sta_info *sta;
1008 unsigned long flags;
1009 int res;
1010 bool from_suspend = local->suspended;
1011
1012 /*
1013 * We're going to start the hardware, at that point
1014 * we are no longer suspended and can RX frames.
1015 */
1016 local->suspended = false;
1017
1018 /* restart hardware */
1019 if (local->open_count) {
1020 res = drv_start(local);
1021
1022 ieee80211_led_radio(local, true);
1023 }
1024
1025 /* add interfaces */
1026 list_for_each_entry(sdata, &local->interfaces, list) {
1027 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1028 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1029 netif_running(sdata->dev)) {
1030 conf.vif = &sdata->vif;
1031 conf.type = sdata->vif.type;
1032 conf.mac_addr = sdata->dev->dev_addr;
1033 res = drv_add_interface(local, &conf);
1034 }
1035 }
1036
1037 /* add STAs back */
1038 if (local->ops->sta_notify) {
1039 spin_lock_irqsave(&local->sta_lock, flags);
1040 list_for_each_entry(sta, &local->sta_list, list) {
1041 sdata = sta->sdata;
1042 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1043 sdata = container_of(sdata->bss,
1044 struct ieee80211_sub_if_data,
1045 u.ap);
1046
1047 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD,
1048 &sta->sta);
1049 }
1050 spin_unlock_irqrestore(&local->sta_lock, flags);
1051 }
1052
1053 /* Clear Suspend state so that ADDBA requests can be processed */
1054
1055 rcu_read_lock();
1056
1057 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1058 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1059 clear_sta_flags(sta, WLAN_STA_SUSPEND);
1060 }
1061 }
1062
1063 rcu_read_unlock();
1064
1065 /* setup RTS threshold */
1066 drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
1067
1068 /* reconfigure hardware */
1069 ieee80211_hw_config(local, ~0);
1070
1071 netif_addr_lock_bh(local->mdev);
1072 ieee80211_configure_filter(local);
1073 netif_addr_unlock_bh(local->mdev);
1074
1075 /* Finally also reconfigure all the BSS information */
1076 list_for_each_entry(sdata, &local->interfaces, list) {
1077 u32 changed = ~0;
1078 if (!netif_running(sdata->dev))
1079 continue;
1080 switch (sdata->vif.type) {
1081 case NL80211_IFTYPE_STATION:
1082 /* disable beacon change bits */
1083 changed &= ~(BSS_CHANGED_BEACON |
1084 BSS_CHANGED_BEACON_ENABLED);
1085 /* fall through */
1086 case NL80211_IFTYPE_ADHOC:
1087 case NL80211_IFTYPE_AP:
1088 case NL80211_IFTYPE_MESH_POINT:
1089 ieee80211_bss_info_change_notify(sdata, changed);
1090 break;
1091 case NL80211_IFTYPE_WDS:
1092 break;
1093 case NL80211_IFTYPE_AP_VLAN:
1094 case NL80211_IFTYPE_MONITOR:
1095 /* ignore virtual */
1096 break;
1097 case NL80211_IFTYPE_UNSPECIFIED:
1098 case __NL80211_IFTYPE_AFTER_LAST:
1099 WARN_ON(1);
1100 break;
1101 }
1102 }
1103
1104 /* add back keys */
1105 list_for_each_entry(sdata, &local->interfaces, list)
1106 if (netif_running(sdata->dev))
1107 ieee80211_enable_keys(sdata);
1108
1109 ieee80211_wake_queues_by_reason(hw,
1110 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1111
1112 /*
1113 * If this is for hw restart things are still running.
1114 * We may want to change that later, however.
1115 */
1116 if (!from_suspend)
1117 return 0;
1118
1119#ifdef CONFIG_PM
1120 local->suspended = false;
1121
1122 list_for_each_entry(sdata, &local->interfaces, list) {
 1123		switch (sdata->vif.type) {
1124 case NL80211_IFTYPE_STATION:
1125 ieee80211_sta_restart(sdata);
1126 break;
1127 case NL80211_IFTYPE_ADHOC:
1128 ieee80211_ibss_restart(sdata);
1129 break;
1130 case NL80211_IFTYPE_MESH_POINT:
1131 ieee80211_mesh_restart(sdata);
1132 break;
1133 default:
1134 break;
1135 }
1136 }
1137
1138 add_timer(&local->sta_cleanup);
1139
1140 spin_lock_irqsave(&local->sta_lock, flags);
1141 list_for_each_entry(sta, &local->sta_list, list)
1142 mesh_plink_restart(sta);
1143 spin_unlock_irqrestore(&local->sta_lock, flags);
1144#else
1145 WARN_ON(1);
1146#endif
1147 return 0;
1148}
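Reviewer note: ieee80211_reconfig() deliberately replays the normal bring-up order: drv_start() (with the radio LED), add_interface for every running vif, sta_notify(ADD) for known stations, RTS threshold, a full hw_config(~0), the RX filter, bss_info_change_notify with everything but the beacon bits for stations, then keys, and only at the end are the SUSPEND-stopped queues woken. The CONFIG_PM tail, which restarts the MLME/IBSS/mesh state machines and plink timers, runs only when we got here from resume rather than from an in-flight hardware restart.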
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 959aa8379ccf..d2d81b103341 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,100 +27,6 @@
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
29 29
30static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
31 int idx, int alg, int remove,
32 int set_tx_key, const u8 *_key,
33 size_t key_len)
34{
35 struct ieee80211_local *local = sdata->local;
36 struct sta_info *sta;
37 struct ieee80211_key *key;
38 int err;
39
40 if (alg == ALG_AES_CMAC) {
41 if (idx < NUM_DEFAULT_KEYS ||
42 idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
43 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d "
44 "(BIP)\n", sdata->dev->name, idx);
45 return -EINVAL;
46 }
47 } else if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
48 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
49 sdata->dev->name, idx);
50 return -EINVAL;
51 }
52
53 if (remove) {
54 rcu_read_lock();
55
56 err = 0;
57
58 if (is_broadcast_ether_addr(sta_addr)) {
59 key = sdata->keys[idx];
60 } else {
61 sta = sta_info_get(local, sta_addr);
62 if (!sta) {
63 err = -ENOENT;
64 goto out_unlock;
65 }
66 key = sta->key;
67 }
68
69 ieee80211_key_free(key);
70 } else {
71 key = ieee80211_key_alloc(alg, idx, key_len, _key);
72 if (!key)
73 return -ENOMEM;
74
75 sta = NULL;
76 err = 0;
77
78 rcu_read_lock();
79
80 if (!is_broadcast_ether_addr(sta_addr)) {
81 set_tx_key = 0;
82 /*
83 * According to the standard, the key index of a
84 * pairwise key must be zero. However, some AP are
85 * broken when it comes to WEP key indices, so we
86 * work around this.
87 */
88 if (idx != 0 && alg != ALG_WEP) {
89 ieee80211_key_free(key);
90 err = -EINVAL;
91 goto out_unlock;
92 }
93
94 sta = sta_info_get(local, sta_addr);
95 if (!sta) {
96 ieee80211_key_free(key);
97 err = -ENOENT;
98 goto out_unlock;
99 }
100 }
101
102 if (alg == ALG_WEP &&
103 key_len != LEN_WEP40 && key_len != LEN_WEP104) {
104 ieee80211_key_free(key);
105 err = -EINVAL;
106 goto out_unlock;
107 }
108
109 ieee80211_key_link(key, sdata, sta);
110
111 if (set_tx_key || (!sta && !sdata->default_key && key))
112 ieee80211_set_default_key(sdata, idx);
113 if (alg == ALG_AES_CMAC &&
114 (set_tx_key || (!sta && !sdata->default_mgmt_key && key)))
115 ieee80211_set_default_mgmt_key(sdata, idx);
116 }
117
118 out_unlock:
119 rcu_read_unlock();
120
121 return err;
122}
123
124static int ieee80211_ioctl_siwgenie(struct net_device *dev, 30static int ieee80211_ioctl_siwgenie(struct net_device *dev,
125 struct iw_request_info *info, 31 struct iw_request_info *info,
126 struct iw_point *data, char *extra) 32 struct iw_point *data, char *extra)
@@ -131,11 +37,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
131 37
132 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 38 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
133 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); 39 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
134 if (ret) 40 if (ret && ret != -EALREADY)
135 return ret; 41 return ret;
136 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 42 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
137 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 43 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
138 ieee80211_sta_req_auth(sdata); 44 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
45 if (ret != -EALREADY)
46 ieee80211_sta_req_auth(sdata);
139 return 0; 47 return 0;
140 } 48 }
141 49
@@ -149,17 +57,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
149 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 57 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
150 58
151 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 59 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
152 sdata->u.ibss.flags &= ~IEEE80211_IBSS_AUTO_CHANNEL_SEL; 60 return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra);
153 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 61 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
154 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; 62 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
155 63
156 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ 64 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
157 if (freq->e == 0) { 65 if (freq->e == 0) {
158 if (freq->m < 0) { 66 if (freq->m < 0) {
159 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 67 if (sdata->vif.type == NL80211_IFTYPE_STATION)
160 sdata->u.ibss.flags |=
161 IEEE80211_IBSS_AUTO_CHANNEL_SEL;
162 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
163 sdata->u.mgd.flags |= 68 sdata->u.mgd.flags |=
164 IEEE80211_STA_AUTO_CHANNEL_SEL; 69 IEEE80211_STA_AUTO_CHANNEL_SEL;
165 return 0; 70 return 0;
@@ -183,8 +88,12 @@ static int ieee80211_ioctl_giwfreq(struct net_device *dev,
183 struct iw_freq *freq, char *extra) 88 struct iw_freq *freq, char *extra)
184{ 89{
185 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 90 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
91 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
186 92
187 freq->m = local->hw.conf.channel->center_freq; 93 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
94 return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
95
96 freq->m = local->oper_channel->center_freq;
188 freq->e = 6; 97 freq->e = 6;
189 98
190 return 0; 99 return 0;
@@ -195,15 +104,17 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
195 struct iw_request_info *info, 104 struct iw_request_info *info,
196 struct iw_point *data, char *ssid) 105 struct iw_point *data, char *ssid)
197{ 106{
198 struct ieee80211_sub_if_data *sdata; 107 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
199 size_t len = data->length; 108 size_t len = data->length;
200 int ret; 109 int ret;
201 110
111 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
112 return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
113
202 /* iwconfig uses nul termination in SSID.. */ 114 /* iwconfig uses nul termination in SSID.. */
203 if (len > 0 && ssid[len - 1] == '\0') 115 if (len > 0 && ssid[len - 1] == '\0')
204 len--; 116 len--;
205 117
206 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
207 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 118 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
208 if (data->flags) 119 if (data->flags)
209 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 120 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
@@ -215,10 +126,10 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
215 return ret; 126 return ret;
216 127
217 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 128 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
129 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
218 ieee80211_sta_req_auth(sdata); 130 ieee80211_sta_req_auth(sdata);
219 return 0; 131 return 0;
220 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 132 }
221 return ieee80211_ibss_set_ssid(sdata, ssid, len);
222 133
223 return -EOPNOTSUPP; 134 return -EOPNOTSUPP;
224} 135}
@@ -229,9 +140,13 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
229 struct iw_point *data, char *ssid) 140 struct iw_point *data, char *ssid)
230{ 141{
231 size_t len; 142 size_t len;
232
233 struct ieee80211_sub_if_data *sdata; 143 struct ieee80211_sub_if_data *sdata;
144
234 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 145 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
146
147 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
148 return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
149
235 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 150 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
236 int res = ieee80211_sta_get_ssid(sdata, ssid, &len); 151 int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
237 if (res == 0) { 152 if (res == 0) {
@@ -240,14 +155,6 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
240 } else 155 } else
241 data->flags = 0; 156 data->flags = 0;
242 return res; 157 return res;
243 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
244 int res = ieee80211_ibss_get_ssid(sdata, ssid, &len);
245 if (res == 0) {
246 data->length = len;
247 data->flags = 1;
248 } else
249 data->flags = 0;
250 return res;
251 } 158 }
252 159
253 return -EOPNOTSUPP; 160 return -EOPNOTSUPP;
@@ -258,9 +165,11 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
258 struct iw_request_info *info, 165 struct iw_request_info *info,
259 struct sockaddr *ap_addr, char *extra) 166 struct sockaddr *ap_addr, char *extra)
260{ 167{
261 struct ieee80211_sub_if_data *sdata; 168 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
169
170 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
171 return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
262 172
263 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
264 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 173 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
265 int ret; 174 int ret;
266 175
@@ -275,18 +184,9 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
275 if (ret) 184 if (ret)
276 return ret; 185 return ret;
277 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 186 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
187 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
278 ieee80211_sta_req_auth(sdata); 188 ieee80211_sta_req_auth(sdata);
279 return 0; 189 return 0;
280 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
281 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
282 sdata->u.ibss.flags |= IEEE80211_IBSS_AUTO_BSSID_SEL |
283 IEEE80211_IBSS_AUTO_CHANNEL_SEL;
284 else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data))
285 sdata->u.ibss.flags |= IEEE80211_IBSS_AUTO_BSSID_SEL;
286 else
287 sdata->u.ibss.flags &= ~IEEE80211_IBSS_AUTO_BSSID_SEL;
288
289 return ieee80211_ibss_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
290 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { 190 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
291 /* 191 /*
292 * If it is necessary to update the WDS peer address 192 * If it is necessary to update the WDS peer address
@@ -312,9 +212,11 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
312 struct iw_request_info *info, 212 struct iw_request_info *info,
313 struct sockaddr *ap_addr, char *extra) 213 struct sockaddr *ap_addr, char *extra)
314{ 214{
315 struct ieee80211_sub_if_data *sdata; 215 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
216
217 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
218 return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
316 219
317 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
318 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 220 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
319 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) { 221 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) {
320 ap_addr->sa_family = ARPHRD_ETHER; 222 ap_addr->sa_family = ARPHRD_ETHER;
@@ -322,13 +224,6 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
322 } else 224 } else
323 memset(&ap_addr->sa_data, 0, ETH_ALEN); 225 memset(&ap_addr->sa_data, 0, ETH_ALEN);
324 return 0; 226 return 0;
325 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
326 if (sdata->u.ibss.state == IEEE80211_IBSS_MLME_JOINED) {
327 ap_addr->sa_family = ARPHRD_ETHER;
328 memcpy(&ap_addr->sa_data, sdata->u.ibss.bssid, ETH_ALEN);
329 } else
330 memset(&ap_addr->sa_data, 0, ETH_ALEN);
331 return 0;
332 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { 227 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
333 ap_addr->sa_family = ARPHRD_ETHER; 228 ap_addr->sa_family = ARPHRD_ETHER;
334 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); 229 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
@@ -411,334 +306,6 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
411 return 0; 306 return 0;
412} 307}
413 308
414static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
415 struct iw_request_info *info,
416 union iwreq_data *data, char *extra)
417{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
419 struct ieee80211_channel* chan = local->hw.conf.channel;
420 bool reconf = false;
421 u32 reconf_flags = 0;
422 int new_power_level;
423
424 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
425 return -EINVAL;
426 if (data->txpower.flags & IW_TXPOW_RANGE)
427 return -EINVAL;
428 if (!chan)
429 return -EINVAL;
430
431 /* only change when not disabling */
432 if (!data->txpower.disabled) {
433 if (data->txpower.fixed) {
434 if (data->txpower.value < 0)
435 return -EINVAL;
436 new_power_level = data->txpower.value;
437 /*
438 * Debatable, but we cannot do a fixed power
439 * level above the regulatory constraint.
440 * Use "iwconfig wlan0 txpower 15dBm" instead.
441 */
442 if (new_power_level > chan->max_power)
443 return -EINVAL;
444 } else {
445 /*
446 * Automatic power level setting, max being the value
447 * passed in from userland.
448 */
449 if (data->txpower.value < 0)
450 new_power_level = -1;
451 else
452 new_power_level = data->txpower.value;
453 }
454
455 reconf = true;
456
457 /*
458 * ieee80211_hw_config() will limit to the channel's
459 * max power and possibly power constraint from AP.
460 */
461 local->user_power_level = new_power_level;
462 }
463
464 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
465 local->hw.conf.radio_enabled = !(data->txpower.disabled);
466 reconf_flags |= IEEE80211_CONF_CHANGE_RADIO_ENABLED;
467 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
468 }
469
470 if (reconf || reconf_flags)
471 ieee80211_hw_config(local, reconf_flags);
472
473 return 0;
474}
475
476static int ieee80211_ioctl_giwtxpower(struct net_device *dev,
477 struct iw_request_info *info,
478 union iwreq_data *data, char *extra)
479{
480 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
481
482 data->txpower.fixed = 1;
483 data->txpower.disabled = !(local->hw.conf.radio_enabled);
484 data->txpower.value = local->hw.conf.power_level;
485 data->txpower.flags = IW_TXPOW_DBM;
486
487 return 0;
488}
489
490static int ieee80211_ioctl_siwrts(struct net_device *dev,
491 struct iw_request_info *info,
492 struct iw_param *rts, char *extra)
493{
494 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
495
496 if (rts->disabled)
497 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
498 else if (!rts->fixed)
499 /* if the rts value is not fixed, then take default */
500 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
501 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD)
502 return -EINVAL;
503 else
504 local->rts_threshold = rts->value;
505
506 /* If the wlan card performs RTS/CTS in hardware/firmware,
507 * configure it here */
508
509 if (local->ops->set_rts_threshold)
510 local->ops->set_rts_threshold(local_to_hw(local),
511 local->rts_threshold);
512
513 return 0;
514}
515
516static int ieee80211_ioctl_giwrts(struct net_device *dev,
517 struct iw_request_info *info,
518 struct iw_param *rts, char *extra)
519{
520 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
521
522 rts->value = local->rts_threshold;
523 rts->disabled = (rts->value >= IEEE80211_MAX_RTS_THRESHOLD);
524 rts->fixed = 1;
525
526 return 0;
527}
528
529
530static int ieee80211_ioctl_siwfrag(struct net_device *dev,
531 struct iw_request_info *info,
532 struct iw_param *frag, char *extra)
533{
534 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
535
536 if (frag->disabled)
537 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
538 else if (!frag->fixed)
539 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
540 else if (frag->value < 256 ||
541 frag->value > IEEE80211_MAX_FRAG_THRESHOLD)
542 return -EINVAL;
543 else {
544 /* Fragment length must be even, so strip LSB. */
545 local->fragmentation_threshold = frag->value & ~0x1;
546 }
547
548 return 0;
549}
550
551static int ieee80211_ioctl_giwfrag(struct net_device *dev,
552 struct iw_request_info *info,
553 struct iw_param *frag, char *extra)
554{
555 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
556
557 frag->value = local->fragmentation_threshold;
558 frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
559 frag->fixed = 1;
560
561 return 0;
562}
563
564
565static int ieee80211_ioctl_siwretry(struct net_device *dev,
566 struct iw_request_info *info,
567 struct iw_param *retry, char *extra)
568{
569 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
570
571 if (retry->disabled ||
572 (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
573 return -EINVAL;
574
575 if (retry->flags & IW_RETRY_MAX) {
576 local->hw.conf.long_frame_max_tx_count = retry->value;
577 } else if (retry->flags & IW_RETRY_MIN) {
578 local->hw.conf.short_frame_max_tx_count = retry->value;
579 } else {
580 local->hw.conf.long_frame_max_tx_count = retry->value;
581 local->hw.conf.short_frame_max_tx_count = retry->value;
582 }
583
584 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
585
586 return 0;
587}
588
589
590static int ieee80211_ioctl_giwretry(struct net_device *dev,
591 struct iw_request_info *info,
592 struct iw_param *retry, char *extra)
593{
594 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
595
596 retry->disabled = 0;
597 if (retry->flags == 0 || retry->flags & IW_RETRY_MIN) {
598 /* first return min value, iwconfig will ask max value
599 * later if needed */
600 retry->flags |= IW_RETRY_LIMIT;
601 retry->value = local->hw.conf.short_frame_max_tx_count;
602 if (local->hw.conf.long_frame_max_tx_count !=
603 local->hw.conf.short_frame_max_tx_count)
604 retry->flags |= IW_RETRY_MIN;
605 return 0;
606 }
607 if (retry->flags & IW_RETRY_MAX) {
608 retry->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
609 retry->value = local->hw.conf.long_frame_max_tx_count;
610 }
611
612 return 0;
613}
614
615static int ieee80211_ioctl_siwmlme(struct net_device *dev,
616 struct iw_request_info *info,
617 struct iw_point *data, char *extra)
618{
619 struct ieee80211_sub_if_data *sdata;
620 struct iw_mlme *mlme = (struct iw_mlme *) extra;
621
622 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
623 if (!(sdata->vif.type == NL80211_IFTYPE_STATION))
624 return -EINVAL;
625
626 switch (mlme->cmd) {
627 case IW_MLME_DEAUTH:
628 /* TODO: mlme->addr.sa_data */
629 return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
630 case IW_MLME_DISASSOC:
631 /* TODO: mlme->addr.sa_data */
632 return ieee80211_sta_disassociate(sdata, mlme->reason_code);
633 default:
634 return -EOPNOTSUPP;
635 }
636}
637
638
639static int ieee80211_ioctl_siwencode(struct net_device *dev,
640 struct iw_request_info *info,
641 struct iw_point *erq, char *keybuf)
642{
643 struct ieee80211_sub_if_data *sdata;
644 int idx, i, alg = ALG_WEP;
645 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
646 int remove = 0, ret;
647
648 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
649
650 idx = erq->flags & IW_ENCODE_INDEX;
651 if (idx == 0) {
652 if (sdata->default_key)
653 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
654 if (sdata->default_key == sdata->keys[i]) {
655 idx = i;
656 break;
657 }
658 }
659 } else if (idx < 1 || idx > 4)
660 return -EINVAL;
661 else
662 idx--;
663
664 if (erq->flags & IW_ENCODE_DISABLED)
665 remove = 1;
666 else if (erq->length == 0) {
667 /* No key data - just set the default TX key index */
668 ieee80211_set_default_key(sdata, idx);
669 return 0;
670 }
671
672 ret = ieee80211_set_encryption(
673 sdata, bcaddr,
674 idx, alg, remove,
675 !sdata->default_key,
676 keybuf, erq->length);
677
678 if (!ret) {
679 if (remove)
680 sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED;
681 else
682 sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED;
683 }
684
685 return ret;
686}
687
688
689static int ieee80211_ioctl_giwencode(struct net_device *dev,
690 struct iw_request_info *info,
691 struct iw_point *erq, char *key)
692{
693 struct ieee80211_sub_if_data *sdata;
694 int idx, i;
695
696 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
697
698 idx = erq->flags & IW_ENCODE_INDEX;
699 if (idx < 1 || idx > 4) {
700 idx = -1;
701 if (!sdata->default_key)
702 idx = 0;
703 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
704 if (sdata->default_key == sdata->keys[i]) {
705 idx = i;
706 break;
707 }
708 }
709 if (idx < 0)
710 return -EINVAL;
711 } else
712 idx--;
713
714 erq->flags = idx + 1;
715
716 if (!sdata->keys[idx]) {
717 erq->length = 0;
718 erq->flags |= IW_ENCODE_DISABLED;
719 return 0;
720 }
721
722 memcpy(key, sdata->keys[idx]->conf.key,
723 min_t(int, erq->length, sdata->keys[idx]->conf.keylen));
724 erq->length = sdata->keys[idx]->conf.keylen;
725 erq->flags |= IW_ENCODE_ENABLED;
726
727 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
728 switch (sdata->u.mgd.auth_alg) {
729 case WLAN_AUTH_OPEN:
730 case WLAN_AUTH_LEAP:
731 erq->flags |= IW_ENCODE_OPEN;
732 break;
733 case WLAN_AUTH_SHARED_KEY:
734 erq->flags |= IW_ENCODE_RESTRICTED;
735 break;
736 }
737 }
738
739 return 0;
740}
741
742static int ieee80211_ioctl_siwpower(struct net_device *dev, 309static int ieee80211_ioctl_siwpower(struct net_device *dev,
743 struct iw_request_info *info, 310 struct iw_request_info *info,
744 struct iw_param *wrq, 311 struct iw_param *wrq,
@@ -747,7 +314,7 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
747 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 314 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
748 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 315 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
749 struct ieee80211_conf *conf = &local->hw.conf; 316 struct ieee80211_conf *conf = &local->hw.conf;
750 int ret = 0, timeout = 0; 317 int timeout = 0;
751 bool ps; 318 bool ps;
752 319
753 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 320 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -779,42 +346,18 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
779 timeout = wrq->value / 1000; 346 timeout = wrq->value / 1000;
780 347
781 set: 348 set:
782 if (ps == local->powersave && timeout == conf->dynamic_ps_timeout) 349 if (ps == sdata->u.mgd.powersave && timeout == conf->dynamic_ps_timeout)
783 return ret; 350 return 0;
784 351
785 local->powersave = ps; 352 sdata->u.mgd.powersave = ps;
786 conf->dynamic_ps_timeout = timeout; 353 conf->dynamic_ps_timeout = timeout;
787 354
788 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 355 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
789 ret = ieee80211_hw_config(local, 356 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
790 IEEE80211_CONF_CHANGE_DYNPS_TIMEOUT);
791
792 if (!(sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED))
793 return ret;
794 357
795 if (conf->dynamic_ps_timeout > 0 && 358 ieee80211_recalc_ps(local, -1);
796 !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
797 mod_timer(&local->dynamic_ps_timer, jiffies +
798 msecs_to_jiffies(conf->dynamic_ps_timeout));
799 } else {
800 if (local->powersave) {
801 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
802 ieee80211_send_nullfunc(local, sdata, 1);
803 conf->flags |= IEEE80211_CONF_PS;
804 ret = ieee80211_hw_config(local,
805 IEEE80211_CONF_CHANGE_PS);
806 } else {
807 conf->flags &= ~IEEE80211_CONF_PS;
808 ret = ieee80211_hw_config(local,
809 IEEE80211_CONF_CHANGE_PS);
810 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
811 ieee80211_send_nullfunc(local, sdata, 0);
812 del_timer_sync(&local->dynamic_ps_timer);
813 cancel_work_sync(&local->dynamic_ps_enable_work);
814 }
815 }
816 359
817 return ret; 360 return 0;
818} 361}
819 362
820static int ieee80211_ioctl_giwpower(struct net_device *dev, 363static int ieee80211_ioctl_giwpower(struct net_device *dev,
@@ -822,9 +365,9 @@ static int ieee80211_ioctl_giwpower(struct net_device *dev,
822 union iwreq_data *wrqu, 365 union iwreq_data *wrqu,
823 char *extra) 366 char *extra)
824{ 367{
825 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 368 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
826 369
827 wrqu->power.disabled = !local->powersave; 370 wrqu->power.disabled = !sdata->u.mgd.powersave;
828 371
829 return 0; 372 return 0;
830} 373}
@@ -997,82 +540,6 @@ static int ieee80211_ioctl_giwauth(struct net_device *dev,
997} 540}
998 541
999 542
1000static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1001 struct iw_request_info *info,
1002 struct iw_point *erq, char *extra)
1003{
1004 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1005 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
1006 int uninitialized_var(alg), idx, i, remove = 0;
1007
1008 switch (ext->alg) {
1009 case IW_ENCODE_ALG_NONE:
1010 remove = 1;
1011 break;
1012 case IW_ENCODE_ALG_WEP:
1013 alg = ALG_WEP;
1014 break;
1015 case IW_ENCODE_ALG_TKIP:
1016 alg = ALG_TKIP;
1017 break;
1018 case IW_ENCODE_ALG_CCMP:
1019 alg = ALG_CCMP;
1020 break;
1021 case IW_ENCODE_ALG_AES_CMAC:
1022 alg = ALG_AES_CMAC;
1023 break;
1024 default:
1025 return -EOPNOTSUPP;
1026 }
1027
1028 if (erq->flags & IW_ENCODE_DISABLED)
1029 remove = 1;
1030
1031 idx = erq->flags & IW_ENCODE_INDEX;
1032 if (alg == ALG_AES_CMAC) {
1033 if (idx < NUM_DEFAULT_KEYS + 1 ||
1034 idx > NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
1035 idx = -1;
1036 if (!sdata->default_mgmt_key)
1037 idx = 0;
1038 else for (i = NUM_DEFAULT_KEYS;
1039 i < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1040 i++) {
1041 if (sdata->default_mgmt_key == sdata->keys[i])
1042 {
1043 idx = i;
1044 break;
1045 }
1046 }
1047 if (idx < 0)
1048 return -EINVAL;
1049 } else
1050 idx--;
1051 } else {
1052 if (idx < 1 || idx > 4) {
1053 idx = -1;
1054 if (!sdata->default_key)
1055 idx = 0;
1056 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1057 if (sdata->default_key == sdata->keys[i]) {
1058 idx = i;
1059 break;
1060 }
1061 }
1062 if (idx < 0)
1063 return -EINVAL;
1064 } else
1065 idx--;
1066 }
1067
1068 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1069 remove,
1070 ext->ext_flags &
1071 IW_ENCODE_EXT_SET_TX_KEY,
1072 ext->key, ext->key_len);
1073}
1074
1075
1076/* Structures to export the Wireless Handlers */ 543/* Structures to export the Wireless Handlers */
1077 544
1078static const iw_handler ieee80211_handler[] = 545static const iw_handler ieee80211_handler[] =
@@ -1099,7 +566,7 @@ static const iw_handler ieee80211_handler[] =
1099 (iw_handler) NULL, /* SIOCGIWTHRSPY */ 566 (iw_handler) NULL, /* SIOCGIWTHRSPY */
1100 (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */ 567 (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */
1101 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */ 568 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */
1102 (iw_handler) ieee80211_ioctl_siwmlme, /* SIOCSIWMLME */ 569 (iw_handler) cfg80211_wext_siwmlme, /* SIOCSIWMLME */
1103 (iw_handler) NULL, /* SIOCGIWAPLIST */ 570 (iw_handler) NULL, /* SIOCGIWAPLIST */
1104 (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */ 571 (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */
1105 (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */ 572 (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */
@@ -1111,16 +578,16 @@ static const iw_handler ieee80211_handler[] =
1111 (iw_handler) NULL, /* -- hole -- */ 578 (iw_handler) NULL, /* -- hole -- */
1112 (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */ 579 (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */
1113 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */ 580 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */
1114 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */ 581 (iw_handler) cfg80211_wext_siwrts, /* SIOCSIWRTS */
1115 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */ 582 (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */
1116 (iw_handler) ieee80211_ioctl_siwfrag, /* SIOCSIWFRAG */ 583 (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */
1117 (iw_handler) ieee80211_ioctl_giwfrag, /* SIOCGIWFRAG */ 584 (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */
1118 (iw_handler) ieee80211_ioctl_siwtxpower, /* SIOCSIWTXPOW */ 585 (iw_handler) cfg80211_wext_siwtxpower, /* SIOCSIWTXPOW */
1119 (iw_handler) ieee80211_ioctl_giwtxpower, /* SIOCGIWTXPOW */ 586 (iw_handler) cfg80211_wext_giwtxpower, /* SIOCGIWTXPOW */
1120 (iw_handler) ieee80211_ioctl_siwretry, /* SIOCSIWRETRY */ 587 (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */
1121 (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ 588 (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */
1122 (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ 589 (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */
1123 (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */ 590 (iw_handler) cfg80211_wext_giwencode, /* SIOCGIWENCODE */
1124 (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */ 591 (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */
1125 (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */ 592 (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */
1126 (iw_handler) NULL, /* -- hole -- */ 593 (iw_handler) NULL, /* -- hole -- */
@@ -1129,7 +596,7 @@ static const iw_handler ieee80211_handler[] =
1129 (iw_handler) NULL, /* SIOCGIWGENIE */ 596 (iw_handler) NULL, /* SIOCGIWGENIE */
1130 (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */ 597 (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */
1131 (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */ 598 (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */
1132 (iw_handler) ieee80211_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */ 599 (iw_handler) cfg80211_wext_siwencodeext, /* SIOCSIWENCODEEXT */
1133 (iw_handler) NULL, /* SIOCGIWENCODEEXT */ 600 (iw_handler) NULL, /* SIOCGIWENCODEEXT */
1134 (iw_handler) NULL, /* SIOCSIWPMKSA */ 601 (iw_handler) NULL, /* SIOCSIWPMKSA */
1135 (iw_handler) NULL, /* -- hole -- */ 602 (iw_handler) NULL, /* -- hole -- */
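Reviewer note: every replacement in this table is a drop-in because all wext handlers share one signature:

	typedef int (*iw_handler)(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *wrqu, char *extra);

so the entries can point straight at the cfg80211 implementations (cfg80211_wext_siwrts and friends), and the now-unreferenced mac80211 copies above could be deleted wholesale.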
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 0b8ad1f4ecdd..116a923b14d6 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -23,34 +23,6 @@
23 */ 23 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 25
26static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
27
28/* Given a data frame determine the 802.1p/1d tag to use. */
29static unsigned int classify_1d(struct sk_buff *skb)
30{
31 unsigned int dscp;
32
33 /* skb->priority values from 256->263 are magic values to
34 * directly indicate a specific 802.1d priority. This is used
35 * to allow 802.1d priority to be passed directly in from VLAN
36 * tags, etc.
37 */
38 if (skb->priority >= 256 && skb->priority <= 263)
39 return skb->priority - 256;
40
41 switch (skb->protocol) {
42 case htons(ETH_P_IP):
43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break;
45
46 default:
47 return 0;
48 }
49
50 return dscp >> 5;
51}
52
53
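Reviewer note: the classifier isn't lost, it moves to cfg80211 as cfg80211_classify8021d() (used below). The mapping it implements, worked through: an EF-marked voice packet has TOS 0xb8, so dscp = 0xb8 & 0xfc = 0xb8 and 0xb8 >> 5 = 5, i.e. 802.1d UP 5; ieee802_1d_to_ac[5] == 1 selects AC_VI. Plain best-effort TOS 0x00 yields UP 0, and ieee802_1d_to_ac[0] == 2 selects AC_BE.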
54static int wme_downgrade_ac(struct sk_buff *skb) 26static int wme_downgrade_ac(struct sk_buff *skb)
55{ 27{
56 switch (skb->priority) { 28 switch (skb->priority) {
@@ -94,7 +66,7 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
94 66
95 /* use the data classifier to determine what 802.1d tag the 67 /* use the data classifier to determine what 802.1d tag the
96 * data frame has */ 68 * data frame has */
97 skb->priority = classify_1d(skb); 69 skb->priority = cfg80211_classify8021d(skb);
98 70
99 /* in case we are a client verify acm is not set for this ac */ 71 /* in case we are a client verify acm is not set for this ac */
100 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 72 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
@@ -129,11 +101,11 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
129 * Now we know the 1d priority, fill in the QoS header if 101 * Now we know the 1d priority, fill in the QoS header if
130 * there is one (and we haven't done this before). 102 * there is one (and we haven't done this before).
131 */ 103 */
132 if (!skb->requeue && ieee80211_is_data_qos(hdr->frame_control)) { 104 if (ieee80211_is_data_qos(hdr->frame_control)) {
133 u8 *p = ieee80211_get_qos_ctl(hdr); 105 u8 *p = ieee80211_get_qos_ctl(hdr);
134 u8 ack_policy = 0; 106 u8 ack_policy = 0;
135 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 107 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
136 if (local->wifi_wme_noack_test) 108 if (unlikely(local->wifi_wme_noack_test))
137 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 109 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
138 QOS_CONTROL_ACK_POLICY_SHIFT; 110 QOS_CONTROL_ACK_POLICY_SHIFT;
139 /* qos header is 2 bytes, second reserved */ 111 /* qos header is 2 bytes, second reserved */
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 4f8bfea278f2..dcfae8884b86 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -122,7 +122,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 		return RX_DROP_UNUSABLE;
 
 	mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
-					(void *) skb->data);
+					(void *) skb->data, NULL);
 	return RX_DROP_UNUSABLE;
 }
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index cb3ad741ebf8..634d14affc8d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -327,7 +327,7 @@ config NETFILTER_XT_TARGET_CONNMARK
 
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  The module will be called
-	  ipt_CONNMARK.ko.  If unsure, say `N'.
+	  ipt_CONNMARK.  If unsure, say `N'.
 
 config NETFILTER_XT_TARGET_CONNSECMARK
 	tristate '"CONNSECMARK" target support'
@@ -584,7 +584,7 @@ config NETFILTER_XT_MATCH_CONNMARK
 
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  The module will be called
-	  ipt_connmark.ko.  If unsure, say `N'.
+	  ipt_connmark.  If unsure, say `N'.
 
 config NETFILTER_XT_MATCH_CONNTRACK
 	tristate '"conntrack" connection tracking match support'
@@ -917,6 +917,19 @@ config NETFILTER_XT_MATCH_U32
 
 	  Details and examples are in the kernel module source.
 
+config NETFILTER_XT_MATCH_OSF
+	tristate '"osf" Passive OS fingerprint match'
+	depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
+	help
+	  This option selects the Passive OS Fingerprinting match module
+	  that allows to passively match the remote operating system by
+	  analyzing incoming TCP SYN packets.
+
+	  Rules and loading software can be downloaded from
+	  http://www.ioremap.net/projects/osf
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 endif # NETFILTER_XTABLES
 
 endmenu
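
The new xt_osf match plugs into the generic xtables match API like any other match module; a rough skeleton under that assumption (the names and the empty match body are placeholders, only struct xt_match and xt_register_match() are the real interface):

#include <linux/netfilter/x_tables.h>

static bool osf_match_sketch(const struct sk_buff *skb,
			     const struct xt_match_param *p)
{
	/* fingerprint comparison against the loaded OSF table elided */
	return false;
}

static struct xt_match osf_sketch_reg __read_mostly = {
	.name   = "osf",
	.family = NFPROTO_IPV4,
	.match  = osf_match_sketch,
	.me     = THIS_MODULE,
};

/* module init would call xt_register_match(&osf_sketch_reg) */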
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 6282060fbda9..49f62ee4e9ff 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e01061f49cdc..7c1333c67ff3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3345,22 +3345,8 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
 
 static int __init ip_vs_genl_register(void)
 {
-	int ret, i;
-
-	ret = genl_register_family(&ip_vs_genl_family);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
-		ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
-		if (ret)
-			goto err_out;
-	}
-	return 0;
-
-err_out:
-	genl_unregister_family(&ip_vs_genl_family);
-	return ret;
+	return genl_register_family_with_ops(&ip_vs_genl_family,
+		ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops));
 }
 
 static void ip_vs_genl_unregister(void)
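
genl_register_family_with_ops() folds the register-family-then-each-op sequence, including the unwind on failure, into one call, which is why the hand-rolled err_out path above disappears. Roughly what the helper does for the caller (a sketch of its contract, not its exact source):

static int genl_register_with_ops_sketch(struct genl_family *family,
					 struct genl_ops *ops, size_t n_ops)
{
	size_t i;
	int err = genl_register_family(family);

	if (err)
		return err;
	for (i = 0; i < n_ops; i++) {
		err = genl_register_ops(family, &ops[i]);
		if (err) {
			/* unregistering the family drops any ops added so far */
			genl_unregister_family(family);
			return err;
		}
	}
	return 0;
}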
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 425ab144f15d..5874657af7f2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -260,8 +260,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	ip_send_check(ip_hdr(skb));
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -324,8 +324,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -388,8 +388,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_put;
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -465,8 +465,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_put;
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -553,8 +553,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n");
 		goto tx_error;
 	}
-	if (skb->dst)
-		skb->dst->ops->update_pmtu(skb->dst, mtu);
+	if (skb_dst(skb))
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
 	df |= (old_iph->frag_off & htons(IP_DF));
 
@@ -596,8 +596,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -665,8 +665,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
 		goto tx_error;
 	}
-	if (skb->dst)
-		skb->dst->ops->update_pmtu(skb->dst, mtu);
+	if (skb_dst(skb))
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
 	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
@@ -702,8 +702,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -775,8 +775,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	ip_send_check(ip_hdr(skb));
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -828,8 +828,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}
 
 	/* drop old route */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -900,8 +900,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_put;
 
 	/* drop the old route when skb is not shared */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	ip_vs_nat_icmp(skb, pp, cp, 0);
 
@@ -975,8 +975,8 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_put;
 
 	/* drop the old route when skb is not shared */
-	dst_release(skb->dst);
-	skb->dst = &rt->u.dst;
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->u.dst);
 
 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
 
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 9fe8982bd7c9..4a1d94aac20b 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -116,7 +116,7 @@ int nf_conntrack_acct_init(struct net *net)
 	if (net_eq(net, &init_net)) {
 #ifdef CONFIG_NF_CT_ACCT
 		printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. Please use\n");
-		printk(KERN_WARNING "nf_conntrack.acct=1 kernel paramater, acct=1 nf_conntrack module option or\n");
+		printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n");
 		printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n");
 #endif
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8020db6274b8..5f72b94b4918 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -39,6 +39,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -182,10 +183,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
-	if (!test_bit(IPS_DYING_BIT, &ct->status))
-		nf_conntrack_event(IPCT_DESTROY, ct);
-	set_bit(IPS_DYING_BIT, &ct->status);
-
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to nf_conntrack_lock!!! -HW */
@@ -219,27 +216,70 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	nf_conntrack_free(ct);
 }
 
-static void death_by_timeout(unsigned long ul_conntrack)
+void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
-	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
-	struct nf_conn_help *help = nfct_help(ct);
-	struct nf_conntrack_helper *helper;
-
-	if (help) {
-		rcu_read_lock();
-		helper = rcu_dereference(help->helper);
-		if (helper && helper->destroy)
-			helper->destroy(ct);
-		rcu_read_unlock();
-	}
 
+	nf_ct_helper_destroy(ct);
 	spin_lock_bh(&nf_conntrack_lock);
 	/* Inside lock so preempt is disabled on module removal path.
 	 * Otherwise we can get spurious warnings. */
 	NF_CT_STAT_INC(net, delete_list);
 	clean_from_lists(ct);
 	spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
+
+static void death_by_event(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+	struct net *net = nf_ct_net(ct);
+
+	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
+		/* bad luck, let's retry again */
+		ct->timeout.expires = jiffies +
+			(random32() % net->ct.sysctl_events_retry_timeout);
+		add_timer(&ct->timeout);
+		return;
+	}
+	/* we've got the event delivered, now it's dying */
+	set_bit(IPS_DYING_BIT, &ct->status);
+	spin_lock(&nf_conntrack_lock);
+	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&nf_conntrack_lock);
+	nf_ct_put(ct);
+}
+
+void nf_ct_insert_dying_list(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* add this conntrack to the dying list */
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &net->ct.dying);
+	spin_unlock_bh(&nf_conntrack_lock);
+	/* set a new timer to retry event delivery */
+	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
+	ct->timeout.expires = jiffies +
+		(random32() % net->ct.sysctl_events_retry_timeout);
+	add_timer(&ct->timeout);
+}
+EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+
+	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
+	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+		/* destroy event was not delivered */
+		nf_ct_delete_from_lists(ct);
+		nf_ct_insert_dying_list(ct);
+		return;
+	}
+	set_bit(IPS_DYING_BIT, &ct->status);
+	nf_ct_delete_from_lists(ct);
 	nf_ct_put(ct);
 }
 
@@ -398,11 +438,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);
-#ifdef CONFIG_NF_NAT_NEEDED
-	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
-	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
-		nf_conntrack_event_cache(IPCT_NATINFO, ct);
-#endif
+
 	nf_conntrack_event_cache(master_ct(ct) ?
 				 IPCT_RELATED : IPCT_NEW, ct);
 	return NF_ACCEPT;
@@ -523,6 +559,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	spin_lock_init(&ct->lock);
 	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
@@ -580,6 +617,7 @@ init_conntrack(struct net *net,
 	}
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
 
 	spin_lock_bh(&nf_conntrack_lock);
 	exp = nf_ct_find_expectation(net, tuple);
@@ -807,13 +845,9 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 			  unsigned long extra_jiffies,
 			  int do_acct)
 {
-	int event = 0;
-
 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 	NF_CT_ASSERT(skb);
 
-	spin_lock_bh(&nf_conntrack_lock);
-
 	/* Only update if this is not a fixed timeout */
 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
 		goto acct;
@@ -821,19 +855,14 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 	/* If not in hash table, timer will not be active yet */
 	if (!nf_ct_is_confirmed(ct)) {
 		ct->timeout.expires = extra_jiffies;
-		event = IPCT_REFRESH;
 	} else {
 		unsigned long newtime = jiffies + extra_jiffies;
 
 		/* Only update the timeout if the new timeout is at least
 		   HZ jiffies from the old timeout. Need del_timer for race
 		   avoidance (may already be dying). */
-		if (newtime - ct->timeout.expires >= HZ
-		    && del_timer(&ct->timeout)) {
-			ct->timeout.expires = newtime;
-			add_timer(&ct->timeout);
-			event = IPCT_REFRESH;
-		}
+		if (newtime - ct->timeout.expires >= HZ)
+			mod_timer_pending(&ct->timeout, newtime);
 	}
 
 acct:
@@ -842,17 +871,13 @@ acct:
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
 	}
-
-	spin_unlock_bh(&nf_conntrack_lock);
-
-	/* must be unlocked when calling event cache */
-	if (event)
-		nf_conntrack_event_cache(event, ct);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
@@ -864,14 +889,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 	if (do_acct) {
 		struct nf_conn_counter *acct;
 
-		spin_lock_bh(&nf_conntrack_lock);
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
-		spin_unlock_bh(&nf_conntrack_lock);
 	}
 
 	if (del_timer(&ct->timeout)) {
@@ -1001,15 +1026,22 @@ struct __nf_ct_flush_report {
 	int report;
 };
 
-static int kill_all(struct nf_conn *i, void *data)
+static int kill_report(struct nf_conn *i, void *data)
 {
 	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
 
-	/* get_next_corpse sets the dying bit for us */
-	nf_conntrack_event_report(IPCT_DESTROY,
-				  i,
-				  fr->pid,
-				  fr->report);
+	/* If we fail to deliver the event, death_by_timeout() will retry */
+	if (nf_conntrack_event_report(IPCT_DESTROY, i,
+				      fr->pid, fr->report) < 0)
		return 1;
+
+	/* Avoid the delivery of the destroy event in death_by_timeout(). */
+	set_bit(IPS_DYING_BIT, &i->status);
+	return 1;
+}
+
+static int kill_all(struct nf_conn *i, void *data)
+{
 	return 1;
 }
 
@@ -1023,15 +1055,30 @@ void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
-void nf_conntrack_flush(struct net *net, u32 pid, int report)
+void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
 {
 	struct __nf_ct_flush_report fr = {
 		.pid = pid,
 		.report = report,
 	};
-	nf_ct_iterate_cleanup(net, kill_all, &fr);
+	nf_ct_iterate_cleanup(net, kill_report, &fr);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
+
+static void nf_ct_release_dying_list(void)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct hlist_nulls_node *n;
+
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		/* never fails to remove them, no listeners at this point */
+		nf_ct_kill(ct);
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_flush);
 
 static void nf_conntrack_cleanup_init_net(void)
 {
@@ -1042,10 +1089,9 @@ static void nf_conntrack_cleanup_init_net(void)
 
 static void nf_conntrack_cleanup_net(struct net *net)
 {
-	nf_ct_event_cache_flush(net);
-	nf_conntrack_ecache_fini(net);
  i_see_dead_people:
-	nf_conntrack_flush(net, 0, 0);
+	nf_ct_iterate_cleanup(net, kill_all, NULL);
+	nf_ct_release_dying_list();
 	if (atomic_read(&net->ct.count) != 0) {
 		schedule();
 		goto i_see_dead_people;
@@ -1056,6 +1102,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
 			     nf_conntrack_htable_size);
+	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);
 	free_percpu(net->ct.stat);
@@ -1226,14 +1273,12 @@ static int nf_conntrack_init_net(struct net *net)
 
 	atomic_set(&net->ct.count, 0);
 	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
 		goto err_stat;
 	}
-	ret = nf_conntrack_ecache_init(net);
-	if (ret < 0)
-		goto err_ecache;
 	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
 					     &net->ct.hash_vmalloc, 1);
 	if (!net->ct.hash) {
@@ -1247,6 +1292,9 @@ static int nf_conntrack_init_net(struct net *net)
 	ret = nf_conntrack_acct_init(net);
 	if (ret < 0)
 		goto err_acct;
+	ret = nf_conntrack_ecache_init(net);
+	if (ret < 0)
+		goto err_ecache;
 
 	/* Set up fake conntrack:
 	    - to never be deleted, not in any hashes */
@@ -1259,14 +1307,14 @@ static int nf_conntrack_init_net(struct net *net)
 
 	return 0;
 
+err_ecache:
+	nf_conntrack_acct_fini(net);
 err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
 			     nf_conntrack_htable_size);
 err_hash:
-	nf_conntrack_ecache_fini(net);
-err_ecache:
 	free_percpu(net->ct.stat);
 err_stat:
 	return ret;
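
Two mechanisms change in the core here: a conntrack whose IPCT_DESTROY event cannot be delivered is parked on the per-namespace dying list, where death_by_event() retries with a random backoff bounded by nf_conntrack_events_retry_timeout; and the timeout refresh drops the del_timer()/add_timer() pair under the global lock in favour of mod_timer_pending(), which updates a timer only while it is still pending and so cannot revive an entry whose timer has already fired. The refresh idiom, as used above (sketch, name hypothetical):

static void refresh_sketch(struct nf_conn *ct, unsigned long extra_jiffies)
{
	unsigned long newtime = jiffies + extra_jiffies;

	/* only bother rearming if the timeout moves by at least HZ;
	 * a dying conntrack's expired timer is left untouched */
	if (newtime - ct->timeout.expires >= HZ)
		mod_timer_pending(&ct->timeout, newtime);
}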
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index dee4190209cc..aee560b4768d 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -16,121 +16,245 @@
 #include <linux/stddef.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
-#include <linux/notifier.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
 
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_chain);
+static DEFINE_MUTEX(nf_ct_ecache_mutex);
+
+struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
 
-ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
-EXPORT_SYMBOL_GPL(nf_ct_expect_chain);
+struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
+EXPORT_SYMBOL_GPL(nf_expect_event_cb);
 
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
-static inline void
-__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
+void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
-	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
-	    && ecache->events) {
+	unsigned long events;
+	struct nf_ct_event_notifier *notify;
+	struct nf_conntrack_ecache *e;
+
+	rcu_read_lock();
+	notify = rcu_dereference(nf_conntrack_event_cb);
+	if (notify == NULL)
+		goto out_unlock;
+
+	e = nf_ct_ecache_find(ct);
+	if (e == NULL)
+		goto out_unlock;
+
+	events = xchg(&e->cache, 0);
+
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
 		struct nf_ct_event item = {
-			.ct	= ecache->ct,
+			.ct	= ct,
 			.pid	= 0,
 			.report	= 0
 		};
+		int ret;
+		/* We make a copy of the missed event cache without taking
+		 * the lock, thus we may send missed events twice. However,
+		 * this does not harm and it happens very rarely. */
+		unsigned long missed = e->missed;
 
-		atomic_notifier_call_chain(&nf_conntrack_chain,
-					   ecache->events,
-					   &item);
+		ret = notify->fcn(events | missed, &item);
+		if (unlikely(ret < 0 || missed)) {
+			spin_lock_bh(&ct->lock);
+			if (ret < 0)
+				e->missed |= events;
+			else
+				e->missed &= ~missed;
+			spin_unlock_bh(&ct->lock);
+		}
 	}
 
-	ecache->events = 0;
-	nf_ct_put(ecache->ct);
-	ecache->ct = NULL;
+out_unlock:
+	rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling for freeing the skb */
-void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
 {
-	struct net *net = nf_ct_net(ct);
-	struct nf_conntrack_ecache *ecache;
+	int ret = 0;
+	struct nf_ct_event_notifier *notify;
 
-	local_bh_disable();
-	ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
-	if (ecache->ct == ct)
-		__nf_ct_deliver_cached_events(ecache);
-	local_bh_enable();
+	mutex_lock(&nf_ct_ecache_mutex);
+	notify = rcu_dereference(nf_conntrack_event_cb);
+	if (notify != NULL) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	rcu_assign_pointer(nf_conntrack_event_cb, new);
+	mutex_unlock(&nf_ct_ecache_mutex);
+	return ret;
+
+out_unlock:
+	mutex_unlock(&nf_ct_ecache_mutex);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
+EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
 
-/* Deliver cached events for old pending events, if current conntrack != old */
-void __nf_ct_event_cache_init(struct nf_conn *ct)
+void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
 {
-	struct net *net = nf_ct_net(ct);
-	struct nf_conntrack_ecache *ecache;
-
-	/* take care of delivering potentially old events */
-	ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
-	BUG_ON(ecache->ct == ct);
-	if (ecache->ct)
-		__nf_ct_deliver_cached_events(ecache);
-	/* initialize for this conntrack/packet */
-	ecache->ct = ct;
-	nf_conntrack_get(&ct->ct_general);
+	struct nf_ct_event_notifier *notify;
+
+	mutex_lock(&nf_ct_ecache_mutex);
+	notify = rcu_dereference(nf_conntrack_event_cb);
+	BUG_ON(notify != new);
+	rcu_assign_pointer(nf_conntrack_event_cb, NULL);
+	mutex_unlock(&nf_ct_ecache_mutex);
 }
-EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
-/* flush the event cache - touches other CPU's data and must not be called
- * while packets are still passing through the code */
-void nf_ct_event_cache_flush(struct net *net)
+int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
 {
-	struct nf_conntrack_ecache *ecache;
-	int cpu;
+	int ret = 0;
+	struct nf_exp_event_notifier *notify;
 
-	for_each_possible_cpu(cpu) {
-		ecache = per_cpu_ptr(net->ct.ecache, cpu);
-		if (ecache->ct)
-			nf_ct_put(ecache->ct);
+	mutex_lock(&nf_ct_ecache_mutex);
+	notify = rcu_dereference(nf_expect_event_cb);
+	if (notify != NULL) {
+		ret = -EBUSY;
+		goto out_unlock;
 	}
+	rcu_assign_pointer(nf_expect_event_cb, new);
+	mutex_unlock(&nf_ct_ecache_mutex);
+	return ret;
+
+out_unlock:
+	mutex_unlock(&nf_ct_ecache_mutex);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-int nf_conntrack_ecache_init(struct net *net)
+void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
 {
-	net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
-	if (!net->ct.ecache)
-		return -ENOMEM;
-	return 0;
+	struct nf_exp_event_notifier *notify;
+
+	mutex_lock(&nf_ct_ecache_mutex);
+	notify = rcu_dereference(nf_expect_event_cb);
+	BUG_ON(notify != new);
+	rcu_assign_pointer(nf_expect_event_cb, NULL);
+	mutex_unlock(&nf_ct_ecache_mutex);
 }
+EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
-void nf_conntrack_ecache_fini(struct net *net)
+#define NF_CT_EVENTS_DEFAULT 1
+static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
+static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table event_sysctl_table[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nf_conntrack_events",
+		.data		= &init_net.ct.sysctl_events,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nf_conntrack_events_retry_timeout",
+		.data		= &init_net.ct.sysctl_events_retry_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type event_extend __read_mostly = {
+	.len	= sizeof(struct nf_conntrack_ecache),
+	.align	= __alignof__(struct nf_conntrack_ecache),
+	.id	= NF_CT_EXT_ECACHE,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_event_init_sysctl(struct net *net)
 {
-	free_percpu(net->ct.ecache);
+	struct ctl_table *table;
+
+	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
+			GFP_KERNEL);
+	if (!table)
+		goto out;
+
+	table[0].data = &net->ct.sysctl_events;
+	table[1].data = &net->ct.sysctl_events_retry_timeout;
+
+	net->ct.event_sysctl_header =
+		register_net_sysctl_table(net,
+					  nf_net_netfilter_sysctl_path, table);
+	if (!net->ct.event_sysctl_header) {
+		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
+		goto out_register;
+	}
+	return 0;
+
+out_register:
+	kfree(table);
+out:
+	return -ENOMEM;
 }
 
-int nf_conntrack_register_notifier(struct notifier_block *nb)
+static void nf_conntrack_event_fini_sysctl(struct net *net)
 {
-	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
+	struct ctl_table *table;
+
+	table = net->ct.event_sysctl_header->ctl_table_arg;
+	unregister_net_sysctl_table(net->ct.event_sysctl_header);
+	kfree(table);
+}
+#else
+static int nf_conntrack_event_init_sysctl(struct net *net)
+{
+	return 0;
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
 
-int nf_conntrack_unregister_notifier(struct notifier_block *nb)
+static void nf_conntrack_event_fini_sysctl(struct net *net)
 {
-	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+#endif /* CONFIG_SYSCTL */
 
-int nf_ct_expect_register_notifier(struct notifier_block *nb)
+int nf_conntrack_ecache_init(struct net *net)
 {
-	return atomic_notifier_chain_register(&nf_ct_expect_chain, nb);
+	int ret;
+
+	net->ct.sysctl_events = nf_ct_events;
+	net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
+
+	if (net_eq(net, &init_net)) {
+		ret = nf_ct_extend_register(&event_extend);
+		if (ret < 0) {
+			printk(KERN_ERR "nf_ct_event: Unable to register "
+					"event extension.\n");
+			goto out_extend_register;
+		}
+	}
+
+	ret = nf_conntrack_event_init_sysctl(net);
+	if (ret < 0)
+		goto out_sysctl;
+
+	return 0;
+
+out_sysctl:
+	if (net_eq(net, &init_net))
+		nf_ct_extend_unregister(&event_extend);
+out_extend_register:
+	return ret;
 }
-EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-int nf_ct_expect_unregister_notifier(struct notifier_block *nb)
+void nf_conntrack_ecache_fini(struct net *net)
 {
-	return atomic_notifier_chain_unregister(&nf_ct_expect_chain, nb);
+	nf_conntrack_event_fini_sysctl(net);
+	if (net_eq(net, &init_net))
+		nf_ct_extend_unregister(&event_extend);
 }
-EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
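
The atomic notifier chains are replaced by a single callback structure per event class: registration takes the mutex and fails with -EBUSY if a callback is already installed, and a callback returning a negative value tells conntrack to keep the event for redelivery (via the missed-event cache, or the dying list for IPCT_DESTROY). A sketch of a consumer under the new API (the callback body is hypothetical; the only in-tree user is ctnetlink):

static int ct_event_sketch(unsigned int events, struct nf_ct_event *item)
{
	if (events & (1 << IPCT_DESTROY))
		pr_debug("conntrack %p destroyed\n", item->ct);
	return 0;	/* < 0 would request redelivery */
}

static struct nf_ct_event_notifier ct_notifier_sketch = {
	.fcn	= ct_event_sketch,
};

/* init: err = nf_conntrack_register_notifier(&ct_notifier_sketch);
 * exit: nf_conntrack_unregister_notifier(&ct_notifier_sketch); */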
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 00fecc385f9b..5509dd1f14cf 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -338,11 +338,9 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
 
 	if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
 		info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
-		nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, ct);
 	} else if (oldest != NUM_SEQ_TO_REMEMBER &&
 		   after(nl_seq, info->seq_aft_nl[dir][oldest])) {
 		info->seq_aft_nl[dir][oldest] = nl_seq;
-		nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, ct);
 	}
 }
 
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 0fa5a422959f..65c2a7bc3afc 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -136,6 +136,20 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
 	return 0;
 }
 
+void nf_ct_helper_destroy(struct nf_conn *ct)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_helper *helper;
+
+	if (help) {
+		rcu_read_lock();
+		helper = rcu_dereference(help->helper);
+		if (helper && helper->destroy)
+			helper->destroy(ct);
+		rcu_read_unlock();
+	}
+}
+
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
 	unsigned int h = helper_hash(&me->tuple);
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 8a3875e36ec2..497b2224536f 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -48,7 +48,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
48{ 48{
49 struct nf_conntrack_expect *exp; 49 struct nf_conntrack_expect *exp;
50 struct iphdr *iph = ip_hdr(skb); 50 struct iphdr *iph = ip_hdr(skb);
51 struct rtable *rt = skb->rtable; 51 struct rtable *rt = skb_rtable(skb);
52 struct in_device *in_dev; 52 struct in_device *in_dev;
53 __be32 mask = 0; 53 __be32 mask = 0;
54 54
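
skb_rtable() is the routing companion of the skb_dst() accessors used in the IPVS hunks above; presumably little more than a cast over the skb's dst entry:

/* sketch of the accessor replacing direct skb->rtable use */
static inline struct rtable *skb_rtable_sketch(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}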
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c523f0b8cee5..49479d194570 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -27,7 +27,6 @@
27#include <linux/netlink.h> 27#include <linux/netlink.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/notifier.h>
31 30
32#include <linux/netfilter.h> 31#include <linux/netfilter.h>
33#include <net/netlink.h> 32#include <net/netlink.h>
@@ -144,7 +143,7 @@ nla_put_failure:
144} 143}
145 144
146static inline int 145static inline int
147ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) 146ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
148{ 147{
149 struct nf_conntrack_l4proto *l4proto; 148 struct nf_conntrack_l4proto *l4proto;
150 struct nlattr *nest_proto; 149 struct nlattr *nest_proto;
@@ -346,23 +345,21 @@ nla_put_failure:
346 return -1; 345 return -1;
347} 346}
348 347
349#define tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
350
351static int 348static int
352ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 349ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
353 int event, int nowait, 350 int event, struct nf_conn *ct)
354 const struct nf_conn *ct)
355{ 351{
356 struct nlmsghdr *nlh; 352 struct nlmsghdr *nlh;
357 struct nfgenmsg *nfmsg; 353 struct nfgenmsg *nfmsg;
358 struct nlattr *nest_parms; 354 struct nlattr *nest_parms;
359 unsigned char *b = skb_tail_pointer(skb); 355 unsigned int flags = pid ? NLM_F_MULTI : 0;
360 356
361 event |= NFNL_SUBSYS_CTNETLINK << 8; 357 event |= NFNL_SUBSYS_CTNETLINK << 8;
362 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); 358 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
363 nfmsg = NLMSG_DATA(nlh); 359 if (nlh == NULL)
360 goto nlmsg_failure;
364 361
365 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; 362 nfmsg = nlmsg_data(nlh);
366 nfmsg->nfgen_family = nf_ct_l3num(ct); 363 nfmsg->nfgen_family = nf_ct_l3num(ct);
367 nfmsg->version = NFNETLINK_V0; 364 nfmsg->version = NFNETLINK_V0;
368 nfmsg->res_id = 0; 365 nfmsg->res_id = 0;
@@ -370,14 +367,14 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
370 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); 367 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
371 if (!nest_parms) 368 if (!nest_parms)
372 goto nla_put_failure; 369 goto nla_put_failure;
373 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) 370 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
374 goto nla_put_failure; 371 goto nla_put_failure;
375 nla_nest_end(skb, nest_parms); 372 nla_nest_end(skb, nest_parms);
376 373
377 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); 374 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
378 if (!nest_parms) 375 if (!nest_parms)
379 goto nla_put_failure; 376 goto nla_put_failure;
380 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) 377 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
381 goto nla_put_failure; 378 goto nla_put_failure;
382 nla_nest_end(skb, nest_parms); 379 nla_nest_end(skb, nest_parms);
383 380
@@ -395,132 +392,109 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
395 ctnetlink_dump_nat_seq_adj(skb, ct) < 0) 392 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
396 goto nla_put_failure; 393 goto nla_put_failure;
397 394
398 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 395 nlmsg_end(skb, nlh);
399 return skb->len; 396 return skb->len;
400 397
401nlmsg_failure: 398nlmsg_failure:
402nla_put_failure: 399nla_put_failure:
403 nlmsg_trim(skb, b); 400 nlmsg_cancel(skb, nlh);
404 return -1; 401 return -1;
405} 402}
406 403
407#ifdef CONFIG_NF_CONNTRACK_EVENTS 404#ifdef CONFIG_NF_CONNTRACK_EVENTS
408/* 405static inline size_t
409 * The general structure of a ctnetlink event is 406ctnetlink_proto_size(const struct nf_conn *ct)
410 *
411 * CTA_TUPLE_ORIG
412 * <l3/l4-proto-attributes>
413 * CTA_TUPLE_REPLY
414 * <l3/l4-proto-attributes>
415 * CTA_ID
416 * ...
417 * CTA_PROTOINFO
418 * <l4-proto-attributes>
419 * CTA_TUPLE_MASTER
420 * <l3/l4-proto-attributes>
421 *
422 * Therefore the formular is
423 *
424 * size = sizeof(headers) + sizeof(generic_nlas) + 3 * sizeof(tuple_nlas)
425 * + sizeof(protoinfo_nlas)
426 */
427static struct sk_buff *
428ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp)
429{ 407{
430 struct nf_conntrack_l3proto *l3proto; 408 struct nf_conntrack_l3proto *l3proto;
431 struct nf_conntrack_l4proto *l4proto; 409 struct nf_conntrack_l4proto *l4proto;
432 int len; 410 size_t len = 0;
433 411
434#define NLA_TYPE_SIZE(type) nla_total_size(sizeof(type)) 412 rcu_read_lock();
435 413 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
436 /* proto independant part */ 414 len += l3proto->nla_size;
437 len = NLMSG_SPACE(sizeof(struct nfgenmsg)) 415
438 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ 416 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
439 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ 417 len += l4proto->nla_size;
440 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ 418 rcu_read_unlock();
441 + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */ 419
442 + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */ 420 return len;
443 + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */ 421}
422
423static inline size_t
424ctnetlink_nlmsg_size(const struct nf_conn *ct)
425{
426 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
427 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
428 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
429 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
430 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
431 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
432 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
444#ifdef CONFIG_NF_CT_ACCT 433#ifdef CONFIG_NF_CT_ACCT
445 + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ 434 + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
446 + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_PACKETS */ 435 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
447 + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_BYTES */ 436 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
448#endif 437#endif
449 + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */ 438 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
450 + nla_total_size(0) /* CTA_PROTOINFO */ 439 + nla_total_size(0) /* CTA_PROTOINFO */
451 + nla_total_size(0) /* CTA_HELP */ 440 + nla_total_size(0) /* CTA_HELP */
452 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ 441 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
453#ifdef CONFIG_NF_CONNTRACK_SECMARK 442#ifdef CONFIG_NF_CONNTRACK_SECMARK
454 + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */ 443 + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
455#endif 444#endif
456#ifdef CONFIG_NF_NAT_NEEDED 445#ifdef CONFIG_NF_NAT_NEEDED
457 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ 446 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
458 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */ 447 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
459 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */
460 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */
461#endif 448#endif
462#ifdef CONFIG_NF_CONNTRACK_MARK 449#ifdef CONFIG_NF_CONNTRACK_MARK
463 + NLA_TYPE_SIZE(u_int32_t) /* CTA_MARK */ 450 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
464#endif 451#endif
465 ; 452 + ctnetlink_proto_size(ct)
466 453 ;
467#undef NLA_TYPE_SIZE
468
469 rcu_read_lock();
470 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
471 len += l3proto->nla_size;
472
473 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
474 len += l4proto->nla_size;
475 rcu_read_unlock();
476
477 return alloc_skb(len, gfp);
478} 454}
479 455
480static int ctnetlink_conntrack_event(struct notifier_block *this, 456static int
481 unsigned long events, void *ptr) 457ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
482{ 458{
483 struct nlmsghdr *nlh; 459 struct nlmsghdr *nlh;
484 struct nfgenmsg *nfmsg; 460 struct nfgenmsg *nfmsg;
485 struct nlattr *nest_parms; 461 struct nlattr *nest_parms;
486 struct nf_ct_event *item = (struct nf_ct_event *)ptr;
487 struct nf_conn *ct = item->ct; 462 struct nf_conn *ct = item->ct;
488 struct sk_buff *skb; 463 struct sk_buff *skb;
489 unsigned int type; 464 unsigned int type;
490 sk_buff_data_t b;
491 unsigned int flags = 0, group; 465 unsigned int flags = 0, group;
466 int err;
492 467
493 /* ignore our fake conntrack entry */ 468 /* ignore our fake conntrack entry */
494 if (ct == &nf_conntrack_untracked) 469 if (ct == &nf_conntrack_untracked)
495 return NOTIFY_DONE; 470 return 0;
496 471
497 if (events & IPCT_DESTROY) { 472 if (events & (1 << IPCT_DESTROY)) {
498 type = IPCTNL_MSG_CT_DELETE; 473 type = IPCTNL_MSG_CT_DELETE;
499 group = NFNLGRP_CONNTRACK_DESTROY; 474 group = NFNLGRP_CONNTRACK_DESTROY;
500 } else if (events & (IPCT_NEW | IPCT_RELATED)) { 475 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
501 type = IPCTNL_MSG_CT_NEW; 476 type = IPCTNL_MSG_CT_NEW;
502 flags = NLM_F_CREATE|NLM_F_EXCL; 477 flags = NLM_F_CREATE|NLM_F_EXCL;
503 group = NFNLGRP_CONNTRACK_NEW; 478 group = NFNLGRP_CONNTRACK_NEW;
504 } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { 479 } else if (events) {
505 type = IPCTNL_MSG_CT_NEW; 480 type = IPCTNL_MSG_CT_NEW;
506 group = NFNLGRP_CONNTRACK_UPDATE; 481 group = NFNLGRP_CONNTRACK_UPDATE;
507 } else 482 } else
508 return NOTIFY_DONE; 483 return 0;
509 484
510 if (!item->report && !nfnetlink_has_listeners(group)) 485 if (!item->report && !nfnetlink_has_listeners(group))
511 return NOTIFY_DONE; 486 return 0;
512 487
513 skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC); 488 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
514 if (!skb) 489 if (skb == NULL)
515 goto errout; 490 goto errout;
516 491
517 b = skb->tail;
518
519 type |= NFNL_SUBSYS_CTNETLINK << 8; 492 type |= NFNL_SUBSYS_CTNETLINK << 8;
520 nlh = NLMSG_PUT(skb, item->pid, 0, type, sizeof(struct nfgenmsg)); 493 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
521 nfmsg = NLMSG_DATA(nlh); 494 if (nlh == NULL)
495 goto nlmsg_failure;
522 496
523 nlh->nlmsg_flags = flags; 497 nfmsg = nlmsg_data(nlh);
524 nfmsg->nfgen_family = nf_ct_l3num(ct); 498 nfmsg->nfgen_family = nf_ct_l3num(ct);
525 nfmsg->version = NFNETLINK_V0; 499 nfmsg->version = NFNETLINK_V0;
526 nfmsg->res_id = 0; 500 nfmsg->res_id = 0;
@@ -529,14 +503,14 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
529 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); 503 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
530 if (!nest_parms) 504 if (!nest_parms)
531 goto nla_put_failure; 505 goto nla_put_failure;
532 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) 506 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
533 goto nla_put_failure; 507 goto nla_put_failure;
534 nla_nest_end(skb, nest_parms); 508 nla_nest_end(skb, nest_parms);
535 509
536 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); 510 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
537 if (!nest_parms) 511 if (!nest_parms)
538 goto nla_put_failure; 512 goto nla_put_failure;
539 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) 513 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
540 goto nla_put_failure; 514 goto nla_put_failure;
541 nla_nest_end(skb, nest_parms); 515 nla_nest_end(skb, nest_parms);
542 516
@@ -546,7 +520,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
546 if (ctnetlink_dump_status(skb, ct) < 0) 520 if (ctnetlink_dump_status(skb, ct) < 0)
547 goto nla_put_failure; 521 goto nla_put_failure;
548 522
549 if (events & IPCT_DESTROY) { 523 if (events & (1 << IPCT_DESTROY)) {
550 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 524 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
551 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) 525 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
552 goto nla_put_failure; 526 goto nla_put_failure;
@@ -554,47 +528,51 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
554 if (ctnetlink_dump_timeout(skb, ct) < 0) 528 if (ctnetlink_dump_timeout(skb, ct) < 0)
555 goto nla_put_failure; 529 goto nla_put_failure;
556 530
557 if (events & IPCT_PROTOINFO 531 if (events & (1 << IPCT_PROTOINFO)
558 && ctnetlink_dump_protoinfo(skb, ct) < 0) 532 && ctnetlink_dump_protoinfo(skb, ct) < 0)
559 goto nla_put_failure; 533 goto nla_put_failure;
560 534
561 if ((events & IPCT_HELPER || nfct_help(ct)) 535 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
562 && ctnetlink_dump_helpinfo(skb, ct) < 0) 536 && ctnetlink_dump_helpinfo(skb, ct) < 0)
563 goto nla_put_failure; 537 goto nla_put_failure;
564 538
565#ifdef CONFIG_NF_CONNTRACK_SECMARK 539#ifdef CONFIG_NF_CONNTRACK_SECMARK
566 if ((events & IPCT_SECMARK || ct->secmark) 540 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
567 && ctnetlink_dump_secmark(skb, ct) < 0) 541 && ctnetlink_dump_secmark(skb, ct) < 0)
568 goto nla_put_failure; 542 goto nla_put_failure;
569#endif 543#endif
570 544
571 if (events & IPCT_RELATED && 545 if (events & (1 << IPCT_RELATED) &&
572 ctnetlink_dump_master(skb, ct) < 0) 546 ctnetlink_dump_master(skb, ct) < 0)
573 goto nla_put_failure; 547 goto nla_put_failure;
574 548
575 if (events & IPCT_NATSEQADJ && 549 if (events & (1 << IPCT_NATSEQADJ) &&
576 ctnetlink_dump_nat_seq_adj(skb, ct) < 0) 550 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
577 goto nla_put_failure; 551 goto nla_put_failure;
578 } 552 }
579 553
580#ifdef CONFIG_NF_CONNTRACK_MARK 554#ifdef CONFIG_NF_CONNTRACK_MARK
581 if ((events & IPCT_MARK || ct->mark) 555 if ((events & (1 << IPCT_MARK) || ct->mark)
582 && ctnetlink_dump_mark(skb, ct) < 0) 556 && ctnetlink_dump_mark(skb, ct) < 0)
583 goto nla_put_failure; 557 goto nla_put_failure;
584#endif 558#endif
585 rcu_read_unlock(); 559 rcu_read_unlock();
586 560
587 nlh->nlmsg_len = skb->tail - b; 561 nlmsg_end(skb, nlh);
588 nfnetlink_send(skb, item->pid, group, item->report); 562 err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC);
589 return NOTIFY_DONE; 563 if (err == -ENOBUFS || err == -EAGAIN)
564 return -ENOBUFS;
565
566 return 0;
590 567
591nla_put_failure: 568nla_put_failure:
592 rcu_read_unlock(); 569 rcu_read_unlock();
570 nlmsg_cancel(skb, nlh);
593nlmsg_failure: 571nlmsg_failure:
594 kfree_skb(skb); 572 kfree_skb(skb);
595errout: 573errout:
596 nfnetlink_set_err(0, group, -ENOBUFS); 574 nfnetlink_set_err(0, group, -ENOBUFS);
597 return NOTIFY_DONE; 575 return 0;
598} 576}
599#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 577#endif /* CONFIG_NF_CONNTRACK_EVENTS */
600 578
@@ -611,7 +589,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
611 struct nf_conn *ct, *last; 589 struct nf_conn *ct, *last;
612 struct nf_conntrack_tuple_hash *h; 590 struct nf_conntrack_tuple_hash *h;
613 struct hlist_nulls_node *n; 591 struct hlist_nulls_node *n;
614 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 592 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
615 u_int8_t l3proto = nfmsg->nfgen_family; 593 u_int8_t l3proto = nfmsg->nfgen_family;
616 594
617 rcu_read_lock(); 595 rcu_read_lock();
@@ -637,8 +615,7 @@ restart:
637 } 615 }
638 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 616 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
639 cb->nlh->nlmsg_seq, 617 cb->nlh->nlmsg_seq,
640 IPCTNL_MSG_CT_NEW, 618 IPCTNL_MSG_CT_NEW, ct) < 0) {
641 1, ct) < 0) {
642 cb->args[1] = (unsigned long)ct; 619 cb->args[1] = (unsigned long)ct;
643 goto out; 620 goto out;
644 } 621 }
@@ -792,7 +769,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
792 struct nf_conntrack_tuple_hash *h; 769 struct nf_conntrack_tuple_hash *h;
793 struct nf_conntrack_tuple tuple; 770 struct nf_conntrack_tuple tuple;
794 struct nf_conn *ct; 771 struct nf_conn *ct;
795 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 772 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
796 u_int8_t u3 = nfmsg->nfgen_family; 773 u_int8_t u3 = nfmsg->nfgen_family;
797 int err = 0; 774 int err = 0;
798 775
@@ -802,9 +779,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
802 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); 779 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
803 else { 780 else {
804 /* Flush the whole table */ 781 /* Flush the whole table */
805 nf_conntrack_flush(&init_net, 782 nf_conntrack_flush_report(&init_net,
806 NETLINK_CB(skb).pid, 783 NETLINK_CB(skb).pid,
807 nlmsg_report(nlh)); 784 nlmsg_report(nlh));
808 return 0; 785 return 0;
809 } 786 }
810 787
@@ -825,10 +802,15 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
825 } 802 }
826 } 803 }
827 804
828 nf_conntrack_event_report(IPCT_DESTROY, 805 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
829 ct, 806 NETLINK_CB(skb).pid,
830 NETLINK_CB(skb).pid, 807 nlmsg_report(nlh)) < 0) {
831 nlmsg_report(nlh)); 808 nf_ct_delete_from_lists(ct);
809 /* we failed to report the event, try later */
810 nf_ct_insert_dying_list(ct);
811 nf_ct_put(ct);
812 return 0;
813 }
832 814
833 /* death_by_timeout would report the event again */ 815 /* death_by_timeout would report the event again */
834 set_bit(IPS_DYING_BIT, &ct->status); 816 set_bit(IPS_DYING_BIT, &ct->status);
@@ -847,7 +829,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
847 struct nf_conntrack_tuple tuple; 829 struct nf_conntrack_tuple tuple;
848 struct nf_conn *ct; 830 struct nf_conn *ct;
849 struct sk_buff *skb2 = NULL; 831 struct sk_buff *skb2 = NULL;
850 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 832 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
851 u_int8_t u3 = nfmsg->nfgen_family; 833 u_int8_t u3 = nfmsg->nfgen_family;
852 int err = 0; 834 int err = 0;
853 835
@@ -872,15 +854,15 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
872 ct = nf_ct_tuplehash_to_ctrack(h); 854 ct = nf_ct_tuplehash_to_ctrack(h);
873 855
874 err = -ENOMEM; 856 err = -ENOMEM;
875 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 857 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
876 if (!skb2) { 858 if (skb2 == NULL) {
877 nf_ct_put(ct); 859 nf_ct_put(ct);
878 return -ENOMEM; 860 return -ENOMEM;
879 } 861 }
880 862
881 rcu_read_lock(); 863 rcu_read_lock();
882 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 864 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
883 IPCTNL_MSG_CT_NEW, 1, ct); 865 IPCTNL_MSG_CT_NEW, ct);
884 rcu_read_unlock(); 866 rcu_read_unlock();
885 nf_ct_put(ct); 867 nf_ct_put(ct);
886 if (err <= 0) 868 if (err <= 0)
@@ -1280,6 +1262,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1280 } 1262 }
1281 1263
1282 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 1264 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1265 nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
1283 1266
1284#if defined(CONFIG_NF_CONNTRACK_MARK) 1267#if defined(CONFIG_NF_CONNTRACK_MARK)
1285 if (cda[CTA_MARK]) 1268 if (cda[CTA_MARK])
@@ -1325,7 +1308,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1325{ 1308{
1326 struct nf_conntrack_tuple otuple, rtuple; 1309 struct nf_conntrack_tuple otuple, rtuple;
1327 struct nf_conntrack_tuple_hash *h = NULL; 1310 struct nf_conntrack_tuple_hash *h = NULL;
1328 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 1311 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1329 u_int8_t u3 = nfmsg->nfgen_family; 1312 u_int8_t u3 = nfmsg->nfgen_family;
1330 int err = 0; 1313 int err = 0;
1331 1314
@@ -1367,13 +1350,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1367 else 1350 else
1368 events = IPCT_NEW; 1351 events = IPCT_NEW;
1369 1352
1370 nf_conntrack_event_report(IPCT_STATUS | 1353 nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
1371 IPCT_HELPER | 1354 (1 << IPCT_HELPER) |
1372 IPCT_PROTOINFO | 1355 (1 << IPCT_PROTOINFO) |
1373 IPCT_NATSEQADJ | 1356 (1 << IPCT_NATSEQADJ) |
1374 IPCT_MARK | events, 1357 (1 << IPCT_MARK) | events,
1375 ct, NETLINK_CB(skb).pid, 1358 ct, NETLINK_CB(skb).pid,
1376 nlmsg_report(nlh)); 1359 nlmsg_report(nlh));
1377 nf_ct_put(ct); 1360 nf_ct_put(ct);
1378 } else 1361 } else
1379 spin_unlock_bh(&nf_conntrack_lock); 1362 spin_unlock_bh(&nf_conntrack_lock);
@@ -1392,13 +1375,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1392 if (err == 0) { 1375 if (err == 0) {
1393 nf_conntrack_get(&ct->ct_general); 1376 nf_conntrack_get(&ct->ct_general);
1394 spin_unlock_bh(&nf_conntrack_lock); 1377 spin_unlock_bh(&nf_conntrack_lock);
1395 nf_conntrack_event_report(IPCT_STATUS | 1378 nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
1396 IPCT_HELPER | 1379 (1 << IPCT_HELPER) |
1397 IPCT_PROTOINFO | 1380 (1 << IPCT_PROTOINFO) |
1398 IPCT_NATSEQADJ | 1381 (1 << IPCT_NATSEQADJ) |
1399 IPCT_MARK, 1382 (1 << IPCT_MARK),
1400 ct, NETLINK_CB(skb).pid, 1383 ct, NETLINK_CB(skb).pid,
1401 nlmsg_report(nlh)); 1384 nlmsg_report(nlh));
1402 nf_ct_put(ct); 1385 nf_ct_put(ct);
1403 } else 1386 } else
1404 spin_unlock_bh(&nf_conntrack_lock); 1387 spin_unlock_bh(&nf_conntrack_lock);
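Both conversions above also show the event API's new calling convention: nf_conntrack_event_report() took OR-ed flag values, whereas nf_conntrack_eventmask_report() takes a mask of bit positions, so each IPCT_* constant (now a bit number) is shifted by the caller:

    unsigned int events = (1 << IPCT_STATUS) | (1 << IPCT_HELPER) |
                          (1 << IPCT_PROTOINFO);

    nf_conntrack_eventmask_report(events, ct, NETLINK_CB(skb).pid,
                                  nlmsg_report(nlh));
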
@@ -1503,19 +1486,18 @@ nla_put_failure:
1503 1486
1504static int 1487static int
1505ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 1488ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1506 int event, 1489 int event, const struct nf_conntrack_expect *exp)
1507 int nowait,
1508 const struct nf_conntrack_expect *exp)
1509{ 1490{
1510 struct nlmsghdr *nlh; 1491 struct nlmsghdr *nlh;
1511 struct nfgenmsg *nfmsg; 1492 struct nfgenmsg *nfmsg;
1512 unsigned char *b = skb_tail_pointer(skb); 1493 unsigned int flags = pid ? NLM_F_MULTI : 0;
1513 1494
1514 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; 1495 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1515 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); 1496 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1516 nfmsg = NLMSG_DATA(nlh); 1497 if (nlh == NULL)
1498 goto nlmsg_failure;
1517 1499
1518 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; 1500 nfmsg = nlmsg_data(nlh);
1519 nfmsg->nfgen_family = exp->tuple.src.l3num; 1501 nfmsg->nfgen_family = exp->tuple.src.l3num;
1520 nfmsg->version = NFNETLINK_V0; 1502 nfmsg->version = NFNETLINK_V0;
1521 nfmsg->res_id = 0; 1503 nfmsg->res_id = 0;
@@ -1523,49 +1505,46 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1523 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 1505 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1524 goto nla_put_failure; 1506 goto nla_put_failure;
1525 1507
1526 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1508 nlmsg_end(skb, nlh);
1527 return skb->len; 1509 return skb->len;
1528 1510
1529nlmsg_failure: 1511nlmsg_failure:
1530nla_put_failure: 1512nla_put_failure:
1531 nlmsg_trim(skb, b); 1513 nlmsg_cancel(skb, nlh);
1532 return -1; 1514 return -1;
1533} 1515}
1534 1516
1535#ifdef CONFIG_NF_CONNTRACK_EVENTS 1517#ifdef CONFIG_NF_CONNTRACK_EVENTS
1536static int ctnetlink_expect_event(struct notifier_block *this, 1518static int
1537 unsigned long events, void *ptr) 1519ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1538{ 1520{
1539 struct nlmsghdr *nlh; 1521 struct nlmsghdr *nlh;
1540 struct nfgenmsg *nfmsg; 1522 struct nfgenmsg *nfmsg;
1541 struct nf_exp_event *item = (struct nf_exp_event *)ptr;
1542 struct nf_conntrack_expect *exp = item->exp; 1523 struct nf_conntrack_expect *exp = item->exp;
1543 struct sk_buff *skb; 1524 struct sk_buff *skb;
1544 unsigned int type; 1525 unsigned int type;
1545 sk_buff_data_t b;
1546 int flags = 0; 1526 int flags = 0;
1547 1527
1548 if (events & IPEXP_NEW) { 1528 if (events & (1 << IPEXP_NEW)) {
1549 type = IPCTNL_MSG_EXP_NEW; 1529 type = IPCTNL_MSG_EXP_NEW;
1550 flags = NLM_F_CREATE|NLM_F_EXCL; 1530 flags = NLM_F_CREATE|NLM_F_EXCL;
1551 } else 1531 } else
1552 return NOTIFY_DONE; 1532 return 0;
1553 1533
1554 if (!item->report && 1534 if (!item->report &&
1555 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1535 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
1556 return NOTIFY_DONE; 1536 return 0;
1557 1537
1558 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 1538 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1559 if (!skb) 1539 if (skb == NULL)
1560 goto errout; 1540 goto errout;
1561 1541
1562 b = skb->tail;
1563
1564 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; 1542 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1565 nlh = NLMSG_PUT(skb, item->pid, 0, type, sizeof(struct nfgenmsg)); 1543 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
1566 nfmsg = NLMSG_DATA(nlh); 1544 if (nlh == NULL)
1545 goto nlmsg_failure;
1567 1546
1568 nlh->nlmsg_flags = flags; 1547 nfmsg = nlmsg_data(nlh);
1569 nfmsg->nfgen_family = exp->tuple.src.l3num; 1548 nfmsg->nfgen_family = exp->tuple.src.l3num;
1570 nfmsg->version = NFNETLINK_V0; 1549 nfmsg->version = NFNETLINK_V0;
1571 nfmsg->res_id = 0; 1550 nfmsg->res_id = 0;
@@ -1575,17 +1554,19 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1575 goto nla_put_failure; 1554 goto nla_put_failure;
1576 rcu_read_unlock(); 1555 rcu_read_unlock();
1577 1556
1578 nlh->nlmsg_len = skb->tail - b; 1557 nlmsg_end(skb, nlh);
1579 nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, item->report); 1558 nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
1580 return NOTIFY_DONE; 1559 item->report, GFP_ATOMIC);
1560 return 0;
1581 1561
1582nla_put_failure: 1562nla_put_failure:
1583 rcu_read_unlock(); 1563 rcu_read_unlock();
1564 nlmsg_cancel(skb, nlh);
1584nlmsg_failure: 1565nlmsg_failure:
1585 kfree_skb(skb); 1566 kfree_skb(skb);
1586errout: 1567errout:
1587 nfnetlink_set_err(0, 0, -ENOBUFS); 1568 nfnetlink_set_err(0, 0, -ENOBUFS);
1588 return NOTIFY_DONE; 1569 return 0;
1589} 1570}
1590#endif 1571#endif
1591static int ctnetlink_exp_done(struct netlink_callback *cb) 1572static int ctnetlink_exp_done(struct netlink_callback *cb)
@@ -1600,7 +1581,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1600{ 1581{
1601 struct net *net = &init_net; 1582 struct net *net = &init_net;
1602 struct nf_conntrack_expect *exp, *last; 1583 struct nf_conntrack_expect *exp, *last;
1603 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 1584 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1604 struct hlist_node *n; 1585 struct hlist_node *n;
1605 u_int8_t l3proto = nfmsg->nfgen_family; 1586 u_int8_t l3proto = nfmsg->nfgen_family;
1606 1587
@@ -1617,10 +1598,11 @@ restart:
1617 continue; 1598 continue;
1618 cb->args[1] = 0; 1599 cb->args[1] = 0;
1619 } 1600 }
1620 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid, 1601 if (ctnetlink_exp_fill_info(skb,
1602 NETLINK_CB(cb->skb).pid,
1621 cb->nlh->nlmsg_seq, 1603 cb->nlh->nlmsg_seq,
1622 IPCTNL_MSG_EXP_NEW, 1604 IPCTNL_MSG_EXP_NEW,
1623 1, exp) < 0) { 1605 exp) < 0) {
1624 if (!atomic_inc_not_zero(&exp->use)) 1606 if (!atomic_inc_not_zero(&exp->use))
1625 continue; 1607 continue;
1626 cb->args[1] = (unsigned long)exp; 1608 cb->args[1] = (unsigned long)exp;
@@ -1652,7 +1634,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1652 struct nf_conntrack_tuple tuple; 1634 struct nf_conntrack_tuple tuple;
1653 struct nf_conntrack_expect *exp; 1635 struct nf_conntrack_expect *exp;
1654 struct sk_buff *skb2; 1636 struct sk_buff *skb2;
1655 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 1637 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1656 u_int8_t u3 = nfmsg->nfgen_family; 1638 u_int8_t u3 = nfmsg->nfgen_family;
1657 int err = 0; 1639 int err = 0;
1658 1640
@@ -1683,14 +1665,13 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1683 } 1665 }
1684 1666
1685 err = -ENOMEM; 1667 err = -ENOMEM;
1686 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1668 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1687 if (!skb2) 1669 if (skb2 == NULL)
1688 goto out; 1670 goto out;
1689 1671
1690 rcu_read_lock(); 1672 rcu_read_lock();
1691 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, 1673 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
1692 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, 1674 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
1693 1, exp);
1694 rcu_read_unlock(); 1675 rcu_read_unlock();
1695 if (err <= 0) 1676 if (err <= 0)
1696 goto free; 1677 goto free;
@@ -1713,7 +1694,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1713 struct nf_conntrack_expect *exp; 1694 struct nf_conntrack_expect *exp;
1714 struct nf_conntrack_tuple tuple; 1695 struct nf_conntrack_tuple tuple;
1715 struct nf_conntrack_helper *h; 1696 struct nf_conntrack_helper *h;
1716 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 1697 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1717 struct hlist_node *n, *next; 1698 struct hlist_node *n, *next;
1718 u_int8_t u3 = nfmsg->nfgen_family; 1699 u_int8_t u3 = nfmsg->nfgen_family;
1719 unsigned int i; 1700 unsigned int i;
@@ -1854,7 +1835,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1854{ 1835{
1855 struct nf_conntrack_tuple tuple; 1836 struct nf_conntrack_tuple tuple;
1856 struct nf_conntrack_expect *exp; 1837 struct nf_conntrack_expect *exp;
1857 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 1838 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1858 u_int8_t u3 = nfmsg->nfgen_family; 1839 u_int8_t u3 = nfmsg->nfgen_family;
1859 int err = 0; 1840 int err = 0;
1860 1841
@@ -1891,12 +1872,12 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1891} 1872}
1892 1873
1893#ifdef CONFIG_NF_CONNTRACK_EVENTS 1874#ifdef CONFIG_NF_CONNTRACK_EVENTS
1894static struct notifier_block ctnl_notifier = { 1875static struct nf_ct_event_notifier ctnl_notifier = {
1895 .notifier_call = ctnetlink_conntrack_event, 1876 .fcn = ctnetlink_conntrack_event,
1896}; 1877};
1897 1878
1898static struct notifier_block ctnl_notifier_exp = { 1879static struct nf_exp_event_notifier ctnl_notifier_exp = {
1899 .notifier_call = ctnetlink_expect_event, 1880 .fcn = ctnetlink_expect_event,
1900}; 1881};
1901#endif 1882#endif
1902 1883
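With the generic notifier_block gone, the notifiers become plain structs holding a function pointer, and registration presumably goes through conntrack's own helpers rather than the atomic notifier chain. A sketch of module init under that assumption (the nf_conntrack_register_notifier()/nf_ct_expect_register_notifier() names are assumed to match the new notifier types):

    static int __init example_register(void)
    {
            int ret;

            ret = nf_conntrack_register_notifier(&ctnl_notifier);
            if (ret < 0)
                    return ret;

            ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
            if (ret < 0)
                    nf_conntrack_unregister_notifier(&ctnl_notifier);
            return ret;
    }
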
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index aee0d6bea309..1b816a2ea813 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -25,8 +25,6 @@
25#include <net/netfilter/nf_conntrack_ecache.h> 25#include <net/netfilter/nf_conntrack_ecache.h>
26#include <net/netfilter/nf_log.h> 26#include <net/netfilter/nf_log.h>
27 27
28static DEFINE_RWLOCK(dccp_lock);
29
30/* Timeouts are based on values from RFC4340: 28/* Timeouts are based on values from RFC4340:
31 * 29 *
32 * - REQUEST: 30 * - REQUEST:
@@ -492,7 +490,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
492 return NF_ACCEPT; 490 return NF_ACCEPT;
493 } 491 }
494 492
495 write_lock_bh(&dccp_lock); 493 spin_lock_bh(&ct->lock);
496 494
497 role = ct->proto.dccp.role[dir]; 495 role = ct->proto.dccp.role[dir];
498 old_state = ct->proto.dccp.state; 496 old_state = ct->proto.dccp.state;
@@ -536,13 +534,13 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
536 ct->proto.dccp.last_dir = dir; 534 ct->proto.dccp.last_dir = dir;
537 ct->proto.dccp.last_pkt = type; 535 ct->proto.dccp.last_pkt = type;
538 536
539 write_unlock_bh(&dccp_lock); 537 spin_unlock_bh(&ct->lock);
540 if (LOG_INVALID(net, IPPROTO_DCCP)) 538 if (LOG_INVALID(net, IPPROTO_DCCP))
541 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 539 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
542 "nf_ct_dccp: invalid packet ignored "); 540 "nf_ct_dccp: invalid packet ignored ");
543 return NF_ACCEPT; 541 return NF_ACCEPT;
544 case CT_DCCP_INVALID: 542 case CT_DCCP_INVALID:
545 write_unlock_bh(&dccp_lock); 543 spin_unlock_bh(&ct->lock);
546 if (LOG_INVALID(net, IPPROTO_DCCP)) 544 if (LOG_INVALID(net, IPPROTO_DCCP))
547 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 545 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
548 "nf_ct_dccp: invalid state transition "); 546 "nf_ct_dccp: invalid state transition ");
@@ -552,7 +550,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
552 ct->proto.dccp.last_dir = dir; 550 ct->proto.dccp.last_dir = dir;
553 ct->proto.dccp.last_pkt = type; 551 ct->proto.dccp.last_pkt = type;
554 ct->proto.dccp.state = new_state; 552 ct->proto.dccp.state = new_state;
555 write_unlock_bh(&dccp_lock); 553 spin_unlock_bh(&ct->lock);
556 554
557 if (new_state != old_state) 555 if (new_state != old_state)
558 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 556 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
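The pattern here repeats for SCTP and TCP below: the file-global dccp_lock rwlock is dropped in favour of the per-conntrack ct->lock spinlock, so protocol-state updates only contend with other packets of the same connection rather than with every DCCP flow on the machine. The idiom, as a hedged helper (assuming ct->lock is the spinlock_t this series adds to struct nf_conn):

    static enum ct_dccp_states dccp_state_snapshot(struct nf_conn *ct)
    {
            enum ct_dccp_states state;

            spin_lock_bh(&ct->lock);        /* protects ct->proto.dccp */
            state = ct->proto.dccp.state;
            spin_unlock_bh(&ct->lock);

            return state;
    }
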
@@ -621,36 +619,39 @@ static int dccp_print_tuple(struct seq_file *s,
621 ntohs(tuple->dst.u.dccp.port)); 619 ntohs(tuple->dst.u.dccp.port));
622} 620}
623 621
624static int dccp_print_conntrack(struct seq_file *s, const struct nf_conn *ct) 622static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
625{ 623{
626 return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); 624 return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
627} 625}
628 626
629#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 627#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
630static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, 628static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
631 const struct nf_conn *ct) 629 struct nf_conn *ct)
632{ 630{
633 struct nlattr *nest_parms; 631 struct nlattr *nest_parms;
634 632
635 read_lock_bh(&dccp_lock); 633 spin_lock_bh(&ct->lock);
636 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); 634 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
637 if (!nest_parms) 635 if (!nest_parms)
638 goto nla_put_failure; 636 goto nla_put_failure;
639 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); 637 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
640 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, 638 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
641 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); 639 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
640 NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
641 cpu_to_be64(ct->proto.dccp.handshake_seq));
642 nla_nest_end(skb, nest_parms); 642 nla_nest_end(skb, nest_parms);
643 read_unlock_bh(&dccp_lock); 643 spin_unlock_bh(&ct->lock);
644 return 0; 644 return 0;
645 645
646nla_put_failure: 646nla_put_failure:
647 read_unlock_bh(&dccp_lock); 647 spin_unlock_bh(&ct->lock);
648 return -1; 648 return -1;
649} 649}
650 650
651static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { 651static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
652 [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, 652 [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
653 [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, 653 [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 },
654 [CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
654}; 655};
655 656
656static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) 657static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
@@ -674,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
674 return -EINVAL; 675 return -EINVAL;
675 } 676 }
676 677
677 write_lock_bh(&dccp_lock); 678 spin_lock_bh(&ct->lock);
678 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); 679 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
679 if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { 680 if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
680 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; 681 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
@@ -683,7 +684,11 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
683 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; 684 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER;
684 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; 685 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT;
685 } 686 }
686 write_unlock_bh(&dccp_lock); 687 if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {
688 ct->proto.dccp.handshake_seq =
689 be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
690 }
691 spin_unlock_bh(&ct->lock);
687 return 0; 692 return 0;
688} 693}
689 694
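The new CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ attribute travels as a big-endian 64-bit value, so the dump and restore sides must mirror each other's byte-order conversions. The round trip, lifted from the two hunks above:

    /* dump side (inside the nested attribute, under ct->lock) */
    NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
                 cpu_to_be64(ct->proto.dccp.handshake_seq));

    /* restore side */
    if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ])
            ct->proto.dccp.handshake_seq =
                    be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
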
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 117b80112fcb..a54a0af0edba 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -176,7 +176,7 @@ static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
176static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 176static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
177 struct nf_conntrack_tuple *tuple) 177 struct nf_conntrack_tuple *tuple)
178{ 178{
179 struct net *net = dev_net(skb->dev ? skb->dev : skb->dst->dev); 179 struct net *net = dev_net(skb->dev ? skb->dev : skb_dst(skb)->dev);
180 const struct gre_hdr_pptp *pgrehdr; 180 const struct gre_hdr_pptp *pgrehdr;
181 struct gre_hdr_pptp _pgrehdr; 181 struct gre_hdr_pptp _pgrehdr;
182 __be16 srckey; 182 __be16 srckey;
@@ -219,8 +219,7 @@ static int gre_print_tuple(struct seq_file *s,
219} 219}
220 220
221/* print private data for conntrack */ 221/* print private data for conntrack */
222static int gre_print_conntrack(struct seq_file *s, 222static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
223 const struct nf_conn *ct)
224{ 223{
225 return seq_printf(s, "timeout=%u, stream_timeout=%u ", 224 return seq_printf(s, "timeout=%u, stream_timeout=%u ",
226 (ct->proto.gre.timeout / HZ), 225 (ct->proto.gre.timeout / HZ),
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 101b4ad9e817..c10e6f36e31e 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -25,9 +25,6 @@
25#include <net/netfilter/nf_conntrack_l4proto.h> 25#include <net/netfilter/nf_conntrack_l4proto.h>
26#include <net/netfilter/nf_conntrack_ecache.h> 26#include <net/netfilter/nf_conntrack_ecache.h>
27 27
28/* Protects ct->proto.sctp */
29static DEFINE_RWLOCK(sctp_lock);
30
31/* FIXME: Examine ipfilter's timeouts and conntrack transitions more 28/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
32 closely. They're more complex. --RR 29 closely. They're more complex. --RR
33 30
@@ -164,13 +161,13 @@ static int sctp_print_tuple(struct seq_file *s,
164} 161}
165 162
166/* Print out the private part of the conntrack. */ 163/* Print out the private part of the conntrack. */
167static int sctp_print_conntrack(struct seq_file *s, const struct nf_conn *ct) 164static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
168{ 165{
169 enum sctp_conntrack state; 166 enum sctp_conntrack state;
170 167
171 read_lock_bh(&sctp_lock); 168 spin_lock_bh(&ct->lock);
172 state = ct->proto.sctp.state; 169 state = ct->proto.sctp.state;
173 read_unlock_bh(&sctp_lock); 170 spin_unlock_bh(&ct->lock);
174 171
175 return seq_printf(s, "%s ", sctp_conntrack_names[state]); 172 return seq_printf(s, "%s ", sctp_conntrack_names[state]);
176} 173}
@@ -318,7 +315,7 @@ static int sctp_packet(struct nf_conn *ct,
318 } 315 }
319 316
320 old_state = new_state = SCTP_CONNTRACK_NONE; 317 old_state = new_state = SCTP_CONNTRACK_NONE;
321 write_lock_bh(&sctp_lock); 318 spin_lock_bh(&ct->lock);
322 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { 319 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
323 /* Special cases of Verification tag check (Sec 8.5.1) */ 320 /* Special cases of Verification tag check (Sec 8.5.1) */
324 if (sch->type == SCTP_CID_INIT) { 321 if (sch->type == SCTP_CID_INIT) {
@@ -371,7 +368,7 @@ static int sctp_packet(struct nf_conn *ct,
371 if (old_state != new_state) 368 if (old_state != new_state)
372 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 369 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
373 } 370 }
374 write_unlock_bh(&sctp_lock); 371 spin_unlock_bh(&ct->lock);
375 372
376 nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]); 373 nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]);
377 374
@@ -386,7 +383,7 @@ static int sctp_packet(struct nf_conn *ct,
386 return NF_ACCEPT; 383 return NF_ACCEPT;
387 384
388out_unlock: 385out_unlock:
389 write_unlock_bh(&sctp_lock); 386 spin_unlock_bh(&ct->lock);
390out: 387out:
391 return -NF_ACCEPT; 388 return -NF_ACCEPT;
392} 389}
@@ -469,11 +466,11 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
469#include <linux/netfilter/nfnetlink_conntrack.h> 466#include <linux/netfilter/nfnetlink_conntrack.h>
470 467
471static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, 468static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
472 const struct nf_conn *ct) 469 struct nf_conn *ct)
473{ 470{
474 struct nlattr *nest_parms; 471 struct nlattr *nest_parms;
475 472
476 read_lock_bh(&sctp_lock); 473 spin_lock_bh(&ct->lock);
477 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED); 474 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED);
478 if (!nest_parms) 475 if (!nest_parms)
479 goto nla_put_failure; 476 goto nla_put_failure;
@@ -488,14 +485,14 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
488 CTA_PROTOINFO_SCTP_VTAG_REPLY, 485 CTA_PROTOINFO_SCTP_VTAG_REPLY,
489 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]); 486 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
490 487
491 read_unlock_bh(&sctp_lock); 488 spin_unlock_bh(&ct->lock);
492 489
493 nla_nest_end(skb, nest_parms); 490 nla_nest_end(skb, nest_parms);
494 491
495 return 0; 492 return 0;
496 493
497nla_put_failure: 494nla_put_failure:
498 read_unlock_bh(&sctp_lock); 495 spin_unlock_bh(&ct->lock);
499 return -1; 496 return -1;
500} 497}
501 498
@@ -527,13 +524,13 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
527 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) 524 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
528 return -EINVAL; 525 return -EINVAL;
529 526
530 write_lock_bh(&sctp_lock); 527 spin_lock_bh(&ct->lock);
531 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); 528 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
532 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = 529 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
533 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]); 530 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]);
534 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = 531 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
535 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]); 532 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]);
536 write_unlock_bh(&sctp_lock); 533 spin_unlock_bh(&ct->lock);
537 534
538 return 0; 535 return 0;
539} 536}
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97a6e93d742e..33fc0a443f3d 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -29,9 +29,6 @@
29#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 29#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
31 31
32/* Protects ct->proto.tcp */
33static DEFINE_RWLOCK(tcp_lock);
34
35/* "Be conservative in what you do, 32/* "Be conservative in what you do,
36 be liberal in what you accept from others." 33 be liberal in what you accept from others."
37 If it's non-zero, we mark only out of window RST segments as INVALID. */ 34 If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -59,7 +56,7 @@ static const char *const tcp_conntrack_names[] = {
59 "LAST_ACK", 56 "LAST_ACK",
60 "TIME_WAIT", 57 "TIME_WAIT",
61 "CLOSE", 58 "CLOSE",
62 "LISTEN" 59 "SYN_SENT2",
63}; 60};
64 61
65#define SECS * HZ 62#define SECS * HZ
@@ -82,6 +79,7 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
82 [TCP_CONNTRACK_LAST_ACK] = 30 SECS, 79 [TCP_CONNTRACK_LAST_ACK] = 30 SECS,
83 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS, 80 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
84 [TCP_CONNTRACK_CLOSE] = 10 SECS, 81 [TCP_CONNTRACK_CLOSE] = 10 SECS,
82 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
85}; 83};
86 84
87#define sNO TCP_CONNTRACK_NONE 85#define sNO TCP_CONNTRACK_NONE
@@ -93,7 +91,7 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
93#define sLA TCP_CONNTRACK_LAST_ACK 91#define sLA TCP_CONNTRACK_LAST_ACK
94#define sTW TCP_CONNTRACK_TIME_WAIT 92#define sTW TCP_CONNTRACK_TIME_WAIT
95#define sCL TCP_CONNTRACK_CLOSE 93#define sCL TCP_CONNTRACK_CLOSE
96#define sLI TCP_CONNTRACK_LISTEN 94#define sS2 TCP_CONNTRACK_SYN_SENT2
97#define sIV TCP_CONNTRACK_MAX 95#define sIV TCP_CONNTRACK_MAX
98#define sIG TCP_CONNTRACK_IGNORE 96#define sIG TCP_CONNTRACK_IGNORE
99 97
@@ -123,6 +121,7 @@ enum tcp_bit_set {
123 * 121 *
124 * NONE: initial state 122 * NONE: initial state
125 * SYN_SENT: SYN-only packet seen 123 * SYN_SENT: SYN-only packet seen
124 * SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
126 * SYN_RECV: SYN-ACK packet seen 125 * SYN_RECV: SYN-ACK packet seen
127 * ESTABLISHED: ACK packet seen 126 * ESTABLISHED: ACK packet seen
128 * FIN_WAIT: FIN packet seen 127 * FIN_WAIT: FIN packet seen
@@ -131,26 +130,24 @@ enum tcp_bit_set {
131 * TIME_WAIT: last ACK seen 130 * TIME_WAIT: last ACK seen
132 * CLOSE: closed connection (RST) 131 * CLOSE: closed connection (RST)
133 * 132 *
134 * LISTEN state is not used.
135 *
136 * Packets marked as IGNORED (sIG): 133 * Packets marked as IGNORED (sIG):
137 * if they may be either invalid or valid 134 * if they may be either invalid or valid
138 * and the receiver may send back a connection 135 * and the receiver may send back a connection
139 * closing RST or a SYN/ACK. 136 * closing RST or a SYN/ACK.
140 * 137 *
141 * Packets marked as INVALID (sIV): 138 * Packets marked as INVALID (sIV):
142 * if they are invalid 139 * if we regard them as truly invalid packets
143 * or we do not support the request (simultaneous open)
144 */ 140 */
145static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { 141static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
146 { 142 {
147/* ORIGINAL */ 143/* ORIGINAL */
148/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 144/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
149/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sIV }, 145/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
150/* 146/*
151 * sNO -> sSS Initialize a new connection 147 * sNO -> sSS Initialize a new connection
152 * sSS -> sSS Retransmitted SYN 148 * sSS -> sSS Retransmitted SYN
153 * sSR -> sIG Late retransmitted SYN? 149 * sS2 -> sS2 Late retransmitted SYN
150 * sSR -> sIG
154 * sES -> sIG Error: SYNs in window outside the SYN_SENT state 151 * sES -> sIG Error: SYNs in window outside the SYN_SENT state
155 * are errors. Receiver will reply with RST 152 * are errors. Receiver will reply with RST
156 * and close the connection. 153 * and close the connection.
@@ -161,22 +158,30 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
161 * sTW -> sSS Reopened connection (RFC 1122). 158 * sTW -> sSS Reopened connection (RFC 1122).
162 * sCL -> sSS 159 * sCL -> sSS
163 */ 160 */
164/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 161/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
165/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, 162/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
166/* 163/*
167 * A SYN/ACK from the client is always invalid: 164 * sNO -> sIV Too late and no reason to do anything
168 * - either it tries to set up a simultaneous open, which is 165 * sSS -> sIV Client can't send SYN and then SYN/ACK
169 * not supported; 166 * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
170 * - or the firewall has just been inserted between the two hosts 167 * sSR -> sIG
171 * during the session set-up. The SYN will be retransmitted 168 * sES -> sIG Error: SYNs in window outside the SYN_SENT state
172 * by the true client (or it'll time out). 169 * are errors. Receiver will reply with RST
170 * and close the connection.
171 * Or we are not in sync and hold a dead connection.
172 * sFW -> sIG
173 * sCW -> sIG
174 * sLA -> sIG
175 * sTW -> sIG
176 * sCL -> sIG
173 */ 177 */
174/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 178/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
175/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, 179/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
176/* 180/*
177 * sNO -> sIV Too late and no reason to do anything... 181 * sNO -> sIV Too late and no reason to do anything...
178 * sSS -> sIV Client might not send FIN in this state: 182 * sSS -> sIV Client might not send FIN in this state:
179 * we enforce waiting for a SYN/ACK reply first. 183 * we enforce waiting for a SYN/ACK reply first.
184 * sS2 -> sIV
180 * sSR -> sFW Close started. 185 * sSR -> sFW Close started.
181 * sES -> sFW 186 * sES -> sFW
182 * sFW -> sLA FIN seen in both directions, waiting for 187 * sFW -> sLA FIN seen in both directions, waiting for
@@ -187,11 +192,12 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
187 * sTW -> sTW 192 * sTW -> sTW
188 * sCL -> sCL 193 * sCL -> sCL
189 */ 194 */
190/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 195/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
191/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV }, 196/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
192/* 197/*
193 * sNO -> sES Assumed. 198 * sNO -> sES Assumed.
194 * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet. 199 * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
200 * sS2 -> sIV
195 * sSR -> sES Established state is reached. 201 * sSR -> sES Established state is reached.
196 * sES -> sES :-) 202 * sES -> sES :-)
197 * sFW -> sCW Normal close request answered by ACK. 203 * sFW -> sCW Normal close request answered by ACK.
@@ -200,29 +206,31 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
200 * sTW -> sTW Retransmitted last ACK. Remain in the same state. 206 * sTW -> sTW Retransmitted last ACK. Remain in the same state.
201 * sCL -> sCL 207 * sCL -> sCL
202 */ 208 */
203/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 209/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
204/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, 210/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
205/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } 211/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
206 }, 212 },
207 { 213 {
208/* REPLY */ 214/* REPLY */
209/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 215/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
210/*syn*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, 216/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
211/* 217/*
212 * sNO -> sIV Never reached. 218 * sNO -> sIV Never reached.
213 * sSS -> sIV Simultaneous open, not supported 219 * sSS -> sS2 Simultaneous open
214 * sSR -> sIV Simultaneous open, not supported. 220 * sS2 -> sS2 Retransmitted simultaneous SYN
215 * sES -> sIV Server may not initiate a connection. 221 * sSR -> sIV Invalid SYN packets sent by the server
222 * sES -> sIV
216 * sFW -> sIV 223 * sFW -> sIV
217 * sCW -> sIV 224 * sCW -> sIV
218 * sLA -> sIV 225 * sLA -> sIV
219 * sTW -> sIV Reopened connection, but server may not do it. 226 * sTW -> sIV Reopened connection, but server may not do it.
220 * sCL -> sIV 227 * sCL -> sIV
221 */ 228 */
222/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 229/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
223/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIV }, 230/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
224/* 231/*
225 * sSS -> sSR Standard open. 232 * sSS -> sSR Standard open.
233 * sS2 -> sSR Simultaneous open
226 * sSR -> sSR Retransmitted SYN/ACK. 234 * sSR -> sSR Retransmitted SYN/ACK.
227 * sES -> sIG Late retransmitted SYN/ACK? 235 * sES -> sIG Late retransmitted SYN/ACK?
228 * sFW -> sIG Might be SYN/ACK answering ignored SYN 236 * sFW -> sIG Might be SYN/ACK answering ignored SYN
@@ -231,10 +239,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
231 * sTW -> sIG 239 * sTW -> sIG
232 * sCL -> sIG 240 * sCL -> sIG
233 */ 241 */
234/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 242/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
235/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, 243/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
236/* 244/*
237 * sSS -> sIV Server might not send FIN in this state. 245 * sSS -> sIV Server might not send FIN in this state.
246 * sS2 -> sIV
238 * sSR -> sFW Close started. 247 * sSR -> sFW Close started.
239 * sES -> sFW 248 * sES -> sFW
240 * sFW -> sLA FIN seen in both directions. 249 * sFW -> sLA FIN seen in both directions.
@@ -243,10 +252,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
243 * sTW -> sTW 252 * sTW -> sTW
244 * sCL -> sCL 253 * sCL -> sCL
245 */ 254 */
246/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 255/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
247/*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIV }, 256/*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
248/* 257/*
249 * sSS -> sIG Might be a half-open connection. 258 * sSS -> sIG Might be a half-open connection.
259 * sS2 -> sIG
250 * sSR -> sSR Might answer late resent SYN. 260 * sSR -> sSR Might answer late resent SYN.
251 * sES -> sES :-) 261 * sES -> sES :-)
252 * sFW -> sCW Normal close request answered by ACK. 262 * sFW -> sCW Normal close request answered by ACK.
@@ -255,8 +265,8 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
255 * sTW -> sTW Retransmitted last ACK. 265 * sTW -> sTW Retransmitted last ACK.
256 * sCL -> sCL 266 * sCL -> sCL
257 */ 267 */
258/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 268/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
259/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, 269/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
260/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } 270/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
261 } 271 }
262}; 272};
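For orientation while reading the rows above: the table is indexed as tcp_conntracks[direction][packet type][current state], and tcp_packet() consults it roughly like this (names as used elsewhere in this file):

    dir = CTINFO2DIR(ctinfo);               /* 0 = original, 1 = reply */
    index = get_conntrack_index(th);        /* syn/synack/fin/ack/rst/none row */
    new_state = tcp_conntracks[dir][index][ct->proto.tcp.state];
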
@@ -296,13 +306,13 @@ static int tcp_print_tuple(struct seq_file *s,
296} 306}
297 307
298/* Print out the private part of the conntrack. */ 308/* Print out the private part of the conntrack. */
299static int tcp_print_conntrack(struct seq_file *s, const struct nf_conn *ct) 309static int tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
300{ 310{
301 enum tcp_conntrack state; 311 enum tcp_conntrack state;
302 312
303 read_lock_bh(&tcp_lock); 313 spin_lock_bh(&ct->lock);
304 state = ct->proto.tcp.state; 314 state = ct->proto.tcp.state;
305 read_unlock_bh(&tcp_lock); 315 spin_unlock_bh(&ct->lock);
306 316
307 return seq_printf(s, "%s ", tcp_conntrack_names[state]); 317 return seq_printf(s, "%s ", tcp_conntrack_names[state]);
308} 318}
@@ -521,13 +531,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
521 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 531 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
522 receiver->td_scale); 532 receiver->td_scale);
523 533
524 if (sender->td_end == 0) { 534 if (sender->td_maxwin == 0) {
525 /* 535 /*
526 * Initialize sender data. 536 * Initialize sender data.
527 */ 537 */
528 if (tcph->syn && tcph->ack) { 538 if (tcph->syn) {
529 /* 539 /*
530 * Outgoing SYN-ACK in reply to a SYN. 540 * SYN-ACK in reply to a SYN
541 * or SYN from reply direction in simultaneous open.
531 */ 542 */
532 sender->td_end = 543 sender->td_end =
533 sender->td_maxend = end; 544 sender->td_maxend = end;
@@ -543,6 +554,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
543 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) 554 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
544 sender->td_scale = 555 sender->td_scale =
545 receiver->td_scale = 0; 556 receiver->td_scale = 0;
557 if (!tcph->ack)
558 /* Simultaneous open */
559 return true;
546 } else { 560 } else {
547 /* 561 /*
548 * We are in the middle of a connection, 562 * We are in the middle of a connection,
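The sentinel switch from td_end to td_maxwin ties into the tcp_new() hunk further down, which now initializes the reply direction's td_maxwin to 0 instead of 1: td_maxwin == 0 becomes the unambiguous "nothing seen from this side yet" marker, and a reply-direction SYN without ACK can be accepted as the second half of a simultaneous open. Schematically:

    if (sender->td_maxwin == 0) {           /* first segment from this side */
            if (tcph->syn) {
                    /* SYN/ACK answering our SYN, or the peer's own SYN
                     * in a simultaneous open: initialize td_* from it */
                    if (!tcph->ack)
                            return true;    /* no ACK to validate yet */
            }
    }
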
@@ -716,14 +730,14 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb,
716 730
717 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph); 731 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
718 732
719 write_lock_bh(&tcp_lock); 733 spin_lock_bh(&ct->lock);
720 /* 734 /*
721 * We have to worry about the ack in the reply packet only... 735 * We have to worry about the ack in the reply packet only...
722 */ 736 */
723 if (after(end, ct->proto.tcp.seen[dir].td_end)) 737 if (after(end, ct->proto.tcp.seen[dir].td_end))
724 ct->proto.tcp.seen[dir].td_end = end; 738 ct->proto.tcp.seen[dir].td_end = end;
725 ct->proto.tcp.last_end = end; 739 ct->proto.tcp.last_end = end;
726 write_unlock_bh(&tcp_lock); 740 spin_unlock_bh(&ct->lock);
727 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " 741 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
728 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 742 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
729 sender->td_end, sender->td_maxend, sender->td_maxwin, 743 sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -832,7 +846,7 @@ static int tcp_packet(struct nf_conn *ct,
832 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); 846 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
833 BUG_ON(th == NULL); 847 BUG_ON(th == NULL);
834 848
835 write_lock_bh(&tcp_lock); 849 spin_lock_bh(&ct->lock);
836 old_state = ct->proto.tcp.state; 850 old_state = ct->proto.tcp.state;
837 dir = CTINFO2DIR(ctinfo); 851 dir = CTINFO2DIR(ctinfo);
838 index = get_conntrack_index(th); 852 index = get_conntrack_index(th);
@@ -862,7 +876,7 @@ static int tcp_packet(struct nf_conn *ct,
862 && ct->proto.tcp.last_index == TCP_RST_SET)) { 876 && ct->proto.tcp.last_index == TCP_RST_SET)) {
863 /* Attempt to reopen a closed/aborted connection. 877 /* Attempt to reopen a closed/aborted connection.
864 * Delete this connection and look up again. */ 878 * Delete this connection and look up again. */
865 write_unlock_bh(&tcp_lock); 879 spin_unlock_bh(&ct->lock);
866 880
867 /* Only repeat if we can actually remove the timer. 881 /* Only repeat if we can actually remove the timer.
868 * Destruction may already be in progress in process 882 * Destruction may already be in progress in process
@@ -898,7 +912,7 @@ static int tcp_packet(struct nf_conn *ct,
898 * that the client cannot but retransmit its SYN and 912 * that the client cannot but retransmit its SYN and
899 * thus initiate a clean new session. 913 * thus initiate a clean new session.
900 */ 914 */
901 write_unlock_bh(&tcp_lock); 915 spin_unlock_bh(&ct->lock);
902 if (LOG_INVALID(net, IPPROTO_TCP)) 916 if (LOG_INVALID(net, IPPROTO_TCP))
903 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 917 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
904 "nf_ct_tcp: killing out of sync session "); 918 "nf_ct_tcp: killing out of sync session ");
@@ -911,7 +925,7 @@ static int tcp_packet(struct nf_conn *ct,
911 ct->proto.tcp.last_end = 925 ct->proto.tcp.last_end =
912 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th); 926 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
913 927
914 write_unlock_bh(&tcp_lock); 928 spin_unlock_bh(&ct->lock);
915 if (LOG_INVALID(net, IPPROTO_TCP)) 929 if (LOG_INVALID(net, IPPROTO_TCP))
916 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 930 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
917 "nf_ct_tcp: invalid packet ignored "); 931 "nf_ct_tcp: invalid packet ignored ");
@@ -920,7 +934,7 @@ static int tcp_packet(struct nf_conn *ct,
920 /* Invalid packet */ 934 /* Invalid packet */
921 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n", 935 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
922 dir, get_conntrack_index(th), old_state); 936 dir, get_conntrack_index(th), old_state);
923 write_unlock_bh(&tcp_lock); 937 spin_unlock_bh(&ct->lock);
924 if (LOG_INVALID(net, IPPROTO_TCP)) 938 if (LOG_INVALID(net, IPPROTO_TCP))
925 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 939 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
926 "nf_ct_tcp: invalid state "); 940 "nf_ct_tcp: invalid state ");
@@ -930,7 +944,7 @@ static int tcp_packet(struct nf_conn *ct,
930 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) 944 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
931 && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) { 945 && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
932 /* Invalid RST */ 946 /* Invalid RST */
933 write_unlock_bh(&tcp_lock); 947 spin_unlock_bh(&ct->lock);
934 if (LOG_INVALID(net, IPPROTO_TCP)) 948 if (LOG_INVALID(net, IPPROTO_TCP))
935 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 949 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
936 "nf_ct_tcp: invalid RST "); 950 "nf_ct_tcp: invalid RST ");
@@ -961,7 +975,7 @@ static int tcp_packet(struct nf_conn *ct,
961 975
962 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index, 976 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
963 skb, dataoff, th, pf)) { 977 skb, dataoff, th, pf)) {
964 write_unlock_bh(&tcp_lock); 978 spin_unlock_bh(&ct->lock);
965 return -NF_ACCEPT; 979 return -NF_ACCEPT;
966 } 980 }
967 in_window: 981 in_window:
@@ -990,9 +1004,8 @@ static int tcp_packet(struct nf_conn *ct,
990 timeout = nf_ct_tcp_timeout_unacknowledged; 1004 timeout = nf_ct_tcp_timeout_unacknowledged;
991 else 1005 else
992 timeout = tcp_timeouts[new_state]; 1006 timeout = tcp_timeouts[new_state];
993 write_unlock_bh(&tcp_lock); 1007 spin_unlock_bh(&ct->lock);
994 1008
995 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, ct);
996 if (new_state != old_state) 1009 if (new_state != old_state)
997 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 1010 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
998 1011
@@ -1086,7 +1099,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1086 1099
1087 ct->proto.tcp.seen[1].td_end = 0; 1100 ct->proto.tcp.seen[1].td_end = 0;
1088 ct->proto.tcp.seen[1].td_maxend = 0; 1101 ct->proto.tcp.seen[1].td_maxend = 0;
1089 ct->proto.tcp.seen[1].td_maxwin = 1; 1102 ct->proto.tcp.seen[1].td_maxwin = 0;
1090 ct->proto.tcp.seen[1].td_scale = 0; 1103 ct->proto.tcp.seen[1].td_scale = 0;
1091 1104
1092 /* tcp_packet will set them */ 1105 /* tcp_packet will set them */
@@ -1108,12 +1121,12 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1108#include <linux/netfilter/nfnetlink_conntrack.h> 1121#include <linux/netfilter/nfnetlink_conntrack.h>
1109 1122
1110static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, 1123static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1111 const struct nf_conn *ct) 1124 struct nf_conn *ct)
1112{ 1125{
1113 struct nlattr *nest_parms; 1126 struct nlattr *nest_parms;
1114 struct nf_ct_tcp_flags tmp = {}; 1127 struct nf_ct_tcp_flags tmp = {};
1115 1128
1116 read_lock_bh(&tcp_lock); 1129 spin_lock_bh(&ct->lock);
1117 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED); 1130 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED);
1118 if (!nest_parms) 1131 if (!nest_parms)
1119 goto nla_put_failure; 1132 goto nla_put_failure;
@@ -1133,14 +1146,14 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1133 tmp.flags = ct->proto.tcp.seen[1].flags; 1146 tmp.flags = ct->proto.tcp.seen[1].flags;
1134 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, 1147 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1135 sizeof(struct nf_ct_tcp_flags), &tmp); 1148 sizeof(struct nf_ct_tcp_flags), &tmp);
1136 read_unlock_bh(&tcp_lock); 1149 spin_unlock_bh(&ct->lock);
1137 1150
1138 nla_nest_end(skb, nest_parms); 1151 nla_nest_end(skb, nest_parms);
1139 1152
1140 return 0; 1153 return 0;
1141 1154
1142nla_put_failure: 1155nla_put_failure:
1143 read_unlock_bh(&tcp_lock); 1156 spin_unlock_bh(&ct->lock);
1144 return -1; 1157 return -1;
1145} 1158}
1146 1159
@@ -1171,7 +1184,7 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1171 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX) 1184 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1172 return -EINVAL; 1185 return -EINVAL;
1173 1186
1174 write_lock_bh(&tcp_lock); 1187 spin_lock_bh(&ct->lock);
1175 if (tb[CTA_PROTOINFO_TCP_STATE]) 1188 if (tb[CTA_PROTOINFO_TCP_STATE])
1176 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]); 1189 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1177 1190
@@ -1198,7 +1211,7 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1198 ct->proto.tcp.seen[1].td_scale = 1211 ct->proto.tcp.seen[1].td_scale =
1199 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]); 1212 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1200 } 1213 }
1201 write_unlock_bh(&tcp_lock); 1214 spin_unlock_bh(&ct->lock);
1202 1215
1203 return 0; 1216 return 0;
1204} 1217}
@@ -1328,6 +1341,13 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
1328 .proc_handler = proc_dointvec_jiffies, 1341 .proc_handler = proc_dointvec_jiffies,
1329 }, 1342 },
1330 { 1343 {
1344 .procname = "ip_conntrack_tcp_timeout_syn_sent2",
1345 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
1346 .maxlen = sizeof(unsigned int),
1347 .mode = 0644,
1348 .proc_handler = proc_dointvec_jiffies,
1349 },
1350 {
1331 .procname = "ip_conntrack_tcp_timeout_syn_recv", 1351 .procname = "ip_conntrack_tcp_timeout_syn_recv",
1332 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV], 1352 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
1333 .maxlen = sizeof(unsigned int), 1353 .maxlen = sizeof(unsigned int),
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index beb37311e1a5..2fefe147750a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -248,14 +248,14 @@ static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
248 rcu_assign_pointer(nf_loggers[tindex], logger); 248 rcu_assign_pointer(nf_loggers[tindex], logger);
249 mutex_unlock(&nf_log_mutex); 249 mutex_unlock(&nf_log_mutex);
250 } else { 250 } else {
251 rcu_read_lock(); 251 mutex_lock(&nf_log_mutex);
252 logger = rcu_dereference(nf_loggers[tindex]); 252 logger = nf_loggers[tindex];
253 if (!logger) 253 if (!logger)
254 table->data = "NONE"; 254 table->data = "NONE";
255 else 255 else
256 table->data = logger->name; 256 table->data = logger->name;
257 r = proc_dostring(table, write, filp, buffer, lenp, ppos); 257 r = proc_dostring(table, write, filp, buffer, lenp, ppos);
258 rcu_read_unlock(); 258 mutex_unlock(&nf_log_mutex);
259 } 259 }
260 260
261 return r; 261 return r;
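The read side moves from rcu_read_lock() to the mutex because proc_dostring() copies to user space and may sleep, which is not allowed inside an RCU read-side critical section; holding nf_log_mutex also keeps table->data pointing at a live logger name for the whole copy. Condensed:

    mutex_lock(&nf_log_mutex);              /* may sleep; RCU read side may not */
    logger = nf_loggers[tindex];            /* stable while the mutex is held */
    table->data = logger ? logger->name : "NONE";
    r = proc_dostring(table, write, filp, buffer, lenp, ppos);
    mutex_unlock(&nf_log_mutex);
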
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 4f2310c93e01..3a6fd77f7761 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -204,10 +204,10 @@ int nf_queue(struct sk_buff *skb,
204 queuenum); 204 queuenum);
205 205
206 switch (pf) { 206 switch (pf) {
207 case AF_INET: 207 case NFPROTO_IPV4:
208 skb->protocol = htons(ETH_P_IP); 208 skb->protocol = htons(ETH_P_IP);
209 break; 209 break;
210 case AF_INET6: 210 case NFPROTO_IPV6:
211 skb->protocol = htons(ETH_P_IPV6); 211 skb->protocol = htons(ETH_P_IPV6);
212 break; 212 break;
213 } 213 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index b8ab37ad7ed5..92761a988375 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -107,9 +107,10 @@ int nfnetlink_has_listeners(unsigned int group)
107} 107}
108EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 108EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
109 109
110int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 110int nfnetlink_send(struct sk_buff *skb, u32 pid,
111 unsigned group, int echo, gfp_t flags)
111{ 112{
112 return nlmsg_notify(nfnl, skb, pid, group, echo, gfp_any()); 113 return nlmsg_notify(nfnl, skb, pid, group, echo, flags);
113} 114}
114EXPORT_SYMBOL_GPL(nfnetlink_send); 115EXPORT_SYMBOL_GPL(nfnetlink_send);
115 116
@@ -136,7 +137,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
136 return -EPERM; 137 return -EPERM;
137 138
138 /* All the messages must at least contain nfgenmsg */ 139 /* All the messages must at least contain nfgenmsg */
139 if (nlh->nlmsg_len < NLMSG_SPACE(sizeof(struct nfgenmsg))) 140 if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg)))
140 return 0; 141 return 0;
141 142
142 type = nlh->nlmsg_type; 143 type = nlh->nlmsg_type;
@@ -160,19 +161,14 @@ replay:
160 { 161 {
161 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); 162 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
162 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); 163 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
163 u_int16_t attr_count = ss->cb[cb_id].attr_count; 164 struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
164 struct nlattr *cda[attr_count+1]; 165 struct nlattr *attr = (void *)nlh + min_len;
165 166 int attrlen = nlh->nlmsg_len - min_len;
166 if (likely(nlh->nlmsg_len >= min_len)) { 167
167 struct nlattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); 168 err = nla_parse(cda, ss->cb[cb_id].attr_count,
168 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); 169 attr, attrlen, ss->cb[cb_id].policy);
169 170 if (err < 0)
170 err = nla_parse(cda, attr_count, attr, attrlen, 171 return err;
171 ss->cb[cb_id].policy);
172 if (err < 0)
173 return err;
174 } else
175 return -EINVAL;
176 172
177 err = nc->call(nfnl, skb, nlh, cda); 173 err = nc->call(nfnl, skb, nlh, cda);
178 if (err == -EAGAIN) 174 if (err == -EAGAIN)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 8c860112ce05..71daa0934b6c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * This is a module which is used for queueing packets and communicating with 2 * This is a module which is used for queueing packets and communicating with
3 * userspace via nfetlink. 3 * userspace via nfnetlink.
4 * 4 *
5 * (C) 2005 by Harald Welte <laforge@netfilter.org> 5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
6 * (C) 2007 by Patrick McHardy <kaber@trash.net> 6 * (C) 2007 by Patrick McHardy <kaber@trash.net>
@@ -932,6 +932,8 @@ static void __exit nfnetlink_queue_fini(void)
932#endif 932#endif
933 nfnetlink_subsys_unregister(&nfqnl_subsys); 933 nfnetlink_subsys_unregister(&nfqnl_subsys);
934 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 934 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
935
936 rcu_barrier(); /* Wait for completion of call_rcu()'s */
935} 937}
936 938
937MODULE_DESCRIPTION("netfilter packet queue handler"); 939MODULE_DESCRIPTION("netfilter packet queue handler");
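The added rcu_barrier() closes a module-unload race: every call_rcu() callback queued by this module must have run before the module text it points into is freed. The general shutdown order, sketched (stop_queueing_work() is a hypothetical placeholder for whatever prevents new call_rcu() submissions):

    static void __exit example_fini(void)
    {
            stop_queueing_work();   /* hypothetical: no new call_rcu() after this */
            rcu_barrier();          /* wait for every pending RCU callback */
    }
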
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 150e5cf62f85..025d1a0af78b 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -329,6 +329,32 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
329} 329}
330EXPORT_SYMBOL_GPL(xt_find_revision); 330EXPORT_SYMBOL_GPL(xt_find_revision);
331 331
332static char *textify_hooks(char *buf, size_t size, unsigned int mask)
333{
334 static const char *const names[] = {
335 "PREROUTING", "INPUT", "FORWARD",
336 "OUTPUT", "POSTROUTING", "BROUTING",
337 };
338 unsigned int i;
339 char *p = buf;
340 bool np = false;
341 int res;
342
343 *p = '\0';
344 for (i = 0; i < ARRAY_SIZE(names); ++i) {
345 if (!(mask & (1 << i)))
346 continue;
347 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
348 if (res > 0) {
349 size -= res;
350 p += res;
351 }
352 np = true;
353 }
354
355 return buf;
356}
357
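textify_hooks() turns a hook bitmask into a slash-separated name list so the diagnostics below can say which hooks a match or target was used from and which are allowed, instead of printing raw hex masks. Illustratively:

    char buf[64];

    /* bits 2 (FORWARD) and 4 (POSTROUTING) set -> "FORWARD/POSTROUTING" */
    textify_hooks(buf, sizeof(buf), (1 << 2) | (1 << 4));
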
332int xt_check_match(struct xt_mtchk_param *par, 358int xt_check_match(struct xt_mtchk_param *par,
333 unsigned int size, u_int8_t proto, bool inv_proto) 359 unsigned int size, u_int8_t proto, bool inv_proto)
334{ 360{
@@ -338,26 +364,30 @@ int xt_check_match(struct xt_mtchk_param *par,
338 * ebt_among is exempt from centralized matchsize checking 364 * ebt_among is exempt from centralized matchsize checking
339 * because it uses a dynamic-size data set. 365 * because it uses a dynamic-size data set.
340 */ 366 */
341 printk("%s_tables: %s match: invalid size %Zu != %u\n", 367 pr_err("%s_tables: %s match: invalid size %Zu != %u\n",
342 xt_prefix[par->family], par->match->name, 368 xt_prefix[par->family], par->match->name,
343 XT_ALIGN(par->match->matchsize), size); 369 XT_ALIGN(par->match->matchsize), size);
344 return -EINVAL; 370 return -EINVAL;
345 } 371 }
346 if (par->match->table != NULL && 372 if (par->match->table != NULL &&
347 strcmp(par->match->table, par->table) != 0) { 373 strcmp(par->match->table, par->table) != 0) {
348 printk("%s_tables: %s match: only valid in %s table, not %s\n", 374 pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
349 xt_prefix[par->family], par->match->name, 375 xt_prefix[par->family], par->match->name,
350 par->match->table, par->table); 376 par->match->table, par->table);
351 return -EINVAL; 377 return -EINVAL;
352 } 378 }
353 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { 379 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
354 printk("%s_tables: %s match: bad hook_mask %#x/%#x\n", 380 char used[64], allow[64];
381
382 pr_err("%s_tables: %s match: used from hooks %s, but only "
383 "valid from %s\n",
355 xt_prefix[par->family], par->match->name, 384 xt_prefix[par->family], par->match->name,
356 par->hook_mask, par->match->hooks); 385 textify_hooks(used, sizeof(used), par->hook_mask),
386 textify_hooks(allow, sizeof(allow), par->match->hooks));
357 return -EINVAL; 387 return -EINVAL;
358 } 388 }
359 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 389 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
360 printk("%s_tables: %s match: only valid for protocol %u\n", 390 pr_err("%s_tables: %s match: only valid for protocol %u\n",
361 xt_prefix[par->family], par->match->name, 391 xt_prefix[par->family], par->match->name,
362 par->match->proto); 392 par->match->proto);
363 return -EINVAL; 393 return -EINVAL;
@@ -484,26 +514,30 @@ int xt_check_target(struct xt_tgchk_param *par,
484 unsigned int size, u_int8_t proto, bool inv_proto) 514 unsigned int size, u_int8_t proto, bool inv_proto)
485{ 515{
486 if (XT_ALIGN(par->target->targetsize) != size) { 516 if (XT_ALIGN(par->target->targetsize) != size) {
487 printk("%s_tables: %s target: invalid size %Zu != %u\n", 517 pr_err("%s_tables: %s target: invalid size %Zu != %u\n",
488 xt_prefix[par->family], par->target->name, 518 xt_prefix[par->family], par->target->name,
489 XT_ALIGN(par->target->targetsize), size); 519 XT_ALIGN(par->target->targetsize), size);
490 return -EINVAL; 520 return -EINVAL;
491 } 521 }
492 if (par->target->table != NULL && 522 if (par->target->table != NULL &&
493 strcmp(par->target->table, par->table) != 0) { 523 strcmp(par->target->table, par->table) != 0) {
494 printk("%s_tables: %s target: only valid in %s table, not %s\n", 524 pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
495 xt_prefix[par->family], par->target->name, 525 xt_prefix[par->family], par->target->name,
496 par->target->table, par->table); 526 par->target->table, par->table);
497 return -EINVAL; 527 return -EINVAL;
498 } 528 }
499 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { 529 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
500 printk("%s_tables: %s target: bad hook_mask %#x/%#x\n", 530 char used[64], allow[64];
531
532 pr_err("%s_tables: %s target: used from hooks %s, but only "
533 "usable from %s\n",
501 xt_prefix[par->family], par->target->name, 534 xt_prefix[par->family], par->target->name,
502 par->hook_mask, par->target->hooks); 535 textify_hooks(used, sizeof(used), par->hook_mask),
536 textify_hooks(allow, sizeof(allow), par->target->hooks));
503 return -EINVAL; 537 return -EINVAL;
504 } 538 }
505 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 539 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
506 printk("%s_tables: %s target: only valid for protocol %u\n", 540 pr_err("%s_tables: %s target: only valid for protocol %u\n",
507 xt_prefix[par->family], par->target->name, 541 xt_prefix[par->family], par->target->name,
508 par->target->proto); 542 par->target->proto);
509 return -EINVAL; 543 return -EINVAL;
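The textify_hooks() helper added above renders a hook bitmask as a
slash-separated name list, so the rewritten errors now read e.g.
"used from hooks PREROUTING/FORWARD" instead of raw hex masks. A
minimal userspace sketch of the same conversion, assuming the bit
layout of the names[] table above (the main() driver and sample mask
are illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	static const char *const hook_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};

	static char *textify(char *buf, size_t size, unsigned int mask)
	{
		char *p = buf;
		bool np = false;
		int res;
		size_t i;

		*p = '\0';
		for (i = 0; i < sizeof(hook_names) / sizeof(hook_names[0]); ++i) {
			if (!(mask & (1u << i)))
				continue;
			/* append a separator before every name but the first */
			res = snprintf(p, size, "%s%s", np ? "/" : "", hook_names[i]);
			if (res > 0) {
				size -= res;
				p += res;
			}
			np = true;
		}
		return buf;
	}

	int main(void)
	{
		char buf[64];

		/* bits 0 and 2 set: prints "PREROUTING/FORWARD" */
		printf("%s\n", textify(buf, sizeof(buf), (1 << 0) | (1 << 2)));
		return 0;
	}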
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index f9977b3311f7..498b45101df7 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -11,6 +11,10 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13 13
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/jhash.h>
17
14#include <linux/netfilter.h> 18#include <linux/netfilter.h>
15#include <linux/netfilter_arp.h> 19#include <linux/netfilter_arp.h>
16#include <linux/netfilter/x_tables.h> 20#include <linux/netfilter/x_tables.h>
@@ -23,6 +27,8 @@ MODULE_ALIAS("ipt_NFQUEUE");
23MODULE_ALIAS("ip6t_NFQUEUE"); 27MODULE_ALIAS("ip6t_NFQUEUE");
24MODULE_ALIAS("arpt_NFQUEUE"); 28MODULE_ALIAS("arpt_NFQUEUE");
25 29
30static u32 jhash_initval __read_mostly;
31
26static unsigned int 32static unsigned int
27nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) 33nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
28{ 34{
@@ -31,32 +37,105 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
31 return NF_QUEUE_NR(tinfo->queuenum); 37 return NF_QUEUE_NR(tinfo->queuenum);
32} 38}
33 39
40static u32 hash_v4(const struct sk_buff *skb)
41{
42 const struct iphdr *iph = ip_hdr(skb);
43 u32 ipaddr;
44
45 /* packets in either direction go into the same queue */
46 ipaddr = iph->saddr ^ iph->daddr;
47
48 return jhash_2words(ipaddr, iph->protocol, jhash_initval);
49}
50
51static unsigned int
52nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
53{
54 const struct xt_NFQ_info_v1 *info = par->targinfo;
55 u32 queue = info->queuenum;
56
57 if (info->queues_total > 1)
58 queue = hash_v4(skb) % info->queues_total + queue;
59 return NF_QUEUE_NR(queue);
60}
61
62#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
63static u32 hash_v6(const struct sk_buff *skb)
64{
65 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
66 u32 addr[4];
67
68 addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
69 addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
70 addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
71 addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
72
73 return jhash2(addr, ARRAY_SIZE(addr), jhash_initval);
74}
75
76static unsigned int
77nfqueue_tg6_v1(struct sk_buff *skb, const struct xt_target_param *par)
78{
79 const struct xt_NFQ_info_v1 *info = par->targinfo;
80 u32 queue = info->queuenum;
81
82 if (info->queues_total > 1)
83 queue = hash_v6(skb) % info->queues_total + queue;
84 return NF_QUEUE_NR(queue);
85}
86#endif
87
88static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
89{
90 const struct xt_NFQ_info_v1 *info = par->targinfo;
91 u32 maxid;
92
93 if (info->queues_total == 0) {
94 pr_err("NFQUEUE: number of total queues is 0\n");
95 return false;
96 }
97 maxid = info->queues_total - 1 + info->queuenum;
98 if (maxid > 0xffff) {
99 pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
100 info->queues_total, maxid);
101 return false;
102 }
103 return true;
104}
105
34static struct xt_target nfqueue_tg_reg[] __read_mostly = { 106static struct xt_target nfqueue_tg_reg[] __read_mostly = {
35 { 107 {
36 .name = "NFQUEUE", 108 .name = "NFQUEUE",
37 .family = NFPROTO_IPV4, 109 .family = NFPROTO_UNSPEC,
38 .target = nfqueue_tg, 110 .target = nfqueue_tg,
39 .targetsize = sizeof(struct xt_NFQ_info), 111 .targetsize = sizeof(struct xt_NFQ_info),
40 .me = THIS_MODULE, 112 .me = THIS_MODULE,
41 }, 113 },
42 { 114 {
43 .name = "NFQUEUE", 115 .name = "NFQUEUE",
44 .family = NFPROTO_IPV6, 116 .revision = 1,
45 .target = nfqueue_tg, 117 .family = NFPROTO_IPV4,
46 .targetsize = sizeof(struct xt_NFQ_info), 118 .checkentry = nfqueue_tg_v1_check,
119 .target = nfqueue_tg4_v1,
120 .targetsize = sizeof(struct xt_NFQ_info_v1),
47 .me = THIS_MODULE, 121 .me = THIS_MODULE,
48 }, 122 },
123#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
49 { 124 {
50 .name = "NFQUEUE", 125 .name = "NFQUEUE",
51 .family = NFPROTO_ARP, 126 .revision = 1,
52 .target = nfqueue_tg, 127 .family = NFPROTO_IPV6,
53 .targetsize = sizeof(struct xt_NFQ_info), 128 .checkentry = nfqueue_tg_v1_check,
129 .target = nfqueue_tg6_v1,
130 .targetsize = sizeof(struct xt_NFQ_info_v1),
54 .me = THIS_MODULE, 131 .me = THIS_MODULE,
55 }, 132 },
133#endif
56}; 134};
57 135
58static int __init nfqueue_tg_init(void) 136static int __init nfqueue_tg_init(void)
59{ 137{
138 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
60 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); 139 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
61} 140}
62 141
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 4f3b1f808795..eda64c1cb1e5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -73,11 +73,11 @@ tcpmss_mangle_packet(struct sk_buff *skb,
73 } 73 }
74 74
75 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 75 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
76 if (dst_mtu(skb->dst) <= minlen) { 76 if (dst_mtu(skb_dst(skb)) <= minlen) {
77 if (net_ratelimit()) 77 if (net_ratelimit())
78 printk(KERN_ERR "xt_TCPMSS: " 78 printk(KERN_ERR "xt_TCPMSS: "
79 "unknown or invalid path-MTU (%u)\n", 79 "unknown or invalid path-MTU (%u)\n",
80 dst_mtu(skb->dst)); 80 dst_mtu(skb_dst(skb)));
81 return -1; 81 return -1;
82 } 82 }
83 if (in_mtu <= minlen) { 83 if (in_mtu <= minlen) {
@@ -86,7 +86,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
86 "invalid path-MTU (%u)\n", in_mtu); 86 "invalid path-MTU (%u)\n", in_mtu);
87 return -1; 87 return -1;
88 } 88 }
89 newmss = min(dst_mtu(skb->dst), in_mtu) - minlen; 89 newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
90 } else 90 } else
91 newmss = info->mss; 91 newmss = info->mss;
92 92
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
new file mode 100644
index 000000000000..863e40977a4d
--- /dev/null
+++ b/net/netfilter/xt_osf.c
@@ -0,0 +1,428 @@
1/*
2 * Copyright (c) 2003+ Evgeniy Polyakov <zbr@ioremap.net>
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22
23#include <linux/if.h>
24#include <linux/inetdevice.h>
25#include <linux/ip.h>
26#include <linux/list.h>
27#include <linux/rculist.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/tcp.h>
31
32#include <net/ip.h>
33#include <net/tcp.h>
34
35#include <linux/netfilter/nfnetlink.h>
36#include <linux/netfilter/x_tables.h>
37#include <net/netfilter/nf_log.h>
38#include <linux/netfilter/xt_osf.h>
39
40struct xt_osf_finger {
41 struct rcu_head rcu_head;
42 struct list_head finger_entry;
43 struct xt_osf_user_finger finger;
44};
45
46enum osf_fmatch_states {
47 /* Packet does not match the fingerprint */
48 FMATCH_WRONG = 0,
49 /* Packet matches the fingerprint */
50 FMATCH_OK,
51 /* Options do not match the fingerprint, but header does */
52 FMATCH_OPT_WRONG,
53};
54
55/*
56 * Indexed by dont-fragment bit.
57 * It is the only constant value in the fingerprint.
58 */
59static struct list_head xt_osf_fingers[2];
60
61static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = {
62 [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) },
63};
64
65static void xt_osf_finger_free_rcu(struct rcu_head *rcu_head)
66{
67 struct xt_osf_finger *f = container_of(rcu_head, struct xt_osf_finger, rcu_head);
68
69 kfree(f);
70}
71
72static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
73 struct nlmsghdr *nlh, struct nlattr *osf_attrs[])
74{
75 struct xt_osf_user_finger *f;
76 struct xt_osf_finger *kf = NULL, *sf;
77 int err = 0;
78
79 if (!osf_attrs[OSF_ATTR_FINGER])
80 return -EINVAL;
81
82 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
83 return -EINVAL;
84
85 f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
86
87 kf = kmalloc(sizeof(struct xt_osf_finger), GFP_KERNEL);
88 if (!kf)
89 return -ENOMEM;
90
91 memcpy(&kf->finger, f, sizeof(struct xt_osf_user_finger));
92
93 list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) {
94 if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger)))
95 continue;
96
97 kfree(kf);
98 kf = NULL;
99
100 if (nlh->nlmsg_flags & NLM_F_EXCL)
101 err = -EEXIST;
102 break;
103 }
104
105 /*
106 * We are protected by nfnl mutex.
107 */
108 if (kf)
109 list_add_tail_rcu(&kf->finger_entry, &xt_osf_fingers[!!f->df]);
110
111 return err;
112}
113
114static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
115 struct nlmsghdr *nlh, struct nlattr *osf_attrs[])
116{
117 struct xt_osf_user_finger *f;
118 struct xt_osf_finger *sf;
119 int err = -ENOENT;
120
121 if (!osf_attrs[OSF_ATTR_FINGER])
122 return -EINVAL;
123
124 f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
125
126 list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) {
127 if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger)))
128 continue;
129
130 /*
131 * We are protected by nfnl mutex.
132 */
133 list_del_rcu(&sf->finger_entry);
134 call_rcu(&sf->rcu_head, xt_osf_finger_free_rcu);
135
136 err = 0;
137 break;
138 }
139
140 return err;
141}
142
143static const struct nfnl_callback xt_osf_nfnetlink_callbacks[OSF_MSG_MAX] = {
144 [OSF_MSG_ADD] = {
145 .call = xt_osf_add_callback,
146 .attr_count = OSF_ATTR_MAX,
147 .policy = xt_osf_policy,
148 },
149 [OSF_MSG_REMOVE] = {
150 .call = xt_osf_remove_callback,
151 .attr_count = OSF_ATTR_MAX,
152 .policy = xt_osf_policy,
153 },
154};
155
156static const struct nfnetlink_subsystem xt_osf_nfnetlink = {
157 .name = "osf",
158 .subsys_id = NFNL_SUBSYS_OSF,
159 .cb_count = OSF_MSG_MAX,
160 .cb = xt_osf_nfnetlink_callbacks,
161};
162
163static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info *info,
164 unsigned char f_ttl)
165{
166 const struct iphdr *ip = ip_hdr(skb);
167
168 if (info->flags & XT_OSF_TTL) {
169 if (info->ttl == XT_OSF_TTL_TRUE)
170 return ip->ttl == f_ttl;
171 if (info->ttl == XT_OSF_TTL_NOCHECK)
172 return 1;
173 else if (ip->ttl <= f_ttl)
174 return 1;
175 else {
176 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
177 int ret = 0;
178
179 for_ifa(in_dev) {
180 if (inet_ifa_match(ip->saddr, ifa)) {
181 ret = (ip->ttl == f_ttl);
182 break;
183 }
184 }
185 endfor_ifa(in_dev);
186
187 return ret;
188 }
189 }
190
191 return ip->ttl == f_ttl;
192}
193
194static bool xt_osf_match_packet(const struct sk_buff *skb,
195 const struct xt_match_param *p)
196{
197 const struct xt_osf_info *info = p->matchinfo;
198 const struct iphdr *ip = ip_hdr(skb);
199 const struct tcphdr *tcp;
200 struct tcphdr _tcph;
201 int fmatch = FMATCH_WRONG, fcount = 0;
202 unsigned int optsize = 0, check_WSS = 0;
203 u16 window, totlen, mss = 0;
204 bool df;
205 const unsigned char *optp = NULL, *_optp = NULL;
206 unsigned char opts[MAX_IPOPTLEN];
207 const struct xt_osf_finger *kf;
208 const struct xt_osf_user_finger *f;
209
210 if (!info)
211 return false;
212
213 tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
214 if (!tcp)
215 return false;
216
217 if (!tcp->syn)
218 return false;
219
220 totlen = ntohs(ip->tot_len);
221 df = ntohs(ip->frag_off) & IP_DF;
222 window = ntohs(tcp->window);
223
224 if (tcp->doff * 4 > sizeof(struct tcphdr)) {
225 optsize = tcp->doff * 4 - sizeof(struct tcphdr);
226
227 _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) +
228 sizeof(struct tcphdr), optsize, opts);
229 }
230
231 rcu_read_lock();
232 list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) {
233 f = &kf->finger;
234
235 if (!(info->flags & XT_OSF_LOG) && strcmp(info->genre, f->genre))
236 continue;
237
238 optp = _optp;
239 fmatch = FMATCH_WRONG;
240
241 if (totlen == f->ss && xt_osf_ttl(skb, info, f->ttl)) {
242 int foptsize, optnum;
243
244 /*
245 * Should not happen if the userspace parser was written correctly.
246 */
247 if (f->wss.wc >= OSF_WSS_MAX)
248 continue;
249
250 /* Check options */
251
252 foptsize = 0;
253 for (optnum = 0; optnum < f->opt_num; ++optnum)
254 foptsize += f->opt[optnum].length;
255
256 if (foptsize > MAX_IPOPTLEN ||
257 optsize > MAX_IPOPTLEN ||
258 optsize != foptsize)
259 continue;
260
261 check_WSS = f->wss.wc;
262
263 for (optnum = 0; optnum < f->opt_num; ++optnum) {
264 if (f->opt[optnum].kind == (*optp)) {
265 __u32 len = f->opt[optnum].length;
266 const __u8 *optend = optp + len;
267 int loop_cont = 0;
268
269 fmatch = FMATCH_OK;
270
271 switch (*optp) {
272 case OSFOPT_MSS:
273 mss = optp[3];
274 mss <<= 8;
275 mss |= optp[2];
276
277 mss = ntohs(mss);
278 break;
279 case OSFOPT_TS:
280 loop_cont = 1;
281 break;
282 }
283
284 optp = optend;
285 } else
286 fmatch = FMATCH_OPT_WRONG;
287
288 if (fmatch != FMATCH_OK)
289 break;
290 }
291
292 if (fmatch != FMATCH_OPT_WRONG) {
293 fmatch = FMATCH_WRONG;
294
295 switch (check_WSS) {
296 case OSF_WSS_PLAIN:
297 if (f->wss.val == 0 || window == f->wss.val)
298 fmatch = FMATCH_OK;
299 break;
300 case OSF_WSS_MSS:
301 /*
302 * Some smart modems mangle (decrease) the MSS to
303 * SMART_MSS_2, so we check the standard MSS, the
304 * decreased one, and the one provided in the
305 * fingerprint.
306 */
307#define SMART_MSS_1 1460
308#define SMART_MSS_2 1448
309 if (window == f->wss.val * mss ||
310 window == f->wss.val * SMART_MSS_1 ||
311 window == f->wss.val * SMART_MSS_2)
312 fmatch = FMATCH_OK;
313 break;
314 case OSF_WSS_MTU:
315 if (window == f->wss.val * (mss + 40) ||
316 window == f->wss.val * (SMART_MSS_1 + 40) ||
317 window == f->wss.val * (SMART_MSS_2 + 40))
318 fmatch = FMATCH_OK;
319 break;
320 case OSF_WSS_MODULO:
321 if ((window % f->wss.val) == 0)
322 fmatch = FMATCH_OK;
323 break;
324 }
325 }
326
327 if (fmatch != FMATCH_OK)
328 continue;
329
330 fcount++;
331
332 if (info->flags & XT_OSF_LOG)
333 nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
334 "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n",
335 f->genre, f->version, f->subtype,
336 &ip->saddr, ntohs(tcp->source),
337 &ip->daddr, ntohs(tcp->dest),
338 f->ttl - ip->ttl);
339
340 if ((info->flags & XT_OSF_LOG) &&
341 info->loglevel == XT_OSF_LOGLEVEL_FIRST)
342 break;
343 }
344 }
345 rcu_read_unlock();
346
347 if (!fcount && (info->flags & XT_OSF_LOG))
348 nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
349 "Remote OS is not known: %pi4:%u -> %pi4:%u\n",
350 &ip->saddr, ntohs(tcp->source),
351 &ip->daddr, ntohs(tcp->dest));
352
353 if (fcount)
354 fmatch = FMATCH_OK;
355
356 return fmatch == FMATCH_OK;
357}
358
359static struct xt_match xt_osf_match = {
360 .name = "osf",
361 .revision = 0,
362 .family = NFPROTO_IPV4,
363 .proto = IPPROTO_TCP,
364 .hooks = (1 << NF_INET_LOCAL_IN) |
365 (1 << NF_INET_PRE_ROUTING) |
366 (1 << NF_INET_FORWARD),
367 .match = xt_osf_match_packet,
368 .matchsize = sizeof(struct xt_osf_info),
369 .me = THIS_MODULE,
370};
371
372static int __init xt_osf_init(void)
373{
374 int err = -EINVAL;
375 int i;
376
377 for (i = 0; i < ARRAY_SIZE(xt_osf_fingers); ++i)
378 INIT_LIST_HEAD(&xt_osf_fingers[i]);
379
380 err = nfnetlink_subsys_register(&xt_osf_nfnetlink);
381 if (err < 0) {
382 printk(KERN_ERR "Failed (%d) to register OSF nsfnetlink helper.\n", err);
383 goto err_out_exit;
384 }
385
386 err = xt_register_match(&xt_osf_match);
387 if (err) {
388 printk(KERN_ERR "Failed (%d) to register OS fingerprint "
389 "matching module.\n", err);
390 goto err_out_remove;
391 }
392
393 return 0;
394
395err_out_remove:
396 nfnetlink_subsys_unregister(&xt_osf_nfnetlink);
397err_out_exit:
398 return err;
399}
400
401static void __exit xt_osf_fini(void)
402{
403 struct xt_osf_finger *f;
404 int i;
405
406 nfnetlink_subsys_unregister(&xt_osf_nfnetlink);
407 xt_unregister_match(&xt_osf_match);
408
409 rcu_read_lock();
410 for (i = 0; i < ARRAY_SIZE(xt_osf_fingers); ++i) {
411
412 list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) {
413 list_del_rcu(&f->finger_entry);
414 call_rcu(&f->rcu_head, xt_osf_finger_free_rcu);
415 }
416 }
417 rcu_read_unlock();
418
419 rcu_barrier();
420}
421
422module_init(xt_osf_init);
423module_exit(xt_osf_fini);
424
425MODULE_LICENSE("GPL");
426MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
427MODULE_DESCRIPTION("Passive OS fingerprint matching.");
428MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
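Most of xt_osf_match_packet() is option-list comparison, but the final
check is arithmetic on the advertised TCP window: a fingerprint can
state its window as a plain value, a multiple of the MSS, a multiple
of the MTU, or a modulus. A standalone sketch of the OSF_WSS_MSS
branch with invented sample values (a 1460-byte MSS and a window of
5840, i.e. wss.val = 4):

	#include <stdint.h>
	#include <stdio.h>

	#define SMART_MSS_1 1460
	#define SMART_MSS_2 1448

	/* mirrors the OSF_WSS_MSS case: also accept the two MSS values
	 * commonly substituted by MSS-mangling modems */
	static int wss_mss_match(uint16_t window, uint32_t wss_val, uint16_t mss)
	{
		return window == wss_val * mss ||
		       window == wss_val * SMART_MSS_1 ||
		       window == wss_val * SMART_MSS_2;
	}

	int main(void)
	{
		printf("%d\n", wss_mss_match(5840, 4, 1460));	/* 1: 4 * 1460 */
		printf("%d\n", wss_mss_match(65535, 4, 1460));	/* 0 */
		return 0;
	}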
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 328bd20ddd25..4cbfebda8fa1 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -86,7 +86,7 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
86 unsigned short family) 86 unsigned short family)
87{ 87{
88 const struct xt_policy_elem *e; 88 const struct xt_policy_elem *e;
89 const struct dst_entry *dst = skb->dst; 89 const struct dst_entry *dst = skb_dst(skb);
90 int strict = info->flags & XT_POLICY_MATCH_STRICT; 90 int strict = info->flags & XT_POLICY_MATCH_STRICT;
91 int i, pos; 91 int i, pos;
92 92
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c
index 67419287bc7e..484d1689bfde 100644
--- a/net/netfilter/xt_realm.c
+++ b/net/netfilter/xt_realm.c
@@ -25,7 +25,7 @@ static bool
25realm_mt(const struct sk_buff *skb, const struct xt_match_param *par) 25realm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
26{ 26{
27 const struct xt_realm_info *info = par->matchinfo; 27 const struct xt_realm_info *info = par->matchinfo;
28 const struct dst_entry *dst = skb->dst; 28 const struct dst_entry *dst = skb_dst(skb);
29 29
30 return (info->id == (dst->tclassid & info->mask)) ^ info->invert; 30 return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
31} 31}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 1acc089be7e9..ebf00ad5b194 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -22,6 +22,8 @@
22#include <net/netfilter/nf_tproxy_core.h> 22#include <net/netfilter/nf_tproxy_core.h>
23#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 23#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
24 24
25#include <linux/netfilter/xt_socket.h>
26
25#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 27#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
26#define XT_SOCKET_HAVE_CONNTRACK 1 28#define XT_SOCKET_HAVE_CONNTRACK 1
27#include <net/netfilter/nf_conntrack.h> 29#include <net/netfilter/nf_conntrack.h>
@@ -86,7 +88,8 @@ extract_icmp_fields(const struct sk_buff *skb,
86 88
87 89
88static bool 90static bool
89socket_mt(const struct sk_buff *skb, const struct xt_match_param *par) 91socket_match(const struct sk_buff *skb, const struct xt_match_param *par,
92 const struct xt_socket_mtinfo1 *info)
90{ 93{
91 const struct iphdr *iph = ip_hdr(skb); 94 const struct iphdr *iph = ip_hdr(skb);
92 struct udphdr _hdr, *hp = NULL; 95 struct udphdr _hdr, *hp = NULL;
@@ -141,10 +144,24 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
141 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, 144 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
142 saddr, daddr, sport, dport, par->in, false); 145 saddr, daddr, sport, dport, par->in, false);
143 if (sk != NULL) { 146 if (sk != NULL) {
144 bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0); 147 bool wildcard;
148 bool transparent = true;
149
150 /* Ignore sockets listening on INADDR_ANY */
151 wildcard = (sk->sk_state != TCP_TIME_WAIT &&
152 inet_sk(sk)->rcv_saddr == 0);
153
154 /* Ignore non-transparent sockets
155 if XT_SOCKET_TRANSPARENT is used */
156 if (info && info->flags & XT_SOCKET_TRANSPARENT)
157 transparent = ((sk->sk_state != TCP_TIME_WAIT &&
158 inet_sk(sk)->transparent) ||
159 (sk->sk_state == TCP_TIME_WAIT &&
160 inet_twsk(sk)->tw_transparent));
145 161
146 nf_tproxy_put_sock(sk); 162 nf_tproxy_put_sock(sk);
147 if (wildcard) 163
164 if (wildcard || !transparent)
148 sk = NULL; 165 sk = NULL;
149 } 166 }
150 167
@@ -157,23 +174,47 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
157 return (sk != NULL); 174 return (sk != NULL);
158} 175}
159 176
160static struct xt_match socket_mt_reg __read_mostly = { 177static bool
161 .name = "socket", 178socket_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
162 .family = AF_INET, 179{
163 .match = socket_mt, 180 return socket_match(skb, par, NULL);
164 .hooks = 1 << NF_INET_PRE_ROUTING, 181}
165 .me = THIS_MODULE, 182
183static bool
184socket_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
185{
186 return socket_match(skb, par, par->matchinfo);
187}
188
189static struct xt_match socket_mt_reg[] __read_mostly = {
190 {
191 .name = "socket",
192 .revision = 0,
193 .family = NFPROTO_IPV4,
194 .match = socket_mt_v0,
195 .hooks = 1 << NF_INET_PRE_ROUTING,
196 .me = THIS_MODULE,
197 },
198 {
199 .name = "socket",
200 .revision = 1,
201 .family = NFPROTO_IPV4,
202 .match = socket_mt_v1,
203 .matchsize = sizeof(struct xt_socket_mtinfo1),
204 .hooks = 1 << NF_INET_PRE_ROUTING,
205 .me = THIS_MODULE,
206 },
166}; 207};
167 208
168static int __init socket_mt_init(void) 209static int __init socket_mt_init(void)
169{ 210{
170 nf_defrag_ipv4_enable(); 211 nf_defrag_ipv4_enable();
171 return xt_register_match(&socket_mt_reg); 212 return xt_register_matches(socket_mt_reg, ARRAY_SIZE(socket_mt_reg));
172} 213}
173 214
174static void __exit socket_mt_exit(void) 215static void __exit socket_mt_exit(void)
175{ 216{
176 xt_unregister_match(&socket_mt_reg); 217 xt_unregister_matches(socket_mt_reg, ARRAY_SIZE(socket_mt_reg));
177} 218}
178 219
179module_init(socket_mt_init); 220module_init(socket_mt_init);
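Revision 1 of the socket match adds a flag so that only sockets bound
with IP_TRANSPARENT may claim a packet; for time-wait sockets the flag
is read from tw_transparent instead. A tiny standalone sketch of just
that predicate, with an invented struct standing in for the real
sock/inet_timewait_sock pair:

	#include <stdbool.h>
	#include <stdio.h>

	/* illustrative stand-in for the two kernel socket flavours */
	struct fake_sk {
		bool time_wait;		/* acts as sk_state == TCP_TIME_WAIT */
		bool transparent;	/* inet_sk(sk)->transparent */
		bool tw_transparent;	/* inet_twsk(sk)->tw_transparent */
	};

	static bool matches_transparent(const struct fake_sk *sk)
	{
		return (!sk->time_wait && sk->transparent) ||
		       (sk->time_wait && sk->tw_transparent);
	}

	int main(void)
	{
		struct fake_sk plain = { false, false, false };
		struct fake_sk tproxy = { false, true, false };

		printf("plain=%d tproxy=%d\n",
		       matches_transparent(&plain),
		       matches_transparent(&tproxy));
		return 0;
	}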
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index bf1ab1a6790d..e639298bc9c8 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -785,18 +785,6 @@ static struct genl_ops netlbl_cipsov4_ops[] = {
785 */ 785 */
786int __init netlbl_cipsov4_genl_init(void) 786int __init netlbl_cipsov4_genl_init(void)
787{ 787{
788 int ret_val, i; 788 return genl_register_family_with_ops(&netlbl_cipsov4_gnl_family,
789 789 netlbl_cipsov4_ops, ARRAY_SIZE(netlbl_cipsov4_ops));
790 ret_val = genl_register_family(&netlbl_cipsov4_gnl_family);
791 if (ret_val != 0)
792 return ret_val;
793
794 for (i = 0; i < ARRAY_SIZE(netlbl_cipsov4_ops); i++) {
795 ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
796 &netlbl_cipsov4_ops[i]);
797 if (ret_val != 0)
798 return ret_val;
799 }
800
801 return 0;
802} 790}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 1821c5d50fb8..8203623e65ad 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -779,18 +779,6 @@ static struct genl_ops netlbl_mgmt_genl_ops[] = {
779 */ 779 */
780int __init netlbl_mgmt_genl_init(void) 780int __init netlbl_mgmt_genl_init(void)
781{ 781{
782 int ret_val, i; 782 return genl_register_family_with_ops(&netlbl_mgmt_gnl_family,
783 783 netlbl_mgmt_genl_ops, ARRAY_SIZE(netlbl_mgmt_genl_ops));
784 ret_val = genl_register_family(&netlbl_mgmt_gnl_family);
785 if (ret_val != 0)
786 return ret_val;
787
788 for (i = 0; i < ARRAY_SIZE(netlbl_mgmt_genl_ops); i++) {
789 ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
790 &netlbl_mgmt_genl_ops[i]);
791 if (ret_val != 0)
792 return ret_val;
793 }
794
795 return 0;
796} 784}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index f3c5c68c6848..fb357f010189 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1478,20 +1478,8 @@ static struct genl_ops netlbl_unlabel_genl_ops[] = {
1478 */ 1478 */
1479int __init netlbl_unlabel_genl_init(void) 1479int __init netlbl_unlabel_genl_init(void)
1480{ 1480{
1481 int ret_val, i; 1481 return genl_register_family_with_ops(&netlbl_unlabel_gnl_family,
1482 1482 netlbl_unlabel_genl_ops, ARRAY_SIZE(netlbl_unlabel_genl_ops));
1483 ret_val = genl_register_family(&netlbl_unlabel_gnl_family);
1484 if (ret_val != 0)
1485 return ret_val;
1486
1487 for (i = 0; i < ARRAY_SIZE(netlbl_unlabel_genl_ops); i++) {
1488 ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
1489 &netlbl_unlabel_genl_ops[i]);
1490 if (ret_val != 0)
1491 return ret_val;
1492 }
1493
1494 return 0;
1495} 1483}
1496 1484
1497/* 1485/*
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1d3dd30099df..eed4c6a8afc0 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -384,6 +384,52 @@ errout:
384} 384}
385 385
386/** 386/**
387 * genl_register_family_with_ops - register a generic netlink family
388 * @family: generic netlink family
389 * @ops: operations to be registered
390 * @n_ops: number of elements to register
391 *
392 * Registers the specified family and operations from the specified table.
393 * Only one family may be registered with the same family name or identifier.
394 *
395 * The family id may equal GENL_ID_GENERATE, causing a unique id to
396 * be automatically generated and assigned.
397 *
398 * Either a doit or dumpit callback must be specified for every registered
399 * operation or the function will fail. Only one operation structure per
400 * command identifier may be registered.
401 *
402 * See include/net/genetlink.h for more documentation on the operations
403 * structure.
404 *
405 * This is equivalent to calling genl_register_family() followed by
406 * genl_register_ops() for every operation entry in the table taking
407 * care to unregister the family on error path.
408 *
409 * Return 0 on success or a negative error code.
410 */
411int genl_register_family_with_ops(struct genl_family *family,
412 struct genl_ops *ops, size_t n_ops)
413{
414 int err, i;
415
416 err = genl_register_family(family);
417 if (err)
418 return err;
419
420 for (i = 0; i < n_ops; ++i, ++ops) {
421 err = genl_register_ops(family, ops);
422 if (err)
423 goto err_out;
424 }
425 return 0;
426err_out:
427 genl_unregister_family(family);
428 return err;
429}
430EXPORT_SYMBOL(genl_register_family_with_ops);
431
432/**
387 * genl_unregister_family - unregister generic netlink family 433 * genl_unregister_family - unregister generic netlink family
388 * @family: generic netlink family 434 * @family: generic netlink family
389 * 435 *
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f546e81acc45..4f76e5552d8e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -39,6 +39,7 @@
39 * will simply extend the hardware address 39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll 40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq. 41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
42 * 43 *
43 * This program is free software; you can redistribute it and/or 44 * This program is free software; you can redistribute it and/or
44 * modify it under the terms of the GNU General Public License 45 * modify it under the terms of the GNU General Public License
@@ -157,7 +158,25 @@ struct packet_mreq_max
157}; 158};
158 159
159#ifdef CONFIG_PACKET_MMAP 160#ifdef CONFIG_PACKET_MMAP
160static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing); 161static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
162 int closing, int tx_ring);
163
164struct packet_ring_buffer {
165 char * *pg_vec;
166 unsigned int head;
167 unsigned int frames_per_block;
168 unsigned int frame_size;
169 unsigned int frame_max;
170
171 unsigned int pg_vec_order;
172 unsigned int pg_vec_pages;
173 unsigned int pg_vec_len;
174
175 atomic_t pending;
176};
177
178struct packet_sock;
179static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
161#endif 180#endif
162 181
163static void packet_flush_mclist(struct sock *sk); 182static void packet_flush_mclist(struct sock *sk);
@@ -167,11 +186,8 @@ struct packet_sock {
167 struct sock sk; 186 struct sock sk;
168 struct tpacket_stats stats; 187 struct tpacket_stats stats;
169#ifdef CONFIG_PACKET_MMAP 188#ifdef CONFIG_PACKET_MMAP
170 char * *pg_vec; 189 struct packet_ring_buffer rx_ring;
171 unsigned int head; 190 struct packet_ring_buffer tx_ring;
172 unsigned int frames_per_block;
173 unsigned int frame_size;
174 unsigned int frame_max;
175 int copy_thresh; 191 int copy_thresh;
176#endif 192#endif
177 struct packet_type prot_hook; 193 struct packet_type prot_hook;
@@ -185,12 +201,10 @@ struct packet_sock {
185 struct packet_mclist *mclist; 201 struct packet_mclist *mclist;
186#ifdef CONFIG_PACKET_MMAP 202#ifdef CONFIG_PACKET_MMAP
187 atomic_t mapped; 203 atomic_t mapped;
188 unsigned int pg_vec_order;
189 unsigned int pg_vec_pages;
190 unsigned int pg_vec_len;
191 enum tpacket_versions tp_version; 204 enum tpacket_versions tp_version;
192 unsigned int tp_hdrlen; 205 unsigned int tp_hdrlen;
193 unsigned int tp_reserve; 206 unsigned int tp_reserve;
207 unsigned int tp_loss:1;
194#endif 208#endif
195}; 209};
196 210
@@ -206,36 +220,33 @@ struct packet_skb_cb {
206 220
207#ifdef CONFIG_PACKET_MMAP 221#ifdef CONFIG_PACKET_MMAP
208 222
209static void *packet_lookup_frame(struct packet_sock *po, unsigned int position, 223static void __packet_set_status(struct packet_sock *po, void *frame, int status)
210 int status)
211{ 224{
212 unsigned int pg_vec_pos, frame_offset;
213 union { 225 union {
214 struct tpacket_hdr *h1; 226 struct tpacket_hdr *h1;
215 struct tpacket2_hdr *h2; 227 struct tpacket2_hdr *h2;
216 void *raw; 228 void *raw;
217 } h; 229 } h;
218 230
219 pg_vec_pos = position / po->frames_per_block; 231 h.raw = frame;
220 frame_offset = position % po->frames_per_block;
221
222 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
223 switch (po->tp_version) { 232 switch (po->tp_version) {
224 case TPACKET_V1: 233 case TPACKET_V1:
225 if (status != (h.h1->tp_status ? TP_STATUS_USER : 234 h.h1->tp_status = status;
226 TP_STATUS_KERNEL)) 235 flush_dcache_page(virt_to_page(&h.h1->tp_status));
227 return NULL;
228 break; 236 break;
229 case TPACKET_V2: 237 case TPACKET_V2:
230 if (status != (h.h2->tp_status ? TP_STATUS_USER : 238 h.h2->tp_status = status;
231 TP_STATUS_KERNEL)) 239 flush_dcache_page(virt_to_page(&h.h2->tp_status));
232 return NULL;
233 break; 240 break;
241 default:
242 printk(KERN_ERR "TPACKET version not supported\n");
243 BUG();
234 } 244 }
235 return h.raw; 245
246 smp_wmb();
236} 247}
237 248
238static void __packet_set_status(struct packet_sock *po, void *frame, int status) 249static int __packet_get_status(struct packet_sock *po, void *frame)
239{ 250{
240 union { 251 union {
241 struct tpacket_hdr *h1; 252 struct tpacket_hdr *h1;
@@ -243,16 +254,66 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
243 void *raw; 254 void *raw;
244 } h; 255 } h;
245 256
257 smp_rmb();
258
246 h.raw = frame; 259 h.raw = frame;
247 switch (po->tp_version) { 260 switch (po->tp_version) {
248 case TPACKET_V1: 261 case TPACKET_V1:
249 h.h1->tp_status = status; 262 flush_dcache_page(virt_to_page(&h.h1->tp_status));
250 break; 263 return h.h1->tp_status;
251 case TPACKET_V2: 264 case TPACKET_V2:
252 h.h2->tp_status = status; 265 flush_dcache_page(virt_to_page(&h.h2->tp_status));
253 break; 266 return h.h2->tp_status;
267 default:
268 printk(KERN_ERR "TPACKET version not supported\n");
269 BUG();
270 return 0;
254 } 271 }
255} 272}
273
274static void *packet_lookup_frame(struct packet_sock *po,
275 struct packet_ring_buffer *rb,
276 unsigned int position,
277 int status)
278{
279 unsigned int pg_vec_pos, frame_offset;
280 union {
281 struct tpacket_hdr *h1;
282 struct tpacket2_hdr *h2;
283 void *raw;
284 } h;
285
286 pg_vec_pos = position / rb->frames_per_block;
287 frame_offset = position % rb->frames_per_block;
288
289 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
290
291 if (status != __packet_get_status(po, h.raw))
292 return NULL;
293
294 return h.raw;
295}
296
297static inline void *packet_current_frame(struct packet_sock *po,
298 struct packet_ring_buffer *rb,
299 int status)
300{
301 return packet_lookup_frame(po, rb, rb->head, status);
302}
303
304static inline void *packet_previous_frame(struct packet_sock *po,
305 struct packet_ring_buffer *rb,
306 int status)
307{
308 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
309 return packet_lookup_frame(po, rb, previous, status);
310}
311
312static inline void packet_increment_head(struct packet_ring_buffer *buff)
313{
314 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
315}
316
256#endif 317#endif
257 318
258static inline struct packet_sock *pkt_sk(struct sock *sk) 319static inline struct packet_sock *pkt_sk(struct sock *sk)
@@ -311,8 +372,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
311 goto oom; 372 goto oom;
312 373
313 /* drop any routing info */ 374 /* drop any routing info */
314 dst_release(skb->dst); 375 skb_dst_drop(skb);
315 skb->dst = NULL;
316 376
317 /* drop conntrack reference */ 377 /* drop conntrack reference */
318 nf_reset(skb); 378 nf_reset(skb);
@@ -560,8 +620,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
560 620
561 skb_set_owner_r(skb, sk); 621 skb_set_owner_r(skb, sk);
562 skb->dev = NULL; 622 skb->dev = NULL;
563 dst_release(skb->dst); 623 skb_dst_drop(skb);
564 skb->dst = NULL;
565 624
566 /* drop conntrack reference */ 625 /* drop conntrack reference */
567 nf_reset(skb); 626 nf_reset(skb);
@@ -648,7 +707,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
648 macoff = netoff - maclen; 707 macoff = netoff - maclen;
649 } 708 }
650 709
651 if (macoff + snaplen > po->frame_size) { 710 if (macoff + snaplen > po->rx_ring.frame_size) {
652 if (po->copy_thresh && 711 if (po->copy_thresh &&
653 atomic_read(&sk->sk_rmem_alloc) + skb->truesize < 712 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
654 (unsigned)sk->sk_rcvbuf) { 713 (unsigned)sk->sk_rcvbuf) {
@@ -661,16 +720,16 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
661 if (copy_skb) 720 if (copy_skb)
662 skb_set_owner_r(copy_skb, sk); 721 skb_set_owner_r(copy_skb, sk);
663 } 722 }
664 snaplen = po->frame_size - macoff; 723 snaplen = po->rx_ring.frame_size - macoff;
665 if ((int)snaplen < 0) 724 if ((int)snaplen < 0)
666 snaplen = 0; 725 snaplen = 0;
667 } 726 }
668 727
669 spin_lock(&sk->sk_receive_queue.lock); 728 spin_lock(&sk->sk_receive_queue.lock);
670 h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL); 729 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
671 if (!h.raw) 730 if (!h.raw)
672 goto ring_is_full; 731 goto ring_is_full;
673 po->head = po->head != po->frame_max ? po->head+1 : 0; 732 packet_increment_head(&po->rx_ring);
674 po->stats.tp_packets++; 733 po->stats.tp_packets++;
675 if (copy_skb) { 734 if (copy_skb) {
676 status |= TP_STATUS_COPY; 735 status |= TP_STATUS_COPY;
@@ -727,7 +786,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
727 786
728 __packet_set_status(po, h.raw, status); 787 __packet_set_status(po, h.raw, status);
729 smp_mb(); 788 smp_mb();
730
731 { 789 {
732 struct page *p_start, *p_end; 790 struct page *p_start, *p_end;
733 u8 *h_end = h.raw + macoff + snaplen - 1; 791 u8 *h_end = h.raw + macoff + snaplen - 1;
@@ -760,10 +818,249 @@ ring_is_full:
760 goto drop_n_restore; 818 goto drop_n_restore;
761} 819}
762 820
763#endif 821static void tpacket_destruct_skb(struct sk_buff *skb)
822{
823 struct packet_sock *po = pkt_sk(skb->sk);
824 void *ph;
764 825
826 BUG_ON(skb == NULL);
765 827
766static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 828 if (likely(po->tx_ring.pg_vec)) {
829 ph = skb_shinfo(skb)->destructor_arg;
830 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
831 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
832 atomic_dec(&po->tx_ring.pending);
833 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
834 }
835
836 sock_wfree(skb);
837}
838
839 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
840 void *frame, struct net_device *dev, int size_max,
841 __be16 proto, unsigned char *addr)
842{
843 union {
844 struct tpacket_hdr *h1;
845 struct tpacket2_hdr *h2;
846 void *raw;
847 } ph;
848 int to_write, offset, len, tp_len, nr_frags, len_max;
849 struct socket *sock = po->sk.sk_socket;
850 struct page *page;
851 void *data;
852 int err;
853
854 ph.raw = frame;
855
856 skb->protocol = proto;
857 skb->dev = dev;
858 skb->priority = po->sk.sk_priority;
859 skb_shinfo(skb)->destructor_arg = ph.raw;
860
861 switch (po->tp_version) {
862 case TPACKET_V2:
863 tp_len = ph.h2->tp_len;
864 break;
865 default:
866 tp_len = ph.h1->tp_len;
867 break;
868 }
869 if (unlikely(tp_len > size_max)) {
870 printk(KERN_ERR "packet size is too long (%d > %d)\n",
871 tp_len, size_max);
872 return -EMSGSIZE;
873 }
874
875 skb_reserve(skb, LL_RESERVED_SPACE(dev));
876 skb_reset_network_header(skb);
877
878 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
879 to_write = tp_len;
880
881 if (sock->type == SOCK_DGRAM) {
882 err = dev_hard_header(skb, dev, ntohs(proto), addr,
883 NULL, tp_len);
884 if (unlikely(err < 0))
885 return -EINVAL;
886 } else if (dev->hard_header_len) {
887 /* net device doesn't like empty head */
888 if (unlikely(tp_len <= dev->hard_header_len)) {
889 printk(KERN_ERR "packet size is too short "
890 "(%d < %d)\n", tp_len,
891 dev->hard_header_len);
892 return -EINVAL;
893 }
894
895 skb_push(skb, dev->hard_header_len);
896 err = skb_store_bits(skb, 0, data,
897 dev->hard_header_len);
898 if (unlikely(err))
899 return err;
900
901 data += dev->hard_header_len;
902 to_write -= dev->hard_header_len;
903 }
904
905 err = -EFAULT;
906 page = virt_to_page(data);
907 offset = offset_in_page(data);
908 len_max = PAGE_SIZE - offset;
909 len = ((to_write > len_max) ? len_max : to_write);
910
911 skb->data_len = to_write;
912 skb->len += to_write;
913 skb->truesize += to_write;
914 atomic_add(to_write, &po->sk.sk_wmem_alloc);
915
916 while (likely(to_write)) {
917 nr_frags = skb_shinfo(skb)->nr_frags;
918
919 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
920 printk(KERN_ERR "Packet exceed the number "
921 "of skb frags(%lu)\n",
922 MAX_SKB_FRAGS);
923 return -EFAULT;
924 }
925
926 flush_dcache_page(page);
927 get_page(page);
928 skb_fill_page_desc(skb,
929 nr_frags,
930 page++, offset, len);
931 to_write -= len;
932 offset = 0;
933 len_max = PAGE_SIZE;
934 len = ((to_write > len_max) ? len_max : to_write);
935 }
936
937 return tp_len;
938}
939
940static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
941{
942 struct socket *sock;
943 struct sk_buff *skb;
944 struct net_device *dev;
945 __be16 proto;
946 int ifindex, err, reserve = 0;
947 void *ph;
948 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
949 int tp_len, size_max;
950 unsigned char *addr;
951 int len_sum = 0;
952 int status = 0;
953
954 sock = po->sk.sk_socket;
955
956 mutex_lock(&po->pg_vec_lock);
957
958 err = -EBUSY;
959 if (saddr == NULL) {
960 ifindex = po->ifindex;
961 proto = po->num;
962 addr = NULL;
963 } else {
964 err = -EINVAL;
965 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
966 goto out;
967 if (msg->msg_namelen < (saddr->sll_halen
968 + offsetof(struct sockaddr_ll,
969 sll_addr)))
970 goto out;
971 ifindex = saddr->sll_ifindex;
972 proto = saddr->sll_protocol;
973 addr = saddr->sll_addr;
974 }
975
976 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
977 err = -ENXIO;
978 if (unlikely(dev == NULL))
979 goto out;
980
981 reserve = dev->hard_header_len;
982
983 err = -ENETDOWN;
984 if (unlikely(!(dev->flags & IFF_UP)))
985 goto out_put;
986
987 size_max = po->tx_ring.frame_size
988 - sizeof(struct skb_shared_info)
989 - po->tp_hdrlen
990 - LL_ALLOCATED_SPACE(dev)
991 - sizeof(struct sockaddr_ll);
992
993 if (size_max > dev->mtu + reserve)
994 size_max = dev->mtu + reserve;
995
996 do {
997 ph = packet_current_frame(po, &po->tx_ring,
998 TP_STATUS_SEND_REQUEST);
999
1000 if (unlikely(ph == NULL)) {
1001 schedule();
1002 continue;
1003 }
1004
1005 status = TP_STATUS_SEND_REQUEST;
1006 skb = sock_alloc_send_skb(&po->sk,
1007 LL_ALLOCATED_SPACE(dev)
1008 + sizeof(struct sockaddr_ll),
1009 0, &err);
1010
1011 if (unlikely(skb == NULL))
1012 goto out_status;
1013
1014 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1015 addr);
1016
1017 if (unlikely(tp_len < 0)) {
1018 if (po->tp_loss) {
1019 __packet_set_status(po, ph,
1020 TP_STATUS_AVAILABLE);
1021 packet_increment_head(&po->tx_ring);
1022 kfree_skb(skb);
1023 continue;
1024 } else {
1025 status = TP_STATUS_WRONG_FORMAT;
1026 err = tp_len;
1027 goto out_status;
1028 }
1029 }
1030
1031 skb->destructor = tpacket_destruct_skb;
1032 __packet_set_status(po, ph, TP_STATUS_SENDING);
1033 atomic_inc(&po->tx_ring.pending);
1034
1035 status = TP_STATUS_SEND_REQUEST;
1036 err = dev_queue_xmit(skb);
1037 if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
1038 goto out_xmit;
1039 packet_increment_head(&po->tx_ring);
1040 len_sum += tp_len;
1041 }
1042 while (likely((ph != NULL) ||
1043 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
1044 (atomic_read(&po->tx_ring.pending)))));
1045
1046 err = len_sum;
1047 goto out_put;
1048
1049out_xmit:
1050 skb->destructor = sock_wfree;
1051 atomic_dec(&po->tx_ring.pending);
1052out_status:
1053 __packet_set_status(po, ph, status);
1054 kfree_skb(skb);
1055out_put:
1056 dev_put(dev);
1057out:
1058 mutex_unlock(&po->pg_vec_lock);
1059 return err;
1060}
1061#endif
1062
1063static int packet_snd(struct socket *sock,
767 struct msghdr *msg, size_t len) 1064 struct msghdr *msg, size_t len)
768{ 1065{
769 struct sock *sk = sock->sk; 1066 struct sock *sk = sock->sk;
@@ -854,6 +1151,19 @@ out:
854 return err; 1151 return err;
855} 1152}
856 1153
1154static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1155 struct msghdr *msg, size_t len)
1156{
1157#ifdef CONFIG_PACKET_MMAP
1158 struct sock *sk = sock->sk;
1159 struct packet_sock *po = pkt_sk(sk);
1160 if (po->tx_ring.pg_vec)
1161 return tpacket_snd(po, msg);
1162 else
1163#endif
1164 return packet_snd(sock, msg, len);
1165}
1166
857/* 1167/*
858 * Close a PACKET socket. This is fairly simple. We immediately go 1168 * Close a PACKET socket. This is fairly simple. We immediately go
859 * to 'closed' state and remove our protocol entry in the device list. 1169 * to 'closed' state and remove our protocol entry in the device list.
@@ -864,6 +1174,9 @@ static int packet_release(struct socket *sock)
864 struct sock *sk = sock->sk; 1174 struct sock *sk = sock->sk;
865 struct packet_sock *po; 1175 struct packet_sock *po;
866 struct net *net; 1176 struct net *net;
1177#ifdef CONFIG_PACKET_MMAP
1178 struct tpacket_req req;
1179#endif
867 1180
868 if (!sk) 1181 if (!sk)
869 return 0; 1182 return 0;
@@ -893,11 +1206,13 @@ static int packet_release(struct socket *sock)
893 packet_flush_mclist(sk); 1206 packet_flush_mclist(sk);
894 1207
895#ifdef CONFIG_PACKET_MMAP 1208#ifdef CONFIG_PACKET_MMAP
896 if (po->pg_vec) { 1209 memset(&req, 0, sizeof(req));
897 struct tpacket_req req; 1210
898 memset(&req, 0, sizeof(req)); 1211 if (po->rx_ring.pg_vec)
899 packet_set_ring(sk, &req, 1); 1212 packet_set_ring(sk, &req, 1, 0);
900 } 1213
1214 if (po->tx_ring.pg_vec)
1215 packet_set_ring(sk, &req, 1, 1);
901#endif 1216#endif
902 1217
903 /* 1218 /*
@@ -1253,9 +1568,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1253 switch (i->type) { 1568 switch (i->type) {
1254 case PACKET_MR_MULTICAST: 1569 case PACKET_MR_MULTICAST:
1255 if (what > 0) 1570 if (what > 0)
1256 dev_mc_add(dev, i->addr, i->alen, 0); 1571 return dev_mc_add(dev, i->addr, i->alen, 0);
1257 else 1572 else
1258 dev_mc_delete(dev, i->addr, i->alen, 0); 1573 return dev_mc_delete(dev, i->addr, i->alen, 0);
1259 break; 1574 break;
1260 case PACKET_MR_PROMISC: 1575 case PACKET_MR_PROMISC:
1261 return dev_set_promiscuity(dev, what); 1576 return dev_set_promiscuity(dev, what);
@@ -1263,6 +1578,12 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1263 case PACKET_MR_ALLMULTI: 1578 case PACKET_MR_ALLMULTI:
1264 return dev_set_allmulti(dev, what); 1579 return dev_set_allmulti(dev, what);
1265 break; 1580 break;
1581 case PACKET_MR_UNICAST:
1582 if (what > 0)
1583 return dev_unicast_add(dev, i->addr);
1584 else
1585 return dev_unicast_delete(dev, i->addr);
1586 break;
1266 default:; 1587 default:;
1267 } 1588 }
1268 return 0; 1589 return 0;
@@ -1391,7 +1712,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1391 if (level != SOL_PACKET) 1712 if (level != SOL_PACKET)
1392 return -ENOPROTOOPT; 1713 return -ENOPROTOOPT;
1393 1714
1394 switch(optname) { 1715 switch (optname) {
1395 case PACKET_ADD_MEMBERSHIP: 1716 case PACKET_ADD_MEMBERSHIP:
1396 case PACKET_DROP_MEMBERSHIP: 1717 case PACKET_DROP_MEMBERSHIP:
1397 { 1718 {
@@ -1415,6 +1736,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1415 1736
1416#ifdef CONFIG_PACKET_MMAP 1737#ifdef CONFIG_PACKET_MMAP
1417 case PACKET_RX_RING: 1738 case PACKET_RX_RING:
1739 case PACKET_TX_RING:
1418 { 1740 {
1419 struct tpacket_req req; 1741 struct tpacket_req req;
1420 1742
@@ -1422,7 +1744,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1422 return -EINVAL; 1744 return -EINVAL;
1423 if (copy_from_user(&req,optval,sizeof(req))) 1745 if (copy_from_user(&req,optval,sizeof(req)))
1424 return -EFAULT; 1746 return -EFAULT;
1425 return packet_set_ring(sk, &req, 0); 1747 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
1426 } 1748 }
1427 case PACKET_COPY_THRESH: 1749 case PACKET_COPY_THRESH:
1428 { 1750 {
@@ -1442,7 +1764,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1442 1764
1443 if (optlen != sizeof(val)) 1765 if (optlen != sizeof(val))
1444 return -EINVAL; 1766 return -EINVAL;
1445 if (po->pg_vec) 1767 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1446 return -EBUSY; 1768 return -EBUSY;
1447 if (copy_from_user(&val, optval, sizeof(val))) 1769 if (copy_from_user(&val, optval, sizeof(val)))
1448 return -EFAULT; 1770 return -EFAULT;
@@ -1461,13 +1783,26 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1461 1783
1462 if (optlen != sizeof(val)) 1784 if (optlen != sizeof(val))
1463 return -EINVAL; 1785 return -EINVAL;
1464 if (po->pg_vec) 1786 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1465 return -EBUSY; 1787 return -EBUSY;
1466 if (copy_from_user(&val, optval, sizeof(val))) 1788 if (copy_from_user(&val, optval, sizeof(val)))
1467 return -EFAULT; 1789 return -EFAULT;
1468 po->tp_reserve = val; 1790 po->tp_reserve = val;
1469 return 0; 1791 return 0;
1470 } 1792 }
1793 case PACKET_LOSS:
1794 {
1795 unsigned int val;
1796
1797 if (optlen != sizeof(val))
1798 return -EINVAL;
1799 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1800 return -EBUSY;
1801 if (copy_from_user(&val, optval, sizeof(val)))
1802 return -EFAULT;
1803 po->tp_loss = !!val;
1804 return 0;
1805 }
1471#endif 1806#endif
1472 case PACKET_AUXDATA: 1807 case PACKET_AUXDATA:
1473 { 1808 {
@@ -1517,7 +1852,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1517 if (len < 0) 1852 if (len < 0)
1518 return -EINVAL; 1853 return -EINVAL;
1519 1854
1520 switch(optname) { 1855 switch (optname) {
1521 case PACKET_STATISTICS: 1856 case PACKET_STATISTICS:
1522 if (len > sizeof(struct tpacket_stats)) 1857 if (len > sizeof(struct tpacket_stats))
1523 len = sizeof(struct tpacket_stats); 1858 len = sizeof(struct tpacket_stats);
@@ -1573,6 +1908,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1573 val = po->tp_reserve; 1908 val = po->tp_reserve;
1574 data = &val; 1909 data = &val;
1575 break; 1910 break;
1911 case PACKET_LOSS:
1912 if (len > sizeof(unsigned int))
1913 len = sizeof(unsigned int);
1914 val = po->tp_loss;
1915 data = &val;
1916 break;
1576#endif 1917#endif
1577 default: 1918 default:
1578 return -ENOPROTOOPT; 1919 return -ENOPROTOOPT;
@@ -1643,7 +1984,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
1643{ 1984{
1644 struct sock *sk = sock->sk; 1985 struct sock *sk = sock->sk;
1645 1986
1646 switch(cmd) { 1987 switch (cmd) {
1647 case SIOCOUTQ: 1988 case SIOCOUTQ:
1648 { 1989 {
1649 int amount = atomic_read(&sk->sk_wmem_alloc); 1990 int amount = atomic_read(&sk->sk_wmem_alloc);
@@ -1705,13 +2046,17 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
1705 unsigned int mask = datagram_poll(file, sock, wait); 2046 unsigned int mask = datagram_poll(file, sock, wait);
1706 2047
1707 spin_lock_bh(&sk->sk_receive_queue.lock); 2048 spin_lock_bh(&sk->sk_receive_queue.lock);
1708 if (po->pg_vec) { 2049 if (po->rx_ring.pg_vec) {
1709 unsigned last = po->head ? po->head-1 : po->frame_max; 2050 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
1710
1711 if (packet_lookup_frame(po, last, TP_STATUS_USER))
1712 mask |= POLLIN | POLLRDNORM; 2051 mask |= POLLIN | POLLRDNORM;
1713 } 2052 }
1714 spin_unlock_bh(&sk->sk_receive_queue.lock); 2053 spin_unlock_bh(&sk->sk_receive_queue.lock);
2054 spin_lock_bh(&sk->sk_write_queue.lock);
2055 if (po->tx_ring.pg_vec) {
2056 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2057 mask |= POLLOUT | POLLWRNORM;
2058 }
2059 spin_unlock_bh(&sk->sk_write_queue.lock);
1715 return mask; 2060 return mask;
1716} 2061}
1717 2062
@@ -1788,21 +2133,33 @@ out_free_pgvec:
1788 goto out; 2133 goto out;
1789} 2134}
1790 2135
1791static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing) 2136static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2137 int closing, int tx_ring)
1792{ 2138{
1793 char **pg_vec = NULL; 2139 char **pg_vec = NULL;
1794 struct packet_sock *po = pkt_sk(sk); 2140 struct packet_sock *po = pkt_sk(sk);
1795 int was_running, order = 0; 2141 int was_running, order = 0;
2142 struct packet_ring_buffer *rb;
2143 struct sk_buff_head *rb_queue;
1796 __be16 num; 2144 __be16 num;
1797 int err = 0; 2145 int err;
1798 2146
1799 if (req->tp_block_nr) { 2147 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
1800 int i; 2148 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
1801 2149
1802 /* Sanity tests and some calculations */ 2150 err = -EBUSY;
2151 if (!closing) {
2152 if (atomic_read(&po->mapped))
2153 goto out;
2154 if (atomic_read(&rb->pending))
2155 goto out;
2156 }
1803 2157
1804 if (unlikely(po->pg_vec)) 2158 if (req->tp_block_nr) {
1805 return -EBUSY; 2159 /* Sanity tests and some calculations */
2160 err = -EBUSY;
2161 if (unlikely(rb->pg_vec))
2162 goto out;
1806 2163
1807 switch (po->tp_version) { 2164 switch (po->tp_version) {
1808 case TPACKET_V1: 2165 case TPACKET_V1:
@@ -1813,42 +2170,35 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1813 break; 2170 break;
1814 } 2171 }
1815 2172
2173 err = -EINVAL;
1816 if (unlikely((int)req->tp_block_size <= 0)) 2174 if (unlikely((int)req->tp_block_size <= 0))
1817 return -EINVAL; 2175 goto out;
1818 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 2176 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1819 return -EINVAL; 2177 goto out;
1820 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 2178 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
1821 po->tp_reserve)) 2179 po->tp_reserve))
1822 return -EINVAL; 2180 goto out;
1823 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 2181 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1824 return -EINVAL; 2182 goto out;
1825 2183
1826 po->frames_per_block = req->tp_block_size/req->tp_frame_size; 2184 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
1827 if (unlikely(po->frames_per_block <= 0)) 2185 if (unlikely(rb->frames_per_block <= 0))
1828 return -EINVAL; 2186 goto out;
1829 if (unlikely((po->frames_per_block * req->tp_block_nr) != 2187 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
1830 req->tp_frame_nr)) 2188 req->tp_frame_nr))
1831 return -EINVAL; 2189 goto out;
1832 2190
1833 err = -ENOMEM; 2191 err = -ENOMEM;
1834 order = get_order(req->tp_block_size); 2192 order = get_order(req->tp_block_size);
1835 pg_vec = alloc_pg_vec(req, order); 2193 pg_vec = alloc_pg_vec(req, order);
1836 if (unlikely(!pg_vec)) 2194 if (unlikely(!pg_vec))
1837 goto out; 2195 goto out;
1838 2196 }
1839 for (i = 0; i < req->tp_block_nr; i++) { 2197 /* Done */
1840 void *ptr = pg_vec[i]; 2198 else {
1841 int k; 2199 err = -EINVAL;
1842
1843 for (k = 0; k < po->frames_per_block; k++) {
1844 __packet_set_status(po, ptr, TP_STATUS_KERNEL);
1845 ptr += req->tp_frame_size;
1846 }
1847 }
1848 /* Done */
1849 } else {
1850 if (unlikely(req->tp_frame_nr)) 2200 if (unlikely(req->tp_frame_nr))
1851 return -EINVAL; 2201 goto out;
1852 } 2202 }
1853 2203
1854 lock_sock(sk); 2204 lock_sock(sk);
@@ -1872,23 +2222,24 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1872 if (closing || atomic_read(&po->mapped) == 0) { 2222 if (closing || atomic_read(&po->mapped) == 0) {
1873 err = 0; 2223 err = 0;
1874#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) 2224#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1875 2225 spin_lock_bh(&rb_queue->lock);
1876 spin_lock_bh(&sk->sk_receive_queue.lock); 2226 pg_vec = XC(rb->pg_vec, pg_vec);
1877 pg_vec = XC(po->pg_vec, pg_vec); 2227 rb->frame_max = (req->tp_frame_nr - 1);
1878 po->frame_max = (req->tp_frame_nr - 1); 2228 rb->head = 0;
1879 po->head = 0; 2229 rb->frame_size = req->tp_frame_size;
1880 po->frame_size = req->tp_frame_size; 2230 spin_unlock_bh(&rb_queue->lock);
1881 spin_unlock_bh(&sk->sk_receive_queue.lock); 2231
1882 2232 order = XC(rb->pg_vec_order, order);
1883 order = XC(po->pg_vec_order, order); 2233 req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
1884 req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr); 2234
1885 2235 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
1886 po->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 2236 po->prot_hook.func = (po->rx_ring.pg_vec) ?
1887 po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv; 2237 tpacket_rcv : packet_rcv;
1888 skb_queue_purge(&sk->sk_receive_queue); 2238 skb_queue_purge(rb_queue);
1889#undef XC 2239#undef XC
1890 if (atomic_read(&po->mapped)) 2240 if (atomic_read(&po->mapped))
1891 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); 2241 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
2242 atomic_read(&po->mapped));
1892 } 2243 }
1893 mutex_unlock(&po->pg_vec_lock); 2244 mutex_unlock(&po->pg_vec_lock);
1894 2245
@@ -1909,11 +2260,13 @@ out:
1909 return err; 2260 return err;
1910} 2261}
1911 2262
1912static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 2263static int packet_mmap(struct file *file, struct socket *sock,
2264 struct vm_area_struct *vma)
1913{ 2265{
1914 struct sock *sk = sock->sk; 2266 struct sock *sk = sock->sk;
1915 struct packet_sock *po = pkt_sk(sk); 2267 struct packet_sock *po = pkt_sk(sk);
1916 unsigned long size; 2268 unsigned long size, expected_size;
2269 struct packet_ring_buffer *rb;
1917 unsigned long start; 2270 unsigned long start;
1918 int err = -EINVAL; 2271 int err = -EINVAL;
1919 int i; 2272 int i;
@@ -1921,26 +2274,43 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1921 if (vma->vm_pgoff) 2274 if (vma->vm_pgoff)
1922 return -EINVAL; 2275 return -EINVAL;
1923 2276
1924 size = vma->vm_end - vma->vm_start;
1925
1926 mutex_lock(&po->pg_vec_lock); 2277 mutex_lock(&po->pg_vec_lock);
1927 if (po->pg_vec == NULL) 2278
2279 expected_size = 0;
2280 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2281 if (rb->pg_vec) {
2282 expected_size += rb->pg_vec_len
2283 * rb->pg_vec_pages
2284 * PAGE_SIZE;
2285 }
2286 }
2287
2288 if (expected_size == 0)
1928 goto out; 2289 goto out;
1929 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) 2290
2291 size = vma->vm_end - vma->vm_start;
2292 if (size != expected_size)
1930 goto out; 2293 goto out;
1931 2294
1932 start = vma->vm_start; 2295 start = vma->vm_start;
1933 for (i = 0; i < po->pg_vec_len; i++) { 2296 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
1934 struct page *page = virt_to_page(po->pg_vec[i]); 2297 if (rb->pg_vec == NULL)
1935 int pg_num; 2298 continue;
1936 2299
1937 for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) { 2300 for (i = 0; i < rb->pg_vec_len; i++) {
1938 err = vm_insert_page(vma, start, page); 2301 struct page *page = virt_to_page(rb->pg_vec[i]);
1939 if (unlikely(err)) 2302 int pg_num;
1940 goto out; 2303
1941 start += PAGE_SIZE; 2304 for (pg_num = 0; pg_num < rb->pg_vec_pages;
 2305				pg_num++, page++) {
2306 err = vm_insert_page(vma, start, page);
2307 if (unlikely(err))
2308 goto out;
2309 start += PAGE_SIZE;
2310 }
1942 } 2311 }
1943 } 2312 }
2313
1944 atomic_inc(&po->mapped); 2314 atomic_inc(&po->mapped);
1945 vma->vm_ops = &packet_mmap_ops; 2315 vma->vm_ops = &packet_mmap_ops;
1946 err = 0; 2316 err = 0;
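
The rewritten packet_mmap() above no longer assumes a single receive ring: it walks the adjacent rx_ring/tx_ring pair with a pointer loop, sums pg_vec_len * pg_vec_pages * PAGE_SIZE for every ring that actually has a pg_vec, and rejects the mapping unless the VMA covers exactly that many bytes. A minimal sketch of that size check, with struct ring standing in for struct packet_ring_buffer and a two-element array in place of the kernel's adjacent struct members (PAGE_SIZE is defined locally only to keep the sketch self-contained):

#define PAGE_SIZE 4096UL	/* illustrative; the kernel gets this from asm/page.h */

struct ring {
	void **pg_vec;			/* NULL if this direction has no ring */
	unsigned int pg_vec_len;	/* blocks in the ring */
	unsigned int pg_vec_pages;	/* pages per block */
};

/* rings[0] = rx, rings[1] = tx, mirroring po->rx_ring/po->tx_ring */
static unsigned long expected_mmap_size(const struct ring rings[2])
{
	unsigned long expected = 0;
	int i;

	for (i = 0; i < 2; i++)
		if (rings[i].pg_vec)
			expected += (unsigned long)rings[i].pg_vec_len *
				    rings[i].pg_vec_pages * PAGE_SIZE;

	return expected;	/* vma->vm_end - vma->vm_start must equal this */
}
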
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 4aa888584d20..480839dfc560 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -115,10 +115,10 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
115 rskb->truesize += rskb->len; 115 rskb->truesize += rskb->len;
116 116
117 /* Avoid nested fragments */ 117 /* Avoid nested fragments */
118 for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) 118 skb_walk_frags(skb, fs)
119 flen += fs->len; 119 flen += fs->len;
120 skb->next = skb_shinfo(skb)->frag_list; 120 skb->next = skb_shinfo(skb)->frag_list;
121 skb_shinfo(skb)->frag_list = NULL; 121 skb_frag_list_init(skb);
122 skb->len -= flen; 122 skb->len -= flen;
123 skb->data_len -= flen; 123 skb->data_len -= flen;
124 skb->truesize -= flen; 124 skb->truesize -= flen;
@@ -212,8 +212,9 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
212 dev->stats.tx_bytes += len; 212 dev->stats.tx_bytes += len;
213 } 213 }
214 214
215 if (!pep_writeable(sk)) 215 netif_stop_queue(dev);
216 netif_stop_queue(dev); 216 if (pep_writeable(sk))
217 netif_wake_queue(dev);
217 return 0; 218 return 0;
218} 219}
219 220
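
The gprs_xmit() hunk above fixes an ordering race: the old code stopped the queue only when the pipe was already unwriteable, leaving a window in which the pipe could fill between the check and the stop. The new code stops unconditionally and wakes only if there is still room. A sketch of the idiom, with the writeable flag standing in for the pep_writeable(sk) result:

#include <linux/netdevice.h>

static void throttle_tx(struct net_device *dev, bool writeable)
{
	netif_stop_queue(dev);		/* assume the worst first */
	if (writeable)
		netif_wake_queue(dev);	/* undo if there is still room */
}
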
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 8ad2b5333881..eef833ea6d7b 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -940,10 +940,10 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
940 rskb->truesize += rskb->len; 940 rskb->truesize += rskb->len;
941 941
942 /* Avoid nested fragments */ 942 /* Avoid nested fragments */
943 for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) 943 skb_walk_frags(skb, fs)
944 flen += fs->len; 944 flen += fs->len;
945 skb->next = skb_shinfo(skb)->frag_list; 945 skb->next = skb_shinfo(skb)->frag_list;
946 skb_shinfo(skb)->frag_list = NULL; 946 skb_frag_list_init(skb);
947 skb->len -= flen; 947 skb->len -= flen;
948 skb->data_len -= flen; 948 skb->data_len -= flen;
949 skb->truesize -= flen; 949 skb->truesize -= flen;
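
Both phonet hunks replace open-coded frag_list manipulation with the new skb helpers: skb_walk_frags() iterates skb_shinfo(skb)->frag_list and skb_frag_list_init() clears it. A sketch of the resulting pattern, condensed from the two hunks:

#include <linux/skbuff.h>

/* Detach skb's fragment list onto skb->next and shrink the counters. */
static unsigned int detach_frag_list(struct sk_buff *skb)
{
	struct sk_buff *fs;
	unsigned int flen = 0;

	skb_walk_frags(skb, fs)			/* walk the frag_list */
		flen += fs->len;

	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);		/* frag_list = NULL */

	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;
	return flen;
}
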
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 20cf16fc572f..b11e7e527864 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -35,7 +35,6 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/version.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include "rds.h" 40#include "rds.h"
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 273f064930a8..d14445c48304 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -148,14 +148,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
148 if (conn) 148 if (conn)
149 goto out; 149 goto out;
150 150
151 conn = kmem_cache_alloc(rds_conn_slab, gfp); 151 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
152 if (conn == NULL) { 152 if (conn == NULL) {
153 conn = ERR_PTR(-ENOMEM); 153 conn = ERR_PTR(-ENOMEM);
154 goto out; 154 goto out;
155 } 155 }
156 156
157 memset(conn, 0, sizeof(*conn));
158
159 INIT_HLIST_NODE(&conn->c_hash_node); 157 INIT_HLIST_NODE(&conn->c_hash_node);
160 conn->c_version = RDS_PROTOCOL_3_0; 158 conn->c_version = RDS_PROTOCOL_3_0;
161 conn->c_laddr = laddr; 159 conn->c_laddr = laddr;
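
The connection.c hunk is a straight substitution: kmem_cache_zalloc() allocates from the slab and zeroes the object in one call, so the separate memset() goes away. A sketch with a hypothetical cache and object type:

#include <linux/slab.h>

struct my_obj {
	int a;
};

static struct my_obj *alloc_zeroed(struct kmem_cache *cache, gfp_t gfp)
{
	/* equivalent to kmem_cache_alloc() followed by memset(obj, 0, size) */
	return kmem_cache_zalloc(cache, gfp);
}
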
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4933b380985e..b9bcd32431e1 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -224,8 +224,8 @@ static int rds_ib_laddr_check(__be32 addr)
224 * IB and iWARP capable NICs. 224 * IB and iWARP capable NICs.
225 */ 225 */
226 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 226 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
227 if (!cm_id) 227 if (IS_ERR(cm_id))
228 return -EADDRNOTAVAIL; 228 return PTR_ERR(cm_id);
229 229
230 memset(&sin, 0, sizeof(sin)); 230 memset(&sin, 0, sizeof(sin));
231 sin.sin_family = AF_INET; 231 sin.sin_family = AF_INET;
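
rdma_create_id() reports failure through an ERR_PTR-encoded pointer, never through NULL, so the old `if (!cm_id)` test could not fire; the fix (repeated for iWARP in net/rds/iw.c below) tests IS_ERR() and propagates PTR_ERR(). A self-contained sketch of the convention, with make_thing() as a hypothetical producer:

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	int val;
};

static struct thing *make_thing(gfp_t gfp)
{
	struct thing *t = kzalloc(sizeof(*t), gfp);

	if (!t)
		return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
	return t;
}

static int use_thing(void)
{
	struct thing *t = make_thing(GFP_KERNEL);

	if (IS_ERR(t))
		return PTR_ERR(t);	/* never NULL, so !t is never true */
	kfree(t);
	return 0;
}
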
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 069206cae733..455ae73047fe 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -333,7 +333,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
333void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits); 333void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
334void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted); 334void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
335int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, 335int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
336 u32 *adv_credits, int need_posted); 336 u32 *adv_credits, int need_posted, int max_posted);
337 337
338/* ib_stats.c */ 338/* ib_stats.c */
339DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); 339DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 36d931573ff4..5709bad28329 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -524,7 +524,7 @@ void rds_ib_attempt_ack(struct rds_ib_connection *ic)
524 } 524 }
525 525
526 /* Can we get a send credit? */ 526 /* Can we get a send credit? */
527 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0)) { 527 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
528 rds_ib_stats_inc(s_ib_tx_throttle); 528 rds_ib_stats_inc(s_ib_tx_throttle);
529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); 529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
530 return; 530 return;
diff --git a/net/rds/ib_ring.c b/net/rds/ib_ring.c
index 99a6ccae964c..ff97e8eda858 100644
--- a/net/rds/ib_ring.c
+++ b/net/rds/ib_ring.c
@@ -137,7 +137,7 @@ int rds_ib_ring_empty(struct rds_ib_work_ring *ring)
137 137
138int rds_ib_ring_low(struct rds_ib_work_ring *ring) 138int rds_ib_ring_low(struct rds_ib_work_ring *ring)
139{ 139{
140 return __rds_ib_ring_used(ring) <= (ring->w_nr >> 2); 140 return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
141} 141}
142 142
143/* 143/*
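
The low-water mark for both the IB and the iWARP work ring (see the identical iw_ring.c hunk below) moves from a quarter full to half full, so refills kick in earlier. A worked example, assuming a hypothetical 256-entry ring:

#define W_NR 256u	/* hypothetical ring size */

static int ring_low(unsigned int used)
{
	return used <= (W_NR >> 1);	/* threshold now 128; was W_NR >> 2, i.e. 64 */
}
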
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index cb6c52cb1c4c..23bf830db2d5 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -311,7 +311,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
311 * and using atomic_cmpxchg when updating the two counters. 311 * and using atomic_cmpxchg when updating the two counters.
312 */ 312 */
313int rds_ib_send_grab_credits(struct rds_ib_connection *ic, 313int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
314 u32 wanted, u32 *adv_credits, int need_posted) 314 u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
315{ 315{
316 unsigned int avail, posted, got = 0, advertise; 316 unsigned int avail, posted, got = 0, advertise;
317 long oldval, newval; 317 long oldval, newval;
@@ -351,7 +351,7 @@ try_again:
351 * available. 351 * available.
352 */ 352 */
353 if (posted && (got || need_posted)) { 353 if (posted && (got || need_posted)) {
354 advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT); 354 advertise = min_t(unsigned int, posted, max_posted);
355 newval -= IB_SET_POST_CREDITS(advertise); 355 newval -= IB_SET_POST_CREDITS(advertise);
356 } 356 }
357 357
@@ -498,7 +498,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
498 498
499 credit_alloc = work_alloc; 499 credit_alloc = work_alloc;
500 if (ic->i_flowctl) { 500 if (ic->i_flowctl) {
501 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0); 501 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
502 adv_credits += posted; 502 adv_credits += posted;
503 if (credit_alloc < work_alloc) { 503 if (credit_alloc < work_alloc) {
504 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); 504 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
@@ -506,7 +506,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
506 flow_controlled++; 506 flow_controlled++;
507 } 507 }
508 if (work_alloc == 0) { 508 if (work_alloc == 0) {
509 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); 509 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
510 rds_ib_stats_inc(s_ib_tx_throttle); 510 rds_ib_stats_inc(s_ib_tx_throttle);
511 ret = -ENOMEM; 511 ret = -ENOMEM;
512 goto out; 512 goto out;
@@ -571,7 +571,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
571 /* 571 /*
572 * Update adv_credits since we reset the ACK_REQUIRED bit. 572 * Update adv_credits since we reset the ACK_REQUIRED bit.
573 */ 573 */
574 rds_ib_send_grab_credits(ic, 0, &posted, 1); 574 rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
575 adv_credits += posted; 575 adv_credits += posted;
576 BUG_ON(adv_credits > 255); 576 BUG_ON(adv_credits > 255);
577 } else if (ic->i_rm != rm) 577 } else if (ic->i_rm != rm)
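
The extra max_posted argument exists so the second rds_ib_send_grab_credits() call in rds_ib_xmit() can cap what it advertises at the headroom left in the one-byte credit field; together with RDS_MAX_ADV_CREDIT being raised to 255 in the rds.h hunk further down, the BUG_ON(adv_credits > 255) becomes unreachable. A worked sketch with illustrative numbers:

#define RDS_MAX_ADV_CREDIT 255u

static unsigned int grab(unsigned int posted, unsigned int max_posted)
{
	return posted < max_posted ? posted : max_posted;	/* min_t() */
}

static unsigned int total_advertised(void)
{
	unsigned int adv_credits;

	/* the first grab may advertise most of the budget ... */
	adv_credits = grab(200, RDS_MAX_ADV_CREDIT);			/* 200 */
	/* ... so the second grab is capped by the remaining headroom */
	adv_credits += grab(100, RDS_MAX_ADV_CREDIT - adv_credits);	/* +55 */

	return adv_credits;	/* at most 255, so BUG_ON(adv_credits > 255) holds */
}
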
diff --git a/net/rds/info.c b/net/rds/info.c
index 1d885535214d..62aeef37aefe 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -188,10 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
188 ret = -ENOMEM; 188 ret = -ENOMEM;
189 goto out; 189 goto out;
190 } 190 }
191 down_read(&current->mm->mmap_sem); 191 ret = get_user_pages_fast(start, nr_pages, 1, pages);
192 ret = get_user_pages(current, current->mm, start, nr_pages, 1, 0,
193 pages, NULL);
194 up_read(&current->mm->mmap_sem);
195 if (ret != nr_pages) { 192 if (ret != nr_pages) {
196 if (ret > 0) 193 if (ret > 0)
197 nr_pages = ret; 194 nr_pages = ret;
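
Both this hunk and rds_pin_pages() in net/rds/rdma.c below drop the explicit mmap_sem dance in favour of get_user_pages_fast(), which handles the locking internally (and avoids it entirely on the fast path). A sketch of the resulting pin/unpin pattern, mirroring rds_pin_pages():

#include <linux/mm.h>

static int pin_user_range(unsigned long uaddr, unsigned int nr_pages,
			  int write, struct page **pages)
{
	int ret = get_user_pages_fast(uaddr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)		/* partial pin: drop what we got */
			put_page(pages[ret]);
		ret = -EFAULT;
	}
	return ret;			/* nr_pages on success */
}
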
diff --git a/net/rds/iw.c b/net/rds/iw.c
index b732efb5b634..d16e1cbc8e83 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -233,8 +233,8 @@ static int rds_iw_laddr_check(__be32 addr)
233 * IB and iWARP capable NICs. 233 * IB and iWARP capable NICs.
234 */ 234 */
235 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 235 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
236 if (!cm_id) 236 if (IS_ERR(cm_id))
237 return -EADDRNOTAVAIL; 237 return PTR_ERR(cm_id);
238 238
239 memset(&sin, 0, sizeof(sin)); 239 memset(&sin, 0, sizeof(sin));
240 sin.sin_family = AF_INET; 240 sin.sin_family = AF_INET;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index b4fb27252895..0715dde323e7 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -361,7 +361,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits); 361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted); 362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted, 363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
364 u32 *adv_credits, int need_posted); 364 u32 *adv_credits, int need_posted, int max_posted);
365 365
366/* ib_stats.c */ 366/* ib_stats.c */
367DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats); 367DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index fde470fa50d5..8683f5f66c4b 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -524,7 +524,7 @@ void rds_iw_attempt_ack(struct rds_iw_connection *ic)
524 } 524 }
525 525
526 /* Can we get a send credit? */ 526 /* Can we get a send credit? */
527 if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0)) { 527 if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
528 rds_iw_stats_inc(s_iw_tx_throttle); 528 rds_iw_stats_inc(s_iw_tx_throttle);
529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); 529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
530 return; 530 return;
diff --git a/net/rds/iw_ring.c b/net/rds/iw_ring.c
index d422d4b5deef..da8e3b63f663 100644
--- a/net/rds/iw_ring.c
+++ b/net/rds/iw_ring.c
@@ -137,7 +137,7 @@ int rds_iw_ring_empty(struct rds_iw_work_ring *ring)
137 137
138int rds_iw_ring_low(struct rds_iw_work_ring *ring) 138int rds_iw_ring_low(struct rds_iw_work_ring *ring)
139{ 139{
140 return __rds_iw_ring_used(ring) <= (ring->w_nr >> 2); 140 return __rds_iw_ring_used(ring) <= (ring->w_nr >> 1);
141} 141}
142 142
143 143
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 22dd38ffd608..44a6a0551f28 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -347,7 +347,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
347 * and using atomic_cmpxchg when updating the two counters. 347 * and using atomic_cmpxchg when updating the two counters.
348 */ 348 */
349int rds_iw_send_grab_credits(struct rds_iw_connection *ic, 349int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
350 u32 wanted, u32 *adv_credits, int need_posted) 350 u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
351{ 351{
352 unsigned int avail, posted, got = 0, advertise; 352 unsigned int avail, posted, got = 0, advertise;
353 long oldval, newval; 353 long oldval, newval;
@@ -387,7 +387,7 @@ try_again:
387 * available. 387 * available.
388 */ 388 */
389 if (posted && (got || need_posted)) { 389 if (posted && (got || need_posted)) {
390 advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT); 390 advertise = min_t(unsigned int, posted, max_posted);
391 newval -= IB_SET_POST_CREDITS(advertise); 391 newval -= IB_SET_POST_CREDITS(advertise);
392 } 392 }
393 393
@@ -541,7 +541,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
541 541
542 credit_alloc = work_alloc; 542 credit_alloc = work_alloc;
543 if (ic->i_flowctl) { 543 if (ic->i_flowctl) {
544 credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0); 544 credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
545 adv_credits += posted; 545 adv_credits += posted;
546 if (credit_alloc < work_alloc) { 546 if (credit_alloc < work_alloc) {
547 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); 547 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
@@ -549,7 +549,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
549 flow_controlled++; 549 flow_controlled++;
550 } 550 }
551 if (work_alloc == 0) { 551 if (work_alloc == 0) {
552 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); 552 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
553 rds_iw_stats_inc(s_iw_tx_throttle); 553 rds_iw_stats_inc(s_iw_tx_throttle);
554 ret = -ENOMEM; 554 ret = -ENOMEM;
555 goto out; 555 goto out;
@@ -614,7 +614,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
614 /* 614 /*
615 * Update adv_credits since we reset the ACK_REQUIRED bit. 615 * Update adv_credits since we reset the ACK_REQUIRED bit.
616 */ 616 */
617 rds_iw_send_grab_credits(ic, 0, &posted, 1); 617 rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
618 adv_credits += posted; 618 adv_credits += posted;
619 BUG_ON(adv_credits > 255); 619 BUG_ON(adv_credits > 255);
620 } else if (ic->i_rm != rm) 620 } else if (ic->i_rm != rm)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index eaeeb91e1119..8dc83d2caa58 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -150,12 +150,9 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
150{ 150{
151 int ret; 151 int ret;
152 152
153 down_read(&current->mm->mmap_sem); 153 ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
154 ret = get_user_pages(current, current->mm, user_addr,
155 nr_pages, write, 0, pages, NULL);
156 up_read(&current->mm->mmap_sem);
157 154
158 if (0 <= ret && (unsigned) ret < nr_pages) { 155 if (ret >= 0 && ret < nr_pages) {
159 while (ret--) 156 while (ret--)
160 put_page(pages[ret]); 157 put_page(pages[ret]);
161 ret = -EFAULT; 158 ret = -EFAULT;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7b19024f9706..7d0f901c93d5 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -34,7 +34,7 @@
34 34
35#include "rdma_transport.h" 35#include "rdma_transport.h"
36 36
37static struct rdma_cm_id *rds_iw_listen_id; 37static struct rdma_cm_id *rds_rdma_listen_id;
38 38
39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, 39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
40 struct rdma_cm_event *event) 40 struct rdma_cm_event *event)
@@ -161,7 +161,7 @@ static int __init rds_rdma_listen_init(void)
161 161
162 rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT); 162 rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
163 163
164 rds_iw_listen_id = cm_id; 164 rds_rdma_listen_id = cm_id;
165 cm_id = NULL; 165 cm_id = NULL;
166out: 166out:
167 if (cm_id) 167 if (cm_id)
@@ -171,10 +171,10 @@ out:
171 171
172static void rds_rdma_listen_stop(void) 172static void rds_rdma_listen_stop(void)
173{ 173{
174 if (rds_iw_listen_id) { 174 if (rds_rdma_listen_id) {
175 rdsdebug("cm %p\n", rds_iw_listen_id); 175 rdsdebug("cm %p\n", rds_rdma_listen_id);
176 rdma_destroy_id(rds_iw_listen_id); 176 rdma_destroy_id(rds_rdma_listen_id);
177 rds_iw_listen_id = NULL; 177 rds_rdma_listen_id = NULL;
178 } 178 }
179} 179}
180 180
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 71794449ca4e..dbe111236783 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -132,7 +132,7 @@ struct rds_connection {
132#define RDS_FLAG_CONG_BITMAP 0x01 132#define RDS_FLAG_CONG_BITMAP 0x01
133#define RDS_FLAG_ACK_REQUIRED 0x02 133#define RDS_FLAG_ACK_REQUIRED 0x02
134#define RDS_FLAG_RETRANSMITTED 0x04 134#define RDS_FLAG_RETRANSMITTED 0x04
135#define RDS_MAX_ADV_CREDIT 127 135#define RDS_MAX_ADV_CREDIT 255
136 136
137/* 137/*
138 * Maximum space available for extension headers. 138 * Maximum space available for extension headers.
diff --git a/net/rds/send.c b/net/rds/send.c
index 104fe033203d..a4a7f428cd76 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -854,11 +854,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
854 854
855 rm->m_daddr = daddr; 855 rm->m_daddr = daddr;
856 856
857 /* Parse any control messages the user may have included. */
858 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
859 if (ret)
860 goto out;
861
862 /* rds_conn_create has a spinlock that runs with IRQ off. 857 /* rds_conn_create has a spinlock that runs with IRQ off.
863 * Caching the conn in the socket helps a lot. */ 858 * Caching the conn in the socket helps a lot. */
864 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) 859 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
@@ -874,6 +869,11 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
874 rs->rs_conn = conn; 869 rs->rs_conn = conn;
875 } 870 }
876 871
872 /* Parse any control messages the user may have included. */
873 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
874 if (ret)
875 goto out;
876
877 if ((rm->m_rdma_cookie || rm->m_rdma_op) 877 if ((rm->m_rdma_cookie || rm->m_rdma_op)
878 && conn->c_trans->xmit_rdma == NULL) { 878 && conn->c_trans->xmit_rdma == NULL) {
879 if (printk_ratelimit()) 879 if (printk_ratelimit())
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 7f807b30cfbb..eaf765876458 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,22 +10,15 @@ menuconfig RFKILL
10 To compile this driver as a module, choose M here: the 10 To compile this driver as a module, choose M here: the
11 module will be called rfkill. 11 module will be called rfkill.
12 12
13config RFKILL_INPUT
14 tristate "Input layer to RF switch connector"
15 depends on RFKILL && INPUT
16 help
17 Say Y here if you want kernel automatically toggle state
18 of RF switches on and off when user presses appropriate
19 button or a key on the keyboard. Without this module you
20 need a some kind of userspace application to control
21 state of the switches.
22
23 To compile this driver as a module, choose M here: the
24 module will be called rfkill-input.
25
26# LED trigger support 13# LED trigger support
27config RFKILL_LEDS 14config RFKILL_LEDS
28 bool 15 bool
29 depends on RFKILL && LEDS_TRIGGERS 16 depends on RFKILL
17 depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS
30 default y 18 default y
31 19
20config RFKILL_INPUT
21 bool "RF switch input support" if EMBEDDED
22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT
24 default y if !EMBEDDED
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index b38c430be057..662105352691 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -2,5 +2,6 @@
2# Makefile for the RF switch subsystem. 2# Makefile for the RF switch subsystem.
3# 3#
4 4
5obj-$(CONFIG_RFKILL) += rfkill.o 5rfkill-y += core.o
6obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
new file mode 100644
index 000000000000..4e68ab439d5d
--- /dev/null
+++ b/net/rfkill/core.c
@@ -0,0 +1,1205 @@
1/*
2 * Copyright (C) 2006 - 2007 Ivo van Doorn
3 * Copyright (C) 2007 Dmitry Torokhov
4 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/workqueue.h>
26#include <linux/capability.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/rfkill.h>
30#include <linux/spinlock.h>
31#include <linux/miscdevice.h>
32#include <linux/wait.h>
33#include <linux/poll.h>
34#include <linux/fs.h>
35
36#include "rfkill.h"
37
38#define POLL_INTERVAL (5 * HZ)
39
40#define RFKILL_BLOCK_HW BIT(0)
41#define RFKILL_BLOCK_SW BIT(1)
42#define RFKILL_BLOCK_SW_PREV BIT(2)
43#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
44 RFKILL_BLOCK_SW |\
45 RFKILL_BLOCK_SW_PREV)
46#define RFKILL_BLOCK_SW_SETCALL BIT(31)
47
48struct rfkill {
49 spinlock_t lock;
50
51 const char *name;
52 enum rfkill_type type;
53
54 unsigned long state;
55
56 u32 idx;
57
58 bool registered;
59 bool suspended;
60 bool persistent;
61
62 const struct rfkill_ops *ops;
63 void *data;
64
65#ifdef CONFIG_RFKILL_LEDS
66 struct led_trigger led_trigger;
67 const char *ledtrigname;
68#endif
69
70 struct device dev;
71 struct list_head node;
72
73 struct delayed_work poll_work;
74 struct work_struct uevent_work;
75 struct work_struct sync_work;
76};
77#define to_rfkill(d) container_of(d, struct rfkill, dev)
78
79struct rfkill_int_event {
80 struct list_head list;
81 struct rfkill_event ev;
82};
83
84struct rfkill_data {
85 struct list_head list;
86 struct list_head events;
87 struct mutex mtx;
88 wait_queue_head_t read_wait;
89 bool input_handler;
90};
91
92
93MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
94MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
95MODULE_DESCRIPTION("RF switch support");
96MODULE_LICENSE("GPL");
97
98
99/*
100 * The locking here should be made much smarter, we currently have
101 * a bit of a stupid situation because drivers might want to register
102 * the rfkill struct under their own lock, and take this lock during
103 * rfkill method calls -- which will cause an AB-BA deadlock situation.
104 *
105 * To fix that, we need to rework this code here to be mostly lock-free
106 * and only use the mutex for list manipulations, not to protect the
107 * various other global variables. Then we can avoid holding the mutex
108 * around driver operations, and all is happy.
109 */
110static LIST_HEAD(rfkill_list); /* list of registered rf switches */
111static DEFINE_MUTEX(rfkill_global_mutex);
112static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
113
114static unsigned int rfkill_default_state = 1;
115module_param_named(default_state, rfkill_default_state, uint, 0444);
116MODULE_PARM_DESC(default_state,
117 "Default initial state for all radio types, 0 = radio off");
118
119static struct {
120 bool cur, sav;
121} rfkill_global_states[NUM_RFKILL_TYPES];
122
123static bool rfkill_epo_lock_active;
124
125
126#ifdef CONFIG_RFKILL_LEDS
127static void rfkill_led_trigger_event(struct rfkill *rfkill)
128{
129 struct led_trigger *trigger;
130
131 if (!rfkill->registered)
132 return;
133
134 trigger = &rfkill->led_trigger;
135
136 if (rfkill->state & RFKILL_BLOCK_ANY)
137 led_trigger_event(trigger, LED_OFF);
138 else
139 led_trigger_event(trigger, LED_FULL);
140}
141
142static void rfkill_led_trigger_activate(struct led_classdev *led)
143{
144 struct rfkill *rfkill;
145
146 rfkill = container_of(led->trigger, struct rfkill, led_trigger);
147
148 rfkill_led_trigger_event(rfkill);
149}
150
151const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
152{
153 return rfkill->led_trigger.name;
154}
155EXPORT_SYMBOL(rfkill_get_led_trigger_name);
156
157void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
158{
159 BUG_ON(!rfkill);
160
161 rfkill->ledtrigname = name;
162}
163EXPORT_SYMBOL(rfkill_set_led_trigger_name);
164
165static int rfkill_led_trigger_register(struct rfkill *rfkill)
166{
167 rfkill->led_trigger.name = rfkill->ledtrigname
168 ? : dev_name(&rfkill->dev);
169 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
170 return led_trigger_register(&rfkill->led_trigger);
171}
172
173static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
174{
175 led_trigger_unregister(&rfkill->led_trigger);
176}
177#else
178static void rfkill_led_trigger_event(struct rfkill *rfkill)
179{
180}
181
182static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
183{
184 return 0;
185}
186
187static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
188{
189}
190#endif /* CONFIG_RFKILL_LEDS */
191
192static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
193 enum rfkill_operation op)
194{
195 unsigned long flags;
196
197 ev->idx = rfkill->idx;
198 ev->type = rfkill->type;
199 ev->op = op;
200
201 spin_lock_irqsave(&rfkill->lock, flags);
202 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
203 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
204 RFKILL_BLOCK_SW_PREV));
205 spin_unlock_irqrestore(&rfkill->lock, flags);
206}
207
208static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
209{
210 struct rfkill_data *data;
211 struct rfkill_int_event *ev;
212
213 list_for_each_entry(data, &rfkill_fds, list) {
214 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
215 if (!ev)
216 continue;
217 rfkill_fill_event(&ev->ev, rfkill, op);
218 mutex_lock(&data->mtx);
219 list_add_tail(&ev->list, &data->events);
220 mutex_unlock(&data->mtx);
221 wake_up_interruptible(&data->read_wait);
222 }
223}
224
225static void rfkill_event(struct rfkill *rfkill)
226{
227 if (!rfkill->registered || rfkill->suspended)
228 return;
229
230 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
231
232 /* also send event to /dev/rfkill */
233 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
234}
235
236static bool __rfkill_set_hw_state(struct rfkill *rfkill,
237 bool blocked, bool *change)
238{
239 unsigned long flags;
240 bool prev, any;
241
242 BUG_ON(!rfkill);
243
244 spin_lock_irqsave(&rfkill->lock, flags);
245 prev = !!(rfkill->state & RFKILL_BLOCK_HW);
246 if (blocked)
247 rfkill->state |= RFKILL_BLOCK_HW;
248 else
249 rfkill->state &= ~RFKILL_BLOCK_HW;
250 *change = prev != blocked;
251 any = rfkill->state & RFKILL_BLOCK_ANY;
252 spin_unlock_irqrestore(&rfkill->lock, flags);
253
254 rfkill_led_trigger_event(rfkill);
255
256 return any;
257}
258
259/**
260 * rfkill_set_block - wrapper for set_block method
261 *
262 * @rfkill: the rfkill struct to use
263 * @blocked: the new software state
264 *
265 * Calls the set_block method (when applicable) and handles notifications
266 * etc. as well.
267 */
268static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
269{
270 unsigned long flags;
271 int err;
272
273 /*
274 * Some platforms (...!) generate input events which affect the
275 * _hard_ kill state -- whenever something tries to change the
277	 * current software state, query the hardware state too.
277 */
278 if (rfkill->ops->query)
279 rfkill->ops->query(rfkill, rfkill->data);
280
281 spin_lock_irqsave(&rfkill->lock, flags);
282 if (rfkill->state & RFKILL_BLOCK_SW)
283 rfkill->state |= RFKILL_BLOCK_SW_PREV;
284 else
285 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
286
287 if (blocked)
288 rfkill->state |= RFKILL_BLOCK_SW;
289 else
290 rfkill->state &= ~RFKILL_BLOCK_SW;
291
292 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
293 spin_unlock_irqrestore(&rfkill->lock, flags);
294
295 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
296 return;
297
298 err = rfkill->ops->set_block(rfkill->data, blocked);
299
300 spin_lock_irqsave(&rfkill->lock, flags);
301 if (err) {
302 /*
303 * Failed -- reset status to _prev, this may be different
304		 * from what we set _PREV to earlier in this function
305 * if rfkill_set_sw_state was invoked.
306 */
307 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
308 rfkill->state |= RFKILL_BLOCK_SW;
309 else
310 rfkill->state &= ~RFKILL_BLOCK_SW;
311 }
312 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
313 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
314 spin_unlock_irqrestore(&rfkill->lock, flags);
315
316 rfkill_led_trigger_event(rfkill);
317 rfkill_event(rfkill);
318}
319
320#ifdef CONFIG_RFKILL_INPUT
321static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
322
323/**
324 * __rfkill_switch_all - Toggle state of all switches of given type
325 * @type: type of interfaces to be affected
326 * @blocked: the new state
327 *
328 * This function sets the state of all switches of given type,
329 * unless a specific switch is claimed by userspace (in which case,
330 * that switch is left alone) or suspended.
331 *
332 * Caller must have acquired rfkill_global_mutex.
333 */
334static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
335{
336 struct rfkill *rfkill;
337
338 rfkill_global_states[type].cur = blocked;
339 list_for_each_entry(rfkill, &rfkill_list, node) {
340 if (rfkill->type != type)
341 continue;
342
343 rfkill_set_block(rfkill, blocked);
344 }
345}
346
347/**
348 * rfkill_switch_all - Toggle state of all switches of given type
349 * @type: type of interfaces to be affected
350 * @blocked: the new state
351 *
352 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
353 * Please refer to __rfkill_switch_all() for details.
354 *
355 * Does nothing if the EPO lock is active.
356 */
357void rfkill_switch_all(enum rfkill_type type, bool blocked)
358{
359 if (atomic_read(&rfkill_input_disabled))
360 return;
361
362 mutex_lock(&rfkill_global_mutex);
363
364 if (!rfkill_epo_lock_active)
365 __rfkill_switch_all(type, blocked);
366
367 mutex_unlock(&rfkill_global_mutex);
368}
369
370/**
371 * rfkill_epo - emergency power off all transmitters
372 *
373 * This forces all rfkill devices into the software-blocked state,
374 * ignoring everything in its path but rfkill_global_mutex and each device's spinlock.
375 *
376 * The global state before the EPO is saved and can be restored later
377 * using rfkill_restore_states().
378 */
379void rfkill_epo(void)
380{
381 struct rfkill *rfkill;
382 int i;
383
384 if (atomic_read(&rfkill_input_disabled))
385 return;
386
387 mutex_lock(&rfkill_global_mutex);
388
389 rfkill_epo_lock_active = true;
390 list_for_each_entry(rfkill, &rfkill_list, node)
391 rfkill_set_block(rfkill, true);
392
393 for (i = 0; i < NUM_RFKILL_TYPES; i++) {
394 rfkill_global_states[i].sav = rfkill_global_states[i].cur;
395 rfkill_global_states[i].cur = true;
396 }
397
398 mutex_unlock(&rfkill_global_mutex);
399}
400
401/**
402 * rfkill_restore_states - restore global states
403 *
404 * Restore (and sync switches to) the global state from the
405 * states saved in rfkill_global_states. This can undo the effects of
406 * a call to rfkill_epo().
407 */
408void rfkill_restore_states(void)
409{
410 int i;
411
412 if (atomic_read(&rfkill_input_disabled))
413 return;
414
415 mutex_lock(&rfkill_global_mutex);
416
417 rfkill_epo_lock_active = false;
418 for (i = 0; i < NUM_RFKILL_TYPES; i++)
419 __rfkill_switch_all(i, rfkill_global_states[i].sav);
420 mutex_unlock(&rfkill_global_mutex);
421}
422
423/**
424 * rfkill_remove_epo_lock - unlock state changes
425 *
426 * Used by rfkill-input to manually unlock state changes when
427 * the EPO switch is deactivated.
428 */
429void rfkill_remove_epo_lock(void)
430{
431 if (atomic_read(&rfkill_input_disabled))
432 return;
433
434 mutex_lock(&rfkill_global_mutex);
435 rfkill_epo_lock_active = false;
436 mutex_unlock(&rfkill_global_mutex);
437}
438
439/**
440 * rfkill_is_epo_lock_active - returns true if EPO is active
441 *
442 * Returns 0 (false) if there is NOT an active EPO condition,
443 * and 1 (true) if there is an active EPO condition, which
444 * locks all radios in one of the BLOCKED states.
445 *
446 * Can be called in atomic context.
447 */
448bool rfkill_is_epo_lock_active(void)
449{
450 return rfkill_epo_lock_active;
451}
452
453/**
454 * rfkill_get_global_sw_state - returns global state for a type
455 * @type: the type to get the global state of
456 *
457 * Returns the current global state for a given wireless
458 * device type.
459 */
460bool rfkill_get_global_sw_state(const enum rfkill_type type)
461{
462 return rfkill_global_states[type].cur;
463}
464#endif
465
466
467bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
468{
469 bool ret, change;
470
471 ret = __rfkill_set_hw_state(rfkill, blocked, &change);
472
473 if (!rfkill->registered)
474 return ret;
475
476 if (change)
477 schedule_work(&rfkill->uevent_work);
478
479 return ret;
480}
481EXPORT_SYMBOL(rfkill_set_hw_state);
482
483static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
484{
485 u32 bit = RFKILL_BLOCK_SW;
486
487 /* if in a ops->set_block right now, use other bit */
488 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
489 bit = RFKILL_BLOCK_SW_PREV;
490
491 if (blocked)
492 rfkill->state |= bit;
493 else
494 rfkill->state &= ~bit;
495}
496
497bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
498{
499 unsigned long flags;
500 bool prev, hwblock;
501
502 BUG_ON(!rfkill);
503
504 spin_lock_irqsave(&rfkill->lock, flags);
505 prev = !!(rfkill->state & RFKILL_BLOCK_SW);
506 __rfkill_set_sw_state(rfkill, blocked);
507 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
508 blocked = blocked || hwblock;
509 spin_unlock_irqrestore(&rfkill->lock, flags);
510
511 if (!rfkill->registered) {
512 rfkill->persistent = true;
513 } else {
514 if (prev != blocked && !hwblock)
515 schedule_work(&rfkill->uevent_work);
516
517 rfkill_led_trigger_event(rfkill);
518 }
519
520 return blocked;
521}
522EXPORT_SYMBOL(rfkill_set_sw_state);
523
524void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
525{
526 unsigned long flags;
527 bool swprev, hwprev;
528
529 BUG_ON(!rfkill);
530
531 spin_lock_irqsave(&rfkill->lock, flags);
532
533 /*
534 * No need to care about prev/setblock ... this is for uevent only
535 * and that will get triggered by rfkill_set_block anyway.
536 */
537 swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
538 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
539 __rfkill_set_sw_state(rfkill, sw);
540
541 spin_unlock_irqrestore(&rfkill->lock, flags);
542
543 if (!rfkill->registered) {
544 rfkill->persistent = true;
545 } else {
546 if (swprev != sw || hwprev != hw)
547 schedule_work(&rfkill->uevent_work);
548
549 rfkill_led_trigger_event(rfkill);
550 }
551}
552EXPORT_SYMBOL(rfkill_set_states);
553
554static ssize_t rfkill_name_show(struct device *dev,
555 struct device_attribute *attr,
556 char *buf)
557{
558 struct rfkill *rfkill = to_rfkill(dev);
559
560 return sprintf(buf, "%s\n", rfkill->name);
561}
562
563static const char *rfkill_get_type_str(enum rfkill_type type)
564{
565 switch (type) {
566 case RFKILL_TYPE_WLAN:
567 return "wlan";
568 case RFKILL_TYPE_BLUETOOTH:
569 return "bluetooth";
570 case RFKILL_TYPE_UWB:
571 return "ultrawideband";
572 case RFKILL_TYPE_WIMAX:
573 return "wimax";
574 case RFKILL_TYPE_WWAN:
575 return "wwan";
576 default:
577 BUG();
578 }
579
580 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
581}
582
583static ssize_t rfkill_type_show(struct device *dev,
584 struct device_attribute *attr,
585 char *buf)
586{
587 struct rfkill *rfkill = to_rfkill(dev);
588
589 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
590}
591
592static ssize_t rfkill_idx_show(struct device *dev,
593 struct device_attribute *attr,
594 char *buf)
595{
596 struct rfkill *rfkill = to_rfkill(dev);
597
598 return sprintf(buf, "%d\n", rfkill->idx);
599}
600
601static u8 user_state_from_blocked(unsigned long state)
602{
603 if (state & RFKILL_BLOCK_HW)
604 return RFKILL_USER_STATE_HARD_BLOCKED;
605 if (state & RFKILL_BLOCK_SW)
606 return RFKILL_USER_STATE_SOFT_BLOCKED;
607
608 return RFKILL_USER_STATE_UNBLOCKED;
609}
610
611static ssize_t rfkill_state_show(struct device *dev,
612 struct device_attribute *attr,
613 char *buf)
614{
615 struct rfkill *rfkill = to_rfkill(dev);
616 unsigned long flags;
617 u32 state;
618
619 spin_lock_irqsave(&rfkill->lock, flags);
620 state = rfkill->state;
621 spin_unlock_irqrestore(&rfkill->lock, flags);
622
623 return sprintf(buf, "%d\n", user_state_from_blocked(state));
624}
625
626static ssize_t rfkill_state_store(struct device *dev,
627 struct device_attribute *attr,
628 const char *buf, size_t count)
629{
630 /*
631 * The intention was that userspace can only take control over
632 * a given device when/if rfkill-input doesn't control it due
633 * to user_claim. Since user_claim is currently unsupported,
634 * we never support changing the state from userspace -- this
635 * can be implemented again later.
636 */
637
638 return -EPERM;
639}
640
641static ssize_t rfkill_claim_show(struct device *dev,
642 struct device_attribute *attr,
643 char *buf)
644{
645 return sprintf(buf, "%d\n", 0);
646}
647
648static ssize_t rfkill_claim_store(struct device *dev,
649 struct device_attribute *attr,
650 const char *buf, size_t count)
651{
652 return -EOPNOTSUPP;
653}
654
655static struct device_attribute rfkill_dev_attrs[] = {
656 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
657 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
658 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
659 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
660 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
661 __ATTR_NULL
662};
663
664static void rfkill_release(struct device *dev)
665{
666 struct rfkill *rfkill = to_rfkill(dev);
667
668 kfree(rfkill);
669}
670
671static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
672{
673 struct rfkill *rfkill = to_rfkill(dev);
674 unsigned long flags;
675 u32 state;
676 int error;
677
678 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
679 if (error)
680 return error;
681 error = add_uevent_var(env, "RFKILL_TYPE=%s",
682 rfkill_get_type_str(rfkill->type));
683 if (error)
684 return error;
685 spin_lock_irqsave(&rfkill->lock, flags);
686 state = rfkill->state;
687 spin_unlock_irqrestore(&rfkill->lock, flags);
688 error = add_uevent_var(env, "RFKILL_STATE=%d",
689 user_state_from_blocked(state));
690 return error;
691}
692
693void rfkill_pause_polling(struct rfkill *rfkill)
694{
695 BUG_ON(!rfkill);
696
697 if (!rfkill->ops->poll)
698 return;
699
700 cancel_delayed_work_sync(&rfkill->poll_work);
701}
702EXPORT_SYMBOL(rfkill_pause_polling);
703
704void rfkill_resume_polling(struct rfkill *rfkill)
705{
706 BUG_ON(!rfkill);
707
708 if (!rfkill->ops->poll)
709 return;
710
711 schedule_work(&rfkill->poll_work.work);
712}
713EXPORT_SYMBOL(rfkill_resume_polling);
714
715static int rfkill_suspend(struct device *dev, pm_message_t state)
716{
717 struct rfkill *rfkill = to_rfkill(dev);
718
719 rfkill_pause_polling(rfkill);
720
721 rfkill->suspended = true;
722
723 return 0;
724}
725
726static int rfkill_resume(struct device *dev)
727{
728 struct rfkill *rfkill = to_rfkill(dev);
729 bool cur;
730
731 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
732 rfkill_set_block(rfkill, cur);
733
734 rfkill->suspended = false;
735
736 rfkill_resume_polling(rfkill);
737
738 return 0;
739}
740
741static struct class rfkill_class = {
742 .name = "rfkill",
743 .dev_release = rfkill_release,
744 .dev_attrs = rfkill_dev_attrs,
745 .dev_uevent = rfkill_dev_uevent,
746 .suspend = rfkill_suspend,
747 .resume = rfkill_resume,
748};
749
750bool rfkill_blocked(struct rfkill *rfkill)
751{
752 unsigned long flags;
753 u32 state;
754
755 spin_lock_irqsave(&rfkill->lock, flags);
756 state = rfkill->state;
757 spin_unlock_irqrestore(&rfkill->lock, flags);
758
759 return !!(state & RFKILL_BLOCK_ANY);
760}
761EXPORT_SYMBOL(rfkill_blocked);
762
763
764struct rfkill * __must_check rfkill_alloc(const char *name,
765 struct device *parent,
766 const enum rfkill_type type,
767 const struct rfkill_ops *ops,
768 void *ops_data)
769{
770 struct rfkill *rfkill;
771 struct device *dev;
772
773 if (WARN_ON(!ops))
774 return NULL;
775
776 if (WARN_ON(!ops->set_block))
777 return NULL;
778
779 if (WARN_ON(!name))
780 return NULL;
781
782 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
783 return NULL;
784
785 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
786 if (!rfkill)
787 return NULL;
788
789 spin_lock_init(&rfkill->lock);
790 INIT_LIST_HEAD(&rfkill->node);
791 rfkill->type = type;
792 rfkill->name = name;
793 rfkill->ops = ops;
794 rfkill->data = ops_data;
795
796 dev = &rfkill->dev;
797 dev->class = &rfkill_class;
798 dev->parent = parent;
799 device_initialize(dev);
800
801 return rfkill;
802}
803EXPORT_SYMBOL(rfkill_alloc);
804
805static void rfkill_poll(struct work_struct *work)
806{
807 struct rfkill *rfkill;
808
809 rfkill = container_of(work, struct rfkill, poll_work.work);
810
811 /*
812 * Poll hardware state -- driver will use one of the
813 * rfkill_set{,_hw,_sw}_state functions and use its
814 * return value to update the current status.
815 */
816 rfkill->ops->poll(rfkill, rfkill->data);
817
818 schedule_delayed_work(&rfkill->poll_work,
819 round_jiffies_relative(POLL_INTERVAL));
820}
821
822static void rfkill_uevent_work(struct work_struct *work)
823{
824 struct rfkill *rfkill;
825
826 rfkill = container_of(work, struct rfkill, uevent_work);
827
828 mutex_lock(&rfkill_global_mutex);
829 rfkill_event(rfkill);
830 mutex_unlock(&rfkill_global_mutex);
831}
832
833static void rfkill_sync_work(struct work_struct *work)
834{
835 struct rfkill *rfkill;
836 bool cur;
837
838 rfkill = container_of(work, struct rfkill, sync_work);
839
840 mutex_lock(&rfkill_global_mutex);
841 cur = rfkill_global_states[rfkill->type].cur;
842 rfkill_set_block(rfkill, cur);
843 mutex_unlock(&rfkill_global_mutex);
844}
845
846int __must_check rfkill_register(struct rfkill *rfkill)
847{
848 static unsigned long rfkill_no;
849 struct device *dev = &rfkill->dev;
850 int error;
851
852 BUG_ON(!rfkill);
853
854 mutex_lock(&rfkill_global_mutex);
855
856 if (rfkill->registered) {
857 error = -EALREADY;
858 goto unlock;
859 }
860
861 rfkill->idx = rfkill_no;
862 dev_set_name(dev, "rfkill%lu", rfkill_no);
863 rfkill_no++;
864
865 list_add_tail(&rfkill->node, &rfkill_list);
866
867 error = device_add(dev);
868 if (error)
869 goto remove;
870
871 error = rfkill_led_trigger_register(rfkill);
872 if (error)
873 goto devdel;
874
875 rfkill->registered = true;
876
877 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
878 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
879 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
880
881 if (rfkill->ops->poll)
882 schedule_delayed_work(&rfkill->poll_work,
883 round_jiffies_relative(POLL_INTERVAL));
884
885 if (!rfkill->persistent || rfkill_epo_lock_active) {
886 schedule_work(&rfkill->sync_work);
887 } else {
888#ifdef CONFIG_RFKILL_INPUT
889 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
890
891 if (!atomic_read(&rfkill_input_disabled))
892 __rfkill_switch_all(rfkill->type, soft_blocked);
893#endif
894 }
895
896 rfkill_send_events(rfkill, RFKILL_OP_ADD);
897
898 mutex_unlock(&rfkill_global_mutex);
899 return 0;
900
901 devdel:
902 device_del(&rfkill->dev);
903 remove:
904 list_del_init(&rfkill->node);
905 unlock:
906 mutex_unlock(&rfkill_global_mutex);
907 return error;
908}
909EXPORT_SYMBOL(rfkill_register);
910
911void rfkill_unregister(struct rfkill *rfkill)
912{
913 BUG_ON(!rfkill);
914
915 if (rfkill->ops->poll)
916 cancel_delayed_work_sync(&rfkill->poll_work);
917
918 cancel_work_sync(&rfkill->uevent_work);
919 cancel_work_sync(&rfkill->sync_work);
920
921 rfkill->registered = false;
922
923 device_del(&rfkill->dev);
924
925 mutex_lock(&rfkill_global_mutex);
926 rfkill_send_events(rfkill, RFKILL_OP_DEL);
927 list_del_init(&rfkill->node);
928 mutex_unlock(&rfkill_global_mutex);
929
930 rfkill_led_trigger_unregister(rfkill);
931}
932EXPORT_SYMBOL(rfkill_unregister);
933
934void rfkill_destroy(struct rfkill *rfkill)
935{
936 if (rfkill)
937 put_device(&rfkill->dev);
938}
939EXPORT_SYMBOL(rfkill_destroy);
940
941static int rfkill_fop_open(struct inode *inode, struct file *file)
942{
943 struct rfkill_data *data;
944 struct rfkill *rfkill;
945 struct rfkill_int_event *ev, *tmp;
946
947 data = kzalloc(sizeof(*data), GFP_KERNEL);
948 if (!data)
949 return -ENOMEM;
950
951 INIT_LIST_HEAD(&data->events);
952 mutex_init(&data->mtx);
953 init_waitqueue_head(&data->read_wait);
954
955 mutex_lock(&rfkill_global_mutex);
956 mutex_lock(&data->mtx);
957 /*
958 * start getting events from elsewhere but hold mtx to get
959 * startup events added first
960 */
961 list_add(&data->list, &rfkill_fds);
962
963 list_for_each_entry(rfkill, &rfkill_list, node) {
964 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
965 if (!ev)
966 goto free;
967 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
968 list_add_tail(&ev->list, &data->events);
969 }
970 mutex_unlock(&data->mtx);
971 mutex_unlock(&rfkill_global_mutex);
972
973 file->private_data = data;
974
975 return nonseekable_open(inode, file);
976
977 free:
978 mutex_unlock(&data->mtx);
979 mutex_unlock(&rfkill_global_mutex);
980 mutex_destroy(&data->mtx);
981 list_for_each_entry_safe(ev, tmp, &data->events, list)
982 kfree(ev);
983 kfree(data);
984 return -ENOMEM;
985}
986
987static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
988{
989 struct rfkill_data *data = file->private_data;
990 unsigned int res = POLLOUT | POLLWRNORM;
991
992 poll_wait(file, &data->read_wait, wait);
993
994 mutex_lock(&data->mtx);
995 if (!list_empty(&data->events))
996 res = POLLIN | POLLRDNORM;
997 mutex_unlock(&data->mtx);
998
999 return res;
1000}
1001
1002static bool rfkill_readable(struct rfkill_data *data)
1003{
1004 bool r;
1005
1006 mutex_lock(&data->mtx);
1007 r = !list_empty(&data->events);
1008 mutex_unlock(&data->mtx);
1009
1010 return r;
1011}
1012
1013static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
1014 size_t count, loff_t *pos)
1015{
1016 struct rfkill_data *data = file->private_data;
1017 struct rfkill_int_event *ev;
1018 unsigned long sz;
1019 int ret;
1020
1021 mutex_lock(&data->mtx);
1022
1023 while (list_empty(&data->events)) {
1024 if (file->f_flags & O_NONBLOCK) {
1025 ret = -EAGAIN;
1026 goto out;
1027 }
1028 mutex_unlock(&data->mtx);
1029 ret = wait_event_interruptible(data->read_wait,
1030 rfkill_readable(data));
1031 mutex_lock(&data->mtx);
1032
1033 if (ret)
1034 goto out;
1035 }
1036
1037 ev = list_first_entry(&data->events, struct rfkill_int_event,
1038 list);
1039
1040 sz = min_t(unsigned long, sizeof(ev->ev), count);
1041 ret = sz;
1042 if (copy_to_user(buf, &ev->ev, sz))
1043 ret = -EFAULT;
1044
1045 list_del(&ev->list);
1046 kfree(ev);
1047 out:
1048 mutex_unlock(&data->mtx);
1049 return ret;
1050}
1051
1052static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
1053 size_t count, loff_t *pos)
1054{
1055 struct rfkill *rfkill;
1056 struct rfkill_event ev;
1057
1058 /* we don't need the 'hard' variable but accept it */
1059 if (count < sizeof(ev) - 1)
1060 return -EINVAL;
1061
1062 if (copy_from_user(&ev, buf, sizeof(ev) - 1))
1063 return -EFAULT;
1064
1065 if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
1066 return -EINVAL;
1067
1068 if (ev.type >= NUM_RFKILL_TYPES)
1069 return -EINVAL;
1070
1071 mutex_lock(&rfkill_global_mutex);
1072
1073 if (ev.op == RFKILL_OP_CHANGE_ALL) {
1074 if (ev.type == RFKILL_TYPE_ALL) {
1075 enum rfkill_type i;
1076 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1077 rfkill_global_states[i].cur = ev.soft;
1078 } else {
1079 rfkill_global_states[ev.type].cur = ev.soft;
1080 }
1081 }
1082
1083 list_for_each_entry(rfkill, &rfkill_list, node) {
1084 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
1085 continue;
1086
1087 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
1088 continue;
1089
1090 rfkill_set_block(rfkill, ev.soft);
1091 }
1092 mutex_unlock(&rfkill_global_mutex);
1093
1094 return count;
1095}
1096
1097static int rfkill_fop_release(struct inode *inode, struct file *file)
1098{
1099 struct rfkill_data *data = file->private_data;
1100 struct rfkill_int_event *ev, *tmp;
1101
1102 mutex_lock(&rfkill_global_mutex);
1103 list_del(&data->list);
1104 mutex_unlock(&rfkill_global_mutex);
1105
1106 mutex_destroy(&data->mtx);
1107 list_for_each_entry_safe(ev, tmp, &data->events, list)
1108 kfree(ev);
1109
1110#ifdef CONFIG_RFKILL_INPUT
1111 if (data->input_handler)
1112 if (atomic_dec_return(&rfkill_input_disabled) == 0)
1113 printk(KERN_DEBUG "rfkill: input handler enabled\n");
1114#endif
1115
1116 kfree(data);
1117
1118 return 0;
1119}
1120
1121#ifdef CONFIG_RFKILL_INPUT
1122static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1123 unsigned long arg)
1124{
1125 struct rfkill_data *data = file->private_data;
1126
1127 if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
1128 return -ENOSYS;
1129
1130 if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
1131 return -ENOSYS;
1132
1133 mutex_lock(&data->mtx);
1134
1135 if (!data->input_handler) {
1136 if (atomic_inc_return(&rfkill_input_disabled) == 1)
1137 printk(KERN_DEBUG "rfkill: input handler disabled\n");
1138 data->input_handler = true;
1139 }
1140
1141 mutex_unlock(&data->mtx);
1142
1143 return 0;
1144}
1145#endif
1146
1147static const struct file_operations rfkill_fops = {
1148 .open = rfkill_fop_open,
1149 .read = rfkill_fop_read,
1150 .write = rfkill_fop_write,
1151 .poll = rfkill_fop_poll,
1152 .release = rfkill_fop_release,
1153#ifdef CONFIG_RFKILL_INPUT
1154 .unlocked_ioctl = rfkill_fop_ioctl,
1155 .compat_ioctl = rfkill_fop_ioctl,
1156#endif
1157};
1158
1159static struct miscdevice rfkill_miscdev = {
1160 .name = "rfkill",
1161 .fops = &rfkill_fops,
1162 .minor = MISC_DYNAMIC_MINOR,
1163};
1164
1165static int __init rfkill_init(void)
1166{
1167 int error;
1168 int i;
1169
1170 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1171 rfkill_global_states[i].cur = !rfkill_default_state;
1172
1173 error = class_register(&rfkill_class);
1174 if (error)
1175 goto out;
1176
1177 error = misc_register(&rfkill_miscdev);
1178 if (error) {
1179 class_unregister(&rfkill_class);
1180 goto out;
1181 }
1182
1183#ifdef CONFIG_RFKILL_INPUT
1184 error = rfkill_handler_init();
1185 if (error) {
1186 misc_deregister(&rfkill_miscdev);
1187 class_unregister(&rfkill_class);
1188 goto out;
1189 }
1190#endif
1191
1192 out:
1193 return error;
1194}
1195subsys_initcall(rfkill_init);
1196
1197static void __exit rfkill_exit(void)
1198{
1199#ifdef CONFIG_RFKILL_INPUT
1200 rfkill_handler_exit();
1201#endif
1202 misc_deregister(&rfkill_miscdev);
1203 class_unregister(&rfkill_class);
1204}
1205module_exit(rfkill_exit);
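
For orientation, a hypothetical minimal driver against the API the new core.c exports above; every my_radio_* name is invented for illustration, while the rfkill_* calls and the rfkill_ops layout come straight from the listing:

#include <linux/rfkill.h>
#include <linux/device.h>

struct my_radio {
	struct rfkill *rfkill;
	/* ... hardware state ... */
};

static int my_radio_set_block(void *data, bool blocked)
{
	/* struct my_radio *radio = data; cut or restore TX power here */
	return 0;	/* non-zero makes core.c revert the SW state bits */
}

static const struct rfkill_ops my_radio_rfkill_ops = {
	.set_block = my_radio_set_block,	/* mandatory; rfkill_alloc() WARNs without it */
};

static int my_radio_probe(struct device *parent, struct my_radio *radio)
{
	int err;

	radio->rfkill = rfkill_alloc("my-radio", parent, RFKILL_TYPE_WLAN,
				     &my_radio_rfkill_ops, radio);
	if (!radio->rfkill)
		return -ENOMEM;

	err = rfkill_register(radio->rfkill);
	if (err) {
		rfkill_destroy(radio->rfkill);
		return err;
	}
	return 0;
}

/* interrupt path: report a hardware kill-switch flip to the core */
static void my_radio_hw_switch(struct my_radio *radio, bool blocked)
{
	rfkill_set_hw_state(radio->rfkill, blocked);
}

static void my_radio_remove(struct my_radio *radio)
{
	rfkill_unregister(radio->rfkill);
	rfkill_destroy(radio->rfkill);
}
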
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
new file mode 100644
index 000000000000..a7295ad5f9cb
--- /dev/null
+++ b/net/rfkill/input.c
@@ -0,0 +1,342 @@
1/*
2 * Input layer to RF Kill interface connector
3 *
4 * Copyright (c) 2007 Dmitry Torokhov
5 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
10 *
11 * If you ever run into a situation in which you have a SW_ type rfkill
12 * input device, then you can revive code that was removed in the patch
13 * "rfkill-input: remove unused code".
14 */
15
16#include <linux/input.h>
17#include <linux/slab.h>
18#include <linux/workqueue.h>
19#include <linux/init.h>
20#include <linux/rfkill.h>
21#include <linux/sched.h>
22
23#include "rfkill.h"
24
25enum rfkill_input_master_mode {
26 RFKILL_INPUT_MASTER_UNLOCK = 0,
27 RFKILL_INPUT_MASTER_RESTORE = 1,
28 RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
29 NUM_RFKILL_INPUT_MASTER_MODES
30};
31
32/* Delay (in ms) between consecutive switch ops */
33#define RFKILL_OPS_DELAY 200
34
35static enum rfkill_input_master_mode rfkill_master_switch_mode =
36 RFKILL_INPUT_MASTER_UNBLOCKALL;
37module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
38MODULE_PARM_DESC(master_switch_mode,
39 "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
40
41static spinlock_t rfkill_op_lock;
42static bool rfkill_op_pending;
43static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
44static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
45
46enum rfkill_sched_op {
47 RFKILL_GLOBAL_OP_EPO = 0,
48 RFKILL_GLOBAL_OP_RESTORE,
49 RFKILL_GLOBAL_OP_UNLOCK,
50 RFKILL_GLOBAL_OP_UNBLOCK,
51};
52
53static enum rfkill_sched_op rfkill_master_switch_op;
54static enum rfkill_sched_op rfkill_op;
55
56static void __rfkill_handle_global_op(enum rfkill_sched_op op)
57{
58 unsigned int i;
59
60 switch (op) {
61 case RFKILL_GLOBAL_OP_EPO:
62 rfkill_epo();
63 break;
64 case RFKILL_GLOBAL_OP_RESTORE:
65 rfkill_restore_states();
66 break;
67 case RFKILL_GLOBAL_OP_UNLOCK:
68 rfkill_remove_epo_lock();
69 break;
70 case RFKILL_GLOBAL_OP_UNBLOCK:
71 rfkill_remove_epo_lock();
72 for (i = 0; i < NUM_RFKILL_TYPES; i++)
73 rfkill_switch_all(i, false);
74 break;
75 default:
76 /* memory corruption or bug, fail safely */
77 rfkill_epo();
78 WARN(1, "Unknown requested operation %d! "
79 "rfkill Emergency Power Off activated\n",
80 op);
81 }
82}
83
84static void __rfkill_handle_normal_op(const enum rfkill_type type,
85 const bool complement)
86{
87 bool blocked;
88
89 blocked = rfkill_get_global_sw_state(type);
90 if (complement)
91 blocked = !blocked;
92
93 rfkill_switch_all(type, blocked);
94}
95
96static void rfkill_op_handler(struct work_struct *work)
97{
98 unsigned int i;
99 bool c;
100
101 spin_lock_irq(&rfkill_op_lock);
102 do {
103 if (rfkill_op_pending) {
104 enum rfkill_sched_op op = rfkill_op;
105 rfkill_op_pending = false;
106 memset(rfkill_sw_pending, 0,
107 sizeof(rfkill_sw_pending));
108 spin_unlock_irq(&rfkill_op_lock);
109
110 __rfkill_handle_global_op(op);
111
112 spin_lock_irq(&rfkill_op_lock);
113
114 /*
115 * handle global ops first -- during unlocked period
116 * we might have gotten a new global op.
117 */
118 if (rfkill_op_pending)
119 continue;
120 }
121
122 if (rfkill_is_epo_lock_active())
123 continue;
124
125 for (i = 0; i < NUM_RFKILL_TYPES; i++) {
126 if (__test_and_clear_bit(i, rfkill_sw_pending)) {
127 c = __test_and_clear_bit(i, rfkill_sw_state);
128 spin_unlock_irq(&rfkill_op_lock);
129
130 __rfkill_handle_normal_op(i, c);
131
132 spin_lock_irq(&rfkill_op_lock);
133 }
134 }
135 } while (rfkill_op_pending);
136 spin_unlock_irq(&rfkill_op_lock);
137}
138
139static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
140static unsigned long rfkill_last_scheduled;
141
142static unsigned long rfkill_ratelimit(const unsigned long last)
143{
144 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
145 return (time_after(jiffies, last + delay)) ? 0 : delay;
146}
147
148static void rfkill_schedule_ratelimited(void)
149{
150 if (delayed_work_pending(&rfkill_op_work))
151 return;
152 schedule_delayed_work(&rfkill_op_work,
153 rfkill_ratelimit(rfkill_last_scheduled));
154 rfkill_last_scheduled = jiffies;
155}
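
[Editor's note] The limiter just above is worth calling out: rfkill_ratelimit() never returns a partial delay -- it is either 0 (the quiet period has already elapsed) or the full RFKILL_OPS_DELAY, and rfkill_schedule_ratelimited() coalesces callers while a work item is already queued. A minimal standalone sketch of the same pattern, assuming only the stock jiffies/workqueue helpers; the mydrv_* names are hypothetical:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	static void mydrv_work_fn(struct work_struct *work)
	{
		/* drain whatever state was marked pending */
	}

	static DECLARE_DELAYED_WORK(mydrv_work, mydrv_work_fn);
	static unsigned long mydrv_last_scheduled;

	/* 0 once the quiet period has elapsed, else the full delay -- never partial */
	static unsigned long mydrv_ratelimit(unsigned long last)
	{
		const unsigned long delay = msecs_to_jiffies(200);

		return time_after(jiffies, last + delay) ? 0 : delay;
	}

	static void mydrv_schedule_ratelimited(void)
	{
		if (delayed_work_pending(&mydrv_work))
			return;		/* coalesce: at most one queued instance */
		schedule_delayed_work(&mydrv_work,
				      mydrv_ratelimit(mydrv_last_scheduled));
		mydrv_last_scheduled = jiffies;
	}
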
156
157static void rfkill_schedule_global_op(enum rfkill_sched_op op)
158{
159 unsigned long flags;
160
161 spin_lock_irqsave(&rfkill_op_lock, flags);
162 rfkill_op = op;
163 rfkill_op_pending = true;
164 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
165 /* bypass the limiter for EPO */
166 cancel_delayed_work(&rfkill_op_work);
167 schedule_delayed_work(&rfkill_op_work, 0);
168 rfkill_last_scheduled = jiffies;
169 } else
170 rfkill_schedule_ratelimited();
171 spin_unlock_irqrestore(&rfkill_op_lock, flags);
172}
173
174static void rfkill_schedule_toggle(enum rfkill_type type)
175{
176 unsigned long flags;
177
178 if (rfkill_is_epo_lock_active())
179 return;
180
181 spin_lock_irqsave(&rfkill_op_lock, flags);
182 if (!rfkill_op_pending) {
183 __set_bit(type, rfkill_sw_pending);
184 __change_bit(type, rfkill_sw_state);
185 rfkill_schedule_ratelimited();
186 }
187 spin_unlock_irqrestore(&rfkill_op_lock, flags);
188}
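
[Editor's note] rfkill_schedule_toggle() above coalesces key presses by parity: each press flips the type's bit in rfkill_sw_state, so two presses that land before the worker runs cancel out. A tiny runnable user-space sketch of the idea (plain C, all names hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	static bool pending, complement;

	static void press(void)			/* event-handler side */
	{
		pending = true;
		complement = !complement;	/* parity of presses since last run */
	}

	static void worker(bool *blocked)	/* deferred-work side */
	{
		if (!pending)
			return;
		pending = false;
		if (complement)
			*blocked = !*blocked;
		complement = false;
	}

	int main(void)
	{
		bool blocked = false;

		press();
		press();			/* two quick presses cancel out */
		worker(&blocked);
		printf("blocked=%d\n", blocked);	/* prints blocked=0 */
		return 0;
	}
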
189
190static void rfkill_schedule_evsw_rfkillall(int state)
191{
192 if (state)
193 rfkill_schedule_global_op(rfkill_master_switch_op);
194 else
195 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
196}
197
198static void rfkill_event(struct input_handle *handle, unsigned int type,
199 unsigned int code, int data)
200{
201 if (type == EV_KEY && data == 1) {
202 switch (code) {
203 case KEY_WLAN:
204 rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
205 break;
206 case KEY_BLUETOOTH:
207 rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
208 break;
209 case KEY_UWB:
210 rfkill_schedule_toggle(RFKILL_TYPE_UWB);
211 break;
212 case KEY_WIMAX:
213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
214 break;
215 }
216 } else if (type == EV_SW && code == SW_RFKILL_ALL)
217 rfkill_schedule_evsw_rfkillall(data);
218}
219
220static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
221 const struct input_device_id *id)
222{
223 struct input_handle *handle;
224 int error;
225
226 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
227 if (!handle)
228 return -ENOMEM;
229
230 handle->dev = dev;
231 handle->handler = handler;
232 handle->name = "rfkill";
233
234 /* causes rfkill_start() to be called */
235 error = input_register_handle(handle);
236 if (error)
237 goto err_free_handle;
238
239 error = input_open_device(handle);
240 if (error)
241 goto err_unregister_handle;
242
243 return 0;
244
245 err_unregister_handle:
246 input_unregister_handle(handle);
247 err_free_handle:
248 kfree(handle);
249 return error;
250}
251
252static void rfkill_start(struct input_handle *handle)
253{
254 /*
255	 * Take event_lock to guard against configuration changes; we
256	 * can deal with concurrency with rfkill_event() just fine, and
257	 * event_lock also keeps it from running concurrently with us.
258 */
259 spin_lock_irq(&handle->dev->event_lock);
260
261 if (test_bit(EV_SW, handle->dev->evbit) &&
262 test_bit(SW_RFKILL_ALL, handle->dev->swbit))
263 rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
264 handle->dev->sw));
265
266 spin_unlock_irq(&handle->dev->event_lock);
267}
268
269static void rfkill_disconnect(struct input_handle *handle)
270{
271 input_close_device(handle);
272 input_unregister_handle(handle);
273 kfree(handle);
274}
275
276static const struct input_device_id rfkill_ids[] = {
277 {
278 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
279 .evbit = { BIT_MASK(EV_KEY) },
280 .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
281 },
282 {
283 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
284 .evbit = { BIT_MASK(EV_KEY) },
285 .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
286 },
287 {
288 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
289 .evbit = { BIT_MASK(EV_KEY) },
290 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
291 },
292 {
293 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
294 .evbit = { BIT_MASK(EV_KEY) },
295 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
296 },
297 {
298 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
299 .evbit = { BIT(EV_SW) },
300 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
301 },
302 { }
303};
304
305static struct input_handler rfkill_handler = {
306 .name = "rfkill",
307 .event = rfkill_event,
308 .connect = rfkill_connect,
309 .start = rfkill_start,
310 .disconnect = rfkill_disconnect,
311 .id_table = rfkill_ids,
312};
313
314int __init rfkill_handler_init(void)
315{
316 switch (rfkill_master_switch_mode) {
317 case RFKILL_INPUT_MASTER_UNBLOCKALL:
318 rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
319 break;
320 case RFKILL_INPUT_MASTER_RESTORE:
321 rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
322 break;
323 case RFKILL_INPUT_MASTER_UNLOCK:
324 rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
325 break;
326 default:
327 return -EINVAL;
328 }
329
330 spin_lock_init(&rfkill_op_lock);
331
332 /* Avoid delay at first schedule */
333 rfkill_last_scheduled =
334 jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
335 return input_register_handler(&rfkill_handler);
336}
337
338void __exit rfkill_handler_exit(void)
339{
340 input_unregister_handler(&rfkill_handler);
341 cancel_delayed_work_sync(&rfkill_op_work);
342}
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
deleted file mode 100644
index 84efde97c5a7..000000000000
--- a/net/rfkill/rfkill-input.c
+++ /dev/null
@@ -1,459 +0,0 @@
1/*
2 * Input layer to RF Kill interface connector
3 *
4 * Copyright (c) 2007 Dmitry Torokhov
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/input.h>
15#include <linux/slab.h>
16#include <linux/workqueue.h>
17#include <linux/init.h>
18#include <linux/rfkill.h>
19#include <linux/sched.h>
20
21#include "rfkill-input.h"
22
23MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
24MODULE_DESCRIPTION("Input layer to RF switch connector");
25MODULE_LICENSE("GPL");
26
27enum rfkill_input_master_mode {
28 RFKILL_INPUT_MASTER_DONOTHING = 0,
29 RFKILL_INPUT_MASTER_RESTORE = 1,
30 RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
31 RFKILL_INPUT_MASTER_MAX, /* marker */
32};
33
34/* Delay (in ms) between consecutive switch ops */
35#define RFKILL_OPS_DELAY 200
36
37static enum rfkill_input_master_mode rfkill_master_switch_mode =
38 RFKILL_INPUT_MASTER_UNBLOCKALL;
39module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
40MODULE_PARM_DESC(master_switch_mode,
41 "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
42
43enum rfkill_global_sched_op {
44 RFKILL_GLOBAL_OP_EPO = 0,
45 RFKILL_GLOBAL_OP_RESTORE,
46 RFKILL_GLOBAL_OP_UNLOCK,
47 RFKILL_GLOBAL_OP_UNBLOCK,
48};
49
50/*
51 * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
52 * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc events is ever
53 * added, that code will become necessary.
54 */
55
56struct rfkill_task {
57 struct delayed_work dwork;
58
59 /* ensures that task is serialized */
60 struct mutex mutex;
61
62 /* protects everything below */
63 spinlock_t lock;
64
65 /* pending regular switch operations (1=pending) */
66 unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
67
68#ifdef RFKILL_NEED_SWSET
69 /* set operation pending (1=pending) */
70 unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
71
72 /* desired state for pending set operation (1=unblock) */
73 unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
74#endif
75
76 /* should the state be complemented (1=yes) */
77 unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
78
79 bool global_op_pending;
80 enum rfkill_global_sched_op op;
81
82 /* last time it was scheduled */
83 unsigned long last_scheduled;
84};
85
86static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
87{
88 unsigned int i;
89
90 switch (op) {
91 case RFKILL_GLOBAL_OP_EPO:
92 rfkill_epo();
93 break;
94 case RFKILL_GLOBAL_OP_RESTORE:
95 rfkill_restore_states();
96 break;
97 case RFKILL_GLOBAL_OP_UNLOCK:
98 rfkill_remove_epo_lock();
99 break;
100 case RFKILL_GLOBAL_OP_UNBLOCK:
101 rfkill_remove_epo_lock();
102 for (i = 0; i < RFKILL_TYPE_MAX; i++)
103 rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
104 break;
105 default:
106 /* memory corruption or bug, fail safely */
107 rfkill_epo();
108 WARN(1, "Unknown requested operation %d! "
109 "rfkill Emergency Power Off activated\n",
110 op);
111 }
112}
113
114#ifdef RFKILL_NEED_SWSET
115static void __rfkill_handle_normal_op(const enum rfkill_type type,
116 const bool sp, const bool s, const bool c)
117{
118 enum rfkill_state state;
119
120 if (sp)
121 state = (s) ? RFKILL_STATE_UNBLOCKED :
122 RFKILL_STATE_SOFT_BLOCKED;
123 else
124 state = rfkill_get_global_state(type);
125
126 if (c)
127 state = rfkill_state_complement(state);
128
129 rfkill_switch_all(type, state);
130}
131#else
132static void __rfkill_handle_normal_op(const enum rfkill_type type,
133 const bool c)
134{
135 enum rfkill_state state;
136
137 state = rfkill_get_global_state(type);
138 if (c)
139 state = rfkill_state_complement(state);
140
141 rfkill_switch_all(type, state);
142}
143#endif
144
145static void rfkill_task_handler(struct work_struct *work)
146{
147 struct rfkill_task *task = container_of(work,
148 struct rfkill_task, dwork.work);
149 bool doit = true;
150
151 mutex_lock(&task->mutex);
152
153 spin_lock_irq(&task->lock);
154 while (doit) {
155 if (task->global_op_pending) {
156 enum rfkill_global_sched_op op = task->op;
157 task->global_op_pending = false;
158 memset(task->sw_pending, 0, sizeof(task->sw_pending));
159 spin_unlock_irq(&task->lock);
160
161 __rfkill_handle_global_op(op);
162
163 /* make sure we do at least one pass with
164 * !task->global_op_pending */
165 spin_lock_irq(&task->lock);
166 continue;
167 } else if (!rfkill_is_epo_lock_active()) {
168 unsigned int i = 0;
169
170 while (!task->global_op_pending &&
171 i < RFKILL_TYPE_MAX) {
172 if (test_and_clear_bit(i, task->sw_pending)) {
173 bool c;
174#ifdef RFKILL_NEED_SWSET
175 bool sp, s;
176 sp = test_and_clear_bit(i,
177 task->sw_setpending);
178 s = test_bit(i, task->sw_newstate);
179#endif
180 c = test_and_clear_bit(i,
181 task->sw_togglestate);
182 spin_unlock_irq(&task->lock);
183
184#ifdef RFKILL_NEED_SWSET
185 __rfkill_handle_normal_op(i, sp, s, c);
186#else
187 __rfkill_handle_normal_op(i, c);
188#endif
189
190 spin_lock_irq(&task->lock);
191 }
192 i++;
193 }
194 }
195 doit = task->global_op_pending;
196 }
197 spin_unlock_irq(&task->lock);
198
199 mutex_unlock(&task->mutex);
200}
201
202static struct rfkill_task rfkill_task = {
203 .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
204 rfkill_task_handler),
205 .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
206 .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
207};
208
209static unsigned long rfkill_ratelimit(const unsigned long last)
210{
211 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
212 return (time_after(jiffies, last + delay)) ? 0 : delay;
213}
214
215static void rfkill_schedule_ratelimited(void)
216{
217 if (!delayed_work_pending(&rfkill_task.dwork)) {
218 schedule_delayed_work(&rfkill_task.dwork,
219 rfkill_ratelimit(rfkill_task.last_scheduled));
220 rfkill_task.last_scheduled = jiffies;
221 }
222}
223
224static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
225{
226 unsigned long flags;
227
228 spin_lock_irqsave(&rfkill_task.lock, flags);
229 rfkill_task.op = op;
230 rfkill_task.global_op_pending = true;
231 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
232 /* bypass the limiter for EPO */
233 cancel_delayed_work(&rfkill_task.dwork);
234 schedule_delayed_work(&rfkill_task.dwork, 0);
235 rfkill_task.last_scheduled = jiffies;
236 } else
237 rfkill_schedule_ratelimited();
238 spin_unlock_irqrestore(&rfkill_task.lock, flags);
239}
240
241#ifdef RFKILL_NEED_SWSET
242/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
243
244static void rfkill_schedule_set(enum rfkill_type type,
245 enum rfkill_state desired_state)
246{
247 unsigned long flags;
248
249 if (rfkill_is_epo_lock_active())
250 return;
251
252 spin_lock_irqsave(&rfkill_task.lock, flags);
253 if (!rfkill_task.global_op_pending) {
254 set_bit(type, rfkill_task.sw_pending);
255 set_bit(type, rfkill_task.sw_setpending);
256 clear_bit(type, rfkill_task.sw_togglestate);
257 if (desired_state)
258 set_bit(type, rfkill_task.sw_newstate);
259 else
260 clear_bit(type, rfkill_task.sw_newstate);
261 rfkill_schedule_ratelimited();
262 }
263 spin_unlock_irqrestore(&rfkill_task.lock, flags);
264}
265#endif
266
267static void rfkill_schedule_toggle(enum rfkill_type type)
268{
269 unsigned long flags;
270
271 if (rfkill_is_epo_lock_active())
272 return;
273
274 spin_lock_irqsave(&rfkill_task.lock, flags);
275 if (!rfkill_task.global_op_pending) {
276 set_bit(type, rfkill_task.sw_pending);
277 change_bit(type, rfkill_task.sw_togglestate);
278 rfkill_schedule_ratelimited();
279 }
280 spin_unlock_irqrestore(&rfkill_task.lock, flags);
281}
282
283static void rfkill_schedule_evsw_rfkillall(int state)
284{
285 if (state) {
286 switch (rfkill_master_switch_mode) {
287 case RFKILL_INPUT_MASTER_UNBLOCKALL:
288 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
289 break;
290 case RFKILL_INPUT_MASTER_RESTORE:
291 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
292 break;
293 case RFKILL_INPUT_MASTER_DONOTHING:
294 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
295 break;
296 default:
297 /* memory corruption or driver bug! fail safely */
298 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
299 WARN(1, "Unknown rfkill_master_switch_mode (%d), "
300 "driver bug or memory corruption detected!\n",
301 rfkill_master_switch_mode);
302 break;
303 }
304 } else
305 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
306}
307
308static void rfkill_event(struct input_handle *handle, unsigned int type,
309 unsigned int code, int data)
310{
311 if (type == EV_KEY && data == 1) {
312 enum rfkill_type t;
313
314 switch (code) {
315 case KEY_WLAN:
316 t = RFKILL_TYPE_WLAN;
317 break;
318 case KEY_BLUETOOTH:
319 t = RFKILL_TYPE_BLUETOOTH;
320 break;
321 case KEY_UWB:
322 t = RFKILL_TYPE_UWB;
323 break;
324 case KEY_WIMAX:
325 t = RFKILL_TYPE_WIMAX;
326 break;
327 default:
328 return;
329 }
330 rfkill_schedule_toggle(t);
331 return;
332 } else if (type == EV_SW) {
333 switch (code) {
334 case SW_RFKILL_ALL:
335 rfkill_schedule_evsw_rfkillall(data);
336 return;
337 default:
338 return;
339 }
340 }
341}
342
343static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
344 const struct input_device_id *id)
345{
346 struct input_handle *handle;
347 int error;
348
349 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
350 if (!handle)
351 return -ENOMEM;
352
353 handle->dev = dev;
354 handle->handler = handler;
355 handle->name = "rfkill";
356
357 /* causes rfkill_start() to be called */
358 error = input_register_handle(handle);
359 if (error)
360 goto err_free_handle;
361
362 error = input_open_device(handle);
363 if (error)
364 goto err_unregister_handle;
365
366 return 0;
367
368 err_unregister_handle:
369 input_unregister_handle(handle);
370 err_free_handle:
371 kfree(handle);
372 return error;
373}
374
375static void rfkill_start(struct input_handle *handle)
376{
377	/* Take event_lock to guard against configuration changes; we
378	 * can deal with concurrency with rfkill_event() just fine, and
379	 * event_lock also keeps it from running concurrently with us. */
380 spin_lock_irq(&handle->dev->event_lock);
381
382 if (test_bit(EV_SW, handle->dev->evbit)) {
383 if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
384 rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
385 handle->dev->sw));
386 /* add resync for further EV_SW events here */
387 }
388
389 spin_unlock_irq(&handle->dev->event_lock);
390}
391
392static void rfkill_disconnect(struct input_handle *handle)
393{
394 input_close_device(handle);
395 input_unregister_handle(handle);
396 kfree(handle);
397}
398
399static const struct input_device_id rfkill_ids[] = {
400 {
401 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
402 .evbit = { BIT_MASK(EV_KEY) },
403 .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
404 },
405 {
406 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
407 .evbit = { BIT_MASK(EV_KEY) },
408 .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
409 },
410 {
411 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
412 .evbit = { BIT_MASK(EV_KEY) },
413 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
414 },
415 {
416 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
417 .evbit = { BIT_MASK(EV_KEY) },
418 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
419 },
420 {
421 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
422 .evbit = { BIT(EV_SW) },
423 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
424 },
425 { }
426};
427
428static struct input_handler rfkill_handler = {
429 .event = rfkill_event,
430 .connect = rfkill_connect,
431 .disconnect = rfkill_disconnect,
432 .start = rfkill_start,
433 .name = "rfkill",
434 .id_table = rfkill_ids,
435};
436
437static int __init rfkill_handler_init(void)
438{
439 if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
440 return -EINVAL;
441
442 /*
443 * The penalty for not doing this is a possible RFKILL_OPS_DELAY delay
444 * at the first use. Acceptable, but if we can avoid it, why not?
445 */
446 rfkill_task.last_scheduled =
447 jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
448 return input_register_handler(&rfkill_handler);
449}
450
451static void __exit rfkill_handler_exit(void)
452{
453 input_unregister_handler(&rfkill_handler);
454 cancel_delayed_work_sync(&rfkill_task.dwork);
455 rfkill_remove_epo_lock();
456}
457
458module_init(rfkill_handler_init);
459module_exit(rfkill_handler_exit);
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
deleted file mode 100644
index 3eaa39403c13..000000000000
--- a/net/rfkill/rfkill.c
+++ /dev/null
@@ -1,882 +0,0 @@
1/*
2 * Copyright (C) 2006 - 2007 Ivo van Doorn
3 * Copyright (C) 2007 Dmitry Torokhov
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/workqueue.h>
25#include <linux/capability.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/rfkill.h>
29
30/* Get declaration of rfkill_switch_all() to shut up sparse. */
31#include "rfkill-input.h"
32
33
34MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
35MODULE_VERSION("1.0");
36MODULE_DESCRIPTION("RF switch support");
37MODULE_LICENSE("GPL");
38
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_global_mutex);
41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off");
46
47struct rfkill_gsw_state {
48 enum rfkill_state current_state;
49 enum rfkill_state default_state;
50};
51
52static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
53static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
54static bool rfkill_epo_lock_active;
55
56
57#ifdef CONFIG_RFKILL_LEDS
58static void rfkill_led_trigger(struct rfkill *rfkill,
59 enum rfkill_state state)
60{
61 struct led_trigger *led = &rfkill->led_trigger;
62
63 if (!led->name)
64 return;
65 if (state != RFKILL_STATE_UNBLOCKED)
66 led_trigger_event(led, LED_OFF);
67 else
68 led_trigger_event(led, LED_FULL);
69}
70
71static void rfkill_led_trigger_activate(struct led_classdev *led)
72{
73 struct rfkill *rfkill = container_of(led->trigger,
74 struct rfkill, led_trigger);
75
76 rfkill_led_trigger(rfkill, rfkill->state);
77}
78#endif /* CONFIG_RFKILL_LEDS */
79
80static void rfkill_uevent(struct rfkill *rfkill)
81{
82 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
83}
84
85static void update_rfkill_state(struct rfkill *rfkill)
86{
87 enum rfkill_state newstate, oldstate;
88
89 if (rfkill->get_state) {
90 mutex_lock(&rfkill->mutex);
91 if (!rfkill->get_state(rfkill->data, &newstate)) {
92 oldstate = rfkill->state;
93 rfkill->state = newstate;
94 if (oldstate != newstate)
95 rfkill_uevent(rfkill);
96 }
97 mutex_unlock(&rfkill->mutex);
98 }
99}
100
101/**
102 * rfkill_toggle_radio - wrapper for toggle_radio hook
103 * @rfkill: the rfkill struct to use
104 * @force: calls toggle_radio even if cache says it is not needed,
105 *	and also makes sure state notifications are sent
106 *	even if the state didn't change
107 * @state: the new state to call toggle_radio() with
108 *
109 * Calls rfkill->toggle_radio, enforcing the API for toggle_radio
110 * calls and handling all the red tape such as issuing notifications
111 * if the call is successful.
112 *
113 * Suspended devices are not touched at all, and -EAGAIN is returned.
114 *
115 * Note that the @force parameter cannot override a (possibly cached)
116 * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
117 * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
118 * rfkill_force_state(), so the cache either is bypassed or valid.
119 *
120 * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
121 * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
122 * give the driver a hint that it should double-BLOCK the transmitter.
123 *
124 * Caller must have acquired rfkill->mutex.
125 */
126static int rfkill_toggle_radio(struct rfkill *rfkill,
127 enum rfkill_state state,
128 int force)
129{
130 int retval = 0;
131 enum rfkill_state oldstate, newstate;
132
133 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
134 return -EBUSY;
135
136 oldstate = rfkill->state;
137
138 if (rfkill->get_state && !force &&
139 !rfkill->get_state(rfkill->data, &newstate))
140 rfkill->state = newstate;
141
142 switch (state) {
143 case RFKILL_STATE_HARD_BLOCKED:
144 /* typically happens when refreshing hardware state,
145 * such as on resume */
146 state = RFKILL_STATE_SOFT_BLOCKED;
147 break;
148 case RFKILL_STATE_UNBLOCKED:
149 /* force can't override this, only rfkill_force_state() can */
150 if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
151 return -EPERM;
152 break;
153 case RFKILL_STATE_SOFT_BLOCKED:
154 /* nothing to do, we want to give drivers the hint to double
155 * BLOCK even a transmitter that is already in state
156 * RFKILL_STATE_HARD_BLOCKED */
157 break;
158 default:
159 WARN(1, KERN_WARNING
160 "rfkill: illegal state %d passed as parameter "
161 "to rfkill_toggle_radio\n", state);
162 return -EINVAL;
163 }
164
165 if (force || state != rfkill->state) {
166 retval = rfkill->toggle_radio(rfkill->data, state);
167 /* never allow a HARD->SOFT downgrade! */
168 if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
169 rfkill->state = state;
170 }
171
172 if (force || rfkill->state != oldstate)
173 rfkill_uevent(rfkill);
174
175 return retval;
176}
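
[Editor's note] The contract documented above is easiest to see from the driver side. A minimal sketch of a toggle_radio hook under this (old) API; the mydrv_* names and the mydrv_set_tx() hardware hook are hypothetical:

	#include <linux/types.h>
	#include <linux/rfkill.h>

	struct mydrv_priv {
		/* driver-private state */
	};

	/* hypothetical hardware hook */
	static int mydrv_set_tx(struct mydrv_priv *priv, bool enable)
	{
		return 0;
	}

	static int mydrv_toggle_radio(void *data, enum rfkill_state state)
	{
		struct mydrv_priv *priv = data;

		/* SOFT_BLOCKED can arrive even while hard-blocked: block anyway */
		return mydrv_set_tx(priv, state == RFKILL_STATE_UNBLOCKED);
	}
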
177
178/**
179 * __rfkill_switch_all - Toggle state of all switches of given type
180 * @type: type of interfaces to be affected
181 * @state: the new state
182 *
183 * This function toggles the state of all switches of a given type,
184 * unless a specific switch is claimed by userspace (in which case,
185 * that switch is left alone) or suspended.
186 *
187 * Caller must have acquired rfkill_global_mutex.
188 */
189static void __rfkill_switch_all(const enum rfkill_type type,
190 const enum rfkill_state state)
191{
192 struct rfkill *rfkill;
193
194 if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
195 KERN_WARNING
196 "rfkill: illegal state %d or type %d "
197 "passed as parameter to __rfkill_switch_all\n",
198 state, type))
199 return;
200
201 rfkill_global_states[type].current_state = state;
202 list_for_each_entry(rfkill, &rfkill_list, node) {
203 if ((!rfkill->user_claim) && (rfkill->type == type)) {
204 mutex_lock(&rfkill->mutex);
205 rfkill_toggle_radio(rfkill, state, 0);
206 mutex_unlock(&rfkill->mutex);
207 }
208 }
209}
210
211/**
212 * rfkill_switch_all - Toggle state of all switches of given type
213 * @type: type of interfaces to be affected
214 * @state: the new state
215 *
216 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
217 * Please refer to __rfkill_switch_all() for details.
218 *
219 * Does nothing if the EPO lock is active.
220 */
221void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
222{
223 mutex_lock(&rfkill_global_mutex);
224 if (!rfkill_epo_lock_active)
225 __rfkill_switch_all(type, state);
226 mutex_unlock(&rfkill_global_mutex);
227}
228EXPORT_SYMBOL(rfkill_switch_all);
229
230/**
231 * rfkill_epo - emergency power off all transmitters
232 *
233 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
234 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
235 *
236 * The global state before the EPO is saved and can be restored later
237 * using rfkill_restore_states().
238 */
239void rfkill_epo(void)
240{
241 struct rfkill *rfkill;
242 int i;
243
244 mutex_lock(&rfkill_global_mutex);
245
246 rfkill_epo_lock_active = true;
247 list_for_each_entry(rfkill, &rfkill_list, node) {
248 mutex_lock(&rfkill->mutex);
249 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
250 mutex_unlock(&rfkill->mutex);
251 }
252 for (i = 0; i < RFKILL_TYPE_MAX; i++) {
253 rfkill_global_states[i].default_state =
254 rfkill_global_states[i].current_state;
255 rfkill_global_states[i].current_state =
256 RFKILL_STATE_SOFT_BLOCKED;
257 }
258 mutex_unlock(&rfkill_global_mutex);
259}
260EXPORT_SYMBOL_GPL(rfkill_epo);
261
262/**
263 * rfkill_restore_states - restore global states
264 *
265 * Restore (and sync switches to) the global state from the default
266 * states saved in rfkill_global_states. This can undo the effects of
267 * a call to rfkill_epo().
268 */
269void rfkill_restore_states(void)
270{
271 int i;
272
273 mutex_lock(&rfkill_global_mutex);
274
275 rfkill_epo_lock_active = false;
276 for (i = 0; i < RFKILL_TYPE_MAX; i++)
277 __rfkill_switch_all(i, rfkill_global_states[i].default_state);
278 mutex_unlock(&rfkill_global_mutex);
279}
280EXPORT_SYMBOL_GPL(rfkill_restore_states);
281
282/**
283 * rfkill_remove_epo_lock - unlock state changes
284 *
285 * Used by rfkill-input to manually unlock state changes when
286 * the EPO switch is deactivated.
287 */
288void rfkill_remove_epo_lock(void)
289{
290 mutex_lock(&rfkill_global_mutex);
291 rfkill_epo_lock_active = false;
292 mutex_unlock(&rfkill_global_mutex);
293}
294EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock);
295
296/**
297 * rfkill_is_epo_lock_active - returns true if EPO is active
298 *
299 * Returns 0 (false) if there is NOT an active EPO condition,
300 * and 1 (true) if there is an active EPO condition, which
301 * locks all radios in one of the BLOCKED states.
302 *
303 * Can be called in atomic context.
304 */
305bool rfkill_is_epo_lock_active(void)
306{
307 return rfkill_epo_lock_active;
308}
309EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active);
310
311/**
312 * rfkill_get_global_state - returns global state for a type
313 * @type: the type to get the global state of
314 *
315 * Returns the current global state for a given wireless
316 * device type.
317 */
318enum rfkill_state rfkill_get_global_state(const enum rfkill_type type)
319{
320 return rfkill_global_states[type].current_state;
321}
322EXPORT_SYMBOL_GPL(rfkill_get_global_state);
323
324/**
325 * rfkill_force_state - Force the internal rfkill radio state
326 * @rfkill: pointer to the rfkill class to modify.
327 * @state: the current radio state the class should be forced to.
328 *
329 * This function updates the internal state of the radio cached
330 * by the rfkill class. It should be used when the driver gets
331 * a notification from the firmware/hardware of the current *real*
332 * state of the radio rfkill switch.
333 *
334 * Devices which are subject to external changes on their rfkill
335 * state (such as those caused by a hardware rfkill line) MUST
336 * have their driver arrange to call rfkill_force_state() as soon
337 * as possible after such a change.
338 *
339 * This function may not be called from an atomic context.
340 */
341int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
342{
343 enum rfkill_state oldstate;
344
345 BUG_ON(!rfkill);
346 if (WARN((state >= RFKILL_STATE_MAX),
347 KERN_WARNING
348 "rfkill: illegal state %d passed as parameter "
349 "to rfkill_force_state\n", state))
350 return -EINVAL;
351
352 mutex_lock(&rfkill->mutex);
353
354 oldstate = rfkill->state;
355 rfkill->state = state;
356
357 if (state != oldstate)
358 rfkill_uevent(rfkill);
359
360 mutex_unlock(&rfkill->mutex);
361
362 return 0;
363}
364EXPORT_SYMBOL(rfkill_force_state);
365
366static ssize_t rfkill_name_show(struct device *dev,
367 struct device_attribute *attr,
368 char *buf)
369{
370 struct rfkill *rfkill = to_rfkill(dev);
371
372 return sprintf(buf, "%s\n", rfkill->name);
373}
374
375static const char *rfkill_get_type_str(enum rfkill_type type)
376{
377 switch (type) {
378 case RFKILL_TYPE_WLAN:
379 return "wlan";
380 case RFKILL_TYPE_BLUETOOTH:
381 return "bluetooth";
382 case RFKILL_TYPE_UWB:
383 return "ultrawideband";
384 case RFKILL_TYPE_WIMAX:
385 return "wimax";
386 case RFKILL_TYPE_WWAN:
387 return "wwan";
388 default:
389 BUG();
390 }
391}
392
393static ssize_t rfkill_type_show(struct device *dev,
394 struct device_attribute *attr,
395 char *buf)
396{
397 struct rfkill *rfkill = to_rfkill(dev);
398
399 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
400}
401
402static ssize_t rfkill_state_show(struct device *dev,
403 struct device_attribute *attr,
404 char *buf)
405{
406 struct rfkill *rfkill = to_rfkill(dev);
407
408 update_rfkill_state(rfkill);
409 return sprintf(buf, "%d\n", rfkill->state);
410}
411
412static ssize_t rfkill_state_store(struct device *dev,
413 struct device_attribute *attr,
414 const char *buf, size_t count)
415{
416 struct rfkill *rfkill = to_rfkill(dev);
417 unsigned long state;
418 int error;
419
420 if (!capable(CAP_NET_ADMIN))
421 return -EPERM;
422
423 error = strict_strtoul(buf, 0, &state);
424 if (error)
425 return error;
426
427 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
428 if (state != RFKILL_STATE_UNBLOCKED &&
429 state != RFKILL_STATE_SOFT_BLOCKED)
430 return -EINVAL;
431
432 error = mutex_lock_killable(&rfkill->mutex);
433 if (error)
434 return error;
435
436 if (!rfkill_epo_lock_active)
437 error = rfkill_toggle_radio(rfkill, state, 0);
438 else
439 error = -EPERM;
440
441 mutex_unlock(&rfkill->mutex);
442
443 return error ? error : count;
444}
445
446static ssize_t rfkill_claim_show(struct device *dev,
447 struct device_attribute *attr,
448 char *buf)
449{
450 struct rfkill *rfkill = to_rfkill(dev);
451
452 return sprintf(buf, "%d\n", rfkill->user_claim);
453}
454
455static ssize_t rfkill_claim_store(struct device *dev,
456 struct device_attribute *attr,
457 const char *buf, size_t count)
458{
459 struct rfkill *rfkill = to_rfkill(dev);
460 unsigned long claim_tmp;
461 bool claim;
462 int error;
463
464 if (!capable(CAP_NET_ADMIN))
465 return -EPERM;
466
467 if (rfkill->user_claim_unsupported)
468 return -EOPNOTSUPP;
469
470 error = strict_strtoul(buf, 0, &claim_tmp);
471 if (error)
472 return error;
473 claim = !!claim_tmp;
474
475 /*
476 * Take the global lock to make sure the kernel is not in
477 * the middle of rfkill_switch_all
478 */
479 error = mutex_lock_killable(&rfkill_global_mutex);
480 if (error)
481 return error;
482
483 if (rfkill->user_claim != claim) {
484 if (!claim && !rfkill_epo_lock_active) {
485 mutex_lock(&rfkill->mutex);
486 rfkill_toggle_radio(rfkill,
487 rfkill_global_states[rfkill->type].current_state,
488 0);
489 mutex_unlock(&rfkill->mutex);
490 }
491 rfkill->user_claim = claim;
492 }
493
494 mutex_unlock(&rfkill_global_mutex);
495
496 return error ? error : count;
497}
498
499static struct device_attribute rfkill_dev_attrs[] = {
500 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
501 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
502 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
503 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
504 __ATTR_NULL
505};
506
507static void rfkill_release(struct device *dev)
508{
509 struct rfkill *rfkill = to_rfkill(dev);
510
511 kfree(rfkill);
512 module_put(THIS_MODULE);
513}
514
515#ifdef CONFIG_PM
516static int rfkill_suspend(struct device *dev, pm_message_t state)
517{
518 struct rfkill *rfkill = to_rfkill(dev);
519
520 /* mark class device as suspended */
521 if (dev->power.power_state.event != state.event)
522 dev->power.power_state = state;
523
524 /* store state for the resume handler */
525 rfkill->state_for_resume = rfkill->state;
526
527 return 0;
528}
529
530static int rfkill_resume(struct device *dev)
531{
532 struct rfkill *rfkill = to_rfkill(dev);
533 enum rfkill_state newstate;
534
535 if (dev->power.power_state.event != PM_EVENT_ON) {
536 mutex_lock(&rfkill->mutex);
537
538 dev->power.power_state.event = PM_EVENT_ON;
539
540 /*
541 * rfkill->state could have been modified before we got
542 * called, and won't be updated by rfkill_toggle_radio()
543 * in force mode. Sync it FIRST.
544 */
545 if (rfkill->get_state &&
546 !rfkill->get_state(rfkill->data, &newstate))
547 rfkill->state = newstate;
548
549 /*
550 * If we are under EPO, kick transmitter offline,
551 * otherwise restore to pre-suspend state.
552 *
553 * Issue a notification in any case
554 */
555 rfkill_toggle_radio(rfkill,
556 rfkill_epo_lock_active ?
557 RFKILL_STATE_SOFT_BLOCKED :
558 rfkill->state_for_resume,
559 1);
560
561 mutex_unlock(&rfkill->mutex);
562 }
563
564 return 0;
565}
566#else
567#define rfkill_suspend NULL
568#define rfkill_resume NULL
569#endif
570
571static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
572{
573 struct rfkill *rfkill = to_rfkill(dev);
574 int error;
575
576 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
577 if (error)
578 return error;
579 error = add_uevent_var(env, "RFKILL_TYPE=%s",
580 rfkill_get_type_str(rfkill->type));
581 if (error)
582 return error;
583 error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
584 return error;
585}
586
587static struct class rfkill_class = {
588 .name = "rfkill",
589 .dev_release = rfkill_release,
590 .dev_attrs = rfkill_dev_attrs,
591 .suspend = rfkill_suspend,
592 .resume = rfkill_resume,
593 .dev_uevent = rfkill_dev_uevent,
594};
595
596static int rfkill_check_duplicity(const struct rfkill *rfkill)
597{
598 struct rfkill *p;
599 unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
600
601 memset(seen, 0, sizeof(seen));
602
603 list_for_each_entry(p, &rfkill_list, node) {
604 if (WARN((p == rfkill), KERN_WARNING
605 "rfkill: illegal attempt to register "
606 "an already registered rfkill struct\n"))
607 return -EEXIST;
608 set_bit(p->type, seen);
609 }
610
611 /* 0: first switch of its kind */
612 return (test_bit(rfkill->type, seen)) ? 1 : 0;
613}
614
615static int rfkill_add_switch(struct rfkill *rfkill)
616{
617 int error;
618
619 mutex_lock(&rfkill_global_mutex);
620
621 error = rfkill_check_duplicity(rfkill);
622 if (error < 0)
623 goto unlock_out;
624
625 if (!error) {
626 /* lock default after first use */
627 set_bit(rfkill->type, rfkill_states_lockdflt);
628 rfkill_global_states[rfkill->type].current_state =
629 rfkill_global_states[rfkill->type].default_state;
630 }
631
632 rfkill_toggle_radio(rfkill,
633 rfkill_global_states[rfkill->type].current_state,
634 0);
635
636 list_add_tail(&rfkill->node, &rfkill_list);
637
638 error = 0;
639unlock_out:
640 mutex_unlock(&rfkill_global_mutex);
641
642 return error;
643}
644
645static void rfkill_remove_switch(struct rfkill *rfkill)
646{
647 mutex_lock(&rfkill_global_mutex);
648 list_del_init(&rfkill->node);
649 mutex_unlock(&rfkill_global_mutex);
650
651 mutex_lock(&rfkill->mutex);
652 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
653 mutex_unlock(&rfkill->mutex);
654}
655
656/**
657 * rfkill_allocate - allocate memory for rfkill structure.
658 * @parent: device that has rf switch on it
659 * @type: type of the switch (RFKILL_TYPE_*)
660 *
661 * This function should be called by the network driver when it needs
662 * rfkill structure. Once the structure is allocated the driver should
663 * finish its initialization by setting the name, private data, enable_radio
664 * and disable_radio methods and then register it with rfkill_register().
665 *
666 * NOTE: If registration fails the structure should be freed by calling
667 * rfkill_free(); otherwise rfkill_unregister() should be used.
668 */
669struct rfkill * __must_check rfkill_allocate(struct device *parent,
670 enum rfkill_type type)
671{
672 struct rfkill *rfkill;
673 struct device *dev;
674
675 if (WARN((type >= RFKILL_TYPE_MAX),
676 KERN_WARNING
677 "rfkill: illegal type %d passed as parameter "
678 "to rfkill_allocate\n", type))
679 return NULL;
680
681 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
682 if (!rfkill)
683 return NULL;
684
685 mutex_init(&rfkill->mutex);
686 INIT_LIST_HEAD(&rfkill->node);
687 rfkill->type = type;
688
689 dev = &rfkill->dev;
690 dev->class = &rfkill_class;
691 dev->parent = parent;
692 device_initialize(dev);
693
694 __module_get(THIS_MODULE);
695
696 return rfkill;
697}
698EXPORT_SYMBOL(rfkill_allocate);
699
700/**
701 * rfkill_free - Mark rfkill structure for deletion
702 * @rfkill: rfkill structure to be destroyed
703 *
704 * Decrements reference count of the rfkill structure so it is destroyed.
705 * Note that rfkill_free() should _not_ be called after rfkill_unregister().
706 */
707void rfkill_free(struct rfkill *rfkill)
708{
709 if (rfkill)
710 put_device(&rfkill->dev);
711}
712EXPORT_SYMBOL(rfkill_free);
713
714static void rfkill_led_trigger_register(struct rfkill *rfkill)
715{
716#ifdef CONFIG_RFKILL_LEDS
717 int error;
718
719 if (!rfkill->led_trigger.name)
720 rfkill->led_trigger.name = dev_name(&rfkill->dev);
721 if (!rfkill->led_trigger.activate)
722 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
723 error = led_trigger_register(&rfkill->led_trigger);
724 if (error)
725 rfkill->led_trigger.name = NULL;
726#endif /* CONFIG_RFKILL_LEDS */
727}
728
729static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
730{
731#ifdef CONFIG_RFKILL_LEDS
732 if (rfkill->led_trigger.name) {
733 led_trigger_unregister(&rfkill->led_trigger);
734 rfkill->led_trigger.name = NULL;
735 }
736#endif
737}
738
739/**
740 * rfkill_register - Register a rfkill structure.
741 * @rfkill: rfkill structure to be registered
742 *
743 * This function should be called by the network driver when the rfkill
744 * structure needs to be registered. From the moment of registration,
745 * the switch driver should be able to service calls to toggle_radio.
746 */
747int __must_check rfkill_register(struct rfkill *rfkill)
748{
749 static atomic_t rfkill_no = ATOMIC_INIT(0);
750 struct device *dev = &rfkill->dev;
751 int error;
752
753 if (WARN((!rfkill || !rfkill->toggle_radio ||
754 rfkill->type >= RFKILL_TYPE_MAX ||
755 rfkill->state >= RFKILL_STATE_MAX),
756 KERN_WARNING
757 "rfkill: attempt to register a "
758 "badly initialized rfkill struct\n"))
759 return -EINVAL;
760
761 dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
762
763 rfkill_led_trigger_register(rfkill);
764
765 error = rfkill_add_switch(rfkill);
766 if (error) {
767 rfkill_led_trigger_unregister(rfkill);
768 return error;
769 }
770
771 error = device_add(dev);
772 if (error) {
773 rfkill_remove_switch(rfkill);
774 rfkill_led_trigger_unregister(rfkill);
775 return error;
776 }
777
778 return 0;
779}
780EXPORT_SYMBOL(rfkill_register);
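
[Editor's note] Taken together with rfkill_allocate() and rfkill_free() above, the NOTE about failed registration translates into a driver-side flow like this sketch; mydrv_* names are hypothetical, reusing the mydrv_toggle_radio() hook sketched earlier:

	#include <linux/device.h>
	#include <linux/rfkill.h>

	static int mydrv_toggle_radio(void *data, enum rfkill_state state);

	static struct rfkill *mydrv_rfkill_setup(struct device *parent, void *priv)
	{
		struct rfkill *rfkill;

		rfkill = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
		if (!rfkill)
			return NULL;

		rfkill->name = "mydrv-wlan";
		rfkill->data = priv;
		rfkill->state = RFKILL_STATE_UNBLOCKED;
		rfkill->toggle_radio = mydrv_toggle_radio;

		if (rfkill_register(rfkill)) {
			/* failed registration: rfkill_free(), not rfkill_unregister() */
			rfkill_free(rfkill);
			return NULL;
		}
		return rfkill;
	}
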
781
782/**
783 * rfkill_unregister - Unregister a rfkill structure.
784 * @rfkill: rfkill structure to be unregistered
785 *
786 * This function should be called by the network driver during device
787 * teardown to destroy rfkill structure. Note that rfkill_free() should
788 * _not_ be called after rfkill_unregister().
789 */
790void rfkill_unregister(struct rfkill *rfkill)
791{
792 BUG_ON(!rfkill);
793 device_del(&rfkill->dev);
794 rfkill_remove_switch(rfkill);
795 rfkill_led_trigger_unregister(rfkill);
796 put_device(&rfkill->dev);
797}
798EXPORT_SYMBOL(rfkill_unregister);
799
800/**
801 * rfkill_set_default - set initial value for a switch type
802 * @type: the type of switch to set the default state of
803 * @state: the new default state for that group of switches
804 *
805 * Sets the initial state rfkill should use for a given type.
806 * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
807 * and RFKILL_STATE_UNBLOCKED.
808 *
809 * This function is meant to be used by platform drivers for platforms
810 * that can save switch state across power down/reboot.
811 *
812 * The default state for each switch type can be changed exactly once.
813 * After a switch of that type is registered, the default state cannot
814 * be changed anymore. This guards against multiple drivers in the
815 * same platform trying to set the initial switch default state, which
816 * is not allowed.
817 *
818 * Returns -EPERM if the state has already been set once or is in use,
819 * so drivers likely want to either ignore the error or at most log it
820 * with printk(KERN_NOTICE) when this function returns -EPERM.
821 *
822 * Returns 0 if the new default state was set, or an error if it
823 * could not be set.
824 */
825int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
826{
827 int error;
828
829 if (WARN((type >= RFKILL_TYPE_MAX ||
830 (state != RFKILL_STATE_SOFT_BLOCKED &&
831 state != RFKILL_STATE_UNBLOCKED)),
832 KERN_WARNING
833 "rfkill: illegal state %d or type %d passed as "
834 "parameter to rfkill_set_default\n", state, type))
835 return -EINVAL;
836
837 mutex_lock(&rfkill_global_mutex);
838
839 if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
840 rfkill_global_states[type].default_state = state;
841 rfkill_global_states[type].current_state = state;
842 error = 0;
843 } else
844 error = -EPERM;
845
846 mutex_unlock(&rfkill_global_mutex);
847 return error;
848}
849EXPORT_SYMBOL_GPL(rfkill_set_default);
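
[Editor's note] As the comment above suggests, a platform driver that saved the radio state across reboot would call this early and treat -EPERM as merely informational. A hedged sketch (mydrv_* names hypothetical):

	#include <linux/kernel.h>
	#include <linux/rfkill.h>

	static void mydrv_restore_rfkill_default(bool radio_was_off)
	{
		int err;

		err = rfkill_set_default(RFKILL_TYPE_WLAN,
					 radio_was_off ? RFKILL_STATE_SOFT_BLOCKED
						       : RFKILL_STATE_UNBLOCKED);
		if (err == -EPERM)
			/* someone else set it first, or a switch already registered */
			printk(KERN_NOTICE "mydrv: rfkill default already set\n");
	}
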
850
851/*
852 * Rfkill module initialization/deinitialization.
853 */
854static int __init rfkill_init(void)
855{
856 int error;
857 int i;
858
859 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
860 if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
861 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
862 return -EINVAL;
863
864 for (i = 0; i < RFKILL_TYPE_MAX; i++)
865 rfkill_global_states[i].default_state = rfkill_default_state;
866
867 error = class_register(&rfkill_class);
868 if (error) {
869 printk(KERN_ERR "rfkill: unable to register rfkill class\n");
870 return error;
871 }
872
873 return 0;
874}
875
876static void __exit rfkill_exit(void)
877{
878 class_unregister(&rfkill_class);
879}
880
881subsys_initcall(rfkill_init);
882module_exit(rfkill_exit);
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill.h
index fe8df6b5b935..d1117cb6e4de 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007 Ivo van Doorn 2 * Copyright (C) 2007 Ivo van Doorn
3 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
3 */ 4 */
4 5
5/* 6/*
@@ -11,11 +12,16 @@
11#ifndef __RFKILL_INPUT_H 12#ifndef __RFKILL_INPUT_H
12#define __RFKILL_INPUT_H 13#define __RFKILL_INPUT_H
13 14
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 15/* core code */
16void rfkill_switch_all(const enum rfkill_type type, bool blocked);
15void rfkill_epo(void); 17void rfkill_epo(void);
16void rfkill_restore_states(void); 18void rfkill_restore_states(void);
17void rfkill_remove_epo_lock(void); 19void rfkill_remove_epo_lock(void);
18bool rfkill_is_epo_lock_active(void); 20bool rfkill_is_epo_lock_active(void);
19enum rfkill_state rfkill_get_global_state(const enum rfkill_type type); 21bool rfkill_get_global_sw_state(const enum rfkill_type type);
22
23/* input handler */
24int rfkill_handler_init(void);
25void rfkill_handler_exit(void);
20 26
21#endif /* __RFKILL_INPUT_H */ 27#endif /* __RFKILL_INPUT_H */
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 7dcf2569613b..389d6e0d7740 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -137,7 +137,7 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
137 137
138 if (!netif_running(dev)) { 138 if (!netif_running(dev)) {
139 printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); 139 printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
140 return 1; 140 return NETDEV_TX_BUSY;
141 } 141 }
142 dev_kfree_skb(skb); 142 dev_kfree_skb(skb);
143 stats->tx_errors++; 143 stats->tx_errors++;
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 67e38a056240..9f1ce841a0bb 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -444,6 +444,11 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
444 conn = list_entry(bundle->avail_conns.next, 444 conn = list_entry(bundle->avail_conns.next,
445 struct rxrpc_connection, 445 struct rxrpc_connection,
446 bundle_link); 446 bundle_link);
447 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
448 list_del_init(&conn->bundle_link);
449 bundle->num_conns--;
450 continue;
451 }
447 if (--conn->avail_calls == 0) 452 if (--conn->avail_calls == 0)
448 list_move(&conn->bundle_link, 453 list_move(&conn->bundle_link,
449 &bundle->busy_conns); 454 &bundle->busy_conns);
@@ -461,6 +466,11 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
461 conn = list_entry(bundle->unused_conns.next, 466 conn = list_entry(bundle->unused_conns.next,
462 struct rxrpc_connection, 467 struct rxrpc_connection,
463 bundle_link); 468 bundle_link);
469 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
470 list_del_init(&conn->bundle_link);
471 bundle->num_conns--;
472 continue;
473 }
464 ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS); 474 ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
465 conn->avail_calls = RXRPC_MAXCALLS - 1; 475 conn->avail_calls = RXRPC_MAXCALLS - 1;
466 ASSERT(conn->channels[0] == NULL && 476 ASSERT(conn->channels[0] == NULL &&
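
[Editor's note] Both hunks above apply the same guard: while picking a connection off a bundle list, entries that have been aborted are unlinked and skipped instead of being reused for a new call. A generic sketch of this prune-while-scanning pattern (hypothetical struct and threshold, not the rxrpc types):

	#include <linux/list.h>

	struct conn {
		struct list_head link;
		int state;		/* >= CONN_DEAD means unusable */
	};
	#define CONN_DEAD 5

	/* Return the first usable conn, pruning dead entries as we scan. */
	static struct conn *pick_conn(struct list_head *avail, int *num_conns)
	{
		while (!list_empty(avail)) {
			struct conn *c = list_first_entry(avail, struct conn, link);

			if (c->state >= CONN_DEAD) {
				list_del_init(&c->link);	/* unlink, never reuse */
				(*num_conns)--;
				continue;
			}
			return c;
		}
		return NULL;
	}
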
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index dc5cb1e19509..0505cdc4d6d4 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -150,11 +150,15 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
150 u32 serial; 150 u32 serial;
151 int loop, ret; 151 int loop, ret;
152 152
153 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) 153 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
154 kleave(" = -ECONNABORTED [%u]", conn->state);
154 return -ECONNABORTED; 155 return -ECONNABORTED;
156 }
155 157
156 serial = ntohl(sp->hdr.serial); 158 serial = ntohl(sp->hdr.serial);
157 159
160 _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, serial);
161
158 switch (sp->hdr.type) { 162 switch (sp->hdr.type) {
159 case RXRPC_PACKET_TYPE_ABORT: 163 case RXRPC_PACKET_TYPE_ABORT:
160 if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0) 164 if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
@@ -199,6 +203,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
199 return 0; 203 return 0;
200 204
201 default: 205 default:
206 _leave(" = -EPROTO [%u]", sp->hdr.type);
202 return -EPROTO; 207 return -EPROTO;
203 } 208 }
204} 209}
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e5becb92b3e7..e4877ca6727c 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -62,13 +62,7 @@ static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
62 62
63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) 63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
64{ 64{
65 if (!cgroup_lock_live_group(cgrp))
66 return -ENODEV;
67
68 cgrp_cls_state(cgrp)->classid = (u32) value; 65 cgrp_cls_state(cgrp)->classid = (u32) value;
69
70 cgroup_unlock();
71
72 return 0; 66 return 0;
73} 67}
74 68
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 0ef4e3065bcd..9402a7fd3785 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -84,7 +84,7 @@ static u32 flow_get_dst(const struct sk_buff *skb)
84 case htons(ETH_P_IPV6): 84 case htons(ETH_P_IPV6):
85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
86 default: 86 default:
87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol; 87 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
88 } 88 }
89} 89}
90 90
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
163 break; 163 break;
164 } 164 }
165 default: 165 default:
166 res = addr_fold(skb->dst) ^ (__force u16)skb->protocol; 166 res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
167 } 167 }
168 168
169 return res; 169 return res;
@@ -251,8 +251,8 @@ fallback:
251static u32 flow_get_rtclassid(const struct sk_buff *skb) 251static u32 flow_get_rtclassid(const struct sk_buff *skb)
252{ 252{
253#ifdef CONFIG_NET_CLS_ROUTE 253#ifdef CONFIG_NET_CLS_ROUTE
254 if (skb->dst) 254 if (skb_dst(skb))
255 return skb->dst->tclassid; 255 return skb_dst(skb)->tclassid;
256#endif 256#endif
257 return 0; 257 return 0;
258} 258}
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index bdf1f4172eef..dd872d5383ef 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -137,7 +137,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
137 u32 id, h; 137 u32 id, h;
138 int iif, dont_cache = 0; 138 int iif, dont_cache = 0;
139 139
140 if ((dst = skb->dst) == NULL) 140 if ((dst = skb_dst(skb)) == NULL)
141 goto failure; 141 goto failure;
142 142
143 id = dst->tclassid; 143 id = dst->tclassid;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index fad596bf32d7..266151ae85a3 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -246,11 +246,11 @@ META_COLLECTOR(int_tcindex)
246 246
247META_COLLECTOR(int_rtclassid) 247META_COLLECTOR(int_rtclassid)
248{ 248{
249 if (unlikely(skb->dst == NULL)) 249 if (unlikely(skb_dst(skb) == NULL))
250 *err = -1; 250 *err = -1;
251 else 251 else
252#ifdef CONFIG_NET_CLS_ROUTE 252#ifdef CONFIG_NET_CLS_ROUTE
253 dst->value = skb->dst->tclassid; 253 dst->value = skb_dst(skb)->tclassid;
254#else 254#else
255 dst->value = 0; 255 dst->value = 0;
256#endif 256#endif
@@ -258,10 +258,10 @@ META_COLLECTOR(int_rtclassid)
258 258
259META_COLLECTOR(int_rtiif) 259META_COLLECTOR(int_rtiif)
260{ 260{
261 if (unlikely(skb->rtable == NULL)) 261 if (unlikely(skb_rtable(skb) == NULL))
262 *err = -1; 262 *err = -1;
263 else 263 else
264 dst->value = skb->rtable->fl.iif; 264 dst->value = skb_rtable(skb)->fl.iif;
265} 265}
266 266
267/************************************************************************** 267/**************************************************************************
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 32009793307b..24d17ce9c294 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -484,7 +484,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
484 484
485 wd->qdisc->flags |= TCQ_F_THROTTLED; 485 wd->qdisc->flags |= TCQ_F_THROTTLED;
486 time = ktime_set(0, 0); 486 time = ktime_set(0, 0);
487 time = ktime_add_ns(time, PSCHED_US2NS(expires)); 487 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
488 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); 488 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
489} 489}
490EXPORT_SYMBOL(qdisc_watchdog_schedule); 490EXPORT_SYMBOL(qdisc_watchdog_schedule);
@@ -1680,7 +1680,7 @@ static int psched_show(struct seq_file *seq, void *v)
1680 1680
1681 hrtimer_get_res(CLOCK_MONOTONIC, &ts); 1681 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1682 seq_printf(seq, "%08x %08x %08x %08x\n", 1682 seq_printf(seq, "%08x %08x %08x %08x\n",
1683 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1), 1683 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1684 1000000, 1684 1000000,
1685 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts))); 1685 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1686 1686
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d728d8111732..23a167670fd5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -509,7 +509,7 @@ static void cbq_ovl_delay(struct cbq_class *cl)
509 q->pmask |= (1<<TC_CBQ_MAXPRIO); 509 q->pmask |= (1<<TC_CBQ_MAXPRIO);
510 510
511 expires = ktime_set(0, 0); 511 expires = ktime_set(0, 0);
512 expires = ktime_add_ns(expires, PSCHED_US2NS(sched)); 512 expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
513 if (hrtimer_try_to_cancel(&q->delay_timer) && 513 if (hrtimer_try_to_cancel(&q->delay_timer) &&
514 ktime_to_ns(ktime_sub( 514 ktime_to_ns(ktime_sub(
515 hrtimer_get_expires(&q->delay_timer), 515 hrtimer_get_expires(&q->delay_timer),
@@ -620,7 +620,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
620 ktime_t time; 620 ktime_t time;
621 621
622 time = ktime_set(0, 0); 622 time = ktime_set(0, 0);
623 time = ktime_add_ns(time, PSCHED_US2NS(now + delay)); 623 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); 624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
625 } 625 }
626 626
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5f5efe4e6072..27d03816ec3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -196,6 +196,21 @@ void __qdisc_run(struct Qdisc *q)
196 clear_bit(__QDISC_STATE_RUNNING, &q->state); 196 clear_bit(__QDISC_STATE_RUNNING, &q->state);
197} 197}
198 198
199unsigned long dev_trans_start(struct net_device *dev)
200{
201 unsigned long val, res = dev->trans_start;
202 unsigned int i;
203
204 for (i = 0; i < dev->num_tx_queues; i++) {
205 val = netdev_get_tx_queue(dev, i)->trans_start;
206 if (val && time_after(val, res))
207 res = val;
208 }
209 dev->trans_start = res;
210 return res;
211}
212EXPORT_SYMBOL(dev_trans_start);
213
199static void dev_watchdog(unsigned long arg) 214static void dev_watchdog(unsigned long arg)
200{ 215{
201 struct net_device *dev = (struct net_device *)arg; 216 struct net_device *dev = (struct net_device *)arg;
@@ -205,25 +220,30 @@ static void dev_watchdog(unsigned long arg)
205 if (netif_device_present(dev) && 220 if (netif_device_present(dev) &&
206 netif_running(dev) && 221 netif_running(dev) &&
207 netif_carrier_ok(dev)) { 222 netif_carrier_ok(dev)) {
208 int some_queue_stopped = 0; 223 int some_queue_timedout = 0;
209 unsigned int i; 224 unsigned int i;
225 unsigned long trans_start;
210 226
211 for (i = 0; i < dev->num_tx_queues; i++) { 227 for (i = 0; i < dev->num_tx_queues; i++) {
212 struct netdev_queue *txq; 228 struct netdev_queue *txq;
213 229
214 txq = netdev_get_tx_queue(dev, i); 230 txq = netdev_get_tx_queue(dev, i);
215 if (netif_tx_queue_stopped(txq)) { 231 /*
216 some_queue_stopped = 1; 232 * old device drivers set dev->trans_start
233 */
234 trans_start = txq->trans_start ? : dev->trans_start;
235 if (netif_tx_queue_stopped(txq) &&
236 time_after(jiffies, (trans_start +
237 dev->watchdog_timeo))) {
238 some_queue_timedout = 1;
217 break; 239 break;
218 } 240 }
219 } 241 }
220 242
221 if (some_queue_stopped && 243 if (some_queue_timedout) {
222 time_after(jiffies, (dev->trans_start +
223 dev->watchdog_timeo))) {
224 char drivername[64]; 244 char drivername[64];
225 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 245 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
226 dev->name, netdev_drivername(dev, drivername, 64)); 246 dev->name, netdev_drivername(dev, drivername, 64), i);
227 dev->netdev_ops->ndo_tx_timeout(dev); 247 dev->netdev_ops->ndo_tx_timeout(dev);
228 } 248 }
229 if (!mod_timer(&dev->watchdog_timer, 249 if (!mod_timer(&dev->watchdog_timer,
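With trans_start now kept per tx queue, the watchdog only fires for a queue that has been stopped longer than watchdog_timeo since that queue's own last transmission. A driver's timeout handler can consume the new dev_trans_start() helper the same way; a hypothetical sketch (the mydrv_* names are not from this patch):

    static void mydrv_tx_timeout(struct net_device *dev)
    {
            /* dev_trans_start() folds the per-queue timestamps back
             * into dev->trans_start and returns the most recent one */
            unsigned long start = dev_trans_start(dev);

            if (time_after(jiffies, start + dev->watchdog_timeo))
                    mydrv_reset_hw(dev);    /* hypothetical recovery path */
    }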
@@ -602,8 +622,10 @@ static void transition_one_qdisc(struct net_device *dev,
602 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); 622 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
603 623
604 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); 624 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
605 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) 625 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
626 dev_queue->trans_start = 0;
606 *need_watchdog_p = 1; 627 *need_watchdog_p = 1;
628 }
607} 629}
608 630
609void dev_activate(struct net_device *dev) 631void dev_activate(struct net_device *dev)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5022f9c1f34b..362c2811b2df 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -372,7 +372,7 @@ cftree_update(struct hfsc_class *cl)
372 * ism: (psched_us/byte) << ISM_SHIFT 372 * ism: (psched_us/byte) << ISM_SHIFT
373 * dx: psched_us 373 * dx: psched_us
374 * 374 *
375 * The clock source resolution with ktime is 1.024us. 375 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
376 * 376 *
377 * sm and ism are scaled in order to keep effective digits. 377 * sm and ism are scaled in order to keep effective digits.
378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective 378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
@@ -383,9 +383,11 @@ cftree_update(struct hfsc_class *cl)
383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3
384 * 384 *
385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125
386 *
387 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
386 */ 388 */
387#define SM_SHIFT 20 389#define SM_SHIFT (30 - PSCHED_SHIFT)
388#define ISM_SHIFT 18 390#define ISM_SHIFT (8 + PSCHED_SHIFT)
389 391
390#define SM_MASK ((1ULL << SM_SHIFT) - 1) 392#define SM_MASK ((1ULL << SM_SHIFT) - 1)
391#define ISM_MASK ((1ULL << ISM_SHIFT) - 1) 393#define ISM_MASK ((1ULL << ISM_SHIFT) - 1)
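The reworked SM_SHIFT/ISM_SHIFT expressions keep the combined scaling constant as PSCHED_SHIFT varies: a rate in bytes per second always ends up scaled by 2^30 once the tick conversion is folded in. A small user-space re-derivation, with m2sm() mirroring sch_hfsc.c's round-up conversion:

    #include <stdio.h>

    #define PSCHED_SHIFT            10
    #define PSCHED_TICKS_PER_SEC    (1000000000ULL >> PSCHED_SHIFT)
    #define SM_SHIFT                (30 - PSCHED_SHIFT)
    #define ISM_SHIFT               (8 + PSCHED_SHIFT)

    /* bytes/sec -> (bytes/tick) << SM_SHIFT, rounded up */
    static unsigned long long m2sm(unsigned int m)
    {
            unsigned long long sm = (unsigned long long)m << SM_SHIFT;
            return (sm + PSCHED_TICKS_PER_SEC - 1) / PSCHED_TICKS_PER_SEC;
    }

    int main(void)
    {
            printf("SM_SHIFT=%d ISM_SHIFT=%d\n", SM_SHIFT, ISM_SHIFT);
            printf("100 Mbit/s -> sm=%llu\n", m2sm(100000000 / 8));
            return 0;
    }

For PSCHED_SHIFT 10 this yields SM_SHIFT 20 and ISM_SHIFT 18, matching the constants the old code hard-coded.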
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 33133d27b539..8706920a6d45 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -149,7 +149,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
149 break; 149 break;
150 } 150 }
151 default: 151 default:
152 h = (unsigned long)skb->dst ^ skb->protocol; 152 h = (unsigned long)skb_dst(skb) ^ skb->protocol;
153 h2 = (unsigned long)skb->sk; 153 h2 = (unsigned long)skb->sk;
154 } 154 }
155 155
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3b6418297231..9c002b6e0533 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -58,7 +58,6 @@ struct teql_master
58 struct net_device *dev; 58 struct net_device *dev;
59 struct Qdisc *slaves; 59 struct Qdisc *slaves;
60 struct list_head master_list; 60 struct list_head master_list;
61 struct net_device_stats stats;
62}; 61};
63 62
64struct teql_sched_data 63struct teql_sched_data
@@ -223,7 +222,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
223{ 222{
224 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); 223 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
225 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); 224 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
226 struct neighbour *mn = skb->dst->neighbour; 225 struct neighbour *mn = skb_dst(skb)->neighbour;
227 struct neighbour *n = q->ncache; 226 struct neighbour *n = q->ncache;
228 227
229 if (mn->tbl == NULL) 228 if (mn->tbl == NULL)
@@ -263,8 +262,8 @@ static inline int teql_resolve(struct sk_buff *skb,
263 return -ENODEV; 262 return -ENODEV;
264 263
265 if (dev->header_ops == NULL || 264 if (dev->header_ops == NULL ||
266 skb->dst == NULL || 265 skb_dst(skb) == NULL ||
267 skb->dst->neighbour == NULL) 266 skb_dst(skb)->neighbour == NULL)
268 return 0; 267 return 0;
269 return __teql_resolve(skb, skb_res, dev); 268 return __teql_resolve(skb, skb_res, dev);
270} 269}
@@ -272,6 +271,7 @@ static inline int teql_resolve(struct sk_buff *skb,
272static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 271static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
273{ 272{
274 struct teql_master *master = netdev_priv(dev); 273 struct teql_master *master = netdev_priv(dev);
274 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
275 struct Qdisc *start, *q; 275 struct Qdisc *start, *q;
276 int busy; 276 int busy;
277 int nores; 277 int nores;
@@ -308,11 +308,12 @@ restart:
308 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
309 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
310 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == 0) {
311 txq_trans_update(slave_txq);
311 __netif_tx_unlock(slave_txq); 312 __netif_tx_unlock(slave_txq);
312 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
313 netif_wake_queue(dev); 314 netif_wake_queue(dev);
314 master->stats.tx_packets++; 315 txq->tx_packets++;
315 master->stats.tx_bytes += length; 316 txq->tx_bytes += length;
316 return 0; 317 return 0;
317 } 318 }
318 __netif_tx_unlock(slave_txq); 319 __netif_tx_unlock(slave_txq);
@@ -337,12 +338,12 @@ restart:
337 338
338 if (busy) { 339 if (busy) {
339 netif_stop_queue(dev); 340 netif_stop_queue(dev);
340 return 1; 341 return NETDEV_TX_BUSY;
341 } 342 }
342 master->stats.tx_errors++; 343 dev->stats.tx_errors++;
343 344
344drop: 345drop:
345 master->stats.tx_dropped++; 346 txq->tx_dropped++;
346 dev_kfree_skb(skb); 347 dev_kfree_skb(skb);
347 return 0; 348 return 0;
348} 349}
@@ -395,12 +396,6 @@ static int teql_master_close(struct net_device *dev)
395 return 0; 396 return 0;
396} 397}
397 398
398static struct net_device_stats *teql_master_stats(struct net_device *dev)
399{
400 struct teql_master *m = netdev_priv(dev);
401 return &m->stats;
402}
403
404static int teql_master_mtu(struct net_device *dev, int new_mtu) 399static int teql_master_mtu(struct net_device *dev, int new_mtu)
405{ 400{
406 struct teql_master *m = netdev_priv(dev); 401 struct teql_master *m = netdev_priv(dev);
@@ -425,7 +420,6 @@ static const struct net_device_ops teql_netdev_ops = {
425 .ndo_open = teql_master_open, 420 .ndo_open = teql_master_open,
426 .ndo_stop = teql_master_close, 421 .ndo_stop = teql_master_close,
427 .ndo_start_xmit = teql_master_xmit, 422 .ndo_start_xmit = teql_master_xmit,
428 .ndo_get_stats = teql_master_stats,
429 .ndo_change_mtu = teql_master_mtu, 423 .ndo_change_mtu = teql_master_mtu,
430}; 424};
431 425
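The teql conversion shows the pattern this series pushes onto software devices: account tx statistics on the struct netdev_queue, stamp trans_start via txq_trans_update() when transmitting through another device's ndo_start_xmit(), and return NETDEV_TX_BUSY rather than a bare 1. A condensed, hypothetical xmit path (the hw_* helpers are placeholders):

    static int mydev_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
            unsigned int len = skb->len;    /* sample before the skb is gone */

            if (hw_queue_full(dev)) {       /* placeholder */
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* core keeps the skb and retries */
            }
            hw_send(dev, skb);              /* placeholder, consumes skb */
            txq_trans_update(txq);          /* per-queue watchdog timestamp */
            txq->tx_packets++;
            txq->tx_bytes += len;
            return 0;
    }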
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f4b23043b610..525864bf4f07 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -293,7 +293,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
293 * told otherwise. 293 * told otherwise.
294 */ 294 */
295 asoc->peer.ipv4_address = 1; 295 asoc->peer.ipv4_address = 1;
296 asoc->peer.ipv6_address = 1; 296 if (asoc->base.sk->sk_family == PF_INET6)
297 asoc->peer.ipv6_address = 1;
297 INIT_LIST_HEAD(&asoc->asocs); 298 INIT_LIST_HEAD(&asoc->asocs);
298 299
299 asoc->autoclose = sp->autoclose; 300 asoc->autoclose = sp->autoclose;
@@ -566,6 +567,21 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
566 if (asoc->init_last_sent_to == peer) 567 if (asoc->init_last_sent_to == peer)
567 asoc->init_last_sent_to = NULL; 568 asoc->init_last_sent_to = NULL;
568 569
 570 /* If we remove the transport a SHUTDOWN was last sent to, set it
571 * to NULL. Combined with the update of the retran path above, this
572 * will cause the next SHUTDOWN to be sent to the next available
573 * transport, maintaining the cycle.
574 */
575 if (asoc->shutdown_last_sent_to == peer)
576 asoc->shutdown_last_sent_to = NULL;
577
578 /* If we remove the transport an ASCONF was last sent to, set it to
579 * NULL.
580 */
581 if (asoc->addip_last_asconf &&
582 asoc->addip_last_asconf->transport == peer)
583 asoc->addip_last_asconf->transport = NULL;
584
569 asoc->peer.transport_count--; 585 asoc->peer.transport_count--;
570 586
571 sctp_transport_free(peer); 587 sctp_transport_free(peer);
@@ -1268,49 +1284,21 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1268 ntohs(t->ipaddr.v4.sin_port)); 1284 ntohs(t->ipaddr.v4.sin_port));
1269} 1285}
1270 1286
1271/* Choose the transport for sending an INIT packet. */ 1287/* Choose the transport for sending a retransmit packet. */
1272struct sctp_transport *sctp_assoc_choose_init_transport( 1288struct sctp_transport *sctp_assoc_choose_alter_transport(
1273 struct sctp_association *asoc) 1289 struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1274{
1275 struct sctp_transport *t;
1276
1277 /* Use the retran path. If the last INIT was sent over the
1278 * retran path, update the retran path and use it.
1279 */
1280 if (!asoc->init_last_sent_to) {
1281 t = asoc->peer.active_path;
1282 } else {
1283 if (asoc->init_last_sent_to == asoc->peer.retran_path)
1284 sctp_assoc_update_retran_path(asoc);
1285 t = asoc->peer.retran_path;
1286 }
1287
1288 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1289 " %p addr: ",
1290 " port: %d\n",
1291 asoc,
1292 (&t->ipaddr),
1293 ntohs(t->ipaddr.v4.sin_port));
1294
1295 return t;
1296}
1297
1298/* Choose the transport for sending a SHUTDOWN packet. */
1299struct sctp_transport *sctp_assoc_choose_shutdown_transport(
1300 struct sctp_association *asoc)
1301{ 1290{
1302 /* If this is the first time SHUTDOWN is sent, use the active path, 1291 /* If this is the first time a packet is sent, use the active path,
1303 * else use the retran path. If the last SHUTDOWN was sent over the 1292 * else use the retran path. If the last packet was sent over the
1304 * retran path, update the retran path and use it. 1293 * retran path, update the retran path and use it.
1305 */ 1294 */
1306 if (!asoc->shutdown_last_sent_to) 1295 if (!last_sent_to)
1307 return asoc->peer.active_path; 1296 return asoc->peer.active_path;
1308 else { 1297 else {
1309 if (asoc->shutdown_last_sent_to == asoc->peer.retran_path) 1298 if (last_sent_to == asoc->peer.retran_path)
1310 sctp_assoc_update_retran_path(asoc); 1299 sctp_assoc_update_retran_path(asoc);
1311 return asoc->peer.retran_path; 1300 return asoc->peer.retran_path;
1312 } 1301 }
1313
1314} 1302}
1315 1303
1316/* Update the association's pmtu and frag_point by going through all the 1304/* Update the association's pmtu and frag_point by going through all the
@@ -1482,6 +1470,10 @@ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1482{ 1470{
1483 int assoc_id; 1471 int assoc_id;
1484 int error = 0; 1472 int error = 0;
1473
1474 /* If the id is already assigned, keep it. */
1475 if (asoc->assoc_id)
1476 return error;
1485retry: 1477retry:
1486 if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) 1478 if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
1487 return -ENOMEM; 1479 return -ENOMEM;
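The INIT- and SHUTDOWN-specific transport choosers collapse into one function keyed solely on which transport the last packet went to. A toy user-space model of the resulting cycling behaviour (not kernel code; update_retran_path() stands in for sctp_assoc_update_retran_path() over a two-address peer):

    #include <stdio.h>

    struct transport { const char *name; };

    static struct transport paths[2] = { { "primary" }, { "secondary" } };
    static struct transport *active = &paths[0];
    static struct transport *retran = &paths[1];

    static void update_retran_path(void)
    {
            retran = (retran == &paths[0]) ? &paths[1] : &paths[0];
    }

    static struct transport *choose_alter(struct transport *last_sent_to)
    {
            if (!last_sent_to)
                    return active;          /* first attempt: active path */
            if (last_sent_to == retran)
                    update_retran_path();   /* advance before reusing */
            return retran;
    }

    int main(void)
    {
            struct transport *t = NULL;
            int i;

            for (i = 0; i < 4; i++) {
                    t = choose_alter(t);
                    printf("attempt %d -> %s\n", i, t->name);
            }
            return 0;
    }

Each retry that lands on the retran path advances it first, so successive INIT, SHUTDOWN, or ASCONF attempts walk through all of the peer's addresses.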
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d2e98803ffe3..c0c973e67add 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -81,13 +81,13 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
81/* Calculate the SCTP checksum of an SCTP packet. */ 81/* Calculate the SCTP checksum of an SCTP packet. */
82static inline int sctp_rcv_checksum(struct sk_buff *skb) 82static inline int sctp_rcv_checksum(struct sk_buff *skb)
83{ 83{
84 struct sk_buff *list = skb_shinfo(skb)->frag_list;
85 struct sctphdr *sh = sctp_hdr(skb); 84 struct sctphdr *sh = sctp_hdr(skb);
86 __le32 cmp = sh->checksum; 85 __le32 cmp = sh->checksum;
86 struct sk_buff *list;
87 __le32 val; 87 __le32 val;
88 __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); 88 __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
89 89
90 for (; list; list = list->next) 90 skb_walk_frags(skb, list)
91 tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), 91 tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
92 tmp); 92 tmp);
93 93
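skb_walk_frags() is a new skbuff.h helper substituted throughout this series for open-coded frag_list loops; it expands to roughly for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next). A kernel-style sketch of the idiom:

    /* sum the linear parts of an skb and its first-level fragments */
    static unsigned int headlen_sum(struct sk_buff *skb)
    {
            struct sk_buff *frag;
            unsigned int sum = skb_headlen(skb);

            skb_walk_frags(skb, frag)
                    sum += skb_headlen(frag);
            return sum;
    }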
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7d08f522ec84..b76411444515 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -405,13 +405,14 @@ int sctp_packet_transmit(struct sctp_packet *packet)
405 sctp_assoc_sync_pmtu(asoc); 405 sctp_assoc_sync_pmtu(asoc);
406 } 406 }
407 } 407 }
408 nskb->dst = dst_clone(tp->dst); 408 dst = dst_clone(tp->dst);
409 if (!nskb->dst) 409 skb_dst_set(nskb, dst);
 410 if (!dst)
410 goto no_route; 411 goto no_route;
411 dst = nskb->dst;
412 412
413 /* Build the SCTP header. */ 413 /* Build the SCTP header. */
414 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); 414 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
415 skb_reset_transport_header(nskb);
415 sh->source = htons(packet->source_port); 416 sh->source = htons(packet->source_port);
416 sh->dest = htons(packet->destination_port); 417 sh->dest = htons(packet->destination_port);
417 418
@@ -527,15 +528,25 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 527 * Note: Adler-32 is no longer applicable, as it has been replaced 528 * Note: Adler-32 is no longer applicable, as it has been replaced
528 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 529 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
529 */ 530 */
530 if (!sctp_checksum_disable && !(dst->dev->features & NETIF_F_NO_CSUM)) { 531 if (!sctp_checksum_disable &&
532 !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
531 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 533 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
532 534
533 /* 3) Put the resultant value into the checksum field in the 535 /* 3) Put the resultant value into the checksum field in the
534 * common header, and leave the rest of the bits unchanged. 536 * common header, and leave the rest of the bits unchanged.
535 */ 537 */
536 sh->checksum = sctp_end_cksum(crc32); 538 sh->checksum = sctp_end_cksum(crc32);
537 } else 539 } else {
538 nskb->ip_summed = CHECKSUM_UNNECESSARY; 540 if (dst->dev->features & NETIF_F_SCTP_CSUM) {
 541 /* no need to seed pseudo checksum for SCTP */
542 nskb->ip_summed = CHECKSUM_PARTIAL;
543 nskb->csum_start = (skb_transport_header(nskb) -
544 nskb->head);
545 nskb->csum_offset = offsetof(struct sctphdr, checksum);
546 } else {
547 nskb->ip_summed = CHECKSUM_UNNECESSARY;
548 }
549 }
539 550
540 /* IP layer ECN support 551 /* IP layer ECN support
541 * From RFC 2481 552 * From RFC 2481
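The NETIF_F_SCTP_CSUM branch is the standard checksum-offload handshake applied to CRC32c: leave the checksum field unfinished, mark the skb CHECKSUM_PARTIAL, and tell the device where to deposit the result. Schematically (a sketch of the relevant fields, not a complete transmit path):

    nskb->ip_summed   = CHECKSUM_PARTIAL;
    /* byte offset of the transport header within the buffer... */
    nskb->csum_start  = skb_transport_header(nskb) - nskb->head;
    /* ...plus where, inside that header, the hardware stores the CRC */
    nskb->csum_offset = offsetof(struct sctphdr, checksum);

This is also why the hunk above adds skb_reset_transport_header(nskb) right after pushing the SCTP header: csum_start is only meaningful once the transport header offset has been set.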
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 8eb3e61cb701..79cbd47f4df7 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -393,7 +393,7 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
393 return 0; 393 return 0;
394 394
395 /* Is this a broadcast address? */ 395 /* Is this a broadcast address? */
396 if (skb && skb->rtable->rt_flags & RTCF_BROADCAST) 396 if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
397 return 0; 397 return 0;
398 398
399 return 1; 399 return 1;
@@ -572,7 +572,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
572/* What interface did this skb arrive on? */ 572/* What interface did this skb arrive on? */
573static int sctp_v4_skb_iif(const struct sk_buff *skb) 573static int sctp_v4_skb_iif(const struct sk_buff *skb)
574{ 574{
575 return skb->rtable->rt_iif; 575 return skb_rtable(skb)->rt_iif;
576} 576}
577 577
578/* Was this packet marked by Explicit Congestion Notification? */ 578/* Was this packet marked by Explicit Congestion Notification? */
@@ -848,8 +848,8 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
848 848
849 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", 849 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n",
850 __func__, skb, skb->len, 850 __func__, skb, skb->len,
851 &skb->rtable->rt_src, 851 &skb_rtable(skb)->rt_src,
852 &skb->rtable->rt_dst); 852 &skb_rtable(skb)->rt_dst);
853 853
854 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? 854 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
855 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 855 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
@@ -1370,6 +1370,8 @@ SCTP_STATIC __exit void sctp_exit(void)
1370 sctp_proc_exit(); 1370 sctp_proc_exit();
1371 cleanup_sctp_mibs(); 1371 cleanup_sctp_mibs();
1372 1372
1373 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1374
1373 kmem_cache_destroy(sctp_chunk_cachep); 1375 kmem_cache_destroy(sctp_chunk_cachep);
1374 kmem_cache_destroy(sctp_bucket_cachep); 1376 kmem_cache_destroy(sctp_bucket_cachep);
1375} 1377}
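The rcu_barrier() added here (and in auth_gss further down) protects kmem_cache_destroy() from call_rcu() callbacks that are still queued: synchronize_rcu() only waits for readers to finish, while rcu_barrier() waits for every pending callback to run. The generic module-exit pattern, as a hypothetical sketch:

    static void __exit mymod_exit(void)
    {
            /* all call_rcu() callbacks that may free objects back into
             * my_cachep must complete before the cache is destroyed */
            rcu_barrier();
            kmem_cache_destroy(my_cachep);
    }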
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 6851ee94e974..61cc6075b0df 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2864,19 +2864,19 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2864 switch (addr_param->v4.param_hdr.type) { 2864 switch (addr_param->v4.param_hdr.type) {
2865 case SCTP_PARAM_IPV6_ADDRESS: 2865 case SCTP_PARAM_IPV6_ADDRESS:
2866 if (!asoc->peer.ipv6_address) 2866 if (!asoc->peer.ipv6_address)
2867 return SCTP_ERROR_INV_PARAM; 2867 return SCTP_ERROR_DNS_FAILED;
2868 break; 2868 break;
2869 case SCTP_PARAM_IPV4_ADDRESS: 2869 case SCTP_PARAM_IPV4_ADDRESS:
2870 if (!asoc->peer.ipv4_address) 2870 if (!asoc->peer.ipv4_address)
2871 return SCTP_ERROR_INV_PARAM; 2871 return SCTP_ERROR_DNS_FAILED;
2872 break; 2872 break;
2873 default: 2873 default:
2874 return SCTP_ERROR_INV_PARAM; 2874 return SCTP_ERROR_DNS_FAILED;
2875 } 2875 }
2876 2876
2877 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2877 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type));
2878 if (unlikely(!af)) 2878 if (unlikely(!af))
2879 return SCTP_ERROR_INV_PARAM; 2879 return SCTP_ERROR_DNS_FAILED;
2880 2880
2881 af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); 2881 af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
2882 2882
@@ -2886,7 +2886,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2886 * make sure we check for that) 2886 * make sure we check for that)
2887 */ 2887 */
2888 if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) 2888 if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb))
2889 return SCTP_ERROR_INV_PARAM; 2889 return SCTP_ERROR_DNS_FAILED;
2890 2890
2891 switch (asconf_param->param_hdr.type) { 2891 switch (asconf_param->param_hdr.type) {
2892 case SCTP_PARAM_ADD_IP: 2892 case SCTP_PARAM_ADD_IP:
@@ -2954,12 +2954,12 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2954 2954
2955 peer = sctp_assoc_lookup_paddr(asoc, &addr); 2955 peer = sctp_assoc_lookup_paddr(asoc, &addr);
2956 if (!peer) 2956 if (!peer)
2957 return SCTP_ERROR_INV_PARAM; 2957 return SCTP_ERROR_DNS_FAILED;
2958 2958
2959 sctp_assoc_set_primary(asoc, peer); 2959 sctp_assoc_set_primary(asoc, peer);
2960 break; 2960 break;
2961 default: 2961 default:
2962 return SCTP_ERROR_INV_PARAM; 2962 return SCTP_ERROR_UNKNOWN_PARAM;
2963 break; 2963 break;
2964 } 2964 }
2965 2965
@@ -3273,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3273 retval = 1; 3273 retval = 1;
3274 break; 3274 break;
3275 3275
3276 case SCTP_ERROR_INV_PARAM: 3276 case SCTP_ERROR_UNKNOWN_PARAM:
3277 /* Disable sending this type of asconf parameter in 3277 /* Disable sending this type of asconf parameter in
3278 * future. 3278 * future.
3279 */ 3279 */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e2020eb2c8ca..86426aac1600 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -686,7 +686,8 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
686{ 686{
687 struct sctp_transport *t; 687 struct sctp_transport *t;
688 688
689 t = sctp_assoc_choose_shutdown_transport(asoc); 689 t = sctp_assoc_choose_alter_transport(asoc,
690 asoc->shutdown_last_sent_to);
690 asoc->shutdown_last_sent_to = t; 691 asoc->shutdown_last_sent_to = t;
691 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; 692 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
692 chunk->transport = t; 693 chunk->transport = t;
@@ -777,7 +778,7 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
777{ 778{
778 struct sctp_transport *t; 779 struct sctp_transport *t;
779 780
780 t = asoc->peer.active_path; 781 t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
781 asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; 782 asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
782 chunk->transport = t; 783 chunk->transport = t;
783} 784}
@@ -1379,7 +1380,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1379 1380
1380 case SCTP_CMD_INIT_CHOOSE_TRANSPORT: 1381 case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1381 chunk = cmd->obj.ptr; 1382 chunk = cmd->obj.ptr;
1382 t = sctp_assoc_choose_init_transport(asoc); 1383 t = sctp_assoc_choose_alter_transport(asoc,
1384 asoc->init_last_sent_to);
1383 asoc->init_last_sent_to = t; 1385 asoc->init_last_sent_to = t;
1384 chunk->transport = t; 1386 chunk->transport = t;
1385 t->init_sent_count++; 1387 t->init_sent_count++;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 55a61aa69662..7288192f7df5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5432,9 +5432,13 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
5432 if (!reply) 5432 if (!reply)
5433 goto nomem; 5433 goto nomem;
5434 5434
5435 /* Do some failure management (Section 8.2). */ 5435 /* Do some failure management (Section 8.2).
5436 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, 5436 * If we remove the transport a SHUTDOWN was last sent to, don't
5437 SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); 5437 * do failure management.
5438 */
5439 if (asoc->shutdown_last_sent_to)
5440 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
5441 SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
5438 5442
5439 /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for 5443 /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
5440 * the T2-shutdown timer. 5444 * the T2-shutdown timer.
@@ -5471,7 +5475,9 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
5471 * detection on the appropriate destination address as defined in 5475 * detection on the appropriate destination address as defined in
5472 * RFC2960 [5] section 8.1 and 8.2. 5476 * RFC2960 [5] section 8.1 and 8.2.
5473 */ 5477 */
5474 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); 5478 if (transport)
5479 sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
5480 SCTP_TRANSPORT(transport));
5475 5481
5476 /* Reconfig T4 timer and transport. */ 5482 /* Reconfig T4 timer and transport. */
5477 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); 5483 sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 5c8186d88c61..6d9b3aafcc5d 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -698,7 +698,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
698 TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ 698 TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
699 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ 699 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
700 TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ 700 TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
701} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ 701} /* TYPE_SCTP_PRIMITIVE_ASCONF */
702 702
703/* The primary index for this table is the primitive type. 703/* The primary index for this table is the primitive type.
704 * The secondary index for this table is the state. 704 * The secondary index for this table is the state.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5fb3a8c9792e..0f01e5d8a24f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1100,6 +1100,15 @@ static int __sctp_connect(struct sock* sk,
1100 goto out_free; 1100 goto out_free;
1101 } 1101 }
1102 1102
1103 /* In case the user of sctp_connectx() wants an association
1104 * id back, assign one now.
1105 */
1106 if (assoc_id) {
1107 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1108 if (err < 0)
1109 goto out_free;
1110 }
1111
1103 err = sctp_primitive_ASSOCIATE(asoc, NULL); 1112 err = sctp_primitive_ASSOCIATE(asoc, NULL);
1104 if (err < 0) { 1113 if (err < 0) {
1105 goto out_free; 1114 goto out_free;
@@ -1120,7 +1129,7 @@ static int __sctp_connect(struct sock* sk,
1120 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1129 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1121 1130
1122 err = sctp_wait_for_connect(asoc, &timeo); 1131 err = sctp_wait_for_connect(asoc, &timeo);
1123 if (!err && assoc_id) 1132 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1124 *assoc_id = asoc->assoc_id; 1133 *assoc_id = asoc->assoc_id;
1125 1134
1126 /* Don't free association on exit. */ 1135 /* Don't free association on exit. */
@@ -1264,6 +1273,34 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1264 return assoc_id; 1273 return assoc_id;
1265} 1274}
1266 1275
1276/*
1277 * New (hopefully final) interface for the API. The option buffer is used
1278 * both for the returned association id and the addresses.
1279 */
1280SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
1281 char __user *optval,
1282 int __user *optlen)
1283{
1284 sctp_assoc_t assoc_id = 0;
1285 int err = 0;
1286
1287 if (len < sizeof(assoc_id))
1288 return -EINVAL;
1289
1290 err = __sctp_setsockopt_connectx(sk,
1291 (struct sockaddr __user *)(optval + sizeof(assoc_id)),
1292 len - sizeof(assoc_id), &assoc_id);
1293
1294 if (err == 0 || err == -EINPROGRESS) {
1295 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1296 return -EFAULT;
1297 if (put_user(sizeof(assoc_id), optlen))
1298 return -EFAULT;
1299 }
1300
1301 return err;
1302}
1303
1267/* API 3.1.4 close() - UDP Style Syntax 1304/* API 3.1.4 close() - UDP Style Syntax
1268 * Applications use close() to perform graceful shutdown (as described in 1305 * Applications use close() to perform graceful shutdown (as described in
1269 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1306 * Section 10.1 of [SCTP]) on ALL the associations currently represented
@@ -1844,7 +1881,7 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
1844 len -= skb_len; 1881 len -= skb_len;
1845 __skb_pull(skb, skb_len); 1882 __skb_pull(skb, skb_len);
1846 1883
1847 for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { 1884 skb_walk_frags(skb, list) {
1848 rlen = sctp_skb_pull(list, len); 1885 rlen = sctp_skb_pull(list, len);
1849 skb->len -= (len-rlen); 1886 skb->len -= (len-rlen);
1850 skb->data_len -= (len-rlen); 1887 skb->data_len -= (len-rlen);
@@ -5578,6 +5615,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5578 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5615 retval = sctp_getsockopt_local_addrs(sk, len, optval,
5579 optlen); 5616 optlen);
5580 break; 5617 break;
5618 case SCTP_SOCKOPT_CONNECTX3:
5619 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
5620 break;
5581 case SCTP_DEFAULT_SEND_PARAM: 5621 case SCTP_DEFAULT_SEND_PARAM:
5582 retval = sctp_getsockopt_default_send_param(sk, len, 5622 retval = sctp_getsockopt_default_send_param(sk, len,
5583 optval, optlen); 5623 optval, optlen);
@@ -6620,7 +6660,7 @@ static void sctp_sock_rfree_frag(struct sk_buff *skb)
6620 goto done; 6660 goto done;
6621 6661
6622 /* Don't forget the fragments. */ 6662 /* Don't forget the fragments. */
6623 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) 6663 skb_walk_frags(skb, frag)
6624 sctp_sock_rfree_frag(frag); 6664 sctp_sock_rfree_frag(frag);
6625 6665
6626done: 6666done:
@@ -6635,7 +6675,7 @@ static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
6635 goto done; 6675 goto done;
6636 6676
6637 /* Don't forget the fragments. */ 6677 /* Don't forget the fragments. */
6638 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) 6678 skb_walk_frags(skb, frag)
6639 sctp_skb_set_owner_r_frag(frag, sk); 6679 sctp_skb_set_owner_r_frag(frag, sk);
6640 6680
6641done: 6681done:
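Seen from user space, SCTP_SOCKOPT_CONNECTX3 packs the output association id and the input address list into a single getsockopt() buffer, matching the kernel-side layout above. A sketch of a wrapper (assumes SCTP_SOCKOPT_CONNECTX3 comes from headers carrying this patch; error handling trimmed):

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int my_connectx(int sd, const struct sockaddr *addrs,
                           socklen_t addrs_len, sctp_assoc_t *id)
    {
            char buf[sizeof(sctp_assoc_t) + addrs_len];
            socklen_t len = sizeof(buf);

            /* the packed addresses follow the (output-only) assoc id */
            memcpy(buf + sizeof(sctp_assoc_t), addrs, addrs_len);
            if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
                           buf, &len) < 0)
                    return -1;
            if (id)
                    memcpy(id, buf, sizeof(*id));
            return 0;
    }

Note from the kernel code above that the id is also copied back when the call returns -EINPROGRESS, so a non-blocking caller may want to inspect errno before discarding the buffer.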
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index f58e994e6852..63eabbc71298 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -49,8 +49,8 @@ static int zero = 0;
49static int one = 1; 49static int one = 1;
50static int timer_max = 86400000; /* ms in one day */ 50static int timer_max = 86400000; /* ms in one day */
51static int int_max = INT_MAX; 51static int int_max = INT_MAX;
52static long sack_timer_min = 1; 52static int sack_timer_min = 1;
53static long sack_timer_max = 500; 53static int sack_timer_max = 500;
54 54
55extern int sysctl_sctp_mem[3]; 55extern int sysctl_sctp_mem[3];
56extern int sysctl_sctp_rmem[3]; 56extern int sysctl_sctp_rmem[3];
@@ -223,7 +223,7 @@ static ctl_table sctp_table[] = {
223 .ctl_name = NET_SCTP_SACK_TIMEOUT, 223 .ctl_name = NET_SCTP_SACK_TIMEOUT,
224 .procname = "sack_timeout", 224 .procname = "sack_timeout",
225 .data = &sctp_sack_timeout, 225 .data = &sctp_sack_timeout,
226 .maxlen = sizeof(long), 226 .maxlen = sizeof(int),
227 .mode = 0644, 227 .mode = 0644,
228 .proc_handler = proc_dointvec_minmax, 228 .proc_handler = proc_dointvec_minmax,
229 .strategy = sysctl_intvec, 229 .strategy = sysctl_intvec,
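The sysctl hunk is a type-consistency rule worth spelling out: proc_dointvec_minmax() reads and writes ints, so the backing variable, .maxlen, and the .extra1/.extra2 bounds must all be int-sized; a long-sized bound on a 64-bit box makes the range check read garbage. A hypothetical entry with everything agreeing (binary-sysctl fields omitted):

    static int my_timeout = 250;
    static int my_min = 1, my_max = 500;

    static ctl_table my_table[] = {
            {
                    .procname     = "my_timeout",
                    .data         = &my_timeout,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = &my_min,
                    .extra2       = &my_max,
            },
            { }
    };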
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5f186ca550d7..8b3560fd876d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -976,9 +976,8 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
976 * In general, the skb passed from IP can have only 1 level of 976 * In general, the skb passed from IP can have only 1 level of
977 * fragments. But we allow multiple levels of fragments. 977 * fragments. But we allow multiple levels of fragments.
978 */ 978 */
979 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 979 skb_walk_frags(skb, frag)
980 sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); 980 sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
981 }
982} 981}
983 982
984/* Do accounting for bytes just read by user and release the references to 983/* Do accounting for bytes just read by user and release the references to
@@ -1003,7 +1002,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1003 goto done; 1002 goto done;
1004 1003
1005 /* Don't forget the fragments. */ 1004 /* Don't forget the fragments. */
1006 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 1005 skb_walk_frags(skb, frag) {
1007 /* NOTE: skb_shinfos are recursive. Although IP returns 1006 /* NOTE: skb_shinfos are recursive. Although IP returns
1008 * skb's with only 1 level of fragments, SCTP reassembly can 1007 * skb's with only 1 level of fragments, SCTP reassembly can
1009 * increase the levels. 1008 * increase the levels.
@@ -1026,7 +1025,7 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
1026 goto done; 1025 goto done;
1027 1026
1028 /* Don't forget the fragments. */ 1027 /* Don't forget the fragments. */
1029 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 1028 skb_walk_frags(skb, frag) {
1030 /* NOTE: skb_shinfos are recursive. Although IP returns 1029 /* NOTE: skb_shinfos are recursive. Although IP returns
1031 * skb's with only 1 level of fragments, SCTP reassembly can 1030 * skb's with only 1 level of fragments, SCTP reassembly can
1032 * increase the levels. 1031 * increase the levels.
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e630b38a6047..66d458fc6920 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1548,6 +1548,7 @@ static void __exit exit_rpcsec_gss(void)
1548{ 1548{
1549 gss_svc_shutdown(); 1549 gss_svc_shutdown();
1550 rpcauth_unregister(&authgss_ops); 1550 rpcauth_unregister(&authgss_ops);
1551 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1551} 1552}
1552 1553
1553MODULE_LICENSE("GPL"); 1554MODULE_LICENSE("GPL");
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6b90ce439c00..952f206ff307 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -125,7 +125,7 @@ svc_pool_map_choose_mode(void)
125{ 125{
126 unsigned int node; 126 unsigned int node;
127 127
128 if (num_online_nodes() > 1) { 128 if (nr_online_nodes > 1) {
129 /* 129 /*
130 * Actually have multiple NUMA nodes, 130 * Actually have multiple NUMA nodes,
131 * so split pools on NUMA node boundaries 131 * so split pools on NUMA node boundaries
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8a721867b601..9111d11c09fd 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -928,7 +928,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
928 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 928 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
929 929
930 /* Something worked... */ 930 /* Something worked... */
931 dst_confirm(skb->dst); 931 dst_confirm(skb_dst(skb));
932 932
933 xprt_adjust_cwnd(task, copied); 933 xprt_adjust_cwnd(task, copied);
934 xprt_update_rtt(task); 934 xprt_update_rtt(task);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index f72ba774c246..524ba5696d4d 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -167,7 +167,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
167 tb_ptr->mtu = dev->mtu; 167 tb_ptr->mtu = dev->mtu;
168 tb_ptr->blocked = 0; 168 tb_ptr->blocked = 0;
169 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH); 169 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
170 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN); 170 memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
171 return 0; 171 return 0;
172} 172}
173 173
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index c387217bb230..3c57005e44d1 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -68,7 +68,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
68 return 0; 68 return 0;
69} 69}
70 70
71static struct genl_family family = { 71static struct genl_family tipc_genl_family = {
72 .id = GENL_ID_GENERATE, 72 .id = GENL_ID_GENERATE,
73 .name = TIPC_GENL_NAME, 73 .name = TIPC_GENL_NAME,
74 .version = TIPC_GENL_VERSION, 74 .version = TIPC_GENL_VERSION,
@@ -76,39 +76,33 @@ static struct genl_family family = {
76 .maxattr = 0, 76 .maxattr = 0,
77}; 77};
78 78
79static struct genl_ops ops = { 79static struct genl_ops tipc_genl_ops = {
80 .cmd = TIPC_GENL_CMD, 80 .cmd = TIPC_GENL_CMD,
81 .doit = handle_cmd, 81 .doit = handle_cmd,
82}; 82};
83 83
84static int family_registered = 0; 84static int tipc_genl_family_registered;
85 85
86int tipc_netlink_start(void) 86int tipc_netlink_start(void)
87{ 87{
88 int res;
88 89
90 res = genl_register_family_with_ops(&tipc_genl_family,
91 &tipc_genl_ops, 1);
92 if (res) {
93 err("Failed to register netlink interface\n");
94 return res;
95 }
89 96
90 if (genl_register_family(&family)) 97 tipc_genl_family_registered = 1;
91 goto err;
92
93 family_registered = 1;
94
95 if (genl_register_ops(&family, &ops))
96 goto err_unregister;
97
98 return 0; 98 return 0;
99
100 err_unregister:
101 genl_unregister_family(&family);
102 family_registered = 0;
103 err:
104 err("Failed to register netlink interface\n");
105 return -EFAULT;
106} 99}
107 100
108void tipc_netlink_stop(void) 101void tipc_netlink_stop(void)
109{ 102{
110 if (family_registered) { 103 if (!tipc_genl_family_registered)
111 genl_unregister_family(&family); 104 return;
112 family_registered = 0; 105
113 } 106 genl_unregister_family(&tipc_genl_family);
107 tipc_genl_family_registered = 0;
114} 108}
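genl_register_family_with_ops() registers the family and a whole ops array in one call and unregisters the family itself if any op fails, which is what lets the tipc error unwinding above disappear. A hypothetical minimal user (module boilerplate omitted):

    static int my_doit(struct sk_buff *skb, struct genl_info *info)
    {
            return 0;               /* handle the command here */
    }

    static struct genl_family my_family = {
            .id      = GENL_ID_GENERATE,
            .name    = "my_family",
            .version = 1,
            .maxattr = 0,
    };

    static struct genl_ops my_ops[] = {
            { .cmd = 1, .doit = my_doit },
    };

    static int __init my_init(void)
    {
            return genl_register_family_with_ops(&my_family, my_ops,
                                                 ARRAY_SIZE(my_ops));
    }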
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 1b46747a5f5a..e4d97ab476d5 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,23 +1,10 @@
1# 1#
2# WiMAX LAN device configuration 2# WiMAX LAN device configuration
3# 3#
4# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
5# module if WIMAX is to be linked in. The WiMAX code is done in such a
6# way that it doesn't require an explicit dependency on RFKILL in
7# case an embedded system wants to rip it out.
8#
9# As well, enablement of the RFKILL code means we need the INPUT layer
10# support to inject events coming from hw rfkill switches. That
11# dependency could be killed if input.h provided appropriate means to
12# work when input is disabled.
13
14comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
15 depends on INPUT = n && RFKILL != n
16 4
17menuconfig WIMAX 5menuconfig WIMAX
18 tristate "WiMAX Wireless Broadband support" 6 tristate "WiMAX Wireless Broadband support"
19 depends on (y && RFKILL != m) || m 7 depends on RFKILL || !RFKILL
20 depends on (INPUT && RFKILL != n) || RFKILL = n
21 help 8 help
22 9
23 Select to configure support for devices that provide 10 Select to configure support for devices that provide
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
index 5b80b941c2c9..8f1510d0cc2b 100644
--- a/net/wimax/Makefile
+++ b/net/wimax/Makefile
@@ -6,6 +6,7 @@ wimax-y := \
6 op-msg.o \ 6 op-msg.o \
7 op-reset.o \ 7 op-reset.o \
8 op-rfkill.o \ 8 op-rfkill.o \
9 op-state-get.o \
9 stack.o 10 stack.o
10 11
11wimax-$(CONFIG_DEBUG_FS) += debugfs.o 12wimax-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
index 1c29123a3aa9..0975adba6b71 100644
--- a/net/wimax/debug-levels.h
+++ b/net/wimax/debug-levels.h
@@ -36,6 +36,7 @@ enum d_module {
36 D_SUBMODULE_DECLARE(op_msg), 36 D_SUBMODULE_DECLARE(op_msg),
37 D_SUBMODULE_DECLARE(op_reset), 37 D_SUBMODULE_DECLARE(op_reset),
38 D_SUBMODULE_DECLARE(op_rfkill), 38 D_SUBMODULE_DECLARE(op_rfkill),
39 D_SUBMODULE_DECLARE(op_state_get),
39 D_SUBMODULE_DECLARE(stack), 40 D_SUBMODULE_DECLARE(stack),
40}; 41};
41 42
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 94d216a46407..6c9bedb7431e 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -61,6 +61,7 @@ int wimax_debugfs_add(struct wimax_dev *wimax_dev)
61 __debugfs_register("wimax_dl_", op_msg, dentry); 61 __debugfs_register("wimax_dl_", op_msg, dentry);
62 __debugfs_register("wimax_dl_", op_reset, dentry); 62 __debugfs_register("wimax_dl_", op_reset, dentry);
63 __debugfs_register("wimax_dl_", op_rfkill, dentry); 63 __debugfs_register("wimax_dl_", op_rfkill, dentry);
64 __debugfs_register("wimax_dl_", op_state_get, dentry);
64 __debugfs_register("wimax_dl_", stack, dentry); 65 __debugfs_register("wimax_dl_", stack, dentry);
65 result = 0; 66 result = 0;
66out: 67out:
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 9ad4d893a566..d631a17186bc 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -108,6 +108,12 @@
108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as 108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
109 * wimax_msg_send() depends on skb->data being placed at the 109 * wimax_msg_send() depends on skb->data being placed at the
110 * beginning of the user message. 110 * beginning of the user message.
111 *
112 * Unlike other WiMAX stack calls, this call can be used way early,
113 * even before wimax_dev_add() is called, as long as the
114 * wimax_dev->net_dev pointer is set to point to a proper
115 * net_dev. This is so that drivers can use it early in case they need
116 * to send stuff around or communicate with user space.
111 */ 117 */
112struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, 118struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
113 const char *pipe_name, 119 const char *pipe_name,
@@ -115,7 +121,7 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
115 gfp_t gfp_flags) 121 gfp_t gfp_flags)
116{ 122{
117 int result; 123 int result;
118 struct device *dev = wimax_dev->net_dev->dev.parent; 124 struct device *dev = wimax_dev_to_dev(wimax_dev);
119 size_t msg_size; 125 size_t msg_size;
120 void *genl_msg; 126 void *genl_msg;
121 struct sk_buff *skb; 127 struct sk_buff *skb;
@@ -161,7 +167,6 @@ error_genlmsg_put:
161error_new: 167error_new:
162 nlmsg_free(skb); 168 nlmsg_free(skb);
163 return ERR_PTR(result); 169 return ERR_PTR(result);
164
165} 170}
166EXPORT_SYMBOL_GPL(wimax_msg_alloc); 171EXPORT_SYMBOL_GPL(wimax_msg_alloc);
167 172
@@ -256,10 +261,16 @@ EXPORT_SYMBOL_GPL(wimax_msg_len);
256 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as 261 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
257 * wimax_msg_send() depends on skb->data being placed at the 262 * wimax_msg_send() depends on skb->data being placed at the
258 * beginning of the user message. 263 * beginning of the user message.
264 *
265 * Unlike other WiMAX stack calls, this call can be used way early,
266 * even before wimax_dev_add() is called, as long as the
267 * wimax_dev->net_dev pointer is set to point to a proper
268 * net_dev. This is so that drivers can use it early in case they need
269 * to send stuff around or communicate with user space.
259 */ 270 */
260int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) 271int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
261{ 272{
262 struct device *dev = wimax_dev->net_dev->dev.parent; 273 struct device *dev = wimax_dev_to_dev(wimax_dev);
263 void *msg = skb->data; 274 void *msg = skb->data;
264 size_t size = skb->len; 275 size_t size = skb->len;
265 might_sleep(); 276 might_sleep();
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 2b75aee04217..70ef4df863b9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -29,8 +29,8 @@
29 * A non-polled generic rfkill device is embedded into the WiMAX 29 * A non-polled generic rfkill device is embedded into the WiMAX
30 * subsystem's representation of a device. 30 * subsystem's representation of a device.
31 * 31 *
32 * FIXME: Need polled support? use a timer or add the implementation 32 * FIXME: Need polled support? Let drivers provide a poll routine
33 * to the stack. 33 * and hand it to rfkill ops then?
34 * 34 *
 35 * All device drivers have to do is, after wimax_dev_init(), call 35 * All device drivers have to do is, after wimax_dev_init(), call
36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update 36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update
@@ -43,7 +43,7 @@
43 * wimax_rfkill() Kernel calling wimax_rfkill() 43 * wimax_rfkill() Kernel calling wimax_rfkill()
44 * __wimax_rf_toggle_radio() 44 * __wimax_rf_toggle_radio()
45 * 45 *
 46 * wimax_rfkill_toggle_radio() RF-Kill subsystem calling 46 * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
47 * __wimax_rf_toggle_radio() 47 * __wimax_rf_toggle_radio()
48 * 48 *
49 * __wimax_rf_toggle_radio() 49 * __wimax_rf_toggle_radio()
@@ -65,15 +65,11 @@
65#include <linux/wimax.h> 65#include <linux/wimax.h>
66#include <linux/security.h> 66#include <linux/security.h>
67#include <linux/rfkill.h> 67#include <linux/rfkill.h>
68#include <linux/input.h>
69#include "wimax-internal.h" 68#include "wimax-internal.h"
70 69
71#define D_SUBMODULE op_rfkill 70#define D_SUBMODULE op_rfkill
72#include "debug-levels.h" 71#include "debug-levels.h"
73 72
74#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
75
76
77/** 73/**
78 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch 74 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
79 * 75 *
@@ -99,7 +95,6 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
99 int result; 95 int result;
100 struct device *dev = wimax_dev_to_dev(wimax_dev); 96 struct device *dev = wimax_dev_to_dev(wimax_dev);
101 enum wimax_st wimax_state; 97 enum wimax_st wimax_state;
102 enum rfkill_state rfkill_state;
103 98
104 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 99 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
105 BUG_ON(state == WIMAX_RF_QUERY); 100 BUG_ON(state == WIMAX_RF_QUERY);
@@ -112,16 +107,16 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
112 107
113 if (state != wimax_dev->rf_hw) { 108 if (state != wimax_dev->rf_hw) {
114 wimax_dev->rf_hw = state; 109 wimax_dev->rf_hw = state;
115 rfkill_state = state == WIMAX_RF_ON ?
116 RFKILL_STATE_OFF : RFKILL_STATE_ON;
117 if (wimax_dev->rf_hw == WIMAX_RF_ON 110 if (wimax_dev->rf_hw == WIMAX_RF_ON
118 && wimax_dev->rf_sw == WIMAX_RF_ON) 111 && wimax_dev->rf_sw == WIMAX_RF_ON)
119 wimax_state = WIMAX_ST_READY; 112 wimax_state = WIMAX_ST_READY;
120 else 113 else
121 wimax_state = WIMAX_ST_RADIO_OFF; 114 wimax_state = WIMAX_ST_RADIO_OFF;
115
116 result = rfkill_set_hw_state(wimax_dev->rfkill,
117 state == WIMAX_RF_OFF);
118
122 __wimax_state_change(wimax_dev, wimax_state); 119 __wimax_state_change(wimax_dev, wimax_state);
123 input_report_key(wimax_dev->rfkill_input, KEY_WIMAX,
124 rfkill_state);
125 } 120 }
126error_not_ready: 121error_not_ready:
127 mutex_unlock(&wimax_dev->mutex); 122 mutex_unlock(&wimax_dev->mutex);
@@ -174,6 +169,7 @@ void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
174 else 169 else
175 wimax_state = WIMAX_ST_RADIO_OFF; 170 wimax_state = WIMAX_ST_RADIO_OFF;
176 __wimax_state_change(wimax_dev, wimax_state); 171 __wimax_state_change(wimax_dev, wimax_state);
172 rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
177 } 173 }
178error_not_ready: 174error_not_ready:
179 mutex_unlock(&wimax_dev->mutex); 175 mutex_unlock(&wimax_dev->mutex);
@@ -249,36 +245,31 @@ out_no_change:
249 * 245 *
250 * NOTE: This call will block until the operation is completed. 246 * NOTE: This call will block until the operation is completed.
251 */ 247 */
252static 248static int wimax_rfkill_set_radio_block(void *data, bool blocked)
253int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
254{ 249{
255 int result; 250 int result;
256 struct wimax_dev *wimax_dev = data; 251 struct wimax_dev *wimax_dev = data;
257 struct device *dev = wimax_dev_to_dev(wimax_dev); 252 struct device *dev = wimax_dev_to_dev(wimax_dev);
258 enum wimax_rf_state rf_state; 253 enum wimax_rf_state rf_state;
259 254
260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 255 d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked);
261 switch (state) { 256 rf_state = WIMAX_RF_ON;
262 case RFKILL_STATE_ON: 257 if (blocked)
263 rf_state = WIMAX_RF_OFF; 258 rf_state = WIMAX_RF_OFF;
264 break;
265 case RFKILL_STATE_OFF:
266 rf_state = WIMAX_RF_ON;
267 break;
268 default:
269 BUG();
270 }
271 mutex_lock(&wimax_dev->mutex); 259 mutex_lock(&wimax_dev->mutex);
272 if (wimax_dev->state <= __WIMAX_ST_QUIESCING) 260 if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
273 result = 0; /* just pretend it didn't happen */ 261 result = 0;
274 else 262 else
275 result = __wimax_rf_toggle_radio(wimax_dev, rf_state); 263 result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
276 mutex_unlock(&wimax_dev->mutex); 264 mutex_unlock(&wimax_dev->mutex);
277 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", 265 d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n",
278 wimax_dev, state, result); 266 wimax_dev, blocked, result);
279 return result; 267 return result;
280} 268}
281 269
270static const struct rfkill_ops wimax_rfkill_ops = {
271 .set_block = wimax_rfkill_set_radio_block,
272};
282 273
283/** 274/**
284 * wimax_rfkill - Set the software RF switch state for a WiMAX device 275 * wimax_rfkill - Set the software RF switch state for a WiMAX device
@@ -322,6 +313,7 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
322 result = __wimax_rf_toggle_radio(wimax_dev, state); 313 result = __wimax_rf_toggle_radio(wimax_dev, state);
323 if (result < 0) 314 if (result < 0)
324 goto error; 315 goto error;
316 rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
325 break; 317 break;
326 case WIMAX_RF_QUERY: 318 case WIMAX_RF_QUERY:
327 break; 319 break;
@@ -349,41 +341,20 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
349{ 341{
350 int result; 342 int result;
351 struct rfkill *rfkill; 343 struct rfkill *rfkill;
352 struct input_dev *input_dev;
353 struct device *dev = wimax_dev_to_dev(wimax_dev); 344 struct device *dev = wimax_dev_to_dev(wimax_dev);
354 345
355 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); 346 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
356 /* Initialize RF Kill */ 347 /* Initialize RF Kill */
357 result = -ENOMEM; 348 result = -ENOMEM;
358 rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX); 349 rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX,
350 &wimax_rfkill_ops, wimax_dev);
359 if (rfkill == NULL) 351 if (rfkill == NULL)
360 goto error_rfkill_allocate; 352 goto error_rfkill_allocate;
353
354 d_printf(1, dev, "rfkill %p\n", rfkill);
355
361 wimax_dev->rfkill = rfkill; 356 wimax_dev->rfkill = rfkill;
362 357
363 rfkill->name = wimax_dev->name;
364 rfkill->state = RFKILL_STATE_OFF;
365 rfkill->data = wimax_dev;
366 rfkill->toggle_radio = wimax_rfkill_toggle_radio;
367 rfkill->user_claim_unsupported = 1;
368
369 /* Initialize the input device for the hw key */
370 input_dev = input_allocate_device();
371 if (input_dev == NULL)
372 goto error_input_allocate;
373 wimax_dev->rfkill_input = input_dev;
374 d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev);
375
376 input_dev->name = wimax_dev->name;
377 /* FIXME: get a real device bus ID and stuff? do we care? */
378 input_dev->id.bustype = BUS_HOST;
379 input_dev->id.vendor = 0xffff;
380 input_dev->evbit[0] = BIT(EV_KEY);
381 set_bit(KEY_WIMAX, input_dev->keybit);
382
383 /* Register both */
384 result = input_register_device(wimax_dev->rfkill_input);
385 if (result < 0)
386 goto error_input_register;
387 result = rfkill_register(wimax_dev->rfkill); 358 result = rfkill_register(wimax_dev->rfkill);
388 if (result < 0) 359 if (result < 0)
389 goto error_rfkill_register; 360 goto error_rfkill_register;
@@ -395,17 +366,8 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
395 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); 366 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
396 return 0; 367 return 0;
397 368
 398 /* if rfkill_register() succeeds, can't use rfkill_free() any
399 * more, only rfkill_unregister() [it owns the refcount]; with
400 * the input device we have the same issue--hence the if. */
401error_rfkill_register: 369error_rfkill_register:
402 input_unregister_device(wimax_dev->rfkill_input); 370 rfkill_destroy(wimax_dev->rfkill);
403 wimax_dev->rfkill_input = NULL;
404error_input_register:
405 if (wimax_dev->rfkill_input)
406 input_free_device(wimax_dev->rfkill_input);
407error_input_allocate:
408 rfkill_free(wimax_dev->rfkill);
409error_rfkill_allocate: 371error_rfkill_allocate:
410 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); 372 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
411 return result; 373 return result;
@@ -424,45 +386,12 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
424{ 386{
425 struct device *dev = wimax_dev_to_dev(wimax_dev); 387 struct device *dev = wimax_dev_to_dev(wimax_dev);
426 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); 388 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
427 rfkill_unregister(wimax_dev->rfkill); /* frees */ 389 rfkill_unregister(wimax_dev->rfkill);
428 input_unregister_device(wimax_dev->rfkill_input); 390 rfkill_destroy(wimax_dev->rfkill);
429 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); 391 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
430} 392}
431 393
432 394
433#else /* #ifdef CONFIG_RFKILL */
434
435void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
436 enum wimax_rf_state state)
437{
438}
439EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
440
441void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
442 enum wimax_rf_state state)
443{
444}
445EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
446
447int wimax_rfkill(struct wimax_dev *wimax_dev,
448 enum wimax_rf_state state)
449{
450 return WIMAX_RF_ON << 1 | WIMAX_RF_ON;
451}
452EXPORT_SYMBOL_GPL(wimax_rfkill);
453
454int wimax_rfkill_add(struct wimax_dev *wimax_dev)
455{
456 return 0;
457}
458
459void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
460{
461}
462
463#endif /* #ifdef CONFIG_RFKILL */
464
465
466/* 395/*
467 * Exporting to user space over generic netlink 396 * Exporting to user space over generic netlink
468 * 397 *
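The op-rfkill.c rewrite is a port to the reworked rfkill core, in which the subsystem owns the input and uevent plumbing and a driver supplies only a set_block() callback plus state reports. The shape of that API, as a hypothetical sketch (the mydev_* names are placeholders):

    static int mydev_set_block(void *data, bool blocked)
    {
            /* cut the radio when blocked, restore it otherwise; may sleep */
            return mydev_radio_enable(data, !blocked);      /* placeholder */
    }

    static const struct rfkill_ops mydev_rfkill_ops = {
            .set_block = mydev_set_block,
    };

    static int mydev_rfkill_init(struct mydev *md, struct device *dev)
    {
            int err;

            md->rfk = rfkill_alloc("mydev", dev, RFKILL_TYPE_WIMAX,
                                   &mydev_rfkill_ops, md);
            if (!md->rfk)
                    return -ENOMEM;
            err = rfkill_register(md->rfk);
            if (err) {
                    /* allocated but never registered: destroy by hand */
                    rfkill_destroy(md->rfk);
                    return err;
            }
            /* later, whenever the hardware switch changes: */
            rfkill_set_hw_state(md->rfk, false /* not blocked */);
            return 0;
    }

Since the core no longer frees on unregister, teardown is the rfkill_unregister() plus rfkill_destroy() pair seen in wimax_rfkill_rm() above.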
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
new file mode 100644
index 000000000000..a76b8fcb056d
--- /dev/null
+++ b/net/wimax/op-state-get.c
@@ -0,0 +1,86 @@
1/*
2 * Linux WiMAX
 3 * Implement and export a method for getting a WiMAX device's current state
4 *
5 * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
6 *
7 * Based on previous WiMAX core work by:
8 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
9 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License version
13 * 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * 02110-1301, USA.
24 */
25
26#include <net/wimax.h>
27#include <net/genetlink.h>
28#include <linux/wimax.h>
29#include <linux/security.h>
30#include "wimax-internal.h"
31
32#define D_SUBMODULE op_state_get
33#include "debug-levels.h"
34
35
36static const
37struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
38 [WIMAX_GNL_STGET_IFIDX] = {
39 .type = NLA_U32,
40 },
41};
42
43
44/*
45 * Exporting to user space over generic netlink
46 *
47 * Parse the state get command from user space, return a combination
48 * value that describe the current state.
49 *
50 * No attributes.
51 */
52static
53int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
54{
55 int result, ifindex;
56 struct wimax_dev *wimax_dev;
57 struct device *dev;
58
59 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
60 result = -ENODEV;
61 if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
62 printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX "
63 "attribute\n");
64 goto error_no_wimax_dev;
65 }
66 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
67 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
68 if (wimax_dev == NULL)
69 goto error_no_wimax_dev;
70 dev = wimax_dev_to_dev(wimax_dev);
71 /* Execute the operation and send the result back to user space */
72 result = wimax_state_get(wimax_dev);
73 dev_put(wimax_dev->net_dev);
74error_no_wimax_dev:
75 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
76 return result;
77}
78
79
80struct genl_ops wimax_gnl_state_get = {
81 .cmd = WIMAX_GNL_OP_STATE_GET,
82 .flags = GENL_ADMIN_PERM,
83 .policy = wimax_gnl_state_get_policy,
84 .doit = wimax_gnl_doit_state_get,
85 .dumpit = NULL,
86};
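For context, querying this command from user space could look roughly like the sketch below. It uses libnl-3 calls (contemporary code would have used the libnl-1 equivalents); the "WiMAX" generic-netlink family name and the convention that the doit handler's positive return value travels back in the netlink ACK's error field are inferred from the surrounding wimax code, so treat this as illustrative only:

	#include <net/if.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/wimax.h>

	static int wimax_query_state(const char *ifname)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg = nlmsg_alloc();
		int family, res;

		genl_connect(sk);
		family = genl_ctrl_resolve(sk, "WiMAX");	/* assumed family name */

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			    WIMAX_GNL_OP_STATE_GET, WIMAX_GNL_VERSION);
		nla_put_u32(msg, WIMAX_GNL_STGET_IFIDX, if_nametoindex(ifname));
		nl_send_auto(sk, msg);

		/* the device state comes back as a positive "error" in the ACK;
		 * libnl's default handlers may need a custom callback to see it */
		res = nl_wait_for_ack(sk);

		nlmsg_free(msg);
		nl_socket_free(sk);
		return res;
	}
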
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 933e1422b09f..79fb7d7c640f 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -402,13 +402,15 @@ EXPORT_SYMBOL_GPL(wimax_dev_init);
402extern struct genl_ops 402extern struct genl_ops
403 wimax_gnl_msg_from_user, 403 wimax_gnl_msg_from_user,
404 wimax_gnl_reset, 404 wimax_gnl_reset,
405 wimax_gnl_rfkill; 405 wimax_gnl_rfkill,
406 wimax_gnl_state_get;
406 407
407static 408static
408struct genl_ops *wimax_gnl_ops[] = { 409struct genl_ops *wimax_gnl_ops[] = {
409 &wimax_gnl_msg_from_user, 410 &wimax_gnl_msg_from_user,
410 &wimax_gnl_reset, 411 &wimax_gnl_reset,
411 &wimax_gnl_rfkill, 412 &wimax_gnl_rfkill,
413 &wimax_gnl_state_get,
412}; 414};
413 415
414 416
@@ -533,6 +535,7 @@ struct d_level D_LEVEL[] = {
533 D_SUBMODULE_DEFINE(op_msg), 535 D_SUBMODULE_DEFINE(op_msg),
534 D_SUBMODULE_DEFINE(op_reset), 536 D_SUBMODULE_DEFINE(op_reset),
535 D_SUBMODULE_DEFINE(op_rfkill), 537 D_SUBMODULE_DEFINE(op_rfkill),
538 D_SUBMODULE_DEFINE(op_state_get),
536 D_SUBMODULE_DEFINE(stack), 539 D_SUBMODULE_DEFINE(stack),
537}; 540};
538size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); 541size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 3c3bc9e579ed..4428dd5e911d 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,6 @@
1config CFG80211 1config CFG80211
2 tristate "Improved wireless configuration API" 2 tristate "Improved wireless configuration API"
3 depends on RFKILL || !RFKILL
3 4
4config CFG80211_REG_DEBUG 5config CFG80211_REG_DEBUG
5 bool "cfg80211 regulatory debugging" 6 bool "cfg80211 regulatory debugging"
@@ -10,6 +11,14 @@ config CFG80211_REG_DEBUG
10 11
11 If unsure, say N. 12 If unsure, say N.
12 13
14config CFG80211_DEBUGFS
15 bool "cfg80211 DebugFS entries"
16 depends on CFG80211 && DEBUG_FS
17 ---help---
18 You can enable this if you want debugfs entries for cfg80211.
19
20 If unsure, say N.
21
13config WIRELESS_OLD_REGULATORY 22config WIRELESS_OLD_REGULATORY
14 bool "Old wireless static regulatory definitions" 23 bool "Old wireless static regulatory definitions"
15 default n 24 default n
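A note on the new "depends on RFKILL || !RFKILL" line above: the expression is satisfied outright by RFKILL=y and RFKILL=n, but with RFKILL=m it restricts CFG80211 to m or n. The net effect is that cfg80211 can never be built into the kernel while the rfkill core it now calls is a module, which would otherwise leave unresolved symbols at link time.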
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 6d1e7b27b752..f78c4832a9ca 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o 5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o
9cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o 10cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
10 11
11ccflags-y += -D__CHECK_ENDIAN__ 12ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d1f556535f6d..d5850292b3df 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -12,12 +12,13 @@
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/rtnetlink.h>
15#include <net/genetlink.h> 16#include <net/genetlink.h>
16#include <net/cfg80211.h> 17#include <net/cfg80211.h>
17#include <net/wireless.h>
18#include "nl80211.h" 18#include "nl80211.h"
19#include "core.h" 19#include "core.h"
20#include "sysfs.h" 20#include "sysfs.h"
21#include "debugfs.h"
21 22
22/* name for sysfs, %d is appended */ 23/* name for sysfs, %d is appended */
23#define PHY_NAME "phy" 24#define PHY_NAME "phy"
@@ -227,9 +228,44 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
227 return 0; 228 return 0;
228} 229}
229 230
231static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
232{
233 struct cfg80211_registered_device *drv = data;
234
235 drv->ops->rfkill_poll(&drv->wiphy);
236}
237
238static int cfg80211_rfkill_set_block(void *data, bool blocked)
239{
240 struct cfg80211_registered_device *drv = data;
241 struct wireless_dev *wdev;
242
243 if (!blocked)
244 return 0;
245
246 rtnl_lock();
247 mutex_lock(&drv->devlist_mtx);
248
249 list_for_each_entry(wdev, &drv->netdev_list, list)
250 dev_close(wdev->netdev);
251
252 mutex_unlock(&drv->devlist_mtx);
253 rtnl_unlock();
254
255 return 0;
256}
257
258static void cfg80211_rfkill_sync_work(struct work_struct *work)
259{
260 struct cfg80211_registered_device *drv;
261
262 drv = container_of(work, struct cfg80211_registered_device, rfkill_sync);
263 cfg80211_rfkill_set_block(drv, rfkill_blocked(drv->rfkill));
264}
265
230/* exported functions */ 266/* exported functions */
231 267
232struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) 268struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
233{ 269{
234 static int wiphy_counter; 270 static int wiphy_counter;
235 271
@@ -274,6 +310,28 @@ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
274 drv->wiphy.dev.class = &ieee80211_class; 310 drv->wiphy.dev.class = &ieee80211_class;
275 drv->wiphy.dev.platform_data = drv; 311 drv->wiphy.dev.platform_data = drv;
276 312
313 drv->rfkill_ops.set_block = cfg80211_rfkill_set_block;
314 drv->rfkill = rfkill_alloc(dev_name(&drv->wiphy.dev),
315 &drv->wiphy.dev, RFKILL_TYPE_WLAN,
316 &drv->rfkill_ops, drv);
317
318 if (!drv->rfkill) {
319 kfree(drv);
320 return NULL;
321 }
322
323 INIT_WORK(&drv->rfkill_sync, cfg80211_rfkill_sync_work);
324
325 /*
326 * Initialize wiphy parameters to IEEE 802.11 MIB default values.
327 * Fragmentation and RTS threshold are disabled by default with the
328 * special -1 value.
329 */
330 drv->wiphy.retry_short = 7;
331 drv->wiphy.retry_long = 4;
332 drv->wiphy.frag_threshold = (u32) -1;
333 drv->wiphy.rts_threshold = (u32) -1;
334
277 return &drv->wiphy; 335 return &drv->wiphy;
278} 336}
279EXPORT_SYMBOL(wiphy_new); 337EXPORT_SYMBOL(wiphy_new);
@@ -337,17 +395,23 @@ int wiphy_register(struct wiphy *wiphy)
337 /* check and set up bitrates */ 395 /* check and set up bitrates */
338 ieee80211_set_bitrate_flags(wiphy); 396 ieee80211_set_bitrate_flags(wiphy);
339 397
398 res = device_add(&drv->wiphy.dev);
399 if (res)
400 return res;
401
402 res = rfkill_register(drv->rfkill);
403 if (res)
404 goto out_rm_dev;
405
340 mutex_lock(&cfg80211_mutex); 406 mutex_lock(&cfg80211_mutex);
341 407
342 /* set up regulatory info */ 408 /* set up regulatory info */
343 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 409 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
344 410
345 res = device_add(&drv->wiphy.dev);
346 if (res)
347 goto out_unlock;
348
349 list_add(&drv->list, &cfg80211_drv_list); 411 list_add(&drv->list, &cfg80211_drv_list);
350 412
413 mutex_unlock(&cfg80211_mutex);
414
351 /* add to debugfs */ 415 /* add to debugfs */
352 drv->wiphy.debugfsdir = 416 drv->wiphy.debugfsdir =
353 debugfs_create_dir(wiphy_name(&drv->wiphy), 417 debugfs_create_dir(wiphy_name(&drv->wiphy),
@@ -366,17 +430,41 @@ int wiphy_register(struct wiphy *wiphy)
366 nl80211_send_reg_change_event(&request); 430 nl80211_send_reg_change_event(&request);
367 } 431 }
368 432
369 res = 0; 433 cfg80211_debugfs_drv_add(drv);
370out_unlock: 434
371 mutex_unlock(&cfg80211_mutex); 435 return 0;
436
437 out_rm_dev:
438 device_del(&drv->wiphy.dev);
372 return res; 439 return res;
373} 440}
374EXPORT_SYMBOL(wiphy_register); 441EXPORT_SYMBOL(wiphy_register);
375 442
443void wiphy_rfkill_start_polling(struct wiphy *wiphy)
444{
445 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
446
447 if (!drv->ops->rfkill_poll)
448 return;
449 drv->rfkill_ops.poll = cfg80211_rfkill_poll;
450 rfkill_resume_polling(drv->rfkill);
451}
452EXPORT_SYMBOL(wiphy_rfkill_start_polling);
453
454void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
455{
456 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
457
458 rfkill_pause_polling(drv->rfkill);
459}
460EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
461
376void wiphy_unregister(struct wiphy *wiphy) 462void wiphy_unregister(struct wiphy *wiphy)
377{ 463{
378 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); 464 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
379 465
466 rfkill_unregister(drv->rfkill);
467
380 /* protect the device list */ 468 /* protect the device list */
381 mutex_lock(&cfg80211_mutex); 469 mutex_lock(&cfg80211_mutex);
382 470
@@ -396,6 +484,8 @@ void wiphy_unregister(struct wiphy *wiphy)
396 /* unlock again before freeing */ 484 /* unlock again before freeing */
397 mutex_unlock(&drv->mtx); 485 mutex_unlock(&drv->mtx);
398 486
487 cfg80211_debugfs_drv_del(drv);
488
399 /* If this device got a regulatory hint tell core it's 489 /* If this device got a regulatory hint tell core it's
400 * free to listen now to a new shiny device regulatory hint */ 490 * free to listen now to a new shiny device regulatory hint */
401 reg_device_remove(wiphy); 491 reg_device_remove(wiphy);
@@ -411,6 +501,7 @@ EXPORT_SYMBOL(wiphy_unregister);
411void cfg80211_dev_free(struct cfg80211_registered_device *drv) 501void cfg80211_dev_free(struct cfg80211_registered_device *drv)
412{ 502{
413 struct cfg80211_internal_bss *scan, *tmp; 503 struct cfg80211_internal_bss *scan, *tmp;
504 rfkill_destroy(drv->rfkill);
414 mutex_destroy(&drv->mtx); 505 mutex_destroy(&drv->mtx);
415 mutex_destroy(&drv->devlist_mtx); 506 mutex_destroy(&drv->devlist_mtx);
416 list_for_each_entry_safe(scan, tmp, &drv->bss_list, list) 507 list_for_each_entry_safe(scan, tmp, &drv->bss_list, list)
@@ -424,6 +515,15 @@ void wiphy_free(struct wiphy *wiphy)
424} 515}
425EXPORT_SYMBOL(wiphy_free); 516EXPORT_SYMBOL(wiphy_free);
426 517
518void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
519{
520 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
521
522 if (rfkill_set_hw_state(drv->rfkill, blocked))
523 schedule_work(&drv->rfkill_sync);
524}
525EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
526
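Taken together, the rfkill_poll op, wiphy_rfkill_set_hw_state() and the polling start/stop helpers give drivers a small surface. A hypothetical driver with a hardware switch (all mydrv_* names here are invented for illustration) would wire it up roughly as:

	static void mydrv_rfkill_poll(struct wiphy *wiphy)
	{
		struct mydrv_priv *priv = wiphy_priv(wiphy);

		/* report the switch; when blocked, cfg80211 closes the
		 * netdevs and fails NETDEV_PRE_UP with -ERFKILL */
		wiphy_rfkill_set_hw_state(wiphy, mydrv_switch_is_off(priv));
	}

	/* in the driver's struct cfg80211_ops:
	 *	.rfkill_poll = mydrv_rfkill_poll,
	 * and after wiphy_register() succeeds: */
	wiphy_rfkill_start_polling(priv->wiphy);
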
427static int cfg80211_netdev_notifier_call(struct notifier_block * nb, 527static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
428 unsigned long state, 528 unsigned long state,
429 void *ndev) 529 void *ndev)
@@ -432,7 +532,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
432 struct cfg80211_registered_device *rdev; 532 struct cfg80211_registered_device *rdev;
433 533
434 if (!dev->ieee80211_ptr) 534 if (!dev->ieee80211_ptr)
435 return 0; 535 return NOTIFY_DONE;
436 536
437 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); 537 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
438 538
@@ -448,8 +548,28 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
448 "symlink to netdev!\n"); 548 "symlink to netdev!\n");
449 } 549 }
450 dev->ieee80211_ptr->netdev = dev; 550 dev->ieee80211_ptr->netdev = dev;
551#ifdef CONFIG_WIRELESS_EXT
552 dev->ieee80211_ptr->wext.default_key = -1;
553 dev->ieee80211_ptr->wext.default_mgmt_key = -1;
554#endif
451 mutex_unlock(&rdev->devlist_mtx); 555 mutex_unlock(&rdev->devlist_mtx);
452 break; 556 break;
557 case NETDEV_GOING_DOWN:
558 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
559 break;
560 if (!dev->ieee80211_ptr->ssid_len)
561 break;
562 cfg80211_leave_ibss(rdev, dev, true);
563 break;
564 case NETDEV_UP:
565#ifdef CONFIG_WIRELESS_EXT
566 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
567 break;
568 if (!dev->ieee80211_ptr->wext.ibss.ssid_len)
569 break;
570 cfg80211_join_ibss(rdev, dev, &dev->ieee80211_ptr->wext.ibss);
571#endif
572 break;
453 case NETDEV_UNREGISTER: 573 case NETDEV_UNREGISTER:
454 mutex_lock(&rdev->devlist_mtx); 574 mutex_lock(&rdev->devlist_mtx);
455 if (!list_empty(&dev->ieee80211_ptr->list)) { 575 if (!list_empty(&dev->ieee80211_ptr->list)) {
@@ -458,9 +578,13 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
458 } 578 }
459 mutex_unlock(&rdev->devlist_mtx); 579 mutex_unlock(&rdev->devlist_mtx);
460 break; 580 break;
581 case NETDEV_PRE_UP:
582 if (rfkill_blocked(rdev->rfkill))
583 return notifier_from_errno(-ERFKILL);
584 break;
461 } 585 }
462 586
463 return 0; 587 return NOTIFY_DONE;
464} 588}
465 589
466static struct notifier_block cfg80211_netdev_notifier = { 590static struct notifier_block cfg80211_netdev_notifier = {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 0a592e4295f0..bfa340c7abb5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Wireless configuration interface internals. 2 * Wireless configuration interface internals.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#ifndef __NET_WIRELESS_CORE_H 6#ifndef __NET_WIRELESS_CORE_H
7#define __NET_WIRELESS_CORE_H 7#define __NET_WIRELESS_CORE_H
@@ -10,14 +10,15 @@
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h> 11#include <linux/kref.h>
12#include <linux/rbtree.h> 12#include <linux/rbtree.h>
13#include <linux/mutex.h> 13#include <linux/debugfs.h>
14#include <linux/rfkill.h>
15#include <linux/workqueue.h>
14#include <net/genetlink.h> 16#include <net/genetlink.h>
15#include <net/wireless.h>
16#include <net/cfg80211.h> 17#include <net/cfg80211.h>
17#include "reg.h" 18#include "reg.h"
18 19
19struct cfg80211_registered_device { 20struct cfg80211_registered_device {
20 struct cfg80211_ops *ops; 21 const struct cfg80211_ops *ops;
21 struct list_head list; 22 struct list_head list;
22 /* we hold this mutex during any call so that 23 /* we hold this mutex during any call so that
23 * we cannot do multiple calls at once, and also 24 * we cannot do multiple calls at once, and also
@@ -25,6 +26,11 @@ struct cfg80211_registered_device {
25 * any call is in progress */ 26 * any call is in progress */
26 struct mutex mtx; 27 struct mutex mtx;
27 28
29 /* rfkill support */
30 struct rfkill_ops rfkill_ops;
31 struct rfkill *rfkill;
32 struct work_struct rfkill_sync;
33
28 /* ISO / IEC 3166 alpha2 for which this device is receiving 34 /* ISO / IEC 3166 alpha2 for which this device is receiving
29 * country IEs on, this can help disregard country IEs from APs 35 * country IEs on, this can help disregard country IEs from APs
30 * on the same alpha2 quickly. The alpha2 may differ from 36 * on the same alpha2 quickly. The alpha2 may differ from
@@ -52,6 +58,17 @@ struct cfg80211_registered_device {
52 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 58 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
53 unsigned long suspend_at; 59 unsigned long suspend_at;
54 60
61#ifdef CONFIG_CFG80211_DEBUGFS
62 /* Debugfs entries */
63 struct wiphy_debugfsdentries {
64 struct dentry *rts_threshold;
65 struct dentry *fragmentation_threshold;
66 struct dentry *short_retry_limit;
67 struct dentry *long_retry_limit;
68 struct dentry *ht40allow_map;
69 } debugfs;
70#endif
71
55 /* must be last because of the way we do wiphy_priv(), 72 /* must be last because of the way we do wiphy_priv(),
56 * and it should at least be aligned to NETDEV_ALIGN */ 73 * and it should at least be aligned to NETDEV_ALIGN */
57 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 74 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -74,10 +91,7 @@ bool wiphy_idx_valid(int wiphy_idx)
74extern struct mutex cfg80211_mutex; 91extern struct mutex cfg80211_mutex;
75extern struct list_head cfg80211_drv_list; 92extern struct list_head cfg80211_drv_list;
76 93
77static inline void assert_cfg80211_lock(void) 94#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
78{
79 WARN_ON(!mutex_is_locked(&cfg80211_mutex));
80}
81 95
82/* 96/*
83 * You can use this to mark a wiphy_idx as not having an associated wiphy. 97 * You can use this to mark a wiphy_idx as not having an associated wiphy.
@@ -148,4 +162,16 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
148void cfg80211_bss_age(struct cfg80211_registered_device *dev, 162void cfg80211_bss_age(struct cfg80211_registered_device *dev,
149 unsigned long age_secs); 163 unsigned long age_secs);
150 164
165/* IBSS */
166int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
167 struct net_device *dev,
168 struct cfg80211_ibss_params *params);
169void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
170int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
171 struct net_device *dev, bool nowext);
172
173/* internal helpers */
174int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
175 const u8 *mac_addr);
176
151#endif /* __NET_WIRELESS_CORE_H */ 177#endif /* __NET_WIRELESS_CORE_H */
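On the "must be last" comment above: wiphy_priv() hands drivers the memory that immediately follows the wiphy member, sized by the sizeof_priv argument to wiphy_new(). Roughly (illustrative layout only):

	struct cfg80211_registered_device
	+--------------------------------+
	| ops, list, mtx, rfkill, ...    |
	| struct wiphy wiphy             |  <- aligned to NETDEV_ALIGN
	+--------------------------------+
	| driver private area            |  <- wiphy_priv(&wiphy) points here,
	+--------------------------------+     sizeof_priv bytes long
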
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
new file mode 100644
index 000000000000..679ddfcec1ee
--- /dev/null
+++ b/net/wireless/debugfs.c
@@ -0,0 +1,131 @@
1/*
2 * cfg80211 debugfs
3 *
4 * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include "core.h"
13#include "debugfs.h"
14
15static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
16{
17 file->private_data = inode->i_private;
18 return 0;
19}
20
21#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
22static ssize_t name## _read(struct file *file, char __user *userbuf, \
23 size_t count, loff_t *ppos) \
24{ \
25 struct wiphy *wiphy = file->private_data; \
26 char buf[buflen]; \
27 int res; \
28 \
29 res = scnprintf(buf, buflen, fmt "\n", ##value); \
30 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
31} \
32 \
33static const struct file_operations name## _ops = { \
34 .read = name## _read, \
35 .open = cfg80211_open_file_generic, \
36};
37
38DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
39 wiphy->rts_threshold)
40DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
41 wiphy->frag_threshold);
42DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
43 wiphy->retry_short)
44DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
45 wiphy->retry_long);
46
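For readers tracing the macro, the first DEBUGFS_READONLY_FILE invocation above expands to approximately the following (a mechanical expansion with whitespace added; the %d-versus-u32 mismatch is faithful to the macro):

	static ssize_t rts_threshold_read(struct file *file, char __user *userbuf,
					  size_t count, loff_t *ppos)
	{
		struct wiphy *wiphy = file->private_data;
		char buf[20];
		int res;

		res = scnprintf(buf, 20, "%d\n", wiphy->rts_threshold);
		return simple_read_from_buffer(userbuf, count, ppos, buf, res);
	}

	static const struct file_operations rts_threshold_ops = {
		.read = rts_threshold_read,
		.open = cfg80211_open_file_generic,
	};
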
47static int ht_print_chan(struct ieee80211_channel *chan,
48 char *buf, int buf_size, int offset)
49{
50 if (WARN_ON(offset > buf_size))
51 return 0;
52
53 if (chan->flags & IEEE80211_CHAN_DISABLED)
54 return snprintf(buf + offset,
55 buf_size - offset,
56 "%d Disabled\n",
57 chan->center_freq);
58
59 return snprintf(buf + offset,
60 buf_size - offset,
61 "%d HT40 %c%c\n",
62 chan->center_freq,
63 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
64 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
65}
66
67static ssize_t ht40allow_map_read(struct file *file,
68 char __user *user_buf,
69 size_t count, loff_t *ppos)
70{
71 struct wiphy *wiphy = file->private_data;
72 char *buf;
73 unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
74 enum ieee80211_band band;
75 struct ieee80211_supported_band *sband;
76
77 buf = kzalloc(buf_size, GFP_KERNEL);
78 if (!buf)
79 return -ENOMEM;
80
81 mutex_lock(&cfg80211_mutex);
82
83 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
84 sband = wiphy->bands[band];
85 if (!sband)
86 continue;
87 for (i = 0; i < sband->n_channels; i++)
88 offset += ht_print_chan(&sband->channels[i],
89 buf, buf_size, offset);
90 }
91
92 mutex_unlock(&cfg80211_mutex);
93
94 r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
95
96 kfree(buf);
97
98 return r;
99}
100
101static const struct file_operations ht40allow_map_ops = {
102 .read = ht40allow_map_read,
103 .open = cfg80211_open_file_generic,
104};
105
106#define DEBUGFS_ADD(name) \
107 drv->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
108 &drv->wiphy, &name## _ops);
109#define DEBUGFS_DEL(name) \
110 debugfs_remove(drv->debugfs.name); \
111 drv->debugfs.name = NULL;
112
113void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv)
114{
115 struct dentry *phyd = drv->wiphy.debugfsdir;
116
117 DEBUGFS_ADD(rts_threshold);
118 DEBUGFS_ADD(fragmentation_threshold);
119 DEBUGFS_ADD(short_retry_limit);
120 DEBUGFS_ADD(long_retry_limit);
121 DEBUGFS_ADD(ht40allow_map);
122}
123
124void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv)
125{
126 DEBUGFS_DEL(rts_threshold);
127 DEBUGFS_DEL(fragmentation_threshold);
128 DEBUGFS_DEL(short_retry_limit);
129 DEBUGFS_DEL(long_retry_limit);
130 DEBUGFS_DEL(ht40allow_map);
131}
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
new file mode 100644
index 000000000000..c226983ae66b
--- /dev/null
+++ b/net/wireless/debugfs.h
@@ -0,0 +1,14 @@
1#ifndef __CFG80211_DEBUGFS_H
2#define __CFG80211_DEBUGFS_H
3
4#ifdef CONFIG_CFG80211_DEBUGFS
5void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv);
6void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv);
7#else
8static inline
9void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) {}
10static inline
11void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) {}
12#endif
13
14#endif /* __CFG80211_DEBUGFS_H */
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
new file mode 100644
index 000000000000..a4a1c3498ff2
--- /dev/null
+++ b/net/wireless/ibss.c
@@ -0,0 +1,369 @@
1/*
2 * Some IBSS support code for cfg80211.
3 *
4 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
5 */
6
7#include <linux/etherdevice.h>
8#include <linux/if_arp.h>
9#include <net/cfg80211.h>
10#include "nl80211.h"
11
12
13void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
14{
15 struct wireless_dev *wdev = dev->ieee80211_ptr;
16 struct cfg80211_bss *bss;
17#ifdef CONFIG_WIRELESS_EXT
18 union iwreq_data wrqu;
19#endif
20
21 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
22 return;
23
24 if (WARN_ON(!wdev->ssid_len))
25 return;
26
27 if (memcmp(bssid, wdev->bssid, ETH_ALEN) == 0)
28 return;
29
30 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
31 wdev->ssid, wdev->ssid_len,
32 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
33
34 if (WARN_ON(!bss))
35 return;
36
37 if (wdev->current_bss) {
38 cfg80211_unhold_bss(wdev->current_bss);
39 cfg80211_put_bss(wdev->current_bss);
40 }
41
42 cfg80211_hold_bss(bss);
43 wdev->current_bss = bss;
44 memcpy(wdev->bssid, bssid, ETH_ALEN);
45
46 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, gfp);
47#ifdef CONFIG_WIRELESS_EXT
48 memset(&wrqu, 0, sizeof(wrqu));
49 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
50 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
51#endif
52}
53EXPORT_SYMBOL(cfg80211_ibss_joined);
54
55int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
56 struct net_device *dev,
57 struct cfg80211_ibss_params *params)
58{
59 struct wireless_dev *wdev = dev->ieee80211_ptr;
60 int err;
61
62 if (wdev->ssid_len)
63 return -EALREADY;
64
65#ifdef CONFIG_WIRELESS_EXT
66 wdev->wext.ibss.channel = params->channel;
67#endif
68 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
69
70 if (err)
71 return err;
72
73 memcpy(wdev->ssid, params->ssid, params->ssid_len);
74 wdev->ssid_len = params->ssid_len;
75
76 return 0;
77}
78
79void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
80{
81 struct wireless_dev *wdev = dev->ieee80211_ptr;
82
83 if (wdev->current_bss) {
84 cfg80211_unhold_bss(wdev->current_bss);
85 cfg80211_put_bss(wdev->current_bss);
86 }
87
88 wdev->current_bss = NULL;
89 wdev->ssid_len = 0;
90 memset(wdev->bssid, 0, ETH_ALEN);
91#ifdef CONFIG_WIRELESS_EXT
92 if (!nowext)
93 wdev->wext.ibss.ssid_len = 0;
94#endif
95}
96
97int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
98 struct net_device *dev, bool nowext)
99{
100 int err;
101
102 err = rdev->ops->leave_ibss(&rdev->wiphy, dev);
103
104 if (err)
105 return err;
106
107 cfg80211_clear_ibss(dev, nowext);
108
109 return 0;
110}
111
112#ifdef CONFIG_WIRELESS_EXT
113static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
114 struct wireless_dev *wdev)
115{
116 enum ieee80211_band band;
117 int i;
118
119 if (!wdev->wext.ibss.beacon_interval)
120 wdev->wext.ibss.beacon_interval = 100;
121
122 /* try to find an IBSS channel if none requested ... */
123 if (!wdev->wext.ibss.channel) {
124 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
125 struct ieee80211_supported_band *sband;
126 struct ieee80211_channel *chan;
127
128 sband = rdev->wiphy.bands[band];
129 if (!sband)
130 continue;
131
132 for (i = 0; i < sband->n_channels; i++) {
133 chan = &sband->channels[i];
134 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
135 continue;
136 if (chan->flags & IEEE80211_CHAN_DISABLED)
137 continue;
138 wdev->wext.ibss.channel = chan;
139 break;
140 }
141
142 if (wdev->wext.ibss.channel)
143 break;
144 }
145
146 if (!wdev->wext.ibss.channel)
147 return -EINVAL;
148 }
149
150 /* don't join -- SSID is not there */
151 if (!wdev->wext.ibss.ssid_len)
152 return 0;
153
154 if (!netif_running(wdev->netdev))
155 return 0;
156
157 return cfg80211_join_ibss(wiphy_to_dev(wdev->wiphy),
158 wdev->netdev, &wdev->wext.ibss);
159}
160
161int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
162 struct iw_request_info *info,
163 struct iw_freq *freq, char *extra)
164{
165 struct wireless_dev *wdev = dev->ieee80211_ptr;
166 struct ieee80211_channel *chan;
167 int err;
168
169 /* call only for ibss! */
170 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
171 return -EINVAL;
172
173 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
174 return -EOPNOTSUPP;
175
176 chan = cfg80211_wext_freq(wdev->wiphy, freq);
177 if (chan && IS_ERR(chan))
178 return PTR_ERR(chan);
179
180 if (chan &&
181 (chan->flags & IEEE80211_CHAN_NO_IBSS ||
182 chan->flags & IEEE80211_CHAN_DISABLED))
183 return -EINVAL;
184
185 if (wdev->wext.ibss.channel == chan)
186 return 0;
187
188 if (wdev->ssid_len) {
189 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
190 dev, true);
191 if (err)
192 return err;
193 }
194
195 if (chan) {
196 wdev->wext.ibss.channel = chan;
197 wdev->wext.ibss.channel_fixed = true;
198 } else {
199 /* cfg80211_ibss_wext_join will pick one if needed */
200 wdev->wext.ibss.channel_fixed = false;
201 }
202
203 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
204}
205/* temporary symbol - mark GPL - in the future the handler won't be */
206EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwfreq);
207
208int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
209 struct iw_request_info *info,
210 struct iw_freq *freq, char *extra)
211{
212 struct wireless_dev *wdev = dev->ieee80211_ptr;
213 struct ieee80211_channel *chan = NULL;
214
215 /* call only for ibss! */
216 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
217 return -EINVAL;
218
219 if (wdev->current_bss)
220 chan = wdev->current_bss->channel;
221 else if (wdev->wext.ibss.channel)
222 chan = wdev->wext.ibss.channel;
223
224 if (chan) {
225 freq->m = chan->center_freq;
226 freq->e = 6;
227 return 0;
228 }
229
230 /* no channel if not joining */
231 return -EINVAL;
232}
233/* temporary symbol - mark GPL - in the future the handler won't be */
234EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwfreq);
235
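A note on the encoding used by the giwfreq handler above: wireless extensions express a frequency as m * 10^e Hz, so returning center_freq (which cfg80211 keeps in MHz) with e = 6 is exact. For example:

	/* channel 1, center_freq == 2412 */
	freq->m = 2412;	/* mantissa */
	freq->e = 6;	/* 2412 * 10^6 Hz = 2.412 GHz */
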
236int cfg80211_ibss_wext_siwessid(struct net_device *dev,
237 struct iw_request_info *info,
238 struct iw_point *data, char *ssid)
239{
240 struct wireless_dev *wdev = dev->ieee80211_ptr;
241 size_t len = data->length;
242 int err;
243
244 /* call only for ibss! */
245 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
246 return -EINVAL;
247
248 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
249 return -EOPNOTSUPP;
250
251 if (wdev->ssid_len) {
252 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
253 dev, true);
254 if (err)
255 return err;
256 }
257
258 /* iwconfig uses nul termination in SSID.. */
259 if (len > 0 && ssid[len - 1] == '\0')
260 len--;
261
262 wdev->wext.ibss.ssid = wdev->ssid;
263 memcpy(wdev->wext.ibss.ssid, ssid, len);
264 wdev->wext.ibss.ssid_len = len;
265
266 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
267}
268/* temporary symbol - mark GPL - in the future the handler won't be */
269EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwessid);
270
271int cfg80211_ibss_wext_giwessid(struct net_device *dev,
272 struct iw_request_info *info,
273 struct iw_point *data, char *ssid)
274{
275 struct wireless_dev *wdev = dev->ieee80211_ptr;
276
277 /* call only for ibss! */
278 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
279 return -EINVAL;
280
281 data->flags = 0;
282
283 if (wdev->ssid_len) {
284 data->flags = 1;
285 data->length = wdev->ssid_len;
286 memcpy(ssid, wdev->ssid, data->length);
287 } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) {
288 data->flags = 1;
289 data->length = wdev->wext.ibss.ssid_len;
290 memcpy(ssid, wdev->wext.ibss.ssid, data->length);
291 }
292
293 return 0;
294}
295/* temporary symbol - mark GPL - in the future the handler won't be */
296EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwessid);
297
298int cfg80211_ibss_wext_siwap(struct net_device *dev,
299 struct iw_request_info *info,
300 struct sockaddr *ap_addr, char *extra)
301{
302 struct wireless_dev *wdev = dev->ieee80211_ptr;
303 u8 *bssid = ap_addr->sa_data;
304 int err;
305
306 /* call only for ibss! */
307 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
308 return -EINVAL;
309
310 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
311 return -EOPNOTSUPP;
312
313 if (ap_addr->sa_family != ARPHRD_ETHER)
314 return -EINVAL;
315
316 /* automatic mode */
317 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
318 bssid = NULL;
319
320 /* both automatic */
321 if (!bssid && !wdev->wext.ibss.bssid)
322 return 0;
323
324 /* fixed already - and no change */
325 if (wdev->wext.ibss.bssid && bssid &&
326 compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
327 return 0;
328
329 if (wdev->ssid_len) {
330 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
331 dev, true);
332 if (err)
333 return err;
334 }
335
336 if (bssid) {
337 memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
338 wdev->wext.ibss.bssid = wdev->wext.bssid;
339 } else
340 wdev->wext.ibss.bssid = NULL;
341
342 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
343}
344/* temporary symbol - mark GPL - in the future the handler won't be */
345EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwap);
346
347int cfg80211_ibss_wext_giwap(struct net_device *dev,
348 struct iw_request_info *info,
349 struct sockaddr *ap_addr, char *extra)
350{
351 struct wireless_dev *wdev = dev->ieee80211_ptr;
352
353 /* call only for ibss! */
354 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
355 return -EINVAL;
356
357 ap_addr->sa_family = ARPHRD_ETHER;
358
359 if (wdev->wext.ibss.bssid) {
360 memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
361 return 0;
362 }
363
364 memcpy(ap_addr->sa_data, wdev->bssid, ETH_ALEN);
365 return 0;
366}
367/* temporary symbol - mark GPL - in the future the handler won't be */
368EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwap);
369#endif
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bec5721b6f99..42184361a109 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -28,19 +28,55 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
28} 28}
29EXPORT_SYMBOL(cfg80211_send_rx_assoc); 29EXPORT_SYMBOL(cfg80211_send_rx_assoc);
30 30
31void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, size_t len) 31void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
32{ 32{
33 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 33 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
34 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 34 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
35 nl80211_send_rx_deauth(rdev, dev, buf, len); 35 nl80211_send_deauth(rdev, dev, buf, len);
36} 36}
37EXPORT_SYMBOL(cfg80211_send_rx_deauth); 37EXPORT_SYMBOL(cfg80211_send_deauth);
38 38
39void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf, 39void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
40 size_t len)
41{ 40{
42 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 41 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
43 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 42 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
44 nl80211_send_rx_disassoc(rdev, dev, buf, len); 43 nl80211_send_disassoc(rdev, dev, buf, len);
45} 44}
46EXPORT_SYMBOL(cfg80211_send_rx_disassoc); 45EXPORT_SYMBOL(cfg80211_send_disassoc);
46
47static void cfg80211_wext_disconnected(struct net_device *dev)
48{
49#ifdef CONFIG_WIRELESS_EXT
50 union iwreq_data wrqu;
51 memset(&wrqu, 0, sizeof(wrqu));
52 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
53#endif
54}
55
56void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
57{
58 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
59 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
60 nl80211_send_auth_timeout(rdev, dev, addr);
61 cfg80211_wext_disconnected(dev);
62}
63EXPORT_SYMBOL(cfg80211_send_auth_timeout);
64
65void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
66{
67 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
68 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
69 nl80211_send_assoc_timeout(rdev, dev, addr);
70 cfg80211_wext_disconnected(dev);
71}
72EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
73
74void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
75 enum nl80211_key_type key_type, int key_id,
76 const u8 *tsc)
77{
78 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
79 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
80 nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc);
81}
82EXPORT_SYMBOL(cfg80211_michael_mic_failure);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2456e4ee445e..24168560ebae 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the new netlink-based wireless configuration interface. 2 * This is the new netlink-based wireless configuration interface.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -57,10 +57,14 @@ static int get_drv_dev_by_info_ifindex(struct nlattr **attrs,
57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
60 .len = BUS_ID_SIZE-1 }, 60 .len = 20-1 },
61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, 61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED },
62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, 62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, 63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
64 [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
65 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
66 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
67 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
64 68
65 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 69 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
66 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 70 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -73,6 +77,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
73 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, 77 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
74 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, 78 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
75 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, 79 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
80 [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
76 81
77 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, 82 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
78 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, 83 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
@@ -116,8 +121,45 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
116 .len = IEEE80211_MAX_SSID_LEN }, 121 .len = IEEE80211_MAX_SSID_LEN },
117 [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, 122 [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 },
118 [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, 123 [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
124 [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG },
125 [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG },
126 [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 },
127 [NL80211_ATTR_STA_FLAGS2] = {
128 .len = sizeof(struct nl80211_sta_flag_update),
129 },
130 [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
119}; 131};
120 132
133/* IE validation */
134static bool is_valid_ie_attr(const struct nlattr *attr)
135{
136 const u8 *pos;
137 int len;
138
139 if (!attr)
140 return true;
141
142 pos = nla_data(attr);
143 len = nla_len(attr);
144
145 while (len) {
146 u8 elemlen;
147
148 if (len < 2)
149 return false;
150 len -= 2;
151
152 elemlen = pos[1];
153 if (elemlen > len)
154 return false;
155
156 len -= elemlen;
157 pos += 2 + elemlen;
158 }
159
160 return true;
161}
162
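is_valid_ie_attr() walks the standard 802.11 information-element TLV layout: one byte of element ID, one byte of length, then that many bytes of payload. An illustrative buffer it would accept:

	static const u8 example_ies[] = {
		0x00, 0x04, 't', 'e', 's', 't',	/* SSID "test": ID 0, len 4 */
		0x03, 0x01, 0x01,		/* DS Parameter Set: channel 1 */
	};
	/* a truncated element such as { 0x00, 0x04, 't' } is rejected */
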
121/* message building helper */ 163/* message building helper */
122static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, 164static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
123 int flags, u8 cmd) 165 int flags, u8 cmd)
@@ -126,6 +168,30 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
126 return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); 168 return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd);
127} 169}
128 170
171static int nl80211_msg_put_channel(struct sk_buff *msg,
172 struct ieee80211_channel *chan)
173{
174 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
175 chan->center_freq);
176
177 if (chan->flags & IEEE80211_CHAN_DISABLED)
178 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
179 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
180 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
181 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
182 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
183 if (chan->flags & IEEE80211_CHAN_RADAR)
184 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
185
186 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
187 DBM_TO_MBM(chan->max_power));
188
189 return 0;
190
191 nla_put_failure:
192 return -ENOBUFS;
193}
194
129/* netlink command implementations */ 195/* netlink command implementations */
130 196
131static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, 197static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
@@ -149,8 +215,24 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
149 215
150 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); 216 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
151 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 217 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
218
219 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
220 dev->wiphy.retry_short);
221 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
222 dev->wiphy.retry_long);
223 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
224 dev->wiphy.frag_threshold);
225 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
226 dev->wiphy.rts_threshold);
227
152 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 228 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
153 dev->wiphy.max_scan_ssids); 229 dev->wiphy.max_scan_ssids);
230 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
231 dev->wiphy.max_scan_ie_len);
232
233 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
234 sizeof(u32) * dev->wiphy.n_cipher_suites,
235 dev->wiphy.cipher_suites);
154 236
155 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 237 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
156 if (!nl_modes) 238 if (!nl_modes)
@@ -202,20 +284,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
202 goto nla_put_failure; 284 goto nla_put_failure;
203 285
204 chan = &dev->wiphy.bands[band]->channels[i]; 286 chan = &dev->wiphy.bands[band]->channels[i];
205 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
206 chan->center_freq);
207
208 if (chan->flags & IEEE80211_CHAN_DISABLED)
209 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
210 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
211 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
212 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
213 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
214 if (chan->flags & IEEE80211_CHAN_RADAR)
215 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
216 287
217 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 288 if (nl80211_msg_put_channel(msg, chan))
218 DBM_TO_MBM(chan->max_power)); 289 goto nla_put_failure;
219 290
220 nla_nest_end(msg, nl_freq); 291 nla_nest_end(msg, nl_freq);
221 } 292 }
@@ -273,6 +344,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
273 CMD(assoc, ASSOCIATE); 344 CMD(assoc, ASSOCIATE);
274 CMD(deauth, DEAUTHENTICATE); 345 CMD(deauth, DEAUTHENTICATE);
275 CMD(disassoc, DISASSOCIATE); 346 CMD(disassoc, DISASSOCIATE);
347 CMD(join_ibss, JOIN_IBSS);
276 348
277#undef CMD 349#undef CMD
278 nla_nest_end(msg, nl_cmds); 350 nla_nest_end(msg, nl_cmds);
@@ -317,7 +389,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
317 if (IS_ERR(dev)) 389 if (IS_ERR(dev))
318 return PTR_ERR(dev); 390 return PTR_ERR(dev);
319 391
320 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 392 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
321 if (!msg) 393 if (!msg)
322 goto out_err; 394 goto out_err;
323 395
@@ -365,6 +437,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
365 struct cfg80211_registered_device *rdev; 437 struct cfg80211_registered_device *rdev;
366 int result = 0, rem_txq_params = 0; 438 int result = 0, rem_txq_params = 0;
367 struct nlattr *nl_txq_params; 439 struct nlattr *nl_txq_params;
440 u32 changed;
441 u8 retry_short = 0, retry_long = 0;
442 u32 frag_threshold = 0, rts_threshold = 0;
368 443
369 rtnl_lock(); 444 rtnl_lock();
370 445
@@ -418,7 +493,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
418 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 493 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
419 struct ieee80211_channel *chan; 494 struct ieee80211_channel *chan;
420 struct ieee80211_sta_ht_cap *ht_cap; 495 struct ieee80211_sta_ht_cap *ht_cap;
421 u32 freq, sec_freq; 496 u32 freq;
422 497
423 if (!rdev->ops->set_channel) { 498 if (!rdev->ops->set_channel) {
424 result = -EOPNOTSUPP; 499 result = -EOPNOTSUPP;
@@ -444,33 +519,28 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
444 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 519 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
445 goto bad_res; 520 goto bad_res;
446 521
447 if (channel_type == NL80211_CHAN_HT40MINUS) 522 if (channel_type == NL80211_CHAN_HT40MINUS &&
448 sec_freq = freq - 20; 523 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
449 else if (channel_type == NL80211_CHAN_HT40PLUS) 524 goto bad_res;
450 sec_freq = freq + 20; 525 else if (channel_type == NL80211_CHAN_HT40PLUS &&
451 else 526 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
452 sec_freq = 0;
453
454 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
455
456 /* no HT capabilities */
457 if (channel_type != NL80211_CHAN_NO_HT &&
458 !ht_cap->ht_supported)
459 goto bad_res; 527 goto bad_res;
460 528
461 if (sec_freq) { 529 /*
462 struct ieee80211_channel *schan; 530 * At this point we know that if HT40 was requested
531 * we are allowed to use it and the extension channel
532 * exists.
533 */
463 534
464 /* no 40 MHz capabilities */ 535 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
536
537 /* no HT capabilities or intolerant */
538 if (channel_type != NL80211_CHAN_NO_HT) {
539 if (!ht_cap->ht_supported)
540 goto bad_res;
465 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 541 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
466 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) 542 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
467 goto bad_res; 543 goto bad_res;
468
469 schan = ieee80211_get_channel(&rdev->wiphy, sec_freq);
470
471 /* Secondary channel not allowed */
472 if (!schan || schan->flags & IEEE80211_CHAN_DISABLED)
473 goto bad_res;
474 } 544 }
475 545
476 result = rdev->ops->set_channel(&rdev->wiphy, chan, 546 result = rdev->ops->set_channel(&rdev->wiphy, chan,
@@ -479,6 +549,84 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
479 goto bad_res; 549 goto bad_res;
480 } 550 }
481 551
552 changed = 0;
553
554 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
555 retry_short = nla_get_u8(
556 info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]);
557 if (retry_short == 0) {
558 result = -EINVAL;
559 goto bad_res;
560 }
561 changed |= WIPHY_PARAM_RETRY_SHORT;
562 }
563
564 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) {
565 retry_long = nla_get_u8(
566 info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]);
567 if (retry_long == 0) {
568 result = -EINVAL;
569 goto bad_res;
570 }
571 changed |= WIPHY_PARAM_RETRY_LONG;
572 }
573
574 if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) {
575 frag_threshold = nla_get_u32(
576 info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]);
577 if (frag_threshold < 256) {
578 result = -EINVAL;
579 goto bad_res;
580 }
581 if (frag_threshold != (u32) -1) {
582 /*
583 * Fragments (apart from the last one) are required to
584 * have even length. Make the fragmentation code
585 * simpler by stripping the LSB should someone try to use
586 * an odd threshold value.
587 */
588 frag_threshold &= ~0x1;
589 }
590 changed |= WIPHY_PARAM_FRAG_THRESHOLD;
591 }
592
593 if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) {
594 rts_threshold = nla_get_u32(
595 info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]);
596 changed |= WIPHY_PARAM_RTS_THRESHOLD;
597 }
598
599 if (changed) {
600 u8 old_retry_short, old_retry_long;
601 u32 old_frag_threshold, old_rts_threshold;
602
603 if (!rdev->ops->set_wiphy_params) {
604 result = -EOPNOTSUPP;
605 goto bad_res;
606 }
607
608 old_retry_short = rdev->wiphy.retry_short;
609 old_retry_long = rdev->wiphy.retry_long;
610 old_frag_threshold = rdev->wiphy.frag_threshold;
611 old_rts_threshold = rdev->wiphy.rts_threshold;
612
613 if (changed & WIPHY_PARAM_RETRY_SHORT)
614 rdev->wiphy.retry_short = retry_short;
615 if (changed & WIPHY_PARAM_RETRY_LONG)
616 rdev->wiphy.retry_long = retry_long;
617 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
618 rdev->wiphy.frag_threshold = frag_threshold;
619 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
620 rdev->wiphy.rts_threshold = rts_threshold;
621
622 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
623 if (result) {
624 rdev->wiphy.retry_short = old_retry_short;
625 rdev->wiphy.retry_long = old_retry_long;
626 rdev->wiphy.frag_threshold = old_frag_threshold;
627 rdev->wiphy.rts_threshold = old_rts_threshold;
628 }
629 }
482 630
483 bad_res: 631 bad_res:
484 mutex_unlock(&rdev->mtx); 632 mutex_unlock(&rdev->mtx);
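A worked example of the fragmentation-threshold handling above (not part of the patch):

	frag_threshold = 2347;		/* odd value from userspace */
	frag_threshold &= ~0x1;		/* stored as 2346, fragments stay even */

	/* (u32) -1 bypasses the masking and means "fragmentation disabled",
	 * matching the default set in wiphy_new(); values below 256 were
	 * already rejected with -EINVAL */
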
@@ -489,6 +637,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
489 637
490 638
491static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, 639static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
640 struct cfg80211_registered_device *rdev,
492 struct net_device *dev) 641 struct net_device *dev)
493{ 642{
494 void *hdr; 643 void *hdr;
@@ -498,6 +647,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
498 return -1; 647 return -1;
499 648
500 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 649 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
650 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
501 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 651 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
502 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); 652 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
503 return genlmsg_end(msg, hdr); 653 return genlmsg_end(msg, hdr);
@@ -532,7 +682,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
532 } 682 }
533 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, 683 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
534 cb->nlh->nlmsg_seq, NLM_F_MULTI, 684 cb->nlh->nlmsg_seq, NLM_F_MULTI,
535 wdev->netdev) < 0) { 685 dev, wdev->netdev) < 0) {
536 mutex_unlock(&dev->devlist_mtx); 686 mutex_unlock(&dev->devlist_mtx);
537 goto out; 687 goto out;
538 } 688 }
@@ -562,11 +712,12 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
562 if (err) 712 if (err)
563 return err; 713 return err;
564 714
565 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 715 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
566 if (!msg) 716 if (!msg)
567 goto out_err; 717 goto out_err;
568 718
569 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, netdev) < 0) 719 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
720 dev, netdev) < 0)
570 goto out_free; 721 goto out_free;
571 722
572 dev_put(netdev); 723 dev_put(netdev);
@@ -616,7 +767,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
616 struct cfg80211_registered_device *drv; 767 struct cfg80211_registered_device *drv;
617 struct vif_params params; 768 struct vif_params params;
618 int err, ifindex; 769 int err, ifindex;
619 enum nl80211_iftype type; 770 enum nl80211_iftype otype, ntype;
620 struct net_device *dev; 771 struct net_device *dev;
621 u32 _flags, *flags = NULL; 772 u32 _flags, *flags = NULL;
622 bool change = false; 773 bool change = false;
@@ -630,30 +781,27 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
630 goto unlock_rtnl; 781 goto unlock_rtnl;
631 782
632 ifindex = dev->ifindex; 783 ifindex = dev->ifindex;
633 type = dev->ieee80211_ptr->iftype; 784 otype = ntype = dev->ieee80211_ptr->iftype;
634 dev_put(dev); 785 dev_put(dev);
635 786
636 if (info->attrs[NL80211_ATTR_IFTYPE]) { 787 if (info->attrs[NL80211_ATTR_IFTYPE]) {
637 enum nl80211_iftype ntype;
638
639 ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); 788 ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
640 if (type != ntype) 789 if (otype != ntype)
641 change = true; 790 change = true;
642 type = ntype; 791 if (ntype > NL80211_IFTYPE_MAX) {
643 if (type > NL80211_IFTYPE_MAX) {
644 err = -EINVAL; 792 err = -EINVAL;
645 goto unlock; 793 goto unlock;
646 } 794 }
647 } 795 }
648 796
649 if (!drv->ops->change_virtual_intf || 797 if (!drv->ops->change_virtual_intf ||
650 !(drv->wiphy.interface_modes & (1 << type))) { 798 !(drv->wiphy.interface_modes & (1 << ntype))) {
651 err = -EOPNOTSUPP; 799 err = -EOPNOTSUPP;
652 goto unlock; 800 goto unlock;
653 } 801 }
654 802
655 if (info->attrs[NL80211_ATTR_MESH_ID]) { 803 if (info->attrs[NL80211_ATTR_MESH_ID]) {
656 if (type != NL80211_IFTYPE_MESH_POINT) { 804 if (ntype != NL80211_IFTYPE_MESH_POINT) {
657 err = -EINVAL; 805 err = -EINVAL;
658 goto unlock; 806 goto unlock;
659 } 807 }
@@ -663,7 +811,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
663 } 811 }
664 812
665 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { 813 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
666 if (type != NL80211_IFTYPE_MONITOR) { 814 if (ntype != NL80211_IFTYPE_MONITOR) {
667 err = -EINVAL; 815 err = -EINVAL;
668 goto unlock; 816 goto unlock;
669 } 817 }
@@ -678,12 +826,17 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
678 826
679 if (change) 827 if (change)
680 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, 828 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
681 type, flags, &params); 829 ntype, flags, &params);
682 else 830 else
683 err = 0; 831 err = 0;
684 832
685 dev = __dev_get_by_index(&init_net, ifindex); 833 dev = __dev_get_by_index(&init_net, ifindex);
686 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); 834 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != ntype));
835
836 if (dev && !err && (ntype != otype)) {
837 if (otype == NL80211_IFTYPE_ADHOC)
838 cfg80211_clear_ibss(dev, false);
839 }
687 840
688 unlock: 841 unlock:
689 cfg80211_put_dev(drv); 842 cfg80211_put_dev(drv);
@@ -832,7 +985,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
832 goto out; 985 goto out;
833 } 986 }
834 987
835 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 988 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
836 if (!msg) { 989 if (!msg) {
837 err = -ENOMEM; 990 err = -ENOMEM;
838 goto out; 991 goto out;
@@ -920,6 +1073,14 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
920 } 1073 }
921 1074
922 err = func(&drv->wiphy, dev, key_idx); 1075 err = func(&drv->wiphy, dev, key_idx);
1076#ifdef CONFIG_WIRELESS_EXT
1077 if (!err) {
1078 if (func == drv->ops->set_default_key)
1079 dev->ieee80211_ptr->wext.default_key = key_idx;
1080 else
1081 dev->ieee80211_ptr->wext.default_mgmt_key = key_idx;
1082 }
1083#endif
923 1084
924 out: 1085 out:
925 cfg80211_put_dev(drv); 1086 cfg80211_put_dev(drv);
@@ -934,7 +1095,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
934static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) 1095static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
935{ 1096{
936 struct cfg80211_registered_device *drv; 1097 struct cfg80211_registered_device *drv;
937 int err; 1098 int err, i;
938 struct net_device *dev; 1099 struct net_device *dev;
939 struct key_params params; 1100 struct key_params params;
940 u8 key_idx = 0; 1101 u8 key_idx = 0;
@@ -950,6 +1111,11 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
950 params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); 1111 params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
951 } 1112 }
952 1113
1114 if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
1115 params.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
1116 params.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
1117 }
1118
953 if (info->attrs[NL80211_ATTR_KEY_IDX]) 1119 if (info->attrs[NL80211_ATTR_KEY_IDX])
954 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); 1120 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
955 1121
@@ -958,44 +1124,8 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
958 if (info->attrs[NL80211_ATTR_MAC]) 1124 if (info->attrs[NL80211_ATTR_MAC])
959 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 1125 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
960 1126
961 if (key_idx > 5) 1127 if (cfg80211_validate_key_settings(&params, key_idx, mac_addr))
962 return -EINVAL;
963
964 /*
965 * Disallow pairwise keys with non-zero index unless it's WEP
966 * (because current deployments use pairwise WEP keys with
967 * non-zero indices but 802.11i clearly specifies to use zero)
968 */
969 if (mac_addr && key_idx &&
970 params.cipher != WLAN_CIPHER_SUITE_WEP40 &&
971 params.cipher != WLAN_CIPHER_SUITE_WEP104)
972 return -EINVAL;
973
974 /* TODO: add definitions for the lengths to linux/ieee80211.h */
975 switch (params.cipher) {
976 case WLAN_CIPHER_SUITE_WEP40:
977 if (params.key_len != 5)
978 return -EINVAL;
979 break;
980 case WLAN_CIPHER_SUITE_TKIP:
981 if (params.key_len != 32)
982 return -EINVAL;
983 break;
984 case WLAN_CIPHER_SUITE_CCMP:
985 if (params.key_len != 16)
986 return -EINVAL;
987 break;
988 case WLAN_CIPHER_SUITE_WEP104:
989 if (params.key_len != 13)
990 return -EINVAL;
991 break;
992 case WLAN_CIPHER_SUITE_AES_CMAC:
993 if (params.key_len != 16)
994 return -EINVAL;
995 break;
996 default:
997 return -EINVAL; 1128 return -EINVAL;
998 }
999 1129
1000 rtnl_lock(); 1130 rtnl_lock();
1001 1131
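Note: the per-cipher length checks deleted above are consolidated into cfg80211_validate_key_settings(). A sketch of what that helper plausibly covers, reconstructed from the removed lines; validate_key_settings_sketch is a hypothetical name and the real helper, which lives elsewhere in cfg80211, may differ in detail:

	/* Sketch inferred from the checks removed above. */
	static int validate_key_settings_sketch(struct key_params *params,
						int key_idx, const u8 *mac_addr)
	{
		if (key_idx > 5)
			return -EINVAL;

		/* pairwise keys use index 0, except legacy WEP deployments */
		if (mac_addr && key_idx &&
		    params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
		    params->cipher != WLAN_CIPHER_SUITE_WEP104)
			return -EINVAL;

		switch (params->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
			return params->key_len == 5 ? 0 : -EINVAL;
		case WLAN_CIPHER_SUITE_WEP104:
			return params->key_len == 13 ? 0 : -EINVAL;
		case WLAN_CIPHER_SUITE_TKIP:
			return params->key_len == 32 ? 0 : -EINVAL;
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_AES_CMAC:
			return params->key_len == 16 ? 0 : -EINVAL;
		default:
			return -EINVAL;
		}
	}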
@@ -1003,6 +1133,14 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
1003 if (err) 1133 if (err)
1004 goto unlock_rtnl; 1134 goto unlock_rtnl;
1005 1135
1136 for (i = 0; i < drv->wiphy.n_cipher_suites; i++)
1137 if (params.cipher == drv->wiphy.cipher_suites[i])
1138 break;
1139 if (i == drv->wiphy.n_cipher_suites) {
1140 err = -EINVAL;
1141 goto out;
1142 }
1143
1006 if (!drv->ops->add_key) { 1144 if (!drv->ops->add_key) {
1007 err = -EOPNOTSUPP; 1145 err = -EOPNOTSUPP;
1008 goto out; 1146 goto out;
@@ -1049,6 +1187,15 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
1049 1187
1050 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr); 1188 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr);
1051 1189
1190#ifdef CONFIG_WIRELESS_EXT
1191 if (!err) {
1192 if (key_idx == dev->ieee80211_ptr->wext.default_key)
1193 dev->ieee80211_ptr->wext.default_key = -1;
1194 else if (key_idx == dev->ieee80211_ptr->wext.default_mgmt_key)
1195 dev->ieee80211_ptr->wext.default_mgmt_key = -1;
1196 }
1197#endif
1198
1052 out: 1199 out:
1053 cfg80211_put_dev(drv); 1200 cfg80211_put_dev(drv);
1054 dev_put(dev); 1201 dev_put(dev);
@@ -1069,6 +1216,9 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1069 struct beacon_parameters params; 1216 struct beacon_parameters params;
1070 int haveinfo = 0; 1217 int haveinfo = 0;
1071 1218
1219 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
1220 return -EINVAL;
1221
1072 rtnl_lock(); 1222 rtnl_lock();
1073 1223
1074 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1224 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -1186,15 +1336,36 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
1186 [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG }, 1336 [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG },
1187 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, 1337 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
1188 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, 1338 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
1339 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
1189}; 1340};
1190 1341
1191static int parse_station_flags(struct nlattr *nla, u32 *staflags) 1342static int parse_station_flags(struct genl_info *info,
1343 struct station_parameters *params)
1192{ 1344{
1193 struct nlattr *flags[NL80211_STA_FLAG_MAX + 1]; 1345 struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
1346 struct nlattr *nla;
1194 int flag; 1347 int flag;
1195 1348
1196 *staflags = 0; 1349 /*
1350 * Try parsing the new attribute first so userspace
1351 * can specify both for older kernels.
1352 */
1353 nla = info->attrs[NL80211_ATTR_STA_FLAGS2];
1354 if (nla) {
1355 struct nl80211_sta_flag_update *sta_flags;
1356
1357 sta_flags = nla_data(nla);
1358 params->sta_flags_mask = sta_flags->mask;
1359 params->sta_flags_set = sta_flags->set;
1360 if ((params->sta_flags_mask |
1361 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
1362 return -EINVAL;
1363 return 0;
1364 }
1365
1366 /* if present, parse the old attribute */
1197 1367
1368 nla = info->attrs[NL80211_ATTR_STA_FLAGS];
1198 if (!nla) 1369 if (!nla)
1199 return 0; 1370 return 0;
1200 1371
@@ -1202,11 +1373,12 @@ static int parse_station_flags(struct nlattr *nla, u32 *staflags)
1202 nla, sta_flags_policy)) 1373 nla, sta_flags_policy))
1203 return -EINVAL; 1374 return -EINVAL;
1204 1375
1205 *staflags = STATION_FLAG_CHANGED; 1376 params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
1377 params->sta_flags_mask &= ~1;
1206 1378
1207 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) 1379 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
1208 if (flags[flag]) 1380 if (flags[flag])
1209 *staflags |= (1<<flag); 1381 params->sta_flags_set |= (1<<flag);
1210 1382
1211 return 0; 1383 return 0;
1212} 1384}
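Note: NL80211_ATTR_STA_FLAGS2 replaces the flag nest with an explicit mask/set pair, so "flag known but cleared" becomes expressible. A hedged userspace sketch of filling it, based on the mask/set fields read out by parse_station_flags() above; msg and the chosen flags are illustrative:

	/* Sketch: authorize a station while explicitly clearing WME,
	 * leaving every other flag untouched. */
	struct nl80211_sta_flag_update upd = {
		.mask = (1 << NL80211_STA_FLAG_AUTHORIZED) |
			(1 << NL80211_STA_FLAG_WME),
		.set  = (1 << NL80211_STA_FLAG_AUTHORIZED),
	};

	nla_put(msg, NL80211_ATTR_STA_FLAGS2, sizeof(upd), &upd);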
@@ -1424,7 +1596,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
1424 if (err) 1596 if (err)
1425 goto out; 1597 goto out;
1426 1598
1427 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1599 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1428 if (!msg) 1600 if (!msg)
1429 goto out; 1601 goto out;
1430 1602
@@ -1502,8 +1674,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1502 params.ht_capa = 1674 params.ht_capa =
1503 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 1675 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1504 1676
1505 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1677 if (parse_station_flags(info, &params))
1506 &params.station_flags))
1507 return -EINVAL; 1678 return -EINVAL;
1508 1679
1509 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 1680 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
@@ -1516,6 +1687,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1516 if (err) 1687 if (err)
1517 goto out_rtnl; 1688 goto out_rtnl;
1518 1689
1690 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1691 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1692 err = -EINVAL;
1693 goto out;
1694 }
1695
1519 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1696 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1520 if (err) 1697 if (err)
1521 goto out; 1698 goto out;
@@ -1567,13 +1744,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1567 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 1744 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
1568 params.listen_interval = 1745 params.listen_interval =
1569 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1746 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1747
1570 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1748 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1749 if (!params.aid || params.aid > IEEE80211_MAX_AID)
1750 return -EINVAL;
1751
1571 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 1752 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1572 params.ht_capa = 1753 params.ht_capa =
1573 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 1754 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1574 1755
1575 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1756 if (parse_station_flags(info, &params))
1576 &params.station_flags))
1577 return -EINVAL; 1757 return -EINVAL;
1578 1758
1579 rtnl_lock(); 1759 rtnl_lock();
@@ -1582,6 +1762,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1582 if (err) 1762 if (err)
1583 goto out_rtnl; 1763 goto out_rtnl;
1584 1764
1765 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1766 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1767 err = -EINVAL;
1768 goto out;
1769 }
1770
1585 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1771 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1586 if (err) 1772 if (err)
1587 goto out; 1773 goto out;
@@ -1625,6 +1811,12 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
1625 if (err) 1811 if (err)
1626 goto out_rtnl; 1812 goto out_rtnl;
1627 1813
1814 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1815 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1816 err = -EINVAL;
1817 goto out;
1818 }
1819
1628 if (!drv->ops->del_station) { 1820 if (!drv->ops->del_station) {
1629 err = -EOPNOTSUPP; 1821 err = -EOPNOTSUPP;
1630 goto out; 1822 goto out;
@@ -1808,7 +2000,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
1808 if (err) 2000 if (err)
1809 goto out; 2001 goto out;
1810 2002
1811 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2003 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1812 if (!msg) 2004 if (!msg)
1813 goto out; 2005 goto out;
1814 2006
@@ -2124,7 +2316,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
2124 goto out; 2316 goto out;
2125 2317
2126 /* Draw up a netlink message to send back */ 2318 /* Draw up a netlink message to send back */
2127 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2319 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2128 if (!msg) { 2320 if (!msg) {
2129 err = -ENOBUFS; 2321 err = -ENOBUFS;
2130 goto out; 2322 goto out;
@@ -2302,7 +2494,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
2302 if (!cfg80211_regdomain) 2494 if (!cfg80211_regdomain)
2303 goto out; 2495 goto out;
2304 2496
2305 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2497 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2306 if (!msg) { 2498 if (!msg) {
2307 err = -ENOBUFS; 2499 err = -ENOBUFS;
2308 goto out; 2500 goto out;
@@ -2385,18 +2577,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2385 rem_reg_rules) { 2577 rem_reg_rules) {
2386 num_rules++; 2578 num_rules++;
2387 if (num_rules > NL80211_MAX_SUPP_REG_RULES) 2579 if (num_rules > NL80211_MAX_SUPP_REG_RULES)
2388 goto bad_reg; 2580 return -EINVAL;
2389 } 2581 }
2390 2582
2391 if (!reg_is_valid_request(alpha2)) 2583 mutex_lock(&cfg80211_mutex);
2392 return -EINVAL; 2584
2585 if (!reg_is_valid_request(alpha2)) {
2586 r = -EINVAL;
2587 goto bad_reg;
2588 }
2393 2589
2394 size_of_regd = sizeof(struct ieee80211_regdomain) + 2590 size_of_regd = sizeof(struct ieee80211_regdomain) +
2395 (num_rules * sizeof(struct ieee80211_reg_rule)); 2591 (num_rules * sizeof(struct ieee80211_reg_rule));
2396 2592
2397 rd = kzalloc(size_of_regd, GFP_KERNEL); 2593 rd = kzalloc(size_of_regd, GFP_KERNEL);
2398 if (!rd) 2594 if (!rd) {
2399 return -ENOMEM; 2595 r = -ENOMEM;
2596 goto bad_reg;
2597 }
2400 2598
2401 rd->n_reg_rules = num_rules; 2599 rd->n_reg_rules = num_rules;
2402 rd->alpha2[0] = alpha2[0]; 2600 rd->alpha2[0] = alpha2[0];
@@ -2413,20 +2611,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2413 2611
2414 rule_idx++; 2612 rule_idx++;
2415 2613
2416 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) 2614 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) {
2615 r = -EINVAL;
2417 goto bad_reg; 2616 goto bad_reg;
2617 }
2418 } 2618 }
2419 2619
2420 BUG_ON(rule_idx != num_rules); 2620 BUG_ON(rule_idx != num_rules);
2421 2621
2422 mutex_lock(&cfg80211_mutex);
2423 r = set_regdom(rd); 2622 r = set_regdom(rd);
2623
2424 mutex_unlock(&cfg80211_mutex); 2624 mutex_unlock(&cfg80211_mutex);
2625
2425 return r; 2626 return r;
2426 2627
2427 bad_reg: 2628 bad_reg:
2629 mutex_unlock(&cfg80211_mutex);
2428 kfree(rd); 2630 kfree(rd);
2429 return -EINVAL; 2631 return r;
2430} 2632}
2431 2633
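Note: the reordering above widens the cfg80211_mutex critical section so that reg_is_valid_request(), which reads last_request, and set_regdom() see one consistent view, and it funnels every failure through bad_reg so the lock is dropped and rd freed on all paths. Schematically (a sketch of the pattern, not new code for the tree):

	mutex_lock(&cfg80211_mutex);
	if (!reg_is_valid_request(alpha2)) {	/* reads last_request */
		r = -EINVAL;
		goto bad_reg;
	}
	/* ... allocate rd, fill rules, set_regdom(rd) ... */
 bad_reg:
	mutex_unlock(&cfg80211_mutex);
	kfree(rd);	/* kfree(NULL) is a no-op on early failure */
	return r;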
2432static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) 2634static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
@@ -2442,6 +2644,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2442 enum ieee80211_band band; 2644 enum ieee80211_band band;
2443 size_t ie_len; 2645 size_t ie_len;
2444 2646
2647 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
2648 return -EINVAL;
2649
2445 rtnl_lock(); 2650 rtnl_lock();
2446 2651
2447 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2652 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2492,6 +2697,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2492 else 2697 else
2493 ie_len = 0; 2698 ie_len = 0;
2494 2699
2700 if (ie_len > wiphy->max_scan_ie_len) {
2701 err = -EINVAL;
2702 goto out;
2703 }
2704
2495 request = kzalloc(sizeof(*request) 2705 request = kzalloc(sizeof(*request)
2496 + sizeof(*ssid) * n_ssids 2706 + sizeof(*ssid) * n_ssids
2497 + sizeof(channel) * n_channels 2707 + sizeof(channel) * n_channels
@@ -2554,7 +2764,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2554 2764
2555 if (info->attrs[NL80211_ATTR_IE]) { 2765 if (info->attrs[NL80211_ATTR_IE]) {
2556 request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2766 request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2557 memcpy(request->ie, nla_data(info->attrs[NL80211_ATTR_IE]), 2767 memcpy((void *)request->ie,
2768 nla_data(info->attrs[NL80211_ATTR_IE]),
2558 request->ie_len); 2769 request->ie_len);
2559 } 2770 }
2560 2771
@@ -2710,6 +2921,15 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2710 struct wiphy *wiphy; 2921 struct wiphy *wiphy;
2711 int err; 2922 int err;
2712 2923
2924 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
2925 return -EINVAL;
2926
2927 if (!info->attrs[NL80211_ATTR_MAC])
2928 return -EINVAL;
2929
2930 if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
2931 return -EINVAL;
2932
2713 rtnl_lock(); 2933 rtnl_lock();
2714 2934
2715 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2935 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2731,11 +2951,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2731 goto out; 2951 goto out;
2732 } 2952 }
2733 2953
2734 if (!info->attrs[NL80211_ATTR_MAC]) {
2735 err = -EINVAL;
2736 goto out;
2737 }
2738
2739 wiphy = &drv->wiphy; 2954 wiphy = &drv->wiphy;
2740 memset(&req, 0, sizeof(req)); 2955 memset(&req, 0, sizeof(req));
2741 2956
@@ -2761,13 +2976,10 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2761 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2976 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2762 } 2977 }
2763 2978
2764 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { 2979 req.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
2765 req.auth_type = 2980 if (!nl80211_valid_auth_type(req.auth_type)) {
2766 nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); 2981 err = -EINVAL;
2767 if (!nl80211_valid_auth_type(req.auth_type)) { 2982 goto out;
2768 err = -EINVAL;
2769 goto out;
2770 }
2771 } 2983 }
2772 2984
2773 err = drv->ops->auth(&drv->wiphy, dev, &req); 2985 err = drv->ops->auth(&drv->wiphy, dev, &req);
@@ -2788,6 +3000,13 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2788 struct wiphy *wiphy; 3000 struct wiphy *wiphy;
2789 int err; 3001 int err;
2790 3002
3003 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3004 return -EINVAL;
3005
3006 if (!info->attrs[NL80211_ATTR_MAC] ||
3007 !info->attrs[NL80211_ATTR_SSID])
3008 return -EINVAL;
3009
2791 rtnl_lock(); 3010 rtnl_lock();
2792 3011
2793 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3012 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2809,12 +3028,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2809 goto out; 3028 goto out;
2810 } 3029 }
2811 3030
2812 if (!info->attrs[NL80211_ATTR_MAC] ||
2813 !info->attrs[NL80211_ATTR_SSID]) {
2814 err = -EINVAL;
2815 goto out;
2816 }
2817
2818 wiphy = &drv->wiphy; 3031 wiphy = &drv->wiphy;
2819 memset(&req, 0, sizeof(req)); 3032 memset(&req, 0, sizeof(req));
2820 3033
@@ -2838,6 +3051,19 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2838 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 3051 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2839 } 3052 }
2840 3053
3054 if (info->attrs[NL80211_ATTR_USE_MFP]) {
3055 enum nl80211_mfp use_mfp =
3056 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
3057 if (use_mfp == NL80211_MFP_REQUIRED)
3058 req.use_mfp = true;
3059 else if (use_mfp != NL80211_MFP_NO) {
3060 err = -EINVAL;
3061 goto out;
3062 }
3063 }
3064
3065 req.control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
3066
2841 err = drv->ops->assoc(&drv->wiphy, dev, &req); 3067 err = drv->ops->assoc(&drv->wiphy, dev, &req);
2842 3068
2843out: 3069out:
@@ -2856,6 +3082,15 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
2856 struct wiphy *wiphy; 3082 struct wiphy *wiphy;
2857 int err; 3083 int err;
2858 3084
3085 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3086 return -EINVAL;
3087
3088 if (!info->attrs[NL80211_ATTR_MAC])
3089 return -EINVAL;
3090
3091 if (!info->attrs[NL80211_ATTR_REASON_CODE])
3092 return -EINVAL;
3093
2859 rtnl_lock(); 3094 rtnl_lock();
2860 3095
2861 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3096 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2877,24 +3112,16 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
2877 goto out; 3112 goto out;
2878 } 3113 }
2879 3114
2880 if (!info->attrs[NL80211_ATTR_MAC]) {
2881 err = -EINVAL;
2882 goto out;
2883 }
2884
2885 wiphy = &drv->wiphy; 3115 wiphy = &drv->wiphy;
2886 memset(&req, 0, sizeof(req)); 3116 memset(&req, 0, sizeof(req));
2887 3117
2888 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 3118 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2889 3119
2890 if (info->attrs[NL80211_ATTR_REASON_CODE]) { 3120 req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2891 req.reason_code = 3121 if (req.reason_code == 0) {
2892 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); 3122 /* Reason Code 0 is reserved */
2893 if (req.reason_code == 0) { 3123 err = -EINVAL;
2894 /* Reason Code 0 is reserved */ 3124 goto out;
2895 err = -EINVAL;
2896 goto out;
2897 }
2898 } 3125 }
2899 3126
2900 if (info->attrs[NL80211_ATTR_IE]) { 3127 if (info->attrs[NL80211_ATTR_IE]) {
@@ -2920,6 +3147,15 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
2920 struct wiphy *wiphy; 3147 struct wiphy *wiphy;
2921 int err; 3148 int err;
2922 3149
3150 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3151 return -EINVAL;
3152
3153 if (!info->attrs[NL80211_ATTR_MAC])
3154 return -EINVAL;
3155
3156 if (!info->attrs[NL80211_ATTR_REASON_CODE])
3157 return -EINVAL;
3158
2923 rtnl_lock(); 3159 rtnl_lock();
2924 3160
2925 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3161 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2941,24 +3177,16 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
2941 goto out; 3177 goto out;
2942 } 3178 }
2943 3179
2944 if (!info->attrs[NL80211_ATTR_MAC]) {
2945 err = -EINVAL;
2946 goto out;
2947 }
2948
2949 wiphy = &drv->wiphy; 3180 wiphy = &drv->wiphy;
2950 memset(&req, 0, sizeof(req)); 3181 memset(&req, 0, sizeof(req));
2951 3182
2952 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 3183 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2953 3184
2954 if (info->attrs[NL80211_ATTR_REASON_CODE]) { 3185 req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2955 req.reason_code = 3186 if (req.reason_code == 0) {
2956 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); 3187 /* Reason Code 0 is reserved */
2957 if (req.reason_code == 0) { 3188 err = -EINVAL;
2958 /* Reason Code 0 is reserved */ 3189 goto out;
2959 err = -EINVAL;
2960 goto out;
2961 }
2962 } 3190 }
2963 3191
2964 if (info->attrs[NL80211_ATTR_IE]) { 3192 if (info->attrs[NL80211_ATTR_IE]) {
@@ -2976,6 +3204,124 @@ unlock_rtnl:
2976 return err; 3204 return err;
2977} 3205}
2978 3206
3207static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
3208{
3209 struct cfg80211_registered_device *drv;
3210 struct net_device *dev;
3211 struct cfg80211_ibss_params ibss;
3212 struct wiphy *wiphy;
3213 int err;
3214
3215 memset(&ibss, 0, sizeof(ibss));
3216
3217 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3218 return -EINVAL;
3219
3220 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
3221 !info->attrs[NL80211_ATTR_SSID] ||
3222 !nla_len(info->attrs[NL80211_ATTR_SSID]))
3223 return -EINVAL;
3224
3225 ibss.beacon_interval = 100;
3226
3227 if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
3228 ibss.beacon_interval =
3229 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
3230 if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
3231 return -EINVAL;
3232 }
3233
3234 rtnl_lock();
3235
3236 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
3237 if (err)
3238 goto unlock_rtnl;
3239
3240 if (!drv->ops->join_ibss) {
3241 err = -EOPNOTSUPP;
3242 goto out;
3243 }
3244
3245 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
3246 err = -EOPNOTSUPP;
3247 goto out;
3248 }
3249
3250 if (!netif_running(dev)) {
3251 err = -ENETDOWN;
3252 goto out;
3253 }
3254
3255 wiphy = &drv->wiphy;
3256
3257 if (info->attrs[NL80211_ATTR_MAC])
3258 ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
3259 ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
3260 ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
3261
3262 if (info->attrs[NL80211_ATTR_IE]) {
3263 ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
3264 ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3265 }
3266
3267 ibss.channel = ieee80211_get_channel(wiphy,
3268 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
3269 if (!ibss.channel ||
3270 ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
3271 ibss.channel->flags & IEEE80211_CHAN_DISABLED) {
3272 err = -EINVAL;
3273 goto out;
3274 }
3275
3276 ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
3277
3278 err = cfg80211_join_ibss(drv, dev, &ibss);
3279
3280out:
3281 cfg80211_put_dev(drv);
3282 dev_put(dev);
3283unlock_rtnl:
3284 rtnl_unlock();
3285 return err;
3286}
3287
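Note: nl80211_join_ibss() above requires NL80211_ATTR_WIPHY_FREQ and a non-empty SSID, defaults the beacon interval to 100 TU (accepting 1..10000), and treats BSSID, IEs and a fixed channel as optional. A hedged libnl sketch of issuing the command; sk, family, ifindex and all values are illustrative and error handling is elided:

	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_JOIN_IBSS, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, 4, "test");
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);	/* MHz */
	/* optional: nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
	 *           nla_put_flag(msg, NL80211_ATTR_FREQ_FIXED); */
	nl_send_auto_complete(sk, msg);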
3288static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
3289{
3290 struct cfg80211_registered_device *drv;
3291 struct net_device *dev;
3292 int err;
3293
3294 rtnl_lock();
3295
3296 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
3297 if (err)
3298 goto unlock_rtnl;
3299
3300 if (!drv->ops->leave_ibss) {
3301 err = -EOPNOTSUPP;
3302 goto out;
3303 }
3304
3305 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
3306 err = -EOPNOTSUPP;
3307 goto out;
3308 }
3309
3310 if (!netif_running(dev)) {
3311 err = -ENETDOWN;
3312 goto out;
3313 }
3314
3315 err = cfg80211_leave_ibss(drv, dev, false);
3316
3317out:
3318 cfg80211_put_dev(drv);
3319 dev_put(dev);
3320unlock_rtnl:
3321 rtnl_unlock();
3322 return err;
3323}
3324
2979static struct genl_ops nl80211_ops[] = { 3325static struct genl_ops nl80211_ops[] = {
2980 { 3326 {
2981 .cmd = NL80211_CMD_GET_WIPHY, 3327 .cmd = NL80211_CMD_GET_WIPHY,
@@ -3177,6 +3523,18 @@ static struct genl_ops nl80211_ops[] = {
3177 .policy = nl80211_policy, 3523 .policy = nl80211_policy,
3178 .flags = GENL_ADMIN_PERM, 3524 .flags = GENL_ADMIN_PERM,
3179 }, 3525 },
3526 {
3527 .cmd = NL80211_CMD_JOIN_IBSS,
3528 .doit = nl80211_join_ibss,
3529 .policy = nl80211_policy,
3530 .flags = GENL_ADMIN_PERM,
3531 },
3532 {
3533 .cmd = NL80211_CMD_LEAVE_IBSS,
3534 .doit = nl80211_leave_ibss,
3535 .policy = nl80211_policy,
3536 .flags = GENL_ADMIN_PERM,
3537 },
3180}; 3538};
3181static struct genl_multicast_group nl80211_mlme_mcgrp = { 3539static struct genl_multicast_group nl80211_mlme_mcgrp = {
3182 .name = "mlme", 3540 .name = "mlme",
@@ -3199,7 +3557,7 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
3199{ 3557{
3200 struct sk_buff *msg; 3558 struct sk_buff *msg;
3201 3559
3202 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3560 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3203 if (!msg) 3561 if (!msg)
3204 return; 3562 return;
3205 3563
@@ -3211,11 +3569,43 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
3211 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); 3569 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL);
3212} 3570}
3213 3571
3572static int nl80211_add_scan_req(struct sk_buff *msg,
3573 struct cfg80211_registered_device *rdev)
3574{
3575 struct cfg80211_scan_request *req = rdev->scan_req;
3576 struct nlattr *nest;
3577 int i;
3578
3579 if (WARN_ON(!req))
3580 return 0;
3581
3582 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
3583 if (!nest)
3584 goto nla_put_failure;
3585 for (i = 0; i < req->n_ssids; i++)
3586 NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
3587 nla_nest_end(msg, nest);
3588
3589 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
3590 if (!nest)
3591 goto nla_put_failure;
3592 for (i = 0; i < req->n_channels; i++)
3593 NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
3594 nla_nest_end(msg, nest);
3595
3596 if (req->ie)
3597 NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
3598
3599 return 0;
3600 nla_put_failure:
3601 return -ENOBUFS;
3602}
3603
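Note: nl80211_add_scan_req() above nests the SSIDs and frequencies using the array index as the attribute type inside each nest, so listeners iterate the nest rather than looking up fixed types. A hedged userspace sketch, assuming libnl's nested-attribute helpers and an attrs[] array already parsed from the event:

	struct nlattr *nest = attrs[NL80211_ATTR_SCAN_FREQUENCIES];
	struct nlattr *pos;
	int rem;

	if (nest)
		nla_for_each_nested(pos, nest, rem)
			printf("scanned %u MHz\n", nla_get_u32(pos));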
3214static int nl80211_send_scan_donemsg(struct sk_buff *msg, 3604static int nl80211_send_scan_donemsg(struct sk_buff *msg,
3215 struct cfg80211_registered_device *rdev, 3605 struct cfg80211_registered_device *rdev,
3216 struct net_device *netdev, 3606 struct net_device *netdev,
3217 u32 pid, u32 seq, int flags, 3607 u32 pid, u32 seq, int flags,
3218 u32 cmd) 3608 u32 cmd)
3219{ 3609{
3220 void *hdr; 3610 void *hdr;
3221 3611
@@ -3226,7 +3616,8 @@ static int nl80211_send_scan_donemsg(struct sk_buff *msg,
3226 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 3616 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3227 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 3617 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3228 3618
3229 /* XXX: we should probably bounce back the request? */ 3619 /* ignore errors and send incomplete event anyway */
3620 nl80211_add_scan_req(msg, rdev);
3230 3621
3231 return genlmsg_end(msg, hdr); 3622 return genlmsg_end(msg, hdr);
3232 3623
@@ -3240,7 +3631,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
3240{ 3631{
3241 struct sk_buff *msg; 3632 struct sk_buff *msg;
3242 3633
3243 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3634 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3244 if (!msg) 3635 if (!msg)
3245 return; 3636 return;
3246 3637
@@ -3258,7 +3649,7 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
3258{ 3649{
3259 struct sk_buff *msg; 3650 struct sk_buff *msg;
3260 3651
3261 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3652 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3262 if (!msg) 3653 if (!msg)
3263 return; 3654 return;
3264 3655
@@ -3280,7 +3671,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
3280 struct sk_buff *msg; 3671 struct sk_buff *msg;
3281 void *hdr; 3672 void *hdr;
3282 3673
3283 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3674 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3284 if (!msg) 3675 if (!msg)
3285 return; 3676 return;
3286 3677
@@ -3334,7 +3725,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3334 struct sk_buff *msg; 3725 struct sk_buff *msg;
3335 void *hdr; 3726 void *hdr;
3336 3727
3337 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 3728 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3338 if (!msg) 3729 if (!msg)
3339 return; 3730 return;
3340 3731
@@ -3375,38 +3766,208 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
3375 nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE); 3766 nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE);
3376} 3767}
3377 3768
3378void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, 3769void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
3379 struct net_device *netdev, const u8 *buf, 3770 struct net_device *netdev, const u8 *buf, size_t len)
3380 size_t len)
3381{ 3771{
3382 nl80211_send_mlme_event(rdev, netdev, buf, len, 3772 nl80211_send_mlme_event(rdev, netdev, buf, len,
3383 NL80211_CMD_DEAUTHENTICATE); 3773 NL80211_CMD_DEAUTHENTICATE);
3384} 3774}
3385 3775
3386void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, 3776void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
3387 struct net_device *netdev, const u8 *buf, 3777 struct net_device *netdev, const u8 *buf,
3388 size_t len) 3778 size_t len)
3389{ 3779{
3390 nl80211_send_mlme_event(rdev, netdev, buf, len, 3780 nl80211_send_mlme_event(rdev, netdev, buf, len,
3391 NL80211_CMD_DISASSOCIATE); 3781 NL80211_CMD_DISASSOCIATE);
3392} 3782}
3393 3783
3784static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
3785 struct net_device *netdev, int cmd,
3786 const u8 *addr)
3787{
3788 struct sk_buff *msg;
3789 void *hdr;
3790
3791 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3792 if (!msg)
3793 return;
3794
3795 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
3796 if (!hdr) {
3797 nlmsg_free(msg);
3798 return;
3799 }
3800
3801 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3802 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3803 NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
3804 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
3805
3806 if (genlmsg_end(msg, hdr) < 0) {
3807 nlmsg_free(msg);
3808 return;
3809 }
3810
3811 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
3812 return;
3813
3814 nla_put_failure:
3815 genlmsg_cancel(msg, hdr);
3816 nlmsg_free(msg);
3817}
3818
3819void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
3820 struct net_device *netdev, const u8 *addr)
3821{
3822 nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE,
3823 addr);
3824}
3825
3826void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
3827 struct net_device *netdev, const u8 *addr)
3828{
3829 nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr);
3830}
3831
3832void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
3833 struct net_device *netdev, const u8 *bssid,
3834 gfp_t gfp)
3835{
3836 struct sk_buff *msg;
3837 void *hdr;
3838
3839 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3840 if (!msg)
3841 return;
3842
3843 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS);
3844 if (!hdr) {
3845 nlmsg_free(msg);
3846 return;
3847 }
3848
3849 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3850 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3851 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
3852
3853 if (genlmsg_end(msg, hdr) < 0) {
3854 nlmsg_free(msg);
3855 return;
3856 }
3857
3858 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
3859 return;
3860
3861 nla_put_failure:
3862 genlmsg_cancel(msg, hdr);
3863 nlmsg_free(msg);
3864}
3865
3866void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
3867 struct net_device *netdev, const u8 *addr,
3868 enum nl80211_key_type key_type, int key_id,
3869 const u8 *tsc)
3870{
3871 struct sk_buff *msg;
3872 void *hdr;
3873
3874 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3875 if (!msg)
3876 return;
3877
3878 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE);
3879 if (!hdr) {
3880 nlmsg_free(msg);
3881 return;
3882 }
3883
3884 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3885 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3886 if (addr)
3887 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
3888 NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
3889 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
3890 if (tsc)
3891 NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
3892
3893 if (genlmsg_end(msg, hdr) < 0) {
3894 nlmsg_free(msg);
3895 return;
3896 }
3897
3898 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
3899 return;
3900
3901 nla_put_failure:
3902 genlmsg_cancel(msg, hdr);
3903 nlmsg_free(msg);
3904}
3905
3906void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
3907 struct ieee80211_channel *channel_before,
3908 struct ieee80211_channel *channel_after)
3909{
3910 struct sk_buff *msg;
3911 void *hdr;
3912 struct nlattr *nl_freq;
3913
3914 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3915 if (!msg)
3916 return;
3917
3918 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT);
3919 if (!hdr) {
3920 nlmsg_free(msg);
3921 return;
3922 }
3923
3924 /*
3925 * Since we are applying the beacon hint to a wiphy we know its
3926 * wiphy_idx is valid
3927 */
3928 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
3929
3930 /* Before */
3931 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
3932 if (!nl_freq)
3933 goto nla_put_failure;
3934 if (nl80211_msg_put_channel(msg, channel_before))
3935 goto nla_put_failure;
3936 nla_nest_end(msg, nl_freq);
3937
3938 /* After */
3939 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
3940 if (!nl_freq)
3941 goto nla_put_failure;
3942 if (nl80211_msg_put_channel(msg, channel_after))
3943 goto nla_put_failure;
3944 nla_nest_end(msg, nl_freq);
3945
3946 if (genlmsg_end(msg, hdr) < 0) {
3947 nlmsg_free(msg);
3948 return;
3949 }
3950
3951 genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC);
3952
3953 return;
3954
3955nla_put_failure:
3956 genlmsg_cancel(msg, hdr);
3957 nlmsg_free(msg);
3958}
3959
3394/* initialisation/exit functions */ 3960/* initialisation/exit functions */
3395 3961
3396int nl80211_init(void) 3962int nl80211_init(void)
3397{ 3963{
3398 int err, i; 3964 int err;
3399 3965
3400 err = genl_register_family(&nl80211_fam); 3966 err = genl_register_family_with_ops(&nl80211_fam,
3967 nl80211_ops, ARRAY_SIZE(nl80211_ops));
3401 if (err) 3968 if (err)
3402 return err; 3969 return err;
3403 3970
3404 for (i = 0; i < ARRAY_SIZE(nl80211_ops); i++) {
3405 err = genl_register_ops(&nl80211_fam, &nl80211_ops[i]);
3406 if (err)
3407 goto err_out;
3408 }
3409
3410 err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); 3971 err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp);
3411 if (err) 3972 if (err)
3412 goto err_out; 3973 goto err_out;
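Note: genl_register_family_with_ops() subsumes the per-op loop removed above: it registers the family, then every op in the array, and unregisters the family again if any op fails. Roughly (a sketch of the behavior, not the helper's actual implementation):

	int err = genl_register_family(family);

	if (err)
		return err;
	for (i = 0; i < n_ops; i++) {
		err = genl_register_ops(family, &ops[i]);
		if (err) {
			genl_unregister_family(family);
			return err;
		}
	}
	return 0;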
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b77af4ab80be..5c12ad13499b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -17,11 +17,31 @@ extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
17extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, 17extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
18 struct net_device *netdev, 18 struct net_device *netdev,
19 const u8 *buf, size_t len); 19 const u8 *buf, size_t len);
20extern void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, 20extern void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
21 struct net_device *netdev, 21 struct net_device *netdev,
22 const u8 *buf, size_t len); 22 const u8 *buf, size_t len);
23extern void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, 23extern void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
24 struct net_device *netdev, 24 struct net_device *netdev,
25 const u8 *buf, size_t len); 25 const u8 *buf, size_t len);
26extern void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
27 struct net_device *netdev,
28 const u8 *addr);
29extern void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
30 struct net_device *netdev,
31 const u8 *addr);
32extern void
33nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
34 struct net_device *netdev, const u8 *addr,
35 enum nl80211_key_type key_type,
36 int key_id, const u8 *tsc);
37
38extern void
39nl80211_send_beacon_hint_event(struct wiphy *wiphy,
40 struct ieee80211_channel *channel_before,
41 struct ieee80211_channel *channel_after);
42
43void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
44 struct net_device *netdev, const u8 *bssid,
45 gfp_t gfp);
26 46
27#endif /* __NET_WIRELESS_NL80211_H */ 47#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 487cb627ddba..5e14371cda70 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -37,7 +37,6 @@
37#include <linux/random.h> 37#include <linux/random.h>
38#include <linux/nl80211.h> 38#include <linux/nl80211.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <net/wireless.h>
41#include <net/cfg80211.h> 40#include <net/cfg80211.h>
42#include "core.h" 41#include "core.h"
43#include "reg.h" 42#include "reg.h"
@@ -49,12 +48,6 @@ static struct regulatory_request *last_request;
49/* To trigger userspace events */ 48/* To trigger userspace events */
50static struct platform_device *reg_pdev; 49static struct platform_device *reg_pdev;
51 50
52/* Keep the ordering from large to small */
53static u32 supported_bandwidths[] = {
54 MHZ_TO_KHZ(40),
55 MHZ_TO_KHZ(20),
56};
57
58/* 51/*
59 * Central wireless core regulatory domains, we only need two, 52 * Central wireless core regulatory domains, we only need two,
60 * the current one and a world regulatory domain in case we have no 53 * the current one and a world regulatory domain in case we have no
@@ -389,6 +382,8 @@ static int call_crda(const char *alpha2)
389/* Used by nl80211 before kmalloc'ing our regulatory domain */ 382/* Used by nl80211 before kmalloc'ing our regulatory domain */
390bool reg_is_valid_request(const char *alpha2) 383bool reg_is_valid_request(const char *alpha2)
391{ 384{
385 assert_cfg80211_lock();
386
392 if (!last_request) 387 if (!last_request)
393 return false; 388 return false;
394 389
@@ -436,19 +431,20 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
436 return true; 431 return true;
437} 432}
438 433
439/* Returns value in KHz */ 434static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
440static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range, 435 u32 center_freq_khz,
441 u32 freq) 436 u32 bw_khz)
442{ 437{
443 unsigned int i; 438 u32 start_freq_khz, end_freq_khz;
444 for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) { 439
445 u32 start_freq_khz = freq - supported_bandwidths[i]/2; 440 start_freq_khz = center_freq_khz - (bw_khz/2);
446 u32 end_freq_khz = freq + supported_bandwidths[i]/2; 441 end_freq_khz = center_freq_khz + (bw_khz/2);
447 if (start_freq_khz >= freq_range->start_freq_khz && 442
448 end_freq_khz <= freq_range->end_freq_khz) 443 if (start_freq_khz >= freq_range->start_freq_khz &&
449 return supported_bandwidths[i]; 444 end_freq_khz <= freq_range->end_freq_khz)
450 } 445 return true;
451 return 0; 446
447 return false;
452} 448}
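Note: reg_does_bw_fit() replaces the old "largest supported bandwidth" table walk with a direct containment test: a channel fits if [center - bw/2, center + bw/2] lies inside the rule's range. A worked example with illustrative numbers:

	/* A 2.4 GHz rule spanning 2402000..2482000 kHz: */
	const struct ieee80211_freq_range rule = {
		.start_freq_khz = 2402000,
		.end_freq_khz   = 2482000,
	};

	reg_does_bw_fit(&rule, 2412000, MHZ_TO_KHZ(20)); /* true: 2402000..2422000 fits */
	reg_does_bw_fit(&rule, 2412000, MHZ_TO_KHZ(40)); /* false: 2392000 < 2402000 */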
453 449
454/** 450/**
@@ -848,14 +844,17 @@ static u32 map_regdom_flags(u32 rd_flags)
848 844
849static int freq_reg_info_regd(struct wiphy *wiphy, 845static int freq_reg_info_regd(struct wiphy *wiphy,
850 u32 center_freq, 846 u32 center_freq,
851 u32 *bandwidth, 847 u32 desired_bw_khz,
852 const struct ieee80211_reg_rule **reg_rule, 848 const struct ieee80211_reg_rule **reg_rule,
853 const struct ieee80211_regdomain *custom_regd) 849 const struct ieee80211_regdomain *custom_regd)
854{ 850{
855 int i; 851 int i;
856 bool band_rule_found = false; 852 bool band_rule_found = false;
857 const struct ieee80211_regdomain *regd; 853 const struct ieee80211_regdomain *regd;
858 u32 max_bandwidth = 0; 854 bool bw_fits = false;
855
856 if (!desired_bw_khz)
857 desired_bw_khz = MHZ_TO_KHZ(20);
859 858
860 regd = custom_regd ? custom_regd : cfg80211_regdomain; 859 regd = custom_regd ? custom_regd : cfg80211_regdomain;
861 860
@@ -888,38 +887,54 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
888 if (!band_rule_found) 887 if (!band_rule_found)
889 band_rule_found = freq_in_rule_band(fr, center_freq); 888 band_rule_found = freq_in_rule_band(fr, center_freq);
890 889
891 max_bandwidth = freq_max_bandwidth(fr, center_freq); 890 bw_fits = reg_does_bw_fit(fr,
891 center_freq,
892 desired_bw_khz);
892 893
893 if (max_bandwidth && *bandwidth <= max_bandwidth) { 894 if (band_rule_found && bw_fits) {
894 *reg_rule = rr; 895 *reg_rule = rr;
895 *bandwidth = max_bandwidth; 896 return 0;
896 break;
897 } 897 }
898 } 898 }
899 899
900 if (!band_rule_found) 900 if (!band_rule_found)
901 return -ERANGE; 901 return -ERANGE;
902 902
903 return !max_bandwidth; 903 return -EINVAL;
904} 904}
905EXPORT_SYMBOL(freq_reg_info); 905EXPORT_SYMBOL(freq_reg_info);
906 906
907int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, 907int freq_reg_info(struct wiphy *wiphy,
908 const struct ieee80211_reg_rule **reg_rule) 908 u32 center_freq,
909 u32 desired_bw_khz,
910 const struct ieee80211_reg_rule **reg_rule)
909{ 911{
910 assert_cfg80211_lock(); 912 assert_cfg80211_lock();
911 return freq_reg_info_regd(wiphy, center_freq, 913 return freq_reg_info_regd(wiphy,
912 bandwidth, reg_rule, NULL); 914 center_freq,
915 desired_bw_khz,
916 reg_rule,
917 NULL);
913} 918}
914 919
920/*
921 * Note that right now we assume the desired channel bandwidth
922 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
923 * per channel, the primary and the extension channel). To support
924 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 925 * new ieee80211_channel.target_bw and re-run the regulatory check
 926 * on the wiphy with the target_bw specified. Then we can simply
 927 * use that for the desired_bw_khz below.
928 */
915static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 929static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
916 unsigned int chan_idx) 930 unsigned int chan_idx)
917{ 931{
918 int r; 932 int r;
919 u32 flags; 933 u32 flags, bw_flags = 0;
920 u32 max_bandwidth = 0; 934 u32 desired_bw_khz = MHZ_TO_KHZ(20);
921 const struct ieee80211_reg_rule *reg_rule = NULL; 935 const struct ieee80211_reg_rule *reg_rule = NULL;
922 const struct ieee80211_power_rule *power_rule = NULL; 936 const struct ieee80211_power_rule *power_rule = NULL;
937 const struct ieee80211_freq_range *freq_range = NULL;
923 struct ieee80211_supported_band *sband; 938 struct ieee80211_supported_band *sband;
924 struct ieee80211_channel *chan; 939 struct ieee80211_channel *chan;
925 struct wiphy *request_wiphy = NULL; 940 struct wiphy *request_wiphy = NULL;
@@ -934,8 +949,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
934 949
935 flags = chan->orig_flags; 950 flags = chan->orig_flags;
936 951
937 r = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq), 952 r = freq_reg_info(wiphy,
938 &max_bandwidth, &reg_rule); 953 MHZ_TO_KHZ(chan->center_freq),
954 desired_bw_khz,
955 &reg_rule);
939 956
940 if (r) { 957 if (r) {
941 /* 958 /*
@@ -978,6 +995,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
978 } 995 }
979 996
980 power_rule = &reg_rule->power_rule; 997 power_rule = &reg_rule->power_rule;
998 freq_range = &reg_rule->freq_range;
999
1000 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1001 bw_flags = IEEE80211_CHAN_NO_HT40;
981 1002
982 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1003 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
983 request_wiphy && request_wiphy == wiphy && 1004 request_wiphy && request_wiphy == wiphy &&
@@ -988,19 +1009,19 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
988 * settings 1009 * settings
989 */ 1010 */
990 chan->flags = chan->orig_flags = 1011 chan->flags = chan->orig_flags =
991 map_regdom_flags(reg_rule->flags); 1012 map_regdom_flags(reg_rule->flags) | bw_flags;
992 chan->max_antenna_gain = chan->orig_mag = 1013 chan->max_antenna_gain = chan->orig_mag =
993 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1014 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
994 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1015 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
995 chan->max_power = chan->orig_mpwr = 1016 chan->max_power = chan->orig_mpwr =
996 (int) MBM_TO_DBM(power_rule->max_eirp); 1017 (int) MBM_TO_DBM(power_rule->max_eirp);
997 return; 1018 return;
998 } 1019 }
999 1020
1000 chan->flags = flags | map_regdom_flags(reg_rule->flags); 1021 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
1001 chan->max_antenna_gain = min(chan->orig_mag, 1022 chan->max_antenna_gain = min(chan->orig_mag,
1002 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 1023 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
1003 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1024 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1004 if (chan->orig_mpwr) 1025 if (chan->orig_mpwr)
1005 chan->max_power = min(chan->orig_mpwr, 1026 chan->max_power = min(chan->orig_mpwr,
1006 (int) MBM_TO_DBM(power_rule->max_eirp)); 1027 (int) MBM_TO_DBM(power_rule->max_eirp));
@@ -1050,18 +1071,10 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1050 unsigned int chan_idx, 1071 unsigned int chan_idx,
1051 struct reg_beacon *reg_beacon) 1072 struct reg_beacon *reg_beacon)
1052{ 1073{
1053#ifdef CONFIG_CFG80211_REG_DEBUG
1054#define REG_DEBUG_BEACON_FLAG(desc) \
1055 printk(KERN_DEBUG "cfg80211: Enabling " desc " on " \
1056 "frequency: %d MHz (Ch %d) on %s\n", \
1057 reg_beacon->chan.center_freq, \
1058 ieee80211_frequency_to_channel(reg_beacon->chan.center_freq), \
1059 wiphy_name(wiphy));
1060#else
1061#define REG_DEBUG_BEACON_FLAG(desc) do {} while (0)
1062#endif
1063 struct ieee80211_supported_band *sband; 1074 struct ieee80211_supported_band *sband;
1064 struct ieee80211_channel *chan; 1075 struct ieee80211_channel *chan;
1076 bool channel_changed = false;
1077 struct ieee80211_channel chan_before;
1065 1078
1066 assert_cfg80211_lock(); 1079 assert_cfg80211_lock();
1067 1080
@@ -1071,18 +1084,28 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1071 if (likely(chan->center_freq != reg_beacon->chan.center_freq)) 1084 if (likely(chan->center_freq != reg_beacon->chan.center_freq))
1072 return; 1085 return;
1073 1086
1074 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) { 1087 if (chan->beacon_found)
1088 return;
1089
1090 chan->beacon_found = true;
1091
1092 chan_before.center_freq = chan->center_freq;
1093 chan_before.flags = chan->flags;
1094
1095 if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
1096 !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1075 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; 1097 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
1076 REG_DEBUG_BEACON_FLAG("active scanning"); 1098 channel_changed = true;
1077 } 1099 }
1078 1100
1079 if (chan->flags & IEEE80211_CHAN_NO_IBSS) { 1101 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
1102 !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) {
1080 chan->flags &= ~IEEE80211_CHAN_NO_IBSS; 1103 chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
1081 REG_DEBUG_BEACON_FLAG("beaconing"); 1104 channel_changed = true;
1082 } 1105 }
1083 1106
1084 chan->beacon_found = true; 1107 if (channel_changed)
1085#undef REG_DEBUG_BEACON_FLAG 1108 nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
1086} 1109}
1087 1110
1088/* 1111/*
@@ -1155,6 +1178,93 @@ static void reg_process_beacons(struct wiphy *wiphy)
1155 wiphy_update_beacon_reg(wiphy); 1178 wiphy_update_beacon_reg(wiphy);
1156} 1179}
1157 1180
1181static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
1182{
1183 if (!chan)
1184 return true;
1185 if (chan->flags & IEEE80211_CHAN_DISABLED)
1186 return true;
1187 /* This would happen when regulatory rules disallow HT40 completely */
1188 if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40)))
1189 return true;
1190 return false;
1191}
1192
1193static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1194 enum ieee80211_band band,
1195 unsigned int chan_idx)
1196{
1197 struct ieee80211_supported_band *sband;
1198 struct ieee80211_channel *channel;
1199 struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
1200 unsigned int i;
1201
1202 assert_cfg80211_lock();
1203
1204 sband = wiphy->bands[band];
1205 BUG_ON(chan_idx >= sband->n_channels);
1206 channel = &sband->channels[chan_idx];
1207
1208 if (is_ht40_not_allowed(channel)) {
1209 channel->flags |= IEEE80211_CHAN_NO_HT40;
1210 return;
1211 }
1212
1213 /*
1214 * We need to ensure the extension channels exist to
1215 * be able to use HT40- or HT40+, this finds them (or not)
1216 */
1217 for (i = 0; i < sband->n_channels; i++) {
1218 struct ieee80211_channel *c = &sband->channels[i];
1219 if (c->center_freq == (channel->center_freq - 20))
1220 channel_before = c;
1221 if (c->center_freq == (channel->center_freq + 20))
1222 channel_after = c;
1223 }
1224
1225 /*
 1226 * Please note that this assumes the target bandwidth is 20 MHz;
 1227 * if that ever changes we also need to update the logic below
 1228 * to account for it.
1229 */
1230 if (is_ht40_not_allowed(channel_before))
1231 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
1232 else
1233 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
1234
1235 if (is_ht40_not_allowed(channel_after))
1236 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
1237 else
1238 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
1239}
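Note: the companion lookup above matches channels exactly ±20 MHz away; on 2.4 GHz with 5 MHz spacing that is four channel numbers. A hedged helper sketch mirroring that loop (ht40_companion is a hypothetical name): channel 1 at 2412 MHz has no 2392 MHz partner, so only HT40+ can survive for it.

	static struct ieee80211_channel *
	ht40_companion(struct ieee80211_supported_band *sband,
		       struct ieee80211_channel *chan, int dir /* +1 or -1 */)
	{
		unsigned int i;

		for (i = 0; i < sband->n_channels; i++)
			if (sband->channels[i].center_freq ==
			    chan->center_freq + dir * 20)
				return &sband->channels[i];
		return NULL;
	}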
1240
1241static void reg_process_ht_flags_band(struct wiphy *wiphy,
1242 enum ieee80211_band band)
1243{
1244 unsigned int i;
1245 struct ieee80211_supported_band *sband;
1246
1247 BUG_ON(!wiphy->bands[band]);
1248 sband = wiphy->bands[band];
1249
1250 for (i = 0; i < sband->n_channels; i++)
1251 reg_process_ht_flags_channel(wiphy, band, i);
1252}
1253
1254static void reg_process_ht_flags(struct wiphy *wiphy)
1255{
1256 enum ieee80211_band band;
1257
1258 if (!wiphy)
1259 return;
1260
1261 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1262 if (wiphy->bands[band])
1263 reg_process_ht_flags_band(wiphy, band);
1264 }
1265
1266}
1267
1158void wiphy_update_regulatory(struct wiphy *wiphy, 1268void wiphy_update_regulatory(struct wiphy *wiphy,
1159 enum nl80211_reg_initiator initiator) 1269 enum nl80211_reg_initiator initiator)
1160{ 1270{
@@ -1168,6 +1278,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
1168 } 1278 }
1169out: 1279out:
1170 reg_process_beacons(wiphy); 1280 reg_process_beacons(wiphy);
1281 reg_process_ht_flags(wiphy);
1171 if (wiphy->reg_notifier) 1282 if (wiphy->reg_notifier)
1172 wiphy->reg_notifier(wiphy, last_request); 1283 wiphy->reg_notifier(wiphy, last_request);
1173} 1284}
@@ -1178,9 +1289,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1178 const struct ieee80211_regdomain *regd) 1289 const struct ieee80211_regdomain *regd)
1179{ 1290{
1180 int r; 1291 int r;
1181 u32 max_bandwidth = 0; 1292 u32 desired_bw_khz = MHZ_TO_KHZ(20);
1293 u32 bw_flags = 0;
1182 const struct ieee80211_reg_rule *reg_rule = NULL; 1294 const struct ieee80211_reg_rule *reg_rule = NULL;
1183 const struct ieee80211_power_rule *power_rule = NULL; 1295 const struct ieee80211_power_rule *power_rule = NULL;
1296 const struct ieee80211_freq_range *freq_range = NULL;
1184 struct ieee80211_supported_band *sband; 1297 struct ieee80211_supported_band *sband;
1185 struct ieee80211_channel *chan; 1298 struct ieee80211_channel *chan;
1186 1299
@@ -1190,8 +1303,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1190 BUG_ON(chan_idx >= sband->n_channels); 1303 BUG_ON(chan_idx >= sband->n_channels);
1191 chan = &sband->channels[chan_idx]; 1304 chan = &sband->channels[chan_idx];
1192 1305
1193 r = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq), 1306 r = freq_reg_info_regd(wiphy,
1194 &max_bandwidth, &reg_rule, regd); 1307 MHZ_TO_KHZ(chan->center_freq),
1308 desired_bw_khz,
1309 &reg_rule,
1310 regd);
1195 1311
1196 if (r) { 1312 if (r) {
1197 chan->flags = IEEE80211_CHAN_DISABLED; 1313 chan->flags = IEEE80211_CHAN_DISABLED;
@@ -1199,10 +1315,14 @@ static void handle_channel_custom(struct wiphy *wiphy,
1199 } 1315 }
1200 1316
1201 power_rule = &reg_rule->power_rule; 1317 power_rule = &reg_rule->power_rule;
1318 freq_range = &reg_rule->freq_range;
1319
1320 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1321 bw_flags = IEEE80211_CHAN_NO_HT40;
1202 1322
1203 chan->flags |= map_regdom_flags(reg_rule->flags); 1323 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1204 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1324 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1205 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1325 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1206 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 1326 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
1207} 1327}
1208 1328
@@ -1224,13 +1344,22 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1224 const struct ieee80211_regdomain *regd) 1344 const struct ieee80211_regdomain *regd)
1225{ 1345{
1226 enum ieee80211_band band; 1346 enum ieee80211_band band;
1347 unsigned int bands_set = 0;
1227 1348
1228 mutex_lock(&cfg80211_mutex); 1349 mutex_lock(&cfg80211_mutex);
1229 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1350 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1230 if (wiphy->bands[band]) 1351 if (!wiphy->bands[band])
1231 handle_band_custom(wiphy, band, regd); 1352 continue;
1353 handle_band_custom(wiphy, band, regd);
1354 bands_set++;
1232 } 1355 }
1233 mutex_unlock(&cfg80211_mutex); 1356 mutex_unlock(&cfg80211_mutex);
1357
1358 /*
1359 * no point in calling this if it won't have any effect
 1360 * on your device's supported bands.
1361 */
1362 WARN_ON(!bands_set);
1234} 1363}
1235EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1364EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1236 1365
@@ -2000,7 +2129,12 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2000 * driver wanted to the wiphy to deal with conflicts 2129 * driver wanted to the wiphy to deal with conflicts
2001 */ 2130 */
2002 2131
2003 BUG_ON(request_wiphy->regd); 2132 /*
2133 * Userspace could have sent two replies with only
2134 * one kernel request.
2135 */
2136 if (request_wiphy->regd)
2137 return -EALREADY;
2004 2138
2005 r = reg_copy_regd(&request_wiphy->regd, rd); 2139 r = reg_copy_regd(&request_wiphy->regd, rd);
2006 if (r) 2140 if (r)
@@ -2042,7 +2176,13 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2042 * the country IE rd with what CRDA believes that country should have 2176 * the country IE rd with what CRDA believes that country should have
2043 */ 2177 */
2044 2178
2045 BUG_ON(!country_ie_regdomain); 2179 /*
2180 * Userspace could have sent two replies with only
2181 * one kernel request. By the second reply we would have
2182 * already processed and consumed the country_ie_regdomain.
2183 */
2184 if (!country_ie_regdomain)
2185 return -EALREADY;
2046 BUG_ON(rd == country_ie_regdomain); 2186 BUG_ON(rd == country_ie_regdomain);
2047 2187
2048 /* 2188 /*
@@ -2119,14 +2259,14 @@ void reg_device_remove(struct wiphy *wiphy)
2119 2259
2120 assert_cfg80211_lock(); 2260 assert_cfg80211_lock();
2121 2261
2262 kfree(wiphy->regd);
2263
2122 if (last_request) 2264 if (last_request)
2123 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2265 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2124 2266
2125 kfree(wiphy->regd); 2267 if (!request_wiphy || request_wiphy != wiphy)
2126 if (!last_request || !request_wiphy)
2127 return;
2128 if (request_wiphy != wiphy)
2129 return; 2268 return;
2269
2130 last_request->wiphy_idx = WIPHY_IDX_STALE; 2270 last_request->wiphy_idx = WIPHY_IDX_STALE;
2131 last_request->country_ie_env = ENVIRON_ANY; 2271 last_request->country_ie_env = ENVIRON_ANY;
2132} 2272}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 1f260c40b6ca..e95b638b919f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -29,13 +29,14 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
 		goto out;
 
 	WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
-	wiphy_to_dev(request->wiphy)->scan_req = NULL;
 
 	if (aborted)
 		nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev);
 	else
 		nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev);
 
+	wiphy_to_dev(request->wiphy)->scan_req = NULL;
+
 #ifdef CONFIG_WIRELESS_EXT
 	if (!aborted) {
 		memset(&wrqu, 0, sizeof(wrqu));
@@ -377,18 +378,16 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 		size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
 		size_t ielen = res->pub.len_information_elements;
 
-		if (ksize(found) >= used + ielen) {
+		if (!found->ies_allocated && ksize(found) >= used + ielen) {
 			memcpy(found->pub.information_elements,
 			       res->pub.information_elements, ielen);
 			found->pub.len_information_elements = ielen;
 		} else {
 			u8 *ies = found->pub.information_elements;
 
-			if (found->ies_allocated) {
-				if (ksize(ies) < ielen)
-					ies = krealloc(ies, ielen,
-						       GFP_ATOMIC);
-			} else
+			if (found->ies_allocated)
+				ies = krealloc(ies, ielen, GFP_ATOMIC);
+			else
 				ies = kmalloc(ielen, GFP_ATOMIC);
 
 			if (ies) {
@@ -415,6 +414,55 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 	return found;
 }
 
+struct cfg80211_bss*
+cfg80211_inform_bss(struct wiphy *wiphy,
+		    struct ieee80211_channel *channel,
+		    const u8 *bssid,
+		    u64 timestamp, u16 capability, u16 beacon_interval,
+		    const u8 *ie, size_t ielen,
+		    s32 signal, gfp_t gfp)
+{
+	struct cfg80211_internal_bss *res;
+	size_t privsz;
+
+	if (WARN_ON(!wiphy))
+		return NULL;
+
+	privsz = wiphy->bss_priv_size;
+
+	if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
+		    (signal < 0 || signal > 100)))
+		return NULL;
+
+	res = kzalloc(sizeof(*res) + privsz + ielen, gfp);
+	if (!res)
+		return NULL;
+
+	memcpy(res->pub.bssid, bssid, ETH_ALEN);
+	res->pub.channel = channel;
+	res->pub.signal = signal;
+	res->pub.tsf = timestamp;
+	res->pub.beacon_interval = beacon_interval;
+	res->pub.capability = capability;
+	/* point to after the private area */
+	res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
+	memcpy(res->pub.information_elements, ie, ielen);
+	res->pub.len_information_elements = ielen;
+
+	kref_init(&res->ref);
+
+	res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0);
+	if (!res)
+		return NULL;
+
+	if (res->pub.capability & WLAN_CAPABILITY_ESS)
+		regulatory_hint_found_beacon(wiphy, channel, gfp);
+
+	/* cfg80211_bss_update gives us a referenced result */
+	return &res->pub;
+}
+EXPORT_SYMBOL(cfg80211_inform_bss);
+
 struct cfg80211_bss *
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
 			  struct ieee80211_channel *channel,
@@ -605,7 +653,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
 	cfg80211_put_dev(rdev);
 	return err;
 }
-EXPORT_SYMBOL(cfg80211_wext_siwscan);
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
 
 static void ieee80211_scan_add_ies(struct iw_request_info *info,
 				   struct cfg80211_bss *bss,
@@ -914,5 +962,5 @@ int cfg80211_wext_giwscan(struct net_device *dev,
 	cfg80211_put_dev(rdev);
 	return res;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwscan);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
 #endif
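The new cfg80211_inform_bss() above carves one kzalloc() into three regions: the internal BSS struct, a driver-private area of bss_priv_size bytes, and the IE bytes, deriving the IE pointer past the private area. A minimal standalone sketch of the same layout trick (plain userspace C with hypothetical names, not kernel code):

/* Hypothetical sketch: one allocation holding a fixed header, a
 * variable-size private area, and trailing IE bytes, with the IE
 * pointer computed the same way as in cfg80211_inform_bss(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bss {
	size_t ie_len;
	unsigned char *ies;	/* points into the same allocation */
	/* priv_size private bytes follow the struct, then the IEs */
};

static struct bss *bss_alloc(size_t priv_size,
			     const unsigned char *ie, size_t ie_len)
{
	struct bss *b = calloc(1, sizeof(*b) + priv_size + ie_len);

	if (!b)
		return NULL;
	/* point to after the private area */
	b->ies = (unsigned char *)b + sizeof(*b) + priv_size;
	memcpy(b->ies, ie, ie_len);
	b->ie_len = ie_len;
	return b;
}

int main(void)
{
	const unsigned char ie[] = { 0x00, 0x04, 't', 'e', 's', 't' };
	struct bss *b = bss_alloc(32, ie, sizeof(ie));

	if (b) {
		printf("ies live %zu bytes past the struct\n",
		       (size_t)(b->ies - (unsigned char *)b));
		free(b);	/* one free() releases everything */
	}
	return 0;
}

A single allocation keeps teardown to one kfree() on the kernel side, and it is also what lets cfg80211_bss_update() above reuse the block via ksize() when the same BSS is seen again.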
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 487cdd9bcffc..25550692dda6 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1,10 +1,12 @@
 /*
  * Wireless utility functions
  *
- * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
  */
-#include <net/wireless.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <net/cfg80211.h>
+#include <net/ip.h>
 #include "core.h"
 
 struct ieee80211_rate *
@@ -138,3 +140,365 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
 		if (wiphy->bands[band])
 			set_mandatory_flags_band(wiphy->bands[band], band);
 }
+
+int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
+				   const u8 *mac_addr)
+{
+	if (key_idx > 5)
+		return -EINVAL;
+
+	/*
+	 * Disallow pairwise keys with non-zero index unless it's WEP
+	 * (because current deployments use pairwise WEP keys with
+	 * non-zero indices but 802.11i clearly specifies to use zero)
+	 */
+	if (mac_addr && key_idx &&
+	    params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+	    params->cipher != WLAN_CIPHER_SUITE_WEP104)
+		return -EINVAL;
+
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		if (params->key_len != WLAN_KEY_LEN_WEP40)
+			return -EINVAL;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (params->key_len != WLAN_KEY_LEN_TKIP)
+			return -EINVAL;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (params->key_len != WLAN_KEY_LEN_CCMP)
+			return -EINVAL;
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		if (params->key_len != WLAN_KEY_LEN_WEP104)
+			return -EINVAL;
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (params->seq) {
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			/* These ciphers do not use key sequence */
+			return -EINVAL;
+		case WLAN_CIPHER_SUITE_TKIP:
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			if (params->seq_len != 6)
+				return -EINVAL;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+const unsigned char rfc1042_header[] __aligned(2) =
+	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+EXPORT_SYMBOL(rfc1042_header);
+
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+const unsigned char bridge_tunnel_header[] __aligned(2) =
+	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+EXPORT_SYMBOL(bridge_tunnel_header);
+
+unsigned int ieee80211_hdrlen(__le16 fc)
+{
+	unsigned int hdrlen = 24;
+
+	if (ieee80211_is_data(fc)) {
+		if (ieee80211_has_a4(fc))
+			hdrlen = 30;
+		if (ieee80211_is_data_qos(fc))
+			hdrlen += IEEE80211_QOS_CTL_LEN;
+		goto out;
+	}
+
+	if (ieee80211_is_ctl(fc)) {
+		/*
+		 * ACK and CTS are 10 bytes, all others 16. To see how
+		 * to get this condition consider
+		 *   subtype mask:     0b0000000011110000 (0x00F0)
+		 *   ACK subtype:      0b0000000011010000 (0x00D0)
+		 *   CTS subtype:      0b0000000011000000 (0x00C0)
+		 *   bits that matter:              ^^^   (0x00E0)
+		 *   value of those:   0b0000000011000000 (0x00C0)
+		 */
+		if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
+			hdrlen = 10;
+		else
+			hdrlen = 16;
+	}
+out:
+	return hdrlen;
+}
+EXPORT_SYMBOL(ieee80211_hdrlen);
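The mask derivation in the ieee80211_hdrlen() comment can be checked by brute force over all sixteen control subtypes. A throwaway check (plain C, not kernel code; 0x0004 sets the control type bits, mirroring the frame-control layout the comment assumes):

/* Standalone check that (fc & 0x00E0) == 0x00C0 matches exactly the
 * CTS (0xC0) and ACK (0xD0) control subtypes and no others. */
#include <stdio.h>

int main(void)
{
	unsigned int subtype;

	for (subtype = 0x00; subtype <= 0xF0; subtype += 0x10) {
		/* control frame: type bits (2-3) = 01 */
		unsigned int fc = 0x0004 | subtype;

		if ((fc & 0x00E0) == 0x00C0)
			printf("subtype 0x%02X -> 10-byte header\n", subtype);
		else
			printf("subtype 0x%02X -> 16-byte header\n", subtype);
	}
	return 0;
}

Running it prints the 10-byte case only for subtypes 0xC0 and 0xD0, which is the condition the comment derives.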
+
+unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
+{
+	const struct ieee80211_hdr *hdr =
+			(const struct ieee80211_hdr *)skb->data;
+	unsigned int hdrlen;
+
+	if (unlikely(skb->len < 10))
+		return 0;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (unlikely(hdrlen > skb->len))
+		return 0;
+	return hdrlen;
+}
+EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
+
+static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+{
+	int ae = meshhdr->flags & MESH_FLAGS_AE;
+	/* 7.1.3.5a.2 */
+	switch (ae) {
+	case 0:
+		return 6;
+	case 1:
+		return 12;
+	case 2:
+		return 18;
+	case 3:
+		return 24;
+	default:
+		return 6;
+	}
+}
+
+int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
+			   enum nl80211_iftype iftype)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u16 hdrlen, ethertype;
+	u8 *payload;
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN] __aligned(2);
+
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+		return -1;
+
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+	/* convert IEEE 802.11 header + possible LLC headers into Ethernet
+	 * header
+	 * IEEE 802.11 address fields:
+	 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
+	 *   0     0   DA    SA    BSSID n/a
+	 *   0     1   DA    BSSID SA    n/a
+	 *   1     0   BSSID SA    DA    n/a
+	 *   1     1   RA    TA    DA    SA
+	 */
+	memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
+	memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
+
+	switch (hdr->frame_control &
+		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+	case cpu_to_le16(IEEE80211_FCTL_TODS):
+		if (unlikely(iftype != NL80211_IFTYPE_AP &&
+			     iftype != NL80211_IFTYPE_AP_VLAN))
+			return -1;
+		break;
+	case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
+		if (unlikely(iftype != NL80211_IFTYPE_WDS &&
+			     iftype != NL80211_IFTYPE_MESH_POINT))
+			return -1;
+		if (iftype == NL80211_IFTYPE_MESH_POINT) {
+			struct ieee80211s_hdr *meshdr =
+				(struct ieee80211s_hdr *) (skb->data + hdrlen);
+			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+			if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+				memcpy(dst, meshdr->eaddr1, ETH_ALEN);
+				memcpy(src, meshdr->eaddr2, ETH_ALEN);
+			}
+		}
+		break;
+	case cpu_to_le16(IEEE80211_FCTL_FROMDS):
+		if (iftype != NL80211_IFTYPE_STATION ||
+		    (is_multicast_ether_addr(dst) &&
+		     !compare_ether_addr(src, addr)))
+			return -1;
+		break;
+	case cpu_to_le16(0):
+		if (iftype != NL80211_IFTYPE_ADHOC)
+			return -1;
+		break;
+	}
+
+	if (unlikely(skb->len - hdrlen < 8))
+		return -1;
+
+	payload = skb->data + hdrlen;
+	ethertype = (payload[6] << 8) | payload[7];
+
+	if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+		    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+		   compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
+		/* remove RFC1042 or Bridge-Tunnel encapsulation and
+		 * replace EtherType */
+		skb_pull(skb, hdrlen + 6);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	} else {
+		struct ethhdr *ehdr;
+		__be16 len;
+
+		skb_pull(skb, hdrlen);
+		len = htons(skb->len);
+		ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
+		memcpy(ehdr->h_dest, dst, ETH_ALEN);
+		memcpy(ehdr->h_source, src, ETH_ALEN);
+		ehdr->h_proto = len;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_data_to_8023);
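The SNAP test at the end of ieee80211_data_to_8023() is easy to exercise on raw bytes: strip RFC 1042 encapsulation unless the EtherType is AARP or IPX, and always strip the bridge-tunnel form. A standalone sketch (plain C, not kernel code; 0x80F3 and 0x8137 are the standard EtherType values behind ETH_P_AARP and ETH_P_IPX):

/* Standalone model of the LLC/SNAP check; the header bytes mirror the
 * rfc1042_header and bridge_tunnel_header arrays in the patch. */
#include <stdio.h>
#include <string.h>

static const unsigned char rfc1042[6]  = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const unsigned char brtunnel[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

static int snap_strippable(const unsigned char *payload)
{
	unsigned int ethertype = (payload[6] << 8) | payload[7];

	if (memcmp(payload, rfc1042, 6) == 0 &&
	    ethertype != 0x80F3 && ethertype != 0x8137)
		return 1;				/* RFC 1042 */
	return memcmp(payload, brtunnel, 6) == 0;	/* bridge-tunnel */
}

int main(void)
{
	/* RFC 1042 SNAP followed by EtherType 0x0800 (IPv4) */
	unsigned char ipv4[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 };
	/* RFC 1042 SNAP carrying IPX must keep its encapsulation */
	unsigned char ipx[]  = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x81, 0x37 };

	printf("ipv4: %d, ipx: %d\n", snap_strippable(ipv4), snap_strippable(ipx));
	return 0;	/* prints "ipv4: 1, ipx: 0" */
}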
+
+int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
+			     enum nl80211_iftype iftype, u8 *bssid, bool qos)
+{
+	struct ieee80211_hdr hdr;
+	u16 hdrlen, ethertype;
+	__le16 fc;
+	const u8 *encaps_data;
+	int encaps_len, skip_header_bytes;
+	int nh_pos, h_pos;
+	int head_need;
+
+	if (unlikely(skb->len < ETH_HLEN))
+		return -EINVAL;
+
+	nh_pos = skb_network_header(skb) - skb->data;
+	h_pos = skb_transport_header(skb) - skb->data;
+
+	/* convert Ethernet header to proper 802.11 header (based on
+	 * operation mode) */
+	ethertype = (skb->data[12] << 8) | skb->data[13];
+	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+	switch (iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+		/* DA BSSID SA */
+		memcpy(hdr.addr1, skb->data, ETH_ALEN);
+		memcpy(hdr.addr2, addr, ETH_ALEN);
+		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
+		hdrlen = 24;
+		break;
+	case NL80211_IFTYPE_STATION:
+		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+		/* BSSID SA DA */
+		memcpy(hdr.addr1, bssid, ETH_ALEN);
+		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+		memcpy(hdr.addr3, skb->data, ETH_ALEN);
+		hdrlen = 24;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		/* DA SA BSSID */
+		memcpy(hdr.addr1, skb->data, ETH_ALEN);
+		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+		memcpy(hdr.addr3, bssid, ETH_ALEN);
+		hdrlen = 24;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (qos) {
+		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+		hdrlen += 2;
+	}
+
+	hdr.frame_control = fc;
+	hdr.duration_id = 0;
+	hdr.seq_ctrl = 0;
+
+	skip_header_bytes = ETH_HLEN;
+	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
+		encaps_data = bridge_tunnel_header;
+		encaps_len = sizeof(bridge_tunnel_header);
+		skip_header_bytes -= 2;
+	} else if (ethertype > 0x600) {
+		encaps_data = rfc1042_header;
+		encaps_len = sizeof(rfc1042_header);
+		skip_header_bytes -= 2;
+	} else {
+		encaps_data = NULL;
+		encaps_len = 0;
+	}
+
+	skb_pull(skb, skip_header_bytes);
+	nh_pos -= skip_header_bytes;
+	h_pos -= skip_header_bytes;
+
+	head_need = hdrlen + encaps_len - skb_headroom(skb);
+
+	if (head_need > 0 || skb_cloned(skb)) {
+		head_need = max(head_need, 0);
+		if (head_need)
+			skb_orphan(skb);
+
+		if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
+			printk(KERN_ERR "failed to reallocate Tx buffer\n");
+			return -ENOMEM;
+		}
+		skb->truesize += head_need;
+	}
+
+	if (encaps_data) {
+		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
+		nh_pos += encaps_len;
+		h_pos += encaps_len;
+	}
+
+	memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
+
+	nh_pos += hdrlen;
+	h_pos += hdrlen;
+
+	/* Update skb pointers to various headers since this modified frame
+	 * is going to go through Linux networking code that may potentially
+	 * need things like pointer to IP header. */
+	skb_set_mac_header(skb, 0);
+	skb_set_network_header(skb, nh_pos);
+	skb_set_transport_header(skb, h_pos);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_data_from_8023);
+
+/* Given a data frame determine the 802.1p/1d tag to use. */
+unsigned int cfg80211_classify8021d(struct sk_buff *skb)
+{
+	unsigned int dscp;
+
+	/* skb->priority values from 256->263 are magic values to
+	 * directly indicate a specific 802.1d priority. This is used
+	 * to allow 802.1d priority to be passed directly in from VLAN
+	 * tags, etc.
+	 */
+	if (skb->priority >= 256 && skb->priority <= 263)
+		return skb->priority - 256;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		dscp = ip_hdr(skb)->tos & 0xfc;
+		break;
+	default:
+		return 0;
+	}
+
+	return dscp >> 5;
+}
+EXPORT_SYMBOL(cfg80211_classify8021d);
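cfg80211_classify8021d() reduces an IPv4 TOS byte to an 802.1d priority by masking off the two ECN bits and keeping the top three DSCP bits. A one-line arithmetic check (plain C, not kernel code):

/* Standalone check of the mapping: TOS 0xb8 is the common EF marking
 * (DSCP 46) and lands in 802.1d priority 5. */
#include <stdio.h>

int main(void)
{
	unsigned int tos = 0xb8;
	unsigned int dscp = tos & 0xfc;		/* drop the 2 ECN bits */

	printf("tos 0x%02x -> 802.1d priority %u\n", tos, dscp >> 5);
	return 0;	/* prints "tos 0xb8 -> 802.1d priority 5" */
}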
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0fd1db6e95bb..d030c5315672 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -5,13 +5,14 @@
  * into cfg80211, when that happens all the exports here go away and
  * we directly assign the wireless handlers of wireless interfaces.
  *
- * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
  */
 
 #include <linux/wireless.h>
 #include <linux/nl80211.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
-#include <net/wireless.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -57,7 +58,7 @@ int cfg80211_wext_giwname(struct net_device *dev,
 
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwname);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwname);
 
 int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 			  u32 *mode, char *extra)
@@ -108,7 +109,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 
 	return ret;
 }
-EXPORT_SYMBOL(cfg80211_wext_siwmode);
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
 
 int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
 			  u32 *mode, char *extra)
@@ -143,7 +144,7 @@ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwmode);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode);
 
 
 int cfg80211_wext_giwrange(struct net_device *dev,
@@ -206,7 +207,6 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
 			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
 
-
 	for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
 		int i;
 		struct ieee80211_supported_band *sband;
@@ -240,4 +240,590 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwrange);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
+
+int cfg80211_wext_siwmlme(struct net_device *dev,
+			  struct iw_request_info *info,
+			  struct iw_point *data, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct iw_mlme *mlme = (struct iw_mlme *)extra;
+	struct cfg80211_registered_device *rdev;
+	union {
+		struct cfg80211_disassoc_request disassoc;
+		struct cfg80211_deauth_request deauth;
+	} cmd;
+
+	if (!wdev)
+		return -EOPNOTSUPP;
+
+	rdev = wiphy_to_dev(wdev->wiphy);
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+	if (mlme->addr.sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	switch (mlme->cmd) {
+	case IW_MLME_DEAUTH:
+		if (!rdev->ops->deauth)
+			return -EOPNOTSUPP;
+		cmd.deauth.peer_addr = mlme->addr.sa_data;
+		cmd.deauth.reason_code = mlme->reason_code;
+		return rdev->ops->deauth(wdev->wiphy, dev, &cmd.deauth);
+	case IW_MLME_DISASSOC:
+		if (!rdev->ops->disassoc)
+			return -EOPNOTSUPP;
+		cmd.disassoc.peer_addr = mlme->addr.sa_data;
+		cmd.disassoc.reason_code = mlme->reason_code;
+		return rdev->ops->disassoc(wdev->wiphy, dev, &cmd.disassoc);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
+
+
+/**
+ * cfg80211_wext_freq - get wext frequency for non-"auto"
+ * @wiphy: the wiphy
+ * @freq: the wext freq encoding
+ *
+ * Returns a channel, %NULL for auto, or an ERR_PTR for errors!
+ */
+struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy,
+					     struct iw_freq *freq)
+{
+	struct ieee80211_channel *chan;
+	int f;
+
+	/*
+	 * Parse frequency - return NULL for auto and
+	 * -EINVAL for impossible things.
+	 */
+	if (freq->e == 0) {
+		if (freq->m < 0)
+			return NULL;
+		f = ieee80211_channel_to_frequency(freq->m);
+	} else {
+		int i, div = 1000000;
+		for (i = 0; i < freq->e; i++)
+			div /= 10;
+		if (div <= 0)
+			return ERR_PTR(-EINVAL);
+		f = freq->m / div;
+	}
+
+	/*
+	 * Look up channel struct and return -EINVAL when
+	 * it cannot be found.
+	 */
+	chan = ieee80211_get_channel(wiphy, f);
+	if (!chan)
+		return ERR_PTR(-EINVAL);
+	return chan;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_freq);
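cfg80211_wext_freq() turns the wext (m, e) pair, meaning m * 10^e Hz, into MHz by dividing m by 10^(6-e). A standalone rendering of just that arithmetic (plain C, not kernel code):

/* Standalone model of the wext frequency reduction used above. */
#include <stdio.h>

static int wext_freq_to_mhz(int m, int e)
{
	int i, div = 1000000;

	for (i = 0; i < e; i++)
		div /= 10;
	if (div <= 0)
		return -1;	/* e > 6: too fine for this integer reduction */
	return m / div;
}

int main(void)
{
	/* 2.412 GHz can be encoded as m=2412, e=6 or m=241200, e=4 */
	printf("%d MHz\n", wext_freq_to_mhz(2412, 6));		/* 2412 */
	printf("%d MHz\n", wext_freq_to_mhz(241200, 4));	/* 2412 */
	return 0;
}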
+
+int cfg80211_wext_siwrts(struct net_device *dev,
+			 struct iw_request_info *info,
+			 struct iw_param *rts, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	u32 orts = wdev->wiphy->rts_threshold;
+	int err;
+
+	if (rts->disabled || !rts->fixed)
+		wdev->wiphy->rts_threshold = (u32) -1;
+	else if (rts->value < 0)
+		return -EINVAL;
+	else
+		wdev->wiphy->rts_threshold = rts->value;
+
+	err = rdev->ops->set_wiphy_params(wdev->wiphy,
+					  WIPHY_PARAM_RTS_THRESHOLD);
+	if (err)
+		wdev->wiphy->rts_threshold = orts;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts);
+
+int cfg80211_wext_giwrts(struct net_device *dev,
+			 struct iw_request_info *info,
+			 struct iw_param *rts, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	rts->value = wdev->wiphy->rts_threshold;
+	rts->disabled = rts->value == (u32) -1;
+	rts->fixed = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts);
+
+int cfg80211_wext_siwfrag(struct net_device *dev,
+			  struct iw_request_info *info,
+			  struct iw_param *frag, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	u32 ofrag = wdev->wiphy->frag_threshold;
+	int err;
+
+	if (frag->disabled || !frag->fixed)
+		wdev->wiphy->frag_threshold = (u32) -1;
+	else if (frag->value < 256)
+		return -EINVAL;
+	else {
+		/* Fragment length must be even, so strip LSB. */
+		wdev->wiphy->frag_threshold = frag->value & ~0x1;
+	}
+
+	err = rdev->ops->set_wiphy_params(wdev->wiphy,
+					  WIPHY_PARAM_FRAG_THRESHOLD);
+	if (err)
+		wdev->wiphy->frag_threshold = ofrag;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag);
+
+int cfg80211_wext_giwfrag(struct net_device *dev,
+			  struct iw_request_info *info,
+			  struct iw_param *frag, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	frag->value = wdev->wiphy->frag_threshold;
+	frag->disabled = frag->value == (u32) -1;
+	frag->fixed = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
+
+int cfg80211_wext_siwretry(struct net_device *dev,
+			   struct iw_request_info *info,
+			   struct iw_param *retry, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	u32 changed = 0;
+	u8 olong = wdev->wiphy->retry_long;
+	u8 oshort = wdev->wiphy->retry_short;
+	int err;
+
+	if (retry->disabled ||
+	    (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
+		return -EINVAL;
+
+	if (retry->flags & IW_RETRY_LONG) {
+		wdev->wiphy->retry_long = retry->value;
+		changed |= WIPHY_PARAM_RETRY_LONG;
+	} else if (retry->flags & IW_RETRY_SHORT) {
+		wdev->wiphy->retry_short = retry->value;
+		changed |= WIPHY_PARAM_RETRY_SHORT;
+	} else {
+		wdev->wiphy->retry_short = retry->value;
+		wdev->wiphy->retry_long = retry->value;
+		changed |= WIPHY_PARAM_RETRY_LONG;
+		changed |= WIPHY_PARAM_RETRY_SHORT;
+	}
+
+	if (!changed)
+		return 0;
+
+	err = rdev->ops->set_wiphy_params(wdev->wiphy, changed);
+	if (err) {
+		wdev->wiphy->retry_short = oshort;
+		wdev->wiphy->retry_long = olong;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);
+
+int cfg80211_wext_giwretry(struct net_device *dev,
+			   struct iw_request_info *info,
+			   struct iw_param *retry, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	retry->disabled = 0;
+
+	if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) {
+		/*
+		 * First return short value, iwconfig will ask long value
+		 * later if needed
+		 */
+		retry->flags |= IW_RETRY_LIMIT;
+		retry->value = wdev->wiphy->retry_short;
+		if (wdev->wiphy->retry_long != wdev->wiphy->retry_short)
+			retry->flags |= IW_RETRY_LONG;
+
+		return 0;
+	}
+
+	if (retry->flags & IW_RETRY_LONG) {
+		retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
+		retry->value = wdev->wiphy->retry_long;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
+
+static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
+				   struct net_device *dev, const u8 *addr,
+				   bool remove, bool tx_key, int idx,
+				   struct key_params *params)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int err;
+
+	if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+		if (!rdev->ops->set_default_mgmt_key)
+			return -EOPNOTSUPP;
+
+		if (idx < 4 || idx > 5)
+			return -EINVAL;
+	} else if (idx < 0 || idx > 3)
+		return -EINVAL;
+
+	if (remove) {
+		err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
+		if (!err) {
+			if (idx == wdev->wext.default_key)
+				wdev->wext.default_key = -1;
+			else if (idx == wdev->wext.default_mgmt_key)
+				wdev->wext.default_mgmt_key = -1;
+		}
+		/*
+		 * Applications using wireless extensions expect to be
+		 * able to delete keys that don't exist, so allow that.
+		 */
+		if (err == -ENOENT)
+			return 0;
+
+		return err;
+	} else {
+		if (addr)
+			tx_key = false;
+
+		if (cfg80211_validate_key_settings(params, idx, addr))
+			return -EINVAL;
+
+		err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params);
+		if (err)
+			return err;
+
+		if (tx_key || (!addr && wdev->wext.default_key == -1)) {
+			err = rdev->ops->set_default_key(&rdev->wiphy,
+							 dev, idx);
+			if (!err)
+				wdev->wext.default_key = idx;
+			return err;
+		}
+
+		if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
+		    (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
+			err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
+							      dev, idx);
+			if (!err)
+				wdev->wext.default_mgmt_key = idx;
+			return err;
+		}
+
+		return 0;
+	}
+}
+
+int cfg80211_wext_siwencode(struct net_device *dev,
+			    struct iw_request_info *info,
+			    struct iw_point *erq, char *keybuf)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	int idx, err;
+	bool remove = false;
+	struct key_params params;
+
+	/* no use -- only MFP (set_default_mgmt_key) is optional */
+	if (!rdev->ops->del_key ||
+	    !rdev->ops->add_key ||
+	    !rdev->ops->set_default_key)
+		return -EOPNOTSUPP;
+
+	idx = erq->flags & IW_ENCODE_INDEX;
+	if (idx == 0) {
+		idx = wdev->wext.default_key;
+		if (idx < 0)
+			idx = 0;
+	} else if (idx < 1 || idx > 4)
+		return -EINVAL;
+	else
+		idx--;
+
+	if (erq->flags & IW_ENCODE_DISABLED)
+		remove = true;
+	else if (erq->length == 0) {
+		/* No key data - just set the default TX key index */
+		err = rdev->ops->set_default_key(&rdev->wiphy, dev, idx);
+		if (!err)
+			wdev->wext.default_key = idx;
+		return err;
+	}
+
+	memset(&params, 0, sizeof(params));
+	params.key = keybuf;
+	params.key_len = erq->length;
+	if (erq->length == 5)
+		params.cipher = WLAN_CIPHER_SUITE_WEP40;
+	else if (erq->length == 13)
+		params.cipher = WLAN_CIPHER_SUITE_WEP104;
+	else if (!remove)
+		return -EINVAL;
+
+	return cfg80211_set_encryption(rdev, dev, NULL, remove,
+				       wdev->wext.default_key == -1,
+				       idx, &params);
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);
+
+int cfg80211_wext_siwencodeext(struct net_device *dev,
+			       struct iw_request_info *info,
+			       struct iw_point *erq, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
+	const u8 *addr;
+	int idx;
+	bool remove = false;
+	struct key_params params;
+	u32 cipher;
+
+	/* no use -- only MFP (set_default_mgmt_key) is optional */
+	if (!rdev->ops->del_key ||
+	    !rdev->ops->add_key ||
+	    !rdev->ops->set_default_key)
+		return -EOPNOTSUPP;
+
+	switch (ext->alg) {
+	case IW_ENCODE_ALG_NONE:
+		remove = true;
+		cipher = 0;
+		break;
+	case IW_ENCODE_ALG_WEP:
+		if (ext->key_len == 5)
+			cipher = WLAN_CIPHER_SUITE_WEP40;
+		else if (ext->key_len == 13)
+			cipher = WLAN_CIPHER_SUITE_WEP104;
+		else
+			return -EINVAL;
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		cipher = WLAN_CIPHER_SUITE_TKIP;
+		break;
+	case IW_ENCODE_ALG_CCMP:
+		cipher = WLAN_CIPHER_SUITE_CCMP;
+		break;
+	case IW_ENCODE_ALG_AES_CMAC:
+		cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (erq->flags & IW_ENCODE_DISABLED)
+		remove = true;
+
+	idx = erq->flags & IW_ENCODE_INDEX;
+	if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+		if (idx < 4 || idx > 5) {
+			idx = wdev->wext.default_mgmt_key;
+			if (idx < 0)
+				return -EINVAL;
+		} else
+			idx--;
+	} else {
+		if (idx < 1 || idx > 4) {
+			idx = wdev->wext.default_key;
+			if (idx < 0)
+				return -EINVAL;
+		} else
+			idx--;
+	}
+
+	addr = ext->addr.sa_data;
+	if (is_broadcast_ether_addr(addr))
+		addr = NULL;
+
+	memset(&params, 0, sizeof(params));
+	params.key = ext->key;
+	params.key_len = ext->key_len;
+	params.cipher = cipher;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+		params.seq = ext->rx_seq;
+		params.seq_len = 6;
+	}
+
+	return cfg80211_set_encryption(
+			rdev, dev, addr, remove,
+			ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
+			idx, &params);
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
+
+struct giwencode_cookie {
+	size_t buflen;
+	char *keybuf;
+};
+
+static void giwencode_get_key_cb(void *cookie, struct key_params *params)
+{
+	struct giwencode_cookie *data = cookie;
+
+	if (!params->key) {
+		data->buflen = 0;
+		return;
+	}
+
+	data->buflen = min_t(size_t, data->buflen, params->key_len);
+	memcpy(data->keybuf, params->key, data->buflen);
+}
+
+int cfg80211_wext_giwencode(struct net_device *dev,
+			    struct iw_request_info *info,
+			    struct iw_point *erq, char *keybuf)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	int idx, err;
+	struct giwencode_cookie data = {
+		.keybuf = keybuf,
+		.buflen = erq->length,
+	};
+
+	if (!rdev->ops->get_key)
+		return -EOPNOTSUPP;
+
+	idx = erq->flags & IW_ENCODE_INDEX;
+	if (idx == 0) {
+		idx = wdev->wext.default_key;
+		if (idx < 0)
+			idx = 0;
+	} else if (idx < 1 || idx > 4)
+		return -EINVAL;
+	else
+		idx--;
+
+	erq->flags = idx + 1;
+
+	err = rdev->ops->get_key(&rdev->wiphy, dev, idx, NULL, &data,
+				 giwencode_get_key_cb);
+	if (!err) {
+		erq->length = data.buflen;
+		erq->flags |= IW_ENCODE_ENABLED;
+		return 0;
+	}
+
+	if (err == -ENOENT) {
+		erq->flags |= IW_ENCODE_DISABLED;
+		erq->length = 0;
+		return 0;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
+
+int cfg80211_wext_siwtxpower(struct net_device *dev,
+			     struct iw_request_info *info,
+			     union iwreq_data *data, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	enum tx_power_setting type;
+	int dbm = 0;
+
+	if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
+		return -EINVAL;
+	if (data->txpower.flags & IW_TXPOW_RANGE)
+		return -EINVAL;
+
+	if (!rdev->ops->set_tx_power)
+		return -EOPNOTSUPP;
+
+	/* only change when not disabling */
+	if (!data->txpower.disabled) {
+		rfkill_set_sw_state(rdev->rfkill, false);
+
+		if (data->txpower.fixed) {
+			/*
+			 * wext doesn't support negative values, see
+			 * below where it's for automatic
+			 */
+			if (data->txpower.value < 0)
+				return -EINVAL;
+			dbm = data->txpower.value;
+			type = TX_POWER_FIXED;
+			/* TODO: do regulatory check! */
+		} else {
+			/*
+			 * Automatic power level setting, max being the value
+			 * passed in from userland.
+			 */
+			if (data->txpower.value < 0) {
+				type = TX_POWER_AUTOMATIC;
+			} else {
+				dbm = data->txpower.value;
+				type = TX_POWER_LIMITED;
+			}
+		}
+	} else {
+		rfkill_set_sw_state(rdev->rfkill, true);
+		schedule_work(&rdev->rfkill_sync);
+		return 0;
+	}
+
+	return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
+
+int cfg80211_wext_giwtxpower(struct net_device *dev,
+			     struct iw_request_info *info,
+			     union iwreq_data *data, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	int err, val;
+
+	if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
+		return -EINVAL;
+	if (data->txpower.flags & IW_TXPOW_RANGE)
+		return -EINVAL;
+
+	if (!rdev->ops->get_tx_power)
+		return -EOPNOTSUPP;
+
+	err = rdev->ops->get_tx_power(wdev->wiphy, &val);
+	if (err)
+		return err;
+
+	/* well... oh well */
+	data->txpower.fixed = 1;
+	data->txpower.disabled = rfkill_blocked(rdev->rfkill);
+	data->txpower.value = val;
+	data->txpower.flags = IW_TXPOW_DBM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 0e59f9ae9b81..252c2010c2e2 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -636,8 +636,10 @@ static void wireless_seq_printf_stats(struct seq_file *seq,
 /*
  * Print info for /proc/net/wireless (print all entries)
  */
-static int wireless_seq_show(struct seq_file *seq, void *v)
+static int wireless_dev_seq_show(struct seq_file *seq, void *v)
 {
+	might_sleep();
+
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq, "Inter-| sta-| Quality | Discarded "
 				"packets | Missed | WE\n"
@@ -649,14 +651,46 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct net *net = seq_file_net(seq);
+	loff_t off;
+	struct net_device *dev;
+
+	rtnl_lock();
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	off = 1;
+	for_each_netdev(net, dev)
+		if (off++ == *pos)
+			return dev;
+	return NULL;
+}
+
+static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct net *net = seq_file_net(seq);
+
+	++*pos;
+
+	return v == SEQ_START_TOKEN ?
+			first_net_device(net) : next_net_device(v);
+}
+
+static void wireless_dev_seq_stop(struct seq_file *seq, void *v)
+{
+	rtnl_unlock();
+}
+
 static const struct seq_operations wireless_seq_ops = {
-	.start = dev_seq_start,
-	.next  = dev_seq_next,
-	.stop  = dev_seq_stop,
-	.show  = wireless_seq_show,
+	.start = wireless_dev_seq_start,
+	.next  = wireless_dev_seq_next,
+	.stop  = wireless_dev_seq_stop,
+	.show  = wireless_dev_seq_show,
 };
 
-static int wireless_seq_open(struct inode *inode, struct file *file)
+static int seq_open_wireless(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &wireless_seq_ops,
 			    sizeof(struct seq_net_private));
@@ -664,7 +698,7 @@ static int wireless_seq_open(struct inode *inode, struct file *file)
 
 static const struct file_operations wireless_seq_fops = {
 	.owner = THIS_MODULE,
-	.open = wireless_seq_open,
+	.open = seq_open_wireless,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release_net,
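The three new handlers implement the seq_file cursor contract: start() takes the lock and maps *pos 0 to a header token and *pos n to the n-th device, next() advances *pos, and stop() drops the lock. A toy model of that contract (plain C, not kernel code; the array stands in for the net_device list and the locking is only indicated in comments):

/* Toy model of the start/next/stop cursor used by the patch above. */
#include <stdio.h>

static const char * const devs[] = { "wlan0", "wlan1" };
#define NDEVS (sizeof(devs) / sizeof(devs[0]))
#define START_TOKEN ((const void *)1)

static const void *seq_start(long *pos)
{
	/* a real implementation would take the list lock here */
	if (!*pos)
		return START_TOKEN;
	return (unsigned long)*pos <= NDEVS ? devs[*pos - 1] : NULL;
}

static const void *seq_next(const void *v, long *pos)
{
	(void)v;	/* the kernel version uses v to find the successor */
	++*pos;
	return (unsigned long)*pos <= NDEVS ? devs[*pos - 1] : NULL;
}

int main(void)
{
	long pos = 0;
	const void *v;

	for (v = seq_start(&pos); v; v = seq_next(v, &pos)) {
		if (v == START_TOKEN)
			printf("header line\n");
		else
			printf("entry: %s\n", (const char *)v);
	}
	/* stop() would drop the lock here */
	return 0;
}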
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 96036cf2216d..d31ccb487730 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -696,8 +696,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
-	int err;
+	struct sk_buff *frag_iter;
 	struct scatterlist sg;
+	int err;
 
 	/* Checksum header. */
 	if (copy > 0) {
@@ -742,28 +743,24 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				err = skb_icv_walk(list, desc, offset-start,
-						   copy, icv_update);
-				if (unlikely(err))
-					return err;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			err = skb_icv_walk(frag_iter, desc, offset-start,
+					   copy, icv_update);
+			if (unlikely(err))
+				return err;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 	return 0;
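skb_walk_frags(), used in the rewritten loop, is to the best of my knowledge a plain iteration macro over the skb's frag_list chain, so the conversion removes the open-coded nested loop without changing the traversal. A toy model of the same shape (plain C, not kernel code, names invented):

/* Toy model: a walk-the-fragments macro over a next-pointer chain. */
#include <stdio.h>

struct buf {
	int len;
	struct buf *next;	/* stands in for the frag_list chain */
};

#define buf_walk_frags(head, iter) \
	for (iter = (head)->next; iter; iter = iter->next)

int main(void)
{
	struct buf c = { 30, NULL };
	struct buf b = { 20, &c };
	struct buf a = { 10, &b };	/* a.next is the "frag list" */
	struct buf *iter;
	int total = a.len;

	buf_walk_frags(&a, iter)
		total += iter->len;
	printf("total length: %d\n", total);	/* 60 */
	return 0;
}

Hiding the list head behind a macro is what allows later reshuffling of how fragments are stored without touching walkers like skb_icv_walk().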
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b4a13178fb40..e0009c17d809 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -251,8 +251,7 @@ resume:
 	nf_reset(skb);
 
 	if (decaps) {
-		dst_release(skb->dst);
-		skb->dst = NULL;
+		skb_dst_drop(skb);
 		netif_rx(skb);
 		return 0;
 	} else {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index c235597ba8dd..b9fe13138c07 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -22,7 +22,7 @@ static int xfrm_output2(struct sk_buff *skb);
 
 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
 		- skb_headroom(skb);
 	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
@@ -39,7 +39,7 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm_output_one(struct sk_buff *skb, int err)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct xfrm_state *x = dst->xfrm;
 	struct net *net = xs_net(x);
 
@@ -94,12 +94,13 @@ resume:
 			goto error_nolock;
 		}
 
-		if (!(skb->dst = dst_pop(dst))) {
+		dst = dst_pop(dst);
+		if (!dst) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		dst = skb->dst;
+		skb_dst_set(skb, dst);
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
@@ -119,16 +120,16 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
 		nf_reset(skb);
 
-		err = skb->dst->ops->local_out(skb);
+		err = skb_dst(skb)->ops->local_out(skb);
 		if (unlikely(err != 1))
 			goto out;
 
-		if (!skb->dst->xfrm)
+		if (!skb_dst(skb)->xfrm)
 			return dst_output(skb);
 
-		err = nf_hook(skb->dst->ops->family,
+		err = nf_hook(skb_dst(skb)->ops->family,
 			      NF_INET_POST_ROUTING, skb,
-			      NULL, skb->dst->dev, xfrm_output2);
+			      NULL, skb_dst(skb)->dev, xfrm_output2);
 		if (unlikely(err != 1))
 			goto out;
 	}
@@ -179,7 +180,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
 
 int xfrm_output(struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb->dst->dev);
+	struct net *net = dev_net(skb_dst(skb)->dev);
 	int err;
 
 	if (skb_is_gso(skb))
@@ -202,7 +203,7 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct xfrm_mode *inner_mode;
 	if (x->sel.family == AF_UNSPEC)
 		inner_mode = xfrm_ip2inner_mode(x,
-				xfrm_af2proto(skb->dst->ops->family));
+				xfrm_af2proto(skb_dst(skb)->ops->family));
 	else
 		inner_mode = x->inner_mode;
 
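The skb->dst conversions in this and the surrounding files all follow one pattern: hide a raw pointer field behind get/set/drop accessors so the field's representation can change later without touching callers. A minimal sketch of the pattern (plain C, not kernel code; the refcounting shown is illustrative, not the kernel's dst refcounting):

/* Sketch of the accessor pattern behind skb_dst()/skb_dst_set()/
 * skb_dst_drop(); all names here are invented. */
#include <stdio.h>
#include <stdlib.h>

struct dst { int refcnt; };
struct pkt { struct dst *dst; };

static struct dst *pkt_dst(const struct pkt *p)       { return p->dst; }
static void pkt_dst_set(struct pkt *p, struct dst *d) { p->dst = d; }

static void pkt_dst_drop(struct pkt *p)
{
	if (p->dst && --p->dst->refcnt == 0)
		free(p->dst);
	p->dst = NULL;	/* replaces open-coded release + NULL stores */
}

int main(void)
{
	struct pkt p = { malloc(sizeof(struct dst)) };

	if (!p.dst)
		return 1;
	p.dst->refcnt = 1;
	pkt_dst_set(&p, pkt_dst(&p));	/* callers never touch p.dst itself */
	pkt_dst_drop(&p);
	printf("dst after drop: %p\n", (void *)pkt_dst(&p));
	return 0;
}

Once every reader goes through the accessor, the field can become, say, a tagged integer instead of a pointer with only the three helpers changing, which appears to be the point of converting call sites wholesale in this series.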
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9c068ab3a834..cb81ca35b0d6 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2027,6 +2027,8 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 {
 	struct net *net = dev_net(skb->dev);
 	struct flowi fl;
+	struct dst_entry *dst;
+	int res;
 
 	if (xfrm_decode_session(skb, &fl, family) < 0) {
 		/* XXX: we should have something like FWDHDRERROR here. */
@@ -2034,7 +2036,11 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 		return 0;
 	}
 
-	return xfrm_lookup(net, &skb->dst, &fl, NULL, 0) == 0;
+	dst = skb_dst(skb);
+
+	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
+	skb_dst_set(skb, dst);
+	return res;
 }
 EXPORT_SYMBOL(__xfrm_route_forward);
 