author		Trond Myklebust <Trond.Myklebust@netapp.com>	2011-11-02 23:56:40 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-11-02 23:56:40 -0400
commit		31cbecb4ab538f433145bc5a46f3bea9b9627031 (patch)
tree		d6206d42dea7298f7ef05fd1f7bf474245f0d43a /net
parent		2b72c9ccd22c4a3299e5a358dcd639fb253730f4 (diff)
parent		278c023a99b0d6b471d0f4a79835c703482e29ac (diff)
Merge branch 'osd-devel' into nfs-for-next
Diffstat (limited to 'net')
-rw-r--r-- net/802/garp.c | 4
-rw-r--r-- net/802/stp.c | 4
-rw-r--r-- net/8021q/vlan.c | 2
-rw-r--r-- net/8021q/vlan_core.c | 7
-rw-r--r-- net/8021q/vlan_dev.c | 4
-rw-r--r-- net/9p/client.c | 469
-rw-r--r-- net/9p/protocol.c | 99
-rw-r--r-- net/9p/protocol.h | 4
-rw-r--r-- net/9p/trans_common.c | 53
-rw-r--r-- net/9p/trans_common.h | 21
-rw-r--r-- net/9p/trans_virtio.c | 319
-rw-r--r-- net/appletalk/ddp.c | 5
-rw-r--r-- net/atm/lec.c | 2
-rw-r--r-- net/batman-adv/Makefile | 2
-rw-r--r-- net/batman-adv/aggregation.c | 293
-rw-r--r-- net/batman-adv/aggregation.h | 46
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 1170
-rw-r--r-- net/batman-adv/bat_ogm.h | 35
-rw-r--r-- net/batman-adv/bat_sysfs.c | 2
-rw-r--r-- net/batman-adv/bitarray.c | 6
-rw-r--r-- net/batman-adv/gateway_client.c | 10
-rw-r--r-- net/batman-adv/hard-interface.c | 88
-rw-r--r-- net/batman-adv/hard-interface.h | 1
-rw-r--r-- net/batman-adv/hash.h | 25
-rw-r--r-- net/batman-adv/main.c | 4
-rw-r--r-- net/batman-adv/main.h | 8
-rw-r--r-- net/batman-adv/originator.c | 21
-rw-r--r-- net/batman-adv/packet.h | 19
-rw-r--r-- net/batman-adv/routing.c | 669
-rw-r--r-- net/batman-adv/routing.h | 17
-rw-r--r-- net/batman-adv/send.c | 313
-rw-r--r-- net/batman-adv/send.h | 9
-rw-r--r-- net/batman-adv/soft-interface.c | 36
-rw-r--r-- net/batman-adv/translation-table.c | 223
-rw-r--r-- net/batman-adv/translation-table.h | 21
-rw-r--r-- net/batman-adv/types.h | 9
-rw-r--r-- net/batman-adv/unicast.c | 6
-rw-r--r-- net/batman-adv/unicast.h | 2
-rw-r--r-- net/batman-adv/vis.c | 10
-rw-r--r-- net/bluetooth/af_bluetooth.c | 30
-rw-r--r-- net/bluetooth/bnep/core.c | 5
-rw-r--r-- net/bluetooth/bnep/netdev.c | 2
-rw-r--r-- net/bluetooth/cmtp/core.c | 5
-rw-r--r-- net/bluetooth/hci_conn.c | 16
-rw-r--r-- net/bluetooth/hci_core.c | 57
-rw-r--r-- net/bluetooth/hci_event.c | 36
-rw-r--r-- net/bluetooth/hci_sock.c | 18
-rw-r--r-- net/bluetooth/hci_sysfs.c | 2
-rw-r--r-- net/bluetooth/hidp/core.c | 13
-rw-r--r-- net/bluetooth/l2cap_core.c | 273
-rw-r--r-- net/bluetooth/l2cap_sock.c | 4
-rw-r--r-- net/bluetooth/mgmt.c | 212
-rw-r--r-- net/bluetooth/rfcomm/core.c | 5
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 3
-rw-r--r-- net/bluetooth/sco.c | 5
-rw-r--r-- net/bluetooth/smp.c | 421
-rw-r--r-- net/bridge/br_device.c | 4
-rw-r--r-- net/bridge/br_fdb.c | 23
-rw-r--r-- net/bridge/br_if.c | 50
-rw-r--r-- net/bridge/br_input.c | 33
-rw-r--r-- net/bridge/br_netlink.c | 1
-rw-r--r-- net/bridge/br_private.h | 8
-rw-r--r-- net/bridge/br_sysfs_br.c | 34
-rw-r--r-- net/bridge/netfilter/ebtable_broute.c | 4
-rw-r--r-- net/caif/caif_dev.c | 5
-rw-r--r-- net/caif/cfcnfg.c | 38
-rw-r--r-- net/caif/cfctrl.c | 23
-rw-r--r-- net/caif/cfdbgl.c | 7
-rw-r--r-- net/caif/cfdgml.c | 7
-rw-r--r-- net/caif/cffrml.c | 7
-rw-r--r-- net/caif/cfmuxl.c | 6
-rw-r--r-- net/caif/cfrfml.c | 7
-rw-r--r-- net/caif/cfserl.c | 7
-rw-r--r-- net/caif/cfsrvl.c | 8
-rw-r--r-- net/caif/cfutill.c | 7
-rw-r--r-- net/caif/cfveil.c | 7
-rw-r--r-- net/caif/cfvidl.c | 7
-rw-r--r-- net/can/Kconfig | 11
-rw-r--r-- net/can/Makefile | 3
-rw-r--r-- net/can/af_can.c | 6
-rw-r--r-- net/can/af_can.h | 2
-rw-r--r-- net/can/bcm.c | 2
-rw-r--r-- net/can/gw.c | 957
-rw-r--r-- net/can/proc.c | 2
-rw-r--r-- net/can/raw.c | 2
-rw-r--r-- net/ceph/Kconfig | 14
-rw-r--r-- net/ceph/ceph_common.c | 47
-rw-r--r-- net/ceph/messenger.c | 130
-rw-r--r-- net/ceph/mon_client.c | 79
-rw-r--r-- net/ceph/msgpool.c | 4
-rw-r--r-- net/ceph/osd_client.c | 34
-rw-r--r-- net/core/datagram.c | 24
-rw-r--r-- net/core/dev.c | 339
-rw-r--r-- net/core/dev_addr_lists.c | 4
-rw-r--r-- net/core/dst.c | 15
-rw-r--r-- net/core/ethtool.c | 20
-rw-r--r-- net/core/fib_rules.c | 9
-rw-r--r-- net/core/filter.c | 4
-rw-r--r-- net/core/flow.c | 14
-rw-r--r-- net/core/kmap_skb.h | 2
-rw-r--r-- net/core/link_watch.c | 9
-rw-r--r-- net/core/neighbour.c | 44
-rw-r--r-- net/core/net-sysfs.c | 12
-rw-r--r-- net/core/netpoll.c | 4
-rw-r--r-- net/core/pktgen.c | 25
-rw-r--r-- net/core/rtnetlink.c | 34
-rw-r--r-- net/core/scm.c | 10
-rw-r--r-- net/core/secure_seq.c | 2
-rw-r--r-- net/core/skbuff.c | 174
-rw-r--r-- net/core/sock.c | 24
-rw-r--r-- net/core/timestamping.c | 12
-rw-r--r-- net/core/user_dma.c | 6
-rw-r--r-- net/dcb/dcbnl.c | 30
-rw-r--r-- net/dccp/ccids/ccid2.c | 84
-rw-r--r-- net/dccp/ccids/ccid2.h | 6
-rw-r--r-- net/dccp/dccp.h | 1
-rw-r--r-- net/dccp/feat.c | 202
-rw-r--r-- net/dccp/feat.h | 1
-rw-r--r-- net/dccp/ipv6.c | 4
-rw-r--r-- net/dccp/proto.c | 1
-rw-r--r-- net/decnet/dn_dev.c | 6
-rw-r--r-- net/dsa/slave.c | 3
-rw-r--r-- net/ieee802154/6lowpan.c | 891
-rw-r--r-- net/ieee802154/6lowpan.h | 212
-rw-r--r-- net/ieee802154/Kconfig | 6
-rw-r--r-- net/ieee802154/Makefile | 8
-rw-r--r-- net/ipv4/af_inet.c | 2
-rw-r--r-- net/ipv4/cipso_ipv4.c | 2
-rw-r--r-- net/ipv4/devinet.c | 6
-rw-r--r-- net/ipv4/fib_trie.c | 12
-rw-r--r-- net/ipv4/gre.c | 4
-rw-r--r-- net/ipv4/icmp.c | 5
-rw-r--r-- net/ipv4/igmp.c | 12
-rw-r--r-- net/ipv4/inet_diag.c | 5
-rw-r--r-- net/ipv4/inet_lro.c | 10
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 1
-rw-r--r-- net/ipv4/ip_fragment.c | 40
-rw-r--r-- net/ipv4/ip_gre.c | 2
-rw-r--r-- net/ipv4/ip_output.c | 17
-rw-r--r-- net/ipv4/ip_sockglue.c | 7
-rw-r--r-- net/ipv4/ipip.c | 10
-rw-r--r-- net/ipv4/ipmr.c | 8
-rw-r--r-- net/ipv4/netfilter/nf_nat_amanda.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_core.c | 24
-rw-r--r-- net/ipv4/netfilter/nf_nat_ftp.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_h323.c | 36
-rw-r--r-- net/ipv4/netfilter/nf_nat_irc.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_pptp.c | 16
-rw-r--r-- net/ipv4/netfilter/nf_nat_sip.c | 28
-rw-r--r-- net/ipv4/netfilter/nf_nat_snmp_basic.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_standalone.c | 6
-rw-r--r-- net/ipv4/netfilter/nf_nat_tftp.c | 4
-rw-r--r-- net/ipv4/route.c | 53
-rw-r--r-- net/ipv4/syncookies.c | 2
-rw-r--r-- net/ipv4/tcp.c | 97
-rw-r--r-- net/ipv4/tcp_input.c | 244
-rw-r--r-- net/ipv4/tcp_ipv4.c | 57
-rw-r--r-- net/ipv4/tcp_minisocks.c | 6
-rw-r--r-- net/ipv4/tcp_output.c | 152
-rw-r--r-- net/ipv4/tcp_timer.c | 2
-rw-r--r-- net/ipv4/udp.c | 11
-rw-r--r-- net/ipv4/xfrm4_policy.c | 14
-rw-r--r-- net/ipv6/addrconf.c | 124
-rw-r--r-- net/ipv6/datagram.c | 4
-rw-r--r-- net/ipv6/exthdrs.c | 7
-rw-r--r-- net/ipv6/icmp.c | 28
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 9
-rw-r--r-- net/ipv6/ip6_fib.c | 4
-rw-r--r-- net/ipv6/ip6_output.c | 37
-rw-r--r-- net/ipv6/ip6_tunnel.c | 54
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 2
-rw-r--r-- net/ipv6/ndisc.c | 42
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 4
-rw-r--r-- net/ipv6/raw.c | 11
-rw-r--r-- net/ipv6/reassembly.c | 4
-rw-r--r-- net/ipv6/route.c | 3
-rw-r--r-- net/ipv6/sit.c | 12
-rw-r--r-- net/ipv6/syncookies.c | 6
-rw-r--r-- net/ipv6/tcp_ipv6.c | 65
-rw-r--r-- net/ipv6/udp.c | 4
-rw-r--r-- net/ipv6/xfrm6_output.c | 56
-rw-r--r-- net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r-- net/irda/irlan/irlan_eth.c | 2
-rw-r--r-- net/iucv/Kconfig | 14
-rw-r--r-- net/iucv/af_iucv.c | 870
-rw-r--r-- net/iucv/iucv.c | 23
-rw-r--r-- net/l2tp/l2tp_core.c | 4
-rw-r--r-- net/l2tp/l2tp_ppp.c | 9
-rw-r--r-- net/lapb/lapb_iface.c | 29
-rw-r--r-- net/mac80211/Kconfig | 25
-rw-r--r-- net/mac80211/agg-rx.c | 25
-rw-r--r-- net/mac80211/agg-tx.c | 64
-rw-r--r-- net/mac80211/cfg.c | 497
-rw-r--r-- net/mac80211/debugfs.c | 71
-rw-r--r-- net/mac80211/debugfs_netdev.c | 59
-rw-r--r-- net/mac80211/debugfs_sta.c | 37
-rw-r--r-- net/mac80211/driver-ops.h | 91
-rw-r--r-- net/mac80211/driver-trace.h | 117
-rw-r--r-- net/mac80211/ht.c | 8
-rw-r--r-- net/mac80211/ibss.c | 16
-rw-r--r-- net/mac80211/ieee80211_i.h | 104
-rw-r--r-- net/mac80211/iface.c | 26
-rw-r--r-- net/mac80211/key.c | 4
-rw-r--r-- net/mac80211/main.c | 27
-rw-r--r-- net/mac80211/mesh.c | 213
-rw-r--r-- net/mac80211/mesh.h | 38
-rw-r--r-- net/mac80211/mesh_hwmp.c | 177
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 481
-rw-r--r-- net/mac80211/mesh_plink.c | 257
-rw-r--r-- net/mac80211/mlme.c | 140
-rw-r--r-- net/mac80211/pm.c | 2
-rw-r--r-- net/mac80211/rate.c | 37
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 13
-rw-r--r-- net/mac80211/rx.c | 199
-rw-r--r-- net/mac80211/scan.c | 6
-rw-r--r-- net/mac80211/spectmgmt.c | 6
-rw-r--r-- net/mac80211/sta_info.c | 977
-rw-r--r-- net/mac80211/sta_info.h | 171
-rw-r--r-- net/mac80211/status.c | 251
-rw-r--r-- net/mac80211/tx.c | 557
-rw-r--r-- net/mac80211/util.c | 280
-rw-r--r-- net/mac80211/wme.c | 20
-rw-r--r-- net/mac80211/wme.h | 3
-rw-r--r-- net/mac80211/work.c | 10
-rw-r--r-- net/mac80211/wpa.c | 3
-rw-r--r-- net/netfilter/Kconfig | 2
-rw-r--r-- net/netfilter/core.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 131
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 12
-rw-r--r-- net/netfilter/nf_conntrack_ecache.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_extend.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_helper.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_proto_gre.c | 4
-rw-r--r-- net/netfilter/nf_log.c | 10
-rw-r--r-- net/netfilter/nf_queue.c | 6
-rw-r--r-- net/netfilter/nfnetlink.c | 6
-rw-r--r-- net/netfilter/x_tables.c | 5
-rw-r--r-- net/netlabel/netlabel_domainhash.c | 6
-rw-r--r-- net/netlabel/netlabel_unlabeled.c | 6
-rw-r--r-- net/netlink/af_netlink.c | 7
-rw-r--r-- net/nfc/Kconfig | 2
-rw-r--r-- net/nfc/Makefile | 1
-rw-r--r-- net/nfc/core.c | 83
-rw-r--r-- net/nfc/nci/Kconfig | 10
-rw-r--r-- net/nfc/nci/Makefile | 7
-rw-r--r-- net/nfc/nci/core.c | 797
-rw-r--r-- net/nfc/nci/data.c | 247
-rw-r--r-- net/nfc/nci/lib.c | 94
-rw-r--r-- net/nfc/nci/ntf.c | 258
-rw-r--r-- net/nfc/nci/rsp.c | 226
-rw-r--r-- net/nfc/netlink.c | 56
-rw-r--r-- net/nfc/nfc.h | 8
-rw-r--r-- net/nfc/rawsock.c | 13
-rw-r--r-- net/packet/af_packet.c | 987
-rw-r--r-- net/phonet/af_phonet.c | 4
-rw-r--r-- net/phonet/pn_dev.c | 6
-rw-r--r-- net/phonet/socket.c | 6
-rw-r--r-- net/rds/Kconfig | 1
-rw-r--r-- net/rds/ib_cm.c | 6
-rw-r--r-- net/rds/ib_rdma.c | 112
-rw-r--r-- net/rds/rds.h | 8
-rw-r--r-- net/rds/xlist.h | 80
-rw-r--r-- net/rfkill/core.c | 2
-rw-r--r-- net/rfkill/rfkill-gpio.c | 11
-rw-r--r-- net/rfkill/rfkill-regulator.c | 1
-rw-r--r-- net/sched/cls_flow.c | 188
-rw-r--r-- net/sched/sch_sfb.c | 13
-rw-r--r-- net/sctp/associola.c | 1
-rw-r--r-- net/sctp/ipv6.c | 2
-rw-r--r-- net/sctp/outqueue.c | 4
-rw-r--r-- net/sctp/protocol.c | 2
-rw-r--r-- net/sctp/sm_make_chunk.c | 1
-rw-r--r-- net/sctp/sm_statefuns.c | 5
-rw-r--r-- net/socket.c | 4
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 4
-rw-r--r-- net/sunrpc/rpc_pipe.c | 3
-rw-r--r-- net/sunrpc/svc.c | 38
-rw-r--r-- net/sunrpc/svc_xprt.c | 13
-rw-r--r-- net/sunrpc/svcsock.c | 23
-rw-r--r-- net/tipc/bcast.c | 111
-rw-r--r-- net/tipc/bcast.h | 1
-rw-r--r-- net/tipc/bearer.c | 8
-rw-r--r-- net/tipc/bearer.h | 4
-rw-r--r-- net/tipc/config.h | 1
-rw-r--r-- net/tipc/discover.c | 6
-rw-r--r-- net/tipc/eth_media.c | 32
-rw-r--r-- net/tipc/link.c | 111
-rw-r--r-- net/tipc/link.h | 1
-rw-r--r-- net/tipc/name_distr.c | 35
-rw-r--r-- net/tipc/net.c | 11
-rw-r--r-- net/tipc/node.c | 45
-rw-r--r-- net/tipc/node.h | 10
-rw-r--r-- net/tipc/socket.c | 51
-rw-r--r-- net/tipc/subscr.c | 3
-rw-r--r-- net/tipc/subscr.h | 6
-rw-r--r-- net/unix/af_unix.c | 24
-rw-r--r-- net/wireless/core.c | 2
-rw-r--r-- net/wireless/core.h | 6
-rw-r--r-- net/wireless/lib80211.c | 15
-rw-r--r-- net/wireless/lib80211_crypt_ccmp.c | 2
-rw-r--r-- net/wireless/lib80211_crypt_tkip.c | 4
-rw-r--r-- net/wireless/lib80211_crypt_wep.c | 4
-rw-r--r-- net/wireless/mesh.c | 3
-rw-r--r-- net/wireless/mlme.c | 16
-rw-r--r-- net/wireless/nl80211.c | 405
-rw-r--r-- net/wireless/nl80211.h | 4
-rw-r--r-- net/wireless/reg.c | 47
-rw-r--r-- net/wireless/reg.h | 2
-rw-r--r-- net/wireless/scan.c | 28
-rw-r--r-- net/wireless/sme.c | 19
-rw-r--r-- net/wireless/util.c | 194
-rw-r--r-- net/wireless/wext-compat.c | 137
-rw-r--r-- net/wireless/wext-compat.h | 8
-rw-r--r-- net/wireless/wext-sme.c | 3
-rw-r--r-- net/x25/af_x25.c | 40
-rw-r--r-- net/x25/x25_dev.c | 6
-rw-r--r-- net/x25/x25_facilities.c | 10
-rw-r--r-- net/x25/x25_in.c | 43
-rw-r--r-- net/x25/x25_link.c | 3
-rw-r--r-- net/x25/x25_subr.c | 14
-rw-r--r-- net/xfrm/xfrm_ipcomp.c | 13
-rw-r--r-- net/xfrm/xfrm_replay.c | 98
-rw-r--r-- net/xfrm/xfrm_user.c | 4
325 files changed, 15275 insertions, 6196 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index 16102951d36a..070bf4403bf8 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -553,7 +553,7 @@ static void garp_release_port(struct net_device *dev)
 		if (rtnl_dereference(port->applicants[i]))
 			return;
 	}
-	rcu_assign_pointer(dev->garp_port, NULL);
+	RCU_INIT_POINTER(dev->garp_port, NULL);
 	kfree_rcu(port, rcu);
 }
 
@@ -605,7 +605,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
 	ASSERT_RTNL();
 
-	rcu_assign_pointer(port->applicants[appl->type], NULL);
+	RCU_INIT_POINTER(port->applicants[appl->type], NULL);
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b1b36b..0e136ef1e4ba 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -88,9 +88,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
 {
 	mutex_lock(&stp_proto_mutex);
 	if (is_zero_ether_addr(proto->group_address))
-		rcu_assign_pointer(stp_proto, NULL);
+		RCU_INIT_POINTER(stp_proto, NULL);
 	else
-		rcu_assign_pointer(garp_protos[proto->group_address[5] -
-					       GARP_ADDR_MIN], NULL);
+		RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
+					     GARP_ADDR_MIN], NULL);
 	synchronize_rcu();
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8970ba139d73..5471628d3ffe 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (grp->nr_vlans == 0) {
 		vlan_gvrp_uninit_applicant(real_dev);
 
-		rcu_assign_pointer(real_dev->vlgrp, NULL);
+		RCU_INIT_POINTER(real_dev->vlgrp, NULL);
 
 		/* Free the group, after all cpu's are done. */
 		call_rcu(&grp->rcu, vlan_rcu_free);
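
The three hunks above (garp, stp, vlan) make the same substitution: rcu_assign_pointer() becomes RCU_INIT_POINTER() wherever the stored value is NULL, since publishing NULL gives readers nothing to dereference and so needs no write barrier. A minimal sketch of the pattern, using a hypothetical struct rather than anything from this patch:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct item { int v; };				/* hypothetical */
	struct holder { struct item __rcu *cur; };	/* hypothetical */

	static void holder_clear(struct holder *h)
	{
		struct item *old = rcu_dereference_protected(h->cur, 1);

		RCU_INIT_POINTER(h->cur, NULL);	/* storing NULL: no barrier needed */
		synchronize_rcu();		/* let existing readers finish */
		kfree(old);			/* now safe to free */
	}
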
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f1f2f7bb6661..163397f1fd5a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,7 +4,7 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-bool vlan_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,7 +13,10 @@ bool vlan_do_receive(struct sk_buff **skbp)
 
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
 	if (!vlan_dev) {
-		if (vlan_id)
+		/* Only the last call to vlan_do_receive() should change
+		 * pkt_type to PACKET_OTHERHOST
+		 */
+		if (vlan_id && last_handler)
 			skb->pkt_type = PACKET_OTHERHOST;
 		return false;
 	}
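
The new last_handler flag matters when vlan_do_receive() can be called more than once for the same skb: only the final caller may demote an unclaimed VLAN packet to PACKET_OTHERHOST. A hypothetical caller sketch (the matching net/core/dev.c hunk is not part of this excerpt, so the shape here is an assumption):

	/* Assumed caller shape: a non-NULL rx_handler means another handler
	 * may still claim the skb, so this is not the last call. */
	if (vlan_tx_tag_present(skb)) {
		if (vlan_do_receive(&skb, !rx_handler))
			goto another_round;	/* tag consumed, reprocess */
		else if (unlikely(!skb))
			goto out;
	}
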
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d40a071d038..c8cf9391417e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -610,7 +610,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
 					     struct ethtool_cmd *cmd)
 {
 	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+	return __ethtool_get_settings(vlan->real_dev, cmd);
 }
 
 static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -674,7 +675,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= vlan_dev_set_mac_address,
 	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
diff --git a/net/9p/client.c b/net/9p/client.c
index 0505a03c374c..854ca7a911c4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -38,6 +38,9 @@
 #include <net/9p/transport.h>
 #include "protocol.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
 /*
   * Client Option Parsing (code inspired by NFS code)
   *  - a little lazy - parse all client options
@@ -123,21 +126,19 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
-		int token;
+		int token, r;
 		if (!*p)
 			continue;
 		token = match_token(p, tokens, args);
-		if (token < Opt_trans) {
-			int r = match_int(&args[0], &option);
+		switch (token) {
+		case Opt_msize:
+			r = match_int(&args[0], &option);
 			if (r < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					   "integer field, but no integer?\n");
 				ret = r;
 				continue;
 			}
-		}
-		switch (token) {
-		case Opt_msize:
 			clnt->msize = option;
 			break;
 		case Opt_trans:
@@ -203,11 +204,13 @@ free_and_return:
  *
  */
 
-static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+static struct p9_req_t *
+p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
 {
 	unsigned long flags;
 	int row, col;
 	struct p9_req_t *req;
+	int alloc_msize = min(c->msize, max_size);
 
 	/* This looks up the original request by tag so we know which
 	 * buffer to read the data into */
@@ -245,23 +248,10 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 		return ERR_PTR(-ENOMEM);
 	}
 	init_waitqueue_head(req->wq);
-	if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) {
-		int alloc_msize = min(c->msize, 4096);
-		req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
-				  GFP_NOFS);
-		req->tc->capacity = alloc_msize;
-		req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
-				  GFP_NOFS);
-		req->rc->capacity = alloc_msize;
-	} else {
-		req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
-				  GFP_NOFS);
-		req->tc->capacity = c->msize;
-		req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
-				  GFP_NOFS);
-		req->rc->capacity = c->msize;
-	}
+	req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+			  GFP_NOFS);
+	req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+			  GFP_NOFS);
 	if ((!req->tc) || (!req->rc)) {
 		printk(KERN_ERR "Couldn't grow tag array\n");
 		kfree(req->tc);
@@ -271,6 +261,8 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 		req->wq = NULL;
 		return ERR_PTR(-ENOMEM);
 	}
+	req->tc->capacity = alloc_msize;
+	req->rc->capacity = alloc_msize;
 	req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
 	req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
 	}
@@ -475,37 +467,22 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 	int ecode;
 
 	err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+	/*
+	 * dump the response from server
+	 * This should be after check errors which poplulate pdu_fcall.
+	 */
+	trace_9p_protocol_dump(c, req->rc);
 	if (err) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
 		return err;
 	}
-
 	if (type != P9_RERROR && type != P9_RLERROR)
 		return 0;
 
 	if (!p9_is_proto_dotl(c)) {
 		char *ename;
-
-		if (req->tc->pbuf_size) {
-			/* Handle user buffers */
-			size_t len = req->rc->size - req->rc->offset;
-			if (req->tc->pubuf) {
-				/* User Buffer */
-				err = copy_from_user(
-					&req->rc->sdata[req->rc->offset],
-					req->tc->pubuf, len);
-				if (err) {
-					err = -EFAULT;
-					goto out_err;
-				}
-			} else {
-				/* Kernel Buffer */
-				memmove(&req->rc->sdata[req->rc->offset],
-					req->tc->pkbuf, len);
-			}
-		}
 		err = p9pdu_readf(req->rc, c->proto_version, "s?d",
 				  &ename, &ecode);
 		if (err)
 			goto out_err;
 
@@ -515,11 +492,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 		if (!err || !IS_ERR_VALUE(err)) {
 			err = p9_errstr2errno(ename, strlen(ename));
 
-			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
-				   ename);
-
-			kfree(ename);
+			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+				   -ecode, ename);
 		}
+		kfree(ename);
 	} else {
 		err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
 		err = -ecode;
@@ -527,7 +503,6 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 		P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
 	}
 
-
 	return err;
 
 out_err:
@@ -536,6 +511,115 @@ out_err:
 	return err;
 }
 
+/**
+ * p9_check_zc_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ * @in_hdrlen: Size of response protocol buffer.
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
+			      char *uidata, int in_hdrlen, int kern_buf)
+{
+	int err;
+	int ecode;
+	int8_t type;
+	char *ename = NULL;
+
+	err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+	/*
+	 * dump the response from server
+	 * This should be after parse_header which poplulate pdu_fcall.
+	 */
+	trace_9p_protocol_dump(c, req->rc);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+		return err;
+	}
+
+	if (type != P9_RERROR && type != P9_RLERROR)
+		return 0;
+
+	if (!p9_is_proto_dotl(c)) {
+		/* Error is reported in string format */
+		uint16_t len;
+		/* 7 = header size for RERROR, 2 is the size of string len; */
+		int inline_len = in_hdrlen - (7 + 2);
+
+		/* Read the size of error string */
+		err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
+		if (err)
+			goto out_err;
+
+		ename = kmalloc(len + 1, GFP_NOFS);
+		if (!ename) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+		if (len <= inline_len) {
+			/* We have error in protocol buffer itself */
+			if (pdu_read(req->rc, ename, len)) {
+				err = -EFAULT;
+				goto out_free;
+
+			}
+		} else {
+			/*
+			 * Part of the data is in user space buffer.
+			 */
+			if (pdu_read(req->rc, ename, inline_len)) {
+				err = -EFAULT;
+				goto out_free;
+
+			}
+			if (kern_buf) {
+				memcpy(ename + inline_len, uidata,
+				       len - inline_len);
+			} else {
+				err = copy_from_user(ename + inline_len,
+						     uidata, len - inline_len);
+				if (err) {
+					err = -EFAULT;
+					goto out_free;
+				}
+			}
+		}
+		ename[len] = 0;
+		if (p9_is_proto_dotu(c)) {
+			/* For dotu we also have error code */
+			err = p9pdu_readf(req->rc,
+					  c->proto_version, "d", &ecode);
+			if (err)
+				goto out_free;
+			err = -ecode;
+		}
+		if (!err || !IS_ERR_VALUE(err)) {
+			err = p9_errstr2errno(ename, strlen(ename));
+
+			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+				   -ecode, ename);
+		}
+		kfree(ename);
+	} else {
+		err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+		err = -ecode;
+
+		P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+	}
+	return err;
+
+out_free:
+	kfree(ename);
+out_err:
+	P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+	return err;
+}
+
 static struct p9_req_t *
 p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
@@ -579,23 +663,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 	return 0;
 }
 
-/**
- * p9_client_rpc - issue a request and wait for a response
- * @c: client session
- * @type: type of request
- * @fmt: protocol format string (see protocol.c)
- *
- * Returns request structure (which client must free using p9_free_req)
- */
-
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
+					      int8_t type, int req_size,
+					      const char *fmt, va_list ap)
 {
-	va_list ap;
 	int tag, err;
 	struct p9_req_t *req;
-	unsigned long flags;
-	int sigpending;
 
 	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -607,12 +680,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
 		return ERR_PTR(-EIO);
 
-	if (signal_pending(current)) {
-		sigpending = 1;
-		clear_thread_flag(TIF_SIGPENDING);
-	} else
-		sigpending = 0;
-
 	tag = P9_NOTAG;
 	if (type != P9_TVERSION) {
 		tag = p9_idpool_get(c->tagpool);
@@ -620,18 +687,51 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 			return ERR_PTR(-ENOMEM);
 	}
 
-	req = p9_tag_alloc(c, tag);
+	req = p9_tag_alloc(c, tag, req_size);
 	if (IS_ERR(req))
 		return req;
 
 	/* marshall the data */
 	p9pdu_prepare(req->tc, tag, type);
-	va_start(ap, fmt);
 	err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
-	va_end(ap);
 	if (err)
 		goto reterr;
-	p9pdu_finalize(req->tc);
+	p9pdu_finalize(c, req->tc);
+	trace_9p_client_req(c, type, tag);
+	return req;
+reterr:
+	p9_free_req(c, req);
+	return ERR_PTR(err);
+}
+
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+{
+	va_list ap;
+	int sigpending, err;
+	unsigned long flags;
+	struct p9_req_t *req;
+
+	va_start(ap, fmt);
+	req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+	va_end(ap);
+	if (IS_ERR(req))
+		return req;
+
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	} else
+		sigpending = 0;
 
 	err = c->trans_mod->request(c, req);
 	if (err < 0) {
@@ -639,18 +739,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 			c->status = Disconnected;
 		goto reterr;
 	}
-
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+	/* Wait for the response */
 	err = wait_event_interruptible(*req->wq,
 				       req->status >= REQ_STATUS_RCVD);
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
-		   req->wq, tag, err);
 
 	if (req->status == REQ_STATUS_ERROR) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
 		err = req->t_err;
 	}
-
 	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
 		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
 		sigpending = 1;
@@ -663,25 +759,102 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 		if (req->status == REQ_STATUS_RCVD)
 			err = 0;
 	}
-
 	if (sigpending) {
 		spin_lock_irqsave(&current->sighand->siglock, flags);
 		recalc_sigpending();
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
-
 	if (err < 0)
 		goto reterr;
 
 	err = p9_check_errors(c, req);
-	if (!err) {
-		P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+	trace_9p_client_res(c, type, req->rc->tag, err);
+	if (!err)
+		return req;
+reterr:
+	p9_free_req(c, req);
+	return ERR_PTR(err);
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: user bffer that should be ued for zero copy read
+ * @uodata: user buffer that shoud be user for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @hdrlen: reader header size, This is the size of response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+					 char *uidata, char *uodata,
+					 int inlen, int olen, int in_hdrlen,
+					 int kern_buf, const char *fmt, ...)
+{
+	va_list ap;
+	int sigpending, err;
+	unsigned long flags;
+	struct p9_req_t *req;
+
+	va_start(ap, fmt);
+	/*
+	 * We allocate a inline protocol data of only 4k bytes.
+	 * The actual content is passed in zero-copy fashion.
+	 */
+	req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+	va_end(ap);
+	if (IS_ERR(req))
 		return req;
+
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	} else
+		sigpending = 0;
+
+	/* If we are called with KERNEL_DS force kern_buf */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		kern_buf = 1;
+
+	err = c->trans_mod->zc_request(c, req, uidata, uodata,
+				       inlen, olen, in_hdrlen, kern_buf);
+	if (err < 0) {
+		if (err == -EIO)
+			c->status = Disconnected;
+		goto reterr;
+	}
+	if (req->status == REQ_STATUS_ERROR) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+		err = req->t_err;
+	}
+	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+
+		if (c->trans_mod->cancel(c, req))
+			p9_client_flush(c, req);
+
+		/* if we received the response anyway, don't signal error */
+		if (req->status == REQ_STATUS_RCVD)
+			err = 0;
+	}
+	if (sigpending) {
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
+	if (err < 0)
+		goto reterr;
 
+	err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+	trace_9p_client_res(c, type, req->rc->tag, err);
+	if (!err)
+		return req;
 reterr:
-	P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
-		   err);
 	p9_free_req(c, req);
 	return ERR_PTR(err);
 }
@@ -769,7 +942,7 @@ static int p9_client_version(struct p9_client *c)
 	err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
 	if (err) {
 		P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(c, req->rc);
 		goto error;
 	}
 
@@ -906,15 +1079,14 @@ EXPORT_SYMBOL(p9_client_begin_disconnect);
 struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 	char *uname, u32 n_uname, char *aname)
 {
-	int err;
+	int err = 0;
 	struct p9_req_t *req;
 	struct p9_fid *fid;
 	struct p9_qid qid;
 
-	P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
-		   afid ? afid->fid : -1, uname, aname);
-	err = 0;
 
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+		   afid ? afid->fid : -1, uname, aname);
 	fid = p9_fid_create(clnt);
 	if (IS_ERR(fid)) {
 		err = PTR_ERR(fid);
@@ -931,7 +1103,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -991,7 +1163,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1058,7 +1230,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1101,7 +1273,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1146,7 +1318,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1185,7 +1357,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1330,13 +1502,15 @@ int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		u32 count)
 {
-	int err, rsize;
-	struct p9_client *clnt;
-	struct p9_req_t *req;
 	char *dataptr;
+	int kernel_buf = 0;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+	int err, rsize, non_zc = 0;
+
 
-	P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
-		   (long long unsigned) offset, count);
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+		   fid->fid, (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -1348,13 +1522,24 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		rsize = count;
 
 	/* Don't bother zerocopy for small IO (< 1024) */
-	if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
-		req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
-				    rsize, data, udata);
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		char *indata;
+		if (data) {
+			kernel_buf = 1;
+			indata = data;
+		} else
+			indata = (char *)udata;
+		/*
+		 * response header len is 11
+		 * PDU Header(7) + IO Size (4)
+		 */
+		req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
+				       11, kernel_buf, "dqd", fid->fid,
+				       offset, rsize);
 	} else {
+		non_zc = 1;
 		req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
 				    rsize);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -1363,14 +1548,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
-	P9_DUMP_PKT(1, req->rc);
 
-	if (!req->tc->pbuf_size) {
+	if (non_zc) {
 		if (data) {
 			memmove(data, dataptr, count);
 		} else {
@@ -1396,6 +1580,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		u64 offset, u32 count)
 {
 	int err, rsize;
+	int kernel_buf = 0;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 
@@ -1411,19 +1596,24 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 	if (count < rsize)
 		rsize = count;
 
-	/* Don't bother zerocopy form small IO (< 1024) */
-	if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
-		req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
-				    rsize, data, udata);
+	/* Don't bother zerocopy for small IO (< 1024) */
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		char *odata;
+		if (data) {
+			kernel_buf = 1;
+			odata = data;
+		} else
+			odata = (char *)udata;
+		req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
+				       P9_ZC_HDR_SZ, kernel_buf, "dqd",
+				       fid->fid, offset, rsize);
 	} else {
-
 		if (data)
 			req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
 					    offset, rsize, data);
 		else
 			req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
 					    offset, rsize, udata);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
1429 err = PTR_ERR(req); 1619 err = PTR_ERR(req);
@@ -1432,7 +1622,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1432 1622
1433 err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count); 1623 err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
1434 if (err) { 1624 if (err) {
1435 P9_DUMP_PKT(1, req->rc); 1625 trace_9p_protocol_dump(clnt, req->rc);
1436 goto free_and_error; 1626 goto free_and_error;
1437 } 1627 }
1438 1628
@@ -1472,7 +1662,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1472 1662
1473 err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret); 1663 err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
1474 if (err) { 1664 if (err) {
1475 P9_DUMP_PKT(1, req->rc); 1665 trace_9p_protocol_dump(clnt, req->rc);
1476 p9_free_req(clnt, req); 1666 p9_free_req(clnt, req);
1477 goto error; 1667 goto error;
1478 } 1668 }
@@ -1523,7 +1713,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1671,7 +1861,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
 			&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
 			&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1778,7 +1968,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
 	}
 	err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1824,7 +2014,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
 
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 {
-	int err, rsize;
+	int err, rsize, non_zc = 0;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1842,13 +2032,18 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 	if (count < rsize)
 		rsize = count;
 
-	if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) {
-		req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
-				    offset, rsize, data);
+	/* Don't bother zerocopy for small IO (< 1024) */
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		/*
+		 * response header len is 11
+		 * PDU Header(7) + IO Size (4)
+		 */
+		req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
+				       11, 1, "dqd", fid->fid, offset, rsize);
 	} else {
+		non_zc = 1;
 		req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
 				    offset, rsize);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -1857,13 +2052,13 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
-	if (!req->tc->pbuf_size && data)
+	if (non_zc)
 		memmove(data, dataptr, count);
 
 	p9_free_req(clnt, req);
@@ -1894,7 +2089,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1925,7 +2120,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1960,7 +2155,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1993,7 +2188,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
 			&glock->start, &glock->length, &glock->proc_id,
 			&glock->client_id);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -2021,7 +2216,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
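
Throughout the client.c hunks, the literal 11 passed as the zero-copy response header length follows from the 9P wire format: every R-message starts with size[4] + type[1] + tag[2] = 7 bytes, and Rread/Rreaddir add a count[4] field before the payload. As a sketch of the arithmetic (macro names here are illustrative, not from the tree):

	#define P9_SKETCH_HDRSZ		(4 + 1 + 2)		/* size + type + tag = 7 */
	#define P9_SKETCH_RREAD_HDRSZ	(P9_SKETCH_HDRSZ + 4)	/* + count[4] = 11 */
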
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index df58375ea6b3..55e10a96c902 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -37,40 +37,11 @@
 #include <net/9p/client.h>
 #include "protocol.h"
 
+#include <trace/events/9p.h>
+
 static int
 p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 
-#ifdef CONFIG_NET_9P_DEBUG
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-	int len = pdu->size;
-
-	if ((p9_debug_level & P9_DEBUG_VPKT) != P9_DEBUG_VPKT) {
-		if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) {
-			if (len > 32)
-				len = 32;
-		} else {
-			/* shouldn't happen */
-			return;
-		}
-	}
-
-	if (way)
-		print_hex_dump_bytes("[9P] ", DUMP_PREFIX_OFFSET, pdu->sdata,
-				     len);
-	else
-		print_hex_dump_bytes("]9P[ ", DUMP_PREFIX_OFFSET, pdu->sdata,
-				     len);
-}
-#else
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-}
-#endif
-EXPORT_SYMBOL(p9pdu_dump);
-
 void p9stat_free(struct p9_wstat *stbuf)
 {
 	kfree(stbuf->name);
@@ -81,7 +52,7 @@ void p9stat_free(struct p9_wstat *stbuf)
 }
 EXPORT_SYMBOL(p9stat_free);
 
-static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
 {
 	size_t len = min(pdu->size - pdu->offset, size);
 	memcpy(data, &pdu->sdata[pdu->offset], len);
@@ -108,26 +79,6 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 	return size - len;
 }
 
-static size_t
-pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
-	      size_t size)
-{
-	BUG_ON(pdu->size > P9_IOHDRSZ);
-	pdu->pubuf = (char __user *)udata;
-	pdu->pkbuf = (char *)kdata;
-	pdu->pbuf_size = size;
-	return 0;
-}
-
-static size_t
-pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
-{
-	BUG_ON(pdu->size > P9_READDIRHDRSZ);
-	pdu->pkbuf = (char *)kdata;
-	pdu->pbuf_size = size;
-	return 0;
-}
-
 /*
 	b - int8_t
 	w - int16_t
@@ -459,26 +410,6 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 					errcode = -EFAULT;
 			}
 			break;
-		case 'E':{
-				int32_t cnt = va_arg(ap, int32_t);
-				const char *k = va_arg(ap, const void *);
-				const char __user *u = va_arg(ap,
-							const void __user *);
-				errcode = p9pdu_writef(pdu, proto_version, "d",
-							cnt);
-				if (!errcode && pdu_write_urw(pdu, k, u, cnt))
-					errcode = -EFAULT;
-			}
-			break;
-		case 'F':{
-				int32_t cnt = va_arg(ap, int32_t);
-				const char *k = va_arg(ap, const void *);
-				errcode = p9pdu_writef(pdu, proto_version, "d",
-							cnt);
-				if (!errcode && pdu_write_readdir(pdu, k, cnt))
-					errcode = -EFAULT;
-			}
-			break;
 		case 'U':{
 				int32_t count = va_arg(ap, int32_t);
 				const char __user *udata =
@@ -591,7 +522,7 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
 	return ret;
 }
 
-int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
+int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
 {
 	struct p9_fcall fake_pdu;
 	int ret;
@@ -601,10 +532,10 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
 	fake_pdu.sdata = buf;
 	fake_pdu.offset = 0;
 
-	ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
+	ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
 	if (ret) {
 		P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
-		P9_DUMP_PKT(0, &fake_pdu);
+		trace_9p_protocol_dump(clnt, &fake_pdu);
 	}
 
 	return ret;
@@ -617,7 +548,7 @@ int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
 	return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
 }
 
-int p9pdu_finalize(struct p9_fcall *pdu)
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
 {
 	int size = pdu->size;
 	int err;
@@ -626,7 +557,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
 	err = p9pdu_writef(pdu, 0, "d", size);
 	pdu->size = size;
 
-	P9_DUMP_PKT(0, pdu);
+	trace_9p_protocol_dump(clnt, pdu);
 	P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
 		   pdu->id, pdu->tag);
 
@@ -637,14 +568,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
 {
 	pdu->offset = 0;
 	pdu->size = 0;
-	pdu->private = NULL;
-	pdu->pubuf = NULL;
-	pdu->pkbuf = NULL;
-	pdu->pbuf_size = 0;
 }
 
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
-		  int proto_version)
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+		  struct p9_dirent *dirent)
 {
 	struct p9_fcall fake_pdu;
 	int ret;
@@ -655,11 +582,11 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
 	fake_pdu.sdata = buf;
 	fake_pdu.offset = 0;
 
-	ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid,
+	ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
 			  &dirent->d_off, &dirent->d_type, &nameptr);
 	if (ret) {
 		P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
-		P9_DUMP_PKT(1, &fake_pdu);
+		trace_9p_protocol_dump(clnt, &fake_pdu);
 		goto out;
 	}
 
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index 2431c0f38d56..2cc525fa49fa 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -29,6 +29,6 @@ int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 		  va_list ap);
 int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
-int p9pdu_finalize(struct p9_fcall *pdu);
-void p9pdu_dump(int, struct p9_fcall *);
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
 void p9pdu_reset(struct p9_fcall *pdu);
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9a70ebdec56e..de8df957867d 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -21,30 +21,25 @@
 
 /**
  * p9_release_req_pages - Release pages after the transaction.
- * @*private: PDU's private page of struct trans_rpage_info
  */
-void
-p9_release_req_pages(struct trans_rpage_info *rpinfo)
+void p9_release_pages(struct page **pages, int nr_pages)
 {
 	int i = 0;
-
-	while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
-		put_page(rpinfo->rp_data[i]);
+	while (pages[i] && nr_pages--) {
+		put_page(pages[i]);
 		i++;
 	}
 }
-EXPORT_SYMBOL(p9_release_req_pages);
+EXPORT_SYMBOL(p9_release_pages);
 
 /**
  * p9_nr_pages - Return number of pages needed to accommodate the payload.
  */
-int
-p9_nr_pages(struct p9_req_t *req)
+int p9_nr_pages(char *data, int len)
 {
 	unsigned long start_page, end_page;
-	start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
-	end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
-		    PAGE_SIZE - 1) >> PAGE_SHIFT;
+	start_page = (unsigned long)data >> PAGE_SHIFT;
+	end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	return end_page - start_page;
 }
 EXPORT_SYMBOL(p9_nr_pages);
@@ -58,35 +53,17 @@ EXPORT_SYMBOL(p9_nr_pages);
58 * @nr_pages: number of pages to accommodate the payload 53 * @nr_pages: number of pages to accommodate the payload
59 * @rw: Indicates if the pages are for read or write. 54 * @rw: Indicates if the pages are for read or write.
60 */ 55 */
61int
62p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
63 int nr_pages, u8 rw)
64{
65 uint32_t first_page_bytes = 0;
66 int32_t pdata_mapped_pages;
67 struct trans_rpage_info *rpinfo;
68
69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
70 56
71 if (*pdata_off) 57int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), 58{
73 req->tc->pbuf_size); 59 int nr_mapped_pages;
74 60
75 rpinfo = req->tc->private; 61 nr_mapped_pages = get_user_pages_fast((unsigned long)data,
76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, 62 *nr_pages, write, pages);
77 nr_pages, rw, &rpinfo->rp_data[0]); 63 if (nr_mapped_pages <= 0)
78 if (pdata_mapped_pages <= 0) 64 return nr_mapped_pages;
79 return pdata_mapped_pages;
80 65
81 rpinfo->rp_nr_pages = pdata_mapped_pages; 66 *nr_pages = nr_mapped_pages;
82 if (*pdata_off) {
83 *pdata_len = first_page_bytes;
84 *pdata_len += min((req->tc->pbuf_size - *pdata_len),
85 ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
86 } else {
87 *pdata_len = min(req->tc->pbuf_size,
88 (size_t)pdata_mapped_pages << PAGE_SHIFT);
89 }
90 return 0; 67 return 0;
91} 68}
92EXPORT_SYMBOL(p9_payload_gup); 69EXPORT_SYMBOL(p9_payload_gup);
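
Taken together, the reworked helpers give transports a plain pin/use/unpin cycle. A minimal sketch of that cycle (hypothetical caller, not part of this diff); note that p9_nr_pages() rounds out to page boundaries, so e.g. a 100-byte buffer starting 8 bytes before a page boundary straddles two pages:

	/* hedged sketch: pin a user buffer, use it, release it */
	static int zc_pin_example(char *udata, int len, int write)
	{
		struct page **pages;
		int err, nr_pages = p9_nr_pages(udata, len);

		pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = p9_payload_gup(udata, &nr_pages, pages, write);
		if (!err) {
			/* ... pack pages into a scatter/gather list, do I/O ... */
			p9_release_pages(pages, nr_pages);
		}
		kfree(pages);
		return err;
	}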
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 76309223bb02..173bb550a9eb 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,21 +12,6 @@
  *
  */
 
-/* TRUE if it is user context */
-#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
-
-/**
- * struct trans_rpage_info - To store mapped page information in PDU.
- * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
- * @rp_nr_pages: Number of mapped pages
- * @rp_data: Array of page pointers
- */
-struct trans_rpage_info {
-	u8 rp_alloc;
-	int rp_nr_pages;
-	struct page *rp_data[0];
-};
-
-void p9_release_req_pages(struct trans_rpage_info *);
-int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
-int p9_nr_pages(struct p9_req_t *);
+void p9_release_pages(struct page **, int);
+int p9_payload_gup(char *, int *, struct page **, int);
+int p9_nr_pages(char *, int);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e317583fcc73..32aa9834229c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -150,12 +150,10 @@ static void req_done(struct virtqueue *vq)
 	while (1) {
 		spin_lock_irqsave(&chan->lock, flags);
 		rc = virtqueue_get_buf(chan->vq, &len);
-
 		if (rc == NULL) {
 			spin_unlock_irqrestore(&chan->lock, flags);
 			break;
 		}
-
 		chan->ring_bufs_avail = 1;
 		spin_unlock_irqrestore(&chan->lock, flags);
 		/* Wakeup if anyone waiting for VirtIO ring space. */
@@ -163,17 +161,6 @@ static void req_done(struct virtqueue *vq)
 		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
 		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
 		req = p9_tag_lookup(chan->client, rc->tag);
-		if (req->tc->private) {
-			struct trans_rpage_info *rp = req->tc->private;
-			int p = rp->rp_nr_pages;
-			/*Release pages */
-			p9_release_req_pages(rp);
-			atomic_sub(p, &vp_pinned);
-			wake_up(&vp_wq);
-			if (rp->rp_alloc)
-				kfree(rp);
-			req->tc->private = NULL;
-		}
 		req->status = REQ_STATUS_RCVD;
 		p9_client_cb(chan->client, req);
 	}
@@ -193,9 +180,8 @@ static void req_done(struct virtqueue *vq)
  *
  */
 
-static int
-pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
-	     int count)
+static int pack_sg_list(struct scatterlist *sg, int start,
+			int limit, char *data, int count)
 {
 	int s;
 	int index = start;
@@ -224,31 +210,36 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
  * this takes a list of pages.
  * @sg: scatter/gather list to pack into
  * @start: which segment of the sg_list to start at
- * @pdata_off: Offset into the first page
  * @**pdata: a list of pages to add into sg.
+ * @nr_pages: number of pages to pack into the scatter/gather list
+ * @data: data to pack into scatter/gather list
  * @count: amount of data to pack into the scatter/gather list
  */
 static int
-pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
-	       struct page **pdata, int count)
+pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+	       struct page **pdata, int nr_pages, char *data, int count)
 {
-	int s;
-	int i = 0;
+	int i = 0, s;
+	int data_off;
 	int index = start;
 
-	if (pdata_off) {
-		s = min((int)(PAGE_SIZE - pdata_off), count);
-		sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
-		count -= s;
-	}
-
-	while (count) {
-		BUG_ON(index > limit);
-		s = min((int)PAGE_SIZE, count);
-		sg_set_page(&sg[index++], pdata[i++], s, 0);
+	BUG_ON(nr_pages > (limit - start));
+	/*
+	 * if the first page doesn't start at
+	 * page boundary find the offset
+	 */
+	data_off = offset_in_page(data);
+	while (nr_pages) {
+		s = rest_of_page(data);
+		if (s > count)
+			s = count;
+		sg_set_page(&sg[index++], pdata[i++], s, data_off);
+		data_off = 0;
+		data += s;
 		count -= s;
+		nr_pages--;
 	}
-	return index-start;
+	return index - start;
 }
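
pack_sg_list_p() leans on rest_of_page(), which sits earlier in trans_virtio.c and outside every hunk shown here; it presumably reduces to the page-remainder computation below, so the first iteration takes only the bytes up to the next page boundary and every following page starts at offset 0:

	/* reconstructed for reference, not part of this diff */
	static unsigned int rest_of_page(void *data)
	{
		return PAGE_SIZE - offset_in_page(data);
	}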
 
 /**
@@ -261,114 +252,166 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
 static int
 p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
-	int in, out, inp, outp;
-	struct virtio_chan *chan = client->trans;
+	int err;
+	int in, out;
 	unsigned long flags;
-	size_t pdata_off = 0;
-	struct trans_rpage_info *rpinfo = NULL;
-	int err, pdata_len = 0;
+	struct virtio_chan *chan = client->trans;
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
 	req->status = REQ_STATUS_SENT;
+req_retry:
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Handle out VirtIO ring buffers */
+	out = pack_sg_list(chan->sg, 0,
+			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
 
-	if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
-		int nr_pages = p9_nr_pages(req);
-		int rpinfo_size = sizeof(struct trans_rpage_info) +
-			sizeof(struct page *) * nr_pages;
+	in = pack_sg_list(chan->sg, out,
+			  VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
 
-		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
-			err = wait_event_interruptible(vp_wq,
-				atomic_read(&vp_pinned) < chan->p9_max_pages);
+	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+	if (err < 0) {
+		if (err == -ENOSPC) {
+			chan->ring_bufs_avail = 0;
+			spin_unlock_irqrestore(&chan->lock, flags);
+			err = wait_event_interruptible(*chan->vc_wq,
+						       chan->ring_bufs_avail);
 			if (err == -ERESTARTSYS)
 				return err;
-			P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
-		}
 
-		if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
-			/* We can use sdata */
-			req->tc->private = req->tc->sdata + req->tc->size;
-			rpinfo = (struct trans_rpage_info *)req->tc->private;
-			rpinfo->rp_alloc = 0;
+			P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+			goto req_retry;
 		} else {
-			req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
-			if (!req->tc->private) {
-				P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
-					"private kmalloc returned NULL");
-				return -ENOMEM;
-			}
-			rpinfo = (struct trans_rpage_info *)req->tc->private;
-			rpinfo->rp_alloc = 1;
+			spin_unlock_irqrestore(&chan->lock, flags);
+			P9_DPRINTK(P9_DEBUG_TRANS,
+				   "9p debug: "
+				   "virtio rpc add_buf returned failure");
+			return -EIO;
 		}
+	}
+	virtqueue_kick(chan->vq);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
-		err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
-				     req->tc->id == P9_TREAD ? 1 : 0);
-		if (err < 0) {
-			if (rpinfo->rp_alloc)
-				kfree(rpinfo);
+	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+	return 0;
+}
+
+static int p9_get_mapped_pages(struct virtio_chan *chan,
+			       struct page **pages, char *data,
+			       int nr_pages, int write, int kern_buf)
+{
+	int err;
+	if (!kern_buf) {
+		/*
+		 * We allow only p9_max_pages pinned. We wait for the
+		 * other zc requests to finish here
+		 */
+		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+			err = wait_event_interruptible(vp_wq,
+			      (atomic_read(&vp_pinned) < chan->p9_max_pages));
+			if (err == -ERESTARTSYS)
+				return err;
+		}
+		err = p9_payload_gup(data, &nr_pages, pages, write);
+		if (err < 0)
 			return err;
-		} else {
-			atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
+		atomic_add(nr_pages, &vp_pinned);
+	} else {
+		/* kernel buffer, no need to pin pages */
+		int s, index = 0;
+		int count = nr_pages;
+		while (nr_pages) {
+			s = rest_of_page(data);
+			pages[index++] = virt_to_page(data);
+			data += s;
+			nr_pages--;
 		}
+		nr_pages = count;
 	}
+	return nr_pages;
+}
 
-req_retry_pinned:
-	spin_lock_irqsave(&chan->lock, flags);
+/**
+ * p9_virtio_zc_request - issue a zero copy request
+ * @client: client instance issuing the request
+ * @req: request to be issued
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @outlen: write buffer size
+ * @in_hdr_len: read header size; this is the size of the response protocol data
+ *
+ */
+static int
+p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+		     char *uidata, char *uodata, int inlen,
+		     int outlen, int in_hdr_len, int kern_buf)
+{
+	int in, out, err;
+	unsigned long flags;
+	int in_nr_pages = 0, out_nr_pages = 0;
+	struct page **in_pages = NULL, **out_pages = NULL;
+	struct virtio_chan *chan = client->trans;
 
-	/* Handle out VirtIO ring buffers */
-	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
-			   req->tc->size);
-
-	if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
-		/* We have additional write payload buffer to take care */
-		if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
-			outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
-					      pdata_off, rpinfo->rp_data, pdata_len);
-		} else {
-			char *pbuf;
-			if (req->tc->pubuf)
-				pbuf = (__force char *) req->tc->pubuf;
-			else
-				pbuf = req->tc->pkbuf;
-			outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
-					    req->tc->pbuf_size);
-		}
-		out += outp;
-	}
-
-	/* Handle in VirtIO ring buffers */
-	if (req->tc->pbuf_size &&
-	    ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
-		/*
-		 * Take care of additional Read payload.
-		 * 11 is the read/write header = PDU Header(7) + IO Size (4).
-		 * Arrange in such a way that server places header in the
-		 * alloced memory and payload onto the user buffer.
-		 */
-		inp = pack_sg_list(chan->sg, out,
-				   VIRTQUEUE_NUM, req->rc->sdata, 11);
-		/*
-		 * Running executables in the filesystem may result in
-		 * a read request with kernel buffer as opposed to user buffer.
-		 */
-		if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
-			in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
-					    pdata_off, rpinfo->rp_data, pdata_len);
-		} else {
-			char *pbuf;
-			if (req->tc->pubuf)
-				pbuf = (__force char *) req->tc->pubuf;
-			else
-				pbuf = req->tc->pkbuf;
-
-			in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
-					  pbuf, req->tc->pbuf_size);
-		}
-		in += inp;
-	} else {
-		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
-				  req->rc->sdata, req->rc->capacity);
-	}
+	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+
+	if (uodata) {
+		out_nr_pages = p9_nr_pages(uodata, outlen);
+		out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
+				    GFP_NOFS);
+		if (!out_pages) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
+						   out_nr_pages, 0, kern_buf);
+		if (out_nr_pages < 0) {
+			err = out_nr_pages;
+			kfree(out_pages);
+			out_pages = NULL;
+			goto err_out;
+		}
+	}
+	if (uidata) {
+		in_nr_pages = p9_nr_pages(uidata, inlen);
+		in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
+				   GFP_NOFS);
+		if (!in_pages) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
+						  in_nr_pages, 1, kern_buf);
+		if (in_nr_pages < 0) {
+			err = in_nr_pages;
+			kfree(in_pages);
+			in_pages = NULL;
+			goto err_out;
+		}
+	}
+	req->status = REQ_STATUS_SENT;
+req_retry_pinned:
+	spin_lock_irqsave(&chan->lock, flags);
+	/* out data */
+	out = pack_sg_list(chan->sg, 0,
+			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+
+	if (out_pages)
+		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+				      out_pages, out_nr_pages, uodata, outlen);
+	/*
+	 * Take care of in data
+	 * For example TREAD have 11.
+	 * 11 is the read/write header = PDU Header(7) + IO Size (4).
+	 * Arrange in such a way that server places header in the
+	 * alloced memory and payload onto the user buffer.
+	 */
+	in = pack_sg_list(chan->sg, out,
+			  VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
+	if (in_pages)
+		in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+				     in_pages, in_nr_pages, uidata, inlen);
 
 	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
 	if (err < 0) {
@@ -376,28 +419,45 @@ req_retry_pinned:
 			chan->ring_bufs_avail = 0;
 			spin_unlock_irqrestore(&chan->lock, flags);
 			err = wait_event_interruptible(*chan->vc_wq,
 						       chan->ring_bufs_avail);
 			if (err == -ERESTARTSYS)
-				return err;
+				goto err_out;
 
 			P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
 			goto req_retry_pinned;
 		} else {
 			spin_unlock_irqrestore(&chan->lock, flags);
 			P9_DPRINTK(P9_DEBUG_TRANS,
 				   "9p debug: "
				   "virtio rpc add_buf returned failure");
-			if (rpinfo && rpinfo->rp_alloc)
-				kfree(rpinfo);
-			return -EIO;
+			err = -EIO;
+			goto err_out;
 		}
 	}
-
 	virtqueue_kick(chan->vq);
 	spin_unlock_irqrestore(&chan->lock, flags);
-
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
-	return 0;
+	err = wait_event_interruptible(*req->wq,
+				       req->status >= REQ_STATUS_RCVD);
+	/*
+	 * Non kernel buffers are pinned, unpin them
+	 */
+err_out:
+	if (!kern_buf) {
+		if (in_pages) {
+			p9_release_pages(in_pages, in_nr_pages);
+			atomic_sub(in_nr_pages, &vp_pinned);
+		}
+		if (out_pages) {
+			p9_release_pages(out_pages, out_nr_pages);
+			atomic_sub(out_nr_pages, &vp_pinned);
+		}
+		/* wakeup anybody waiting for slots to pin pages */
+		wake_up(&vp_wq);
+	}
+	kfree(in_pages);
+	kfree(out_pages);
+	return err;
 }
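
For a zero copy TREAD, the scatter/gather list assembled above has a fixed shape (a sketch, not normative):

	sg entries for req->tc->sdata   - request PDU (header + arguments)
	sg entries for req->rc->sdata   - first in_hdr_len bytes of the reply
	sg entries for in_pages[]       - payload, placed by the server
	                                  directly into the pinned user pages

so the protocol header lands in kernel memory while the payload skips the bounce copy entirely, which is the point of the zc path. The client core is expected to reach this function through the new .zc_request transport hook registered below.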
 
 static ssize_t p9_mount_tag_show(struct device *dev,
@@ -591,8 +651,8 @@ static struct p9_trans_module p9_virtio_trans = {
 	.create = p9_virtio_create,
 	.close = p9_virtio_close,
 	.request = p9_virtio_request,
+	.zc_request = p9_virtio_zc_request,
 	.cancel = p9_virtio_cancel,
-
 	/*
 	 * We leave one entry for input and one entry for response
 	 * headers. We also skip one more entry to accommodate, address
@@ -600,7 +660,6 @@ static struct p9_trans_module p9_virtio_trans = {
 	 * page in zero copy.
 	 */
 	.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
-	.pref = P9_TRANS_PREF_PAYLOAD_SEP,
 	.def = 0,
 	.owner = THIS_MODULE,
 };
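
With VIRTQUEUE_NUM = 128 (the value this file uses when sizing chan->sg) and 4 KiB pages, that works out to .maxsize = 4096 * (128 - 3) = 512000 bytes: three ring entries are held in reserve for the request header, the response header, and the extra page a non page-aligned payload can occupy.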
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1fe7c35e8d1..bfa9ab93eda5 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -951,13 +951,12 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 215c9fad7cdf..f1964caa0f83 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
 	.ndo_start_xmit = lec_start_xmit,
 	.ndo_change_mtu = lec_change_mtu,
 	.ndo_tx_timeout = lec_tx_timeout,
-	.ndo_set_multicast_list = lec_set_multicast_list,
+	.ndo_set_rx_mode = lec_set_multicast_list,
 };
 
 static const unsigned char lec_ctrl_magic[] = {
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 2de93d00631b..ce6861166499 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,8 +19,8 @@
 #
 
 obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += aggregation.o
 batman-adv-y += bat_debugfs.o
+batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
 batman-adv-y += gateway_client.o
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
deleted file mode 100644
index 69467fe71ff2..000000000000
--- a/net/batman-adv/aggregation.c
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "translation-table.h"
24#include "aggregation.h"
25#include "send.h"
26#include "routing.h"
27#include "hard-interface.h"
28
29/* return true if new_packet can be aggregated with forw_packet */
30static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
31 struct bat_priv *bat_priv,
32 int packet_len,
33 unsigned long send_time,
34 bool directlink,
35 const struct hard_iface *if_incoming,
36 const struct forw_packet *forw_packet)
37{
38 struct batman_packet *batman_packet =
39 (struct batman_packet *)forw_packet->skb->data;
40 int aggregated_bytes = forw_packet->packet_len + packet_len;
41 struct hard_iface *primary_if = NULL;
42 bool res = false;
43
44 /**
45 * we can aggregate the current packet to this aggregated packet
46 * if:
47 *
48 * - the send time is within our MAX_AGGREGATION_MS time
49 * - the resulting packet wont be bigger than
50 * MAX_AGGREGATION_BYTES
51 */
52
53 if (time_before(send_time, forw_packet->send_time) &&
54 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
55 forw_packet->send_time) &&
56 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
57
58 /**
59 * check aggregation compatibility
60 * -> direct link packets are broadcasted on
61 * their interface only
62 * -> aggregate packet if the current packet is
63 * a "global" packet as well as the base
64 * packet
65 */
66
67 primary_if = primary_if_get_selected(bat_priv);
68 if (!primary_if)
69 goto out;
70
71 /* packets without direct link flag and high TTL
72 * are flooded through the net */
73 if ((!directlink) &&
74 (!(batman_packet->flags & DIRECTLINK)) &&
75 (batman_packet->ttl != 1) &&
76
77 /* own packets originating non-primary
78 * interfaces leave only that interface */
79 ((!forw_packet->own) ||
80 (forw_packet->if_incoming == primary_if))) {
81 res = true;
82 goto out;
83 }
84
85 /* if the incoming packet is sent via this one
86 * interface only - we still can aggregate */
87 if ((directlink) &&
88 (new_batman_packet->ttl == 1) &&
89 (forw_packet->if_incoming == if_incoming) &&
90
91 /* packets from direct neighbors or
92 * own secondary interface packets
93 * (= secondary interface packets in general) */
94 (batman_packet->flags & DIRECTLINK ||
95 (forw_packet->own &&
96 forw_packet->if_incoming != primary_if))) {
97 res = true;
98 goto out;
99 }
100 }
101
102out:
103 if (primary_if)
104 hardif_free_ref(primary_if);
105 return res;
106}
107
108/* create a new aggregated packet and add this packet to it */
109static void new_aggregated_packet(const unsigned char *packet_buff,
110 int packet_len, unsigned long send_time,
111 bool direct_link,
112 struct hard_iface *if_incoming,
113 int own_packet)
114{
115 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
116 struct forw_packet *forw_packet_aggr;
117 unsigned char *skb_buff;
118
119 if (!atomic_inc_not_zero(&if_incoming->refcount))
120 return;
121
122 /* own packet should always be scheduled */
123 if (!own_packet) {
124 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
125 bat_dbg(DBG_BATMAN, bat_priv,
126 "batman packet queue full\n");
127 goto out;
128 }
129 }
130
131 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
132 if (!forw_packet_aggr) {
133 if (!own_packet)
134 atomic_inc(&bat_priv->batman_queue_left);
135 goto out;
136 }
137
138 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
139 (packet_len < MAX_AGGREGATION_BYTES))
140 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
141 sizeof(struct ethhdr));
142 else
143 forw_packet_aggr->skb = dev_alloc_skb(packet_len +
144 sizeof(struct ethhdr));
145
146 if (!forw_packet_aggr->skb) {
147 if (!own_packet)
148 atomic_inc(&bat_priv->batman_queue_left);
149 kfree(forw_packet_aggr);
150 goto out;
151 }
152 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
153
154 INIT_HLIST_NODE(&forw_packet_aggr->list);
155
156 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
157 forw_packet_aggr->packet_len = packet_len;
158 memcpy(skb_buff, packet_buff, packet_len);
159
160 forw_packet_aggr->own = own_packet;
161 forw_packet_aggr->if_incoming = if_incoming;
162 forw_packet_aggr->num_packets = 0;
163 forw_packet_aggr->direct_link_flags = NO_FLAGS;
164 forw_packet_aggr->send_time = send_time;
165
166 /* save packet direct link flag status */
167 if (direct_link)
168 forw_packet_aggr->direct_link_flags |= 1;
169
170 /* add new packet to packet list */
171 spin_lock_bh(&bat_priv->forw_bat_list_lock);
172 hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
173 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
174
175 /* start timer for this packet */
176 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
177 send_outstanding_bat_packet);
178 queue_delayed_work(bat_event_workqueue,
179 &forw_packet_aggr->delayed_work,
180 send_time - jiffies);
181
182 return;
183out:
184 hardif_free_ref(if_incoming);
185}
186
187/* aggregate a new packet into the existing aggregation */
188static void aggregate(struct forw_packet *forw_packet_aggr,
189 const unsigned char *packet_buff, int packet_len,
190 bool direct_link)
191{
192 unsigned char *skb_buff;
193
194 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
195 memcpy(skb_buff, packet_buff, packet_len);
196 forw_packet_aggr->packet_len += packet_len;
197 forw_packet_aggr->num_packets++;
198
199 /* save packet direct link flag status */
200 if (direct_link)
201 forw_packet_aggr->direct_link_flags |=
202 (1 << forw_packet_aggr->num_packets);
203}
204
205void add_bat_packet_to_list(struct bat_priv *bat_priv,
206 unsigned char *packet_buff, int packet_len,
207 struct hard_iface *if_incoming, int own_packet,
208 unsigned long send_time)
209{
210 /**
211 * _aggr -> pointer to the packet we want to aggregate with
212 * _pos -> pointer to the position in the queue
213 */
214 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
215 struct hlist_node *tmp_node;
216 struct batman_packet *batman_packet =
217 (struct batman_packet *)packet_buff;
218 bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
219
220 /* find position for the packet in the forward queue */
221 spin_lock_bh(&bat_priv->forw_bat_list_lock);
222 /* own packets are not to be aggregated */
223 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
224 hlist_for_each_entry(forw_packet_pos, tmp_node,
225 &bat_priv->forw_bat_list, list) {
226 if (can_aggregate_with(batman_packet,
227 bat_priv,
228 packet_len,
229 send_time,
230 direct_link,
231 if_incoming,
232 forw_packet_pos)) {
233 forw_packet_aggr = forw_packet_pos;
234 break;
235 }
236 }
237 }
238
239 /* nothing to aggregate with - either aggregation disabled or no
240 * suitable aggregation packet found */
241 if (!forw_packet_aggr) {
242 /* the following section can run without the lock */
243 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
244
245 /**
246 * if we could not aggregate this packet with one of the others
247 * we hold it back for a while, so that it might be aggregated
248 * later on
249 */
250 if ((!own_packet) &&
251 (atomic_read(&bat_priv->aggregated_ogms)))
252 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
253
254 new_aggregated_packet(packet_buff, packet_len,
255 send_time, direct_link,
256 if_incoming, own_packet);
257 } else {
258 aggregate(forw_packet_aggr,
259 packet_buff, packet_len,
260 direct_link);
261 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
262 }
263}
264
265/* unpack the aggregated packets and process them one by one */
266void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
267 unsigned char *packet_buff, int packet_len,
268 struct hard_iface *if_incoming)
269{
270 struct batman_packet *batman_packet;
271 int buff_pos = 0;
272 unsigned char *tt_buff;
273
274 batman_packet = (struct batman_packet *)packet_buff;
275
276 do {
277 /* network to host order for our 32bit seqno and the
278 orig_interval */
279 batman_packet->seqno = ntohl(batman_packet->seqno);
280 batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
281
282 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
283
284 receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
285
286 buff_pos += BAT_PACKET_LEN +
287 tt_len(batman_packet->tt_num_changes);
288
289 batman_packet = (struct batman_packet *)
290 (packet_buff + buff_pos);
291 } while (aggregated_packet(buff_pos, packet_len,
292 batman_packet->tt_num_changes));
293}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
deleted file mode 100644
index 216337bb841f..000000000000
--- a/net/batman-adv/aggregation.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
23#define _NET_BATMAN_ADV_AGGREGATION_H_
24
25#include "main.h"
26
27/* is there another aggregated packet here? */
28static inline int aggregated_packet(int buff_pos, int packet_len,
29 int tt_num_changes)
30{
31 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
32 sizeof(struct tt_change));
33
34 return (next_buff_pos <= packet_len) &&
35 (next_buff_pos <= MAX_AGGREGATION_BYTES);
36}
37
38void add_bat_packet_to_list(struct bat_priv *bat_priv,
39 unsigned char *packet_buff, int packet_len,
40 struct hard_iface *if_incoming, int own_packet,
41 unsigned long send_time);
42void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
43 unsigned char *packet_buff, int packet_len,
44 struct hard_iface *if_incoming);
45
46#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
new file mode 100644
index 000000000000..3512e251545b
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -0,0 +1,1170 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "bat_ogm.h"
24#include "translation-table.h"
25#include "ring_buffer.h"
26#include "originator.h"
27#include "routing.h"
28#include "gateway_common.h"
29#include "gateway_client.h"
30#include "hard-interface.h"
31#include "send.h"
32
33void bat_ogm_init(struct hard_iface *hard_iface)
34{
35 struct batman_ogm_packet *batman_ogm_packet;
36
37 hard_iface->packet_len = BATMAN_OGM_LEN;
38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
39
40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
41 batman_ogm_packet->packet_type = BAT_OGM;
42 batman_ogm_packet->version = COMPAT_VERSION;
43 batman_ogm_packet->flags = NO_FLAGS;
44 batman_ogm_packet->ttl = 2;
45 batman_ogm_packet->tq = TQ_MAX_VALUE;
46 batman_ogm_packet->tt_num_changes = 0;
47 batman_ogm_packet->ttvn = 0;
48}
49
50void bat_ogm_init_primary(struct hard_iface *hard_iface)
51{
52 struct batman_ogm_packet *batman_ogm_packet;
53
54 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
55 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
56 batman_ogm_packet->ttl = TTL;
57}
58
59void bat_ogm_update_mac(struct hard_iface *hard_iface)
60{
61 struct batman_ogm_packet *batman_ogm_packet;
62
63 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
64 memcpy(batman_ogm_packet->orig,
65 hard_iface->net_dev->dev_addr, ETH_ALEN);
66 memcpy(batman_ogm_packet->prev_sender,
67 hard_iface->net_dev->dev_addr, ETH_ALEN);
68}
69
70/* when do we schedule our own ogm to be sent */
71static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
72{
73 return jiffies + msecs_to_jiffies(
74 atomic_read(&bat_priv->orig_interval) -
75 JITTER + (random32() % 2*JITTER));
76}
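
Note on the jitter expression: % and * have equal precedence and group left-to-right in C, so random32() % 2*JITTER parses as (random32() % 2) * JITTER and yields only 0 or JITTER, not a uniform value in [0, 2*JITTER). If a uniform spread is the intent, the expression would need parentheses:

	return jiffies + msecs_to_jiffies(
		atomic_read(&bat_priv->orig_interval) -
		JITTER + (random32() % (2 * JITTER)));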
77
78/* when do we schedule an ogm packet to be sent */
79static unsigned long bat_ogm_fwd_send_time(void)
80{
81 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
82}
83
84/* apply hop penalty for a normal link */
85static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
86{
87 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
88 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
89}
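
Worked example of the penalty arithmetic: with TQ_MAX_VALUE = 255 and a hop penalty of, say, 10, a perfect incoming tq of 255 is forwarded as 255 * (255 - 10) / 255 = 245, and tq = 200 becomes 200 * 245 / 255 = 192 (integer division).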
90
91/* is there another aggregated packet here? */
92static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
93 int tt_num_changes)
94{
95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
96
97 return (next_buff_pos <= packet_len) &&
98 (next_buff_pos <= MAX_AGGREGATION_BYTES);
99}
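
bat_ogm_aggr_packet() is the old aggregated_packet() from the removed aggregation.h, with the open-coded tt_num_changes * sizeof(struct tt_change) replaced by tt_len(). Assuming tt_len() is the existing translation-table helper, the two computations are identical:

	/* presumed helper from translation-table.c, not part of this file */
	int tt_len(int changes_num)
	{
		return changes_num * sizeof(struct tt_change);
	}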
100
101/* send a batman ogm to a given interface */
102static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
103 struct hard_iface *hard_iface)
104{
105 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
106 char *fwd_str;
107 uint8_t packet_num;
108 int16_t buff_pos;
109 struct batman_ogm_packet *batman_ogm_packet;
110 struct sk_buff *skb;
111
112 if (hard_iface->if_status != IF_ACTIVE)
113 return;
114
115 packet_num = 0;
116 buff_pos = 0;
117 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
118
119 /* adjust all flags and log packets */
120 while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
121 batman_ogm_packet->tt_num_changes)) {
122
123 /* we might have aggregated direct link packets with an
124 * ordinary base packet */
125 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
126 (forw_packet->if_incoming == hard_iface))
127 batman_ogm_packet->flags |= DIRECTLINK;
128 else
129 batman_ogm_packet->flags &= ~DIRECTLINK;
130
131 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
132 "Sending own" :
133 "Forwarding"));
134 bat_dbg(DBG_BATMAN, bat_priv,
135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
136 " IDF %s, ttvn %d) on interface %s [%pM]\n",
137 fwd_str, (packet_num > 0 ? "aggregated " : ""),
138 batman_ogm_packet->orig,
139 ntohl(batman_ogm_packet->seqno),
140 batman_ogm_packet->tq, batman_ogm_packet->ttl,
141 (batman_ogm_packet->flags & DIRECTLINK ?
142 "on" : "off"),
143 batman_ogm_packet->ttvn, hard_iface->net_dev->name,
144 hard_iface->net_dev->dev_addr);
145
146 buff_pos += BATMAN_OGM_LEN +
147 tt_len(batman_ogm_packet->tt_num_changes);
148 packet_num++;
149 batman_ogm_packet = (struct batman_ogm_packet *)
150 (forw_packet->skb->data + buff_pos);
151 }
152
153 /* create clone because function is called more than once */
154 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
155 if (skb)
156 send_skb_packet(skb, hard_iface, broadcast_addr);
157}
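
direct_link_flags works as a per-aggregate bitmap here: bit N records whether the N-th OGM packed into this forw_packet had DIRECTLINK set when it was queued (bit 0 is set in bat_ogm_aggregate_new(), bit num_packets in bat_ogm_aggregate() further down), which is what lets the loop above restore the flag packet by packet. A small illustration (hypothetical values, not from this file):

	/* three aggregated OGMs, only the second was direct link: 0b010 */
	bool second_was_direct = forw_packet->direct_link_flags & (1 << 1);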
158
159/* send a batman ogm packet */
160void bat_ogm_emit(struct forw_packet *forw_packet)
161{
162 struct hard_iface *hard_iface;
163 struct net_device *soft_iface;
164 struct bat_priv *bat_priv;
165 struct hard_iface *primary_if = NULL;
166 struct batman_ogm_packet *batman_ogm_packet;
167 unsigned char directlink;
168
169 batman_ogm_packet = (struct batman_ogm_packet *)
170 (forw_packet->skb->data);
171 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
172
173 if (!forw_packet->if_incoming) {
174 pr_err("Error - can't forward packet: incoming iface not "
175 "specified\n");
176 goto out;
177 }
178
179 soft_iface = forw_packet->if_incoming->soft_iface;
180 bat_priv = netdev_priv(soft_iface);
181
182 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
183 goto out;
184
185 primary_if = primary_if_get_selected(bat_priv);
186 if (!primary_if)
187 goto out;
188
189 /* multihomed peer assumed */
190 /* non-primary OGMs are only broadcasted on their interface */
191 if ((directlink && (batman_ogm_packet->ttl == 1)) ||
192 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
193
194 /* FIXME: what about aggregated packets ? */
195 bat_dbg(DBG_BATMAN, bat_priv,
196 "%s packet (originator %pM, seqno %d, TTL %d) "
197 "on interface %s [%pM]\n",
198 (forw_packet->own ? "Sending own" : "Forwarding"),
199 batman_ogm_packet->orig,
200 ntohl(batman_ogm_packet->seqno),
201 batman_ogm_packet->ttl,
202 forw_packet->if_incoming->net_dev->name,
203 forw_packet->if_incoming->net_dev->dev_addr);
204
205 /* skb is only used once and then forw_packet is freed */
206 send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
207 broadcast_addr);
208 forw_packet->skb = NULL;
209
210 goto out;
211 }
212
213 /* broadcast on every interface */
214 rcu_read_lock();
215 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
216 if (hard_iface->soft_iface != soft_iface)
217 continue;
218
219 bat_ogm_send_to_if(forw_packet, hard_iface);
220 }
221 rcu_read_unlock();
222
223out:
224 if (primary_if)
225 hardif_free_ref(primary_if);
226}
227
228/* return true if new_packet can be aggregated with forw_packet */
229static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
230 *new_batman_ogm_packet,
231 struct bat_priv *bat_priv,
232 int packet_len, unsigned long send_time,
233 bool directlink,
234 const struct hard_iface *if_incoming,
235 const struct forw_packet *forw_packet)
236{
237 struct batman_ogm_packet *batman_ogm_packet;
238 int aggregated_bytes = forw_packet->packet_len + packet_len;
239 struct hard_iface *primary_if = NULL;
240 bool res = false;
241
242 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
243
244 /**
245 * we can aggregate the current packet to this aggregated packet
246 * if:
247 *
248 * - the send time is within our MAX_AGGREGATION_MS time
249 * - the resulting packet won't be bigger than
250 * MAX_AGGREGATION_BYTES
251 */
252
253 if (time_before(send_time, forw_packet->send_time) &&
254 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
255 forw_packet->send_time) &&
256 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
257
258 /**
259 * check aggregation compatibility
260 * -> direct link packets are broadcasted on
261 * their interface only
262 * -> aggregate packet if the current packet is
263 * a "global" packet as well as the base
264 * packet
265 */
266
267 primary_if = primary_if_get_selected(bat_priv);
268 if (!primary_if)
269 goto out;
270
271 /* packets without direct link flag and high TTL
272 * are flooded through the net */
273 if ((!directlink) &&
274 (!(batman_ogm_packet->flags & DIRECTLINK)) &&
275 (batman_ogm_packet->ttl != 1) &&
276
277 /* own packets originating non-primary
278 * interfaces leave only that interface */
279 ((!forw_packet->own) ||
280 (forw_packet->if_incoming == primary_if))) {
281 res = true;
282 goto out;
283 }
284
285 /* if the incoming packet is sent via this one
286 * interface only - we still can aggregate */
287 if ((directlink) &&
288 (new_batman_ogm_packet->ttl == 1) &&
289 (forw_packet->if_incoming == if_incoming) &&
290
291 /* packets from direct neighbors or
292 * own secondary interface packets
293 * (= secondary interface packets in general) */
294 (batman_ogm_packet->flags & DIRECTLINK ||
295 (forw_packet->own &&
296 forw_packet->if_incoming != primary_if))) {
297 res = true;
298 goto out;
299 }
300 }
301
302out:
303 if (primary_if)
304 hardif_free_ref(primary_if);
305 return res;
306}
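
Condensed, with shortened names and no logic beyond the checks above: inside the time window and under the size cap, aggregation is allowed iff

	/* both packets are "global", flooded on all interfaces */
	   (!directlink && !(agg->flags & DIRECTLINK) && agg->ttl != 1 &&
	    (!fwd->own || fwd->if_incoming == primary_if))
	/* ... or both are single-interface packets */
	|| (directlink && new->ttl == 1 && fwd->if_incoming == if_incoming &&
	    ((agg->flags & DIRECTLINK) ||
	     (fwd->own && fwd->if_incoming != primary_if)))

where agg is the OGM already heading the aggregate and new is the candidate.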
307
308/* create a new aggregated packet and add this packet to it */
309static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
310 int packet_len, unsigned long send_time,
311 bool direct_link,
312 struct hard_iface *if_incoming,
313 int own_packet)
314{
315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct forw_packet *forw_packet_aggr;
317 unsigned char *skb_buff;
318
319 if (!atomic_inc_not_zero(&if_incoming->refcount))
320 return;
321
322 /* own packet should always be scheduled */
323 if (!own_packet) {
324 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
325 bat_dbg(DBG_BATMAN, bat_priv,
326 "batman packet queue full\n");
327 goto out;
328 }
329 }
330
331 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
332 if (!forw_packet_aggr) {
333 if (!own_packet)
334 atomic_inc(&bat_priv->batman_queue_left);
335 goto out;
336 }
337
338 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
339 (packet_len < MAX_AGGREGATION_BYTES))
340 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
341 sizeof(struct ethhdr));
342 else
343 forw_packet_aggr->skb = dev_alloc_skb(packet_len +
344 sizeof(struct ethhdr));
345
346 if (!forw_packet_aggr->skb) {
347 if (!own_packet)
348 atomic_inc(&bat_priv->batman_queue_left);
349 kfree(forw_packet_aggr);
350 goto out;
351 }
352 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
353
354 INIT_HLIST_NODE(&forw_packet_aggr->list);
355
356 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
357 forw_packet_aggr->packet_len = packet_len;
358 memcpy(skb_buff, packet_buff, packet_len);
359
360 forw_packet_aggr->own = own_packet;
361 forw_packet_aggr->if_incoming = if_incoming;
362 forw_packet_aggr->num_packets = 0;
363 forw_packet_aggr->direct_link_flags = NO_FLAGS;
364 forw_packet_aggr->send_time = send_time;
365
366 /* save packet direct link flag status */
367 if (direct_link)
368 forw_packet_aggr->direct_link_flags |= 1;
369
370 /* add new packet to packet list */
371 spin_lock_bh(&bat_priv->forw_bat_list_lock);
372 hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
373 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
374
375 /* start timer for this packet */
376 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
377 send_outstanding_bat_ogm_packet);
378 queue_delayed_work(bat_event_workqueue,
379 &forw_packet_aggr->delayed_work,
380 send_time - jiffies);
381
382 return;
383out:
384 hardif_free_ref(if_incoming);
385}
386
387/* aggregate a new packet into the existing ogm packet */
388static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
389 const unsigned char *packet_buff,
390 int packet_len, bool direct_link)
391{
392 unsigned char *skb_buff;
393
394 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
395 memcpy(skb_buff, packet_buff, packet_len);
396 forw_packet_aggr->packet_len += packet_len;
397 forw_packet_aggr->num_packets++;
398
399 /* save packet direct link flag status */
400 if (direct_link)
401 forw_packet_aggr->direct_link_flags |=
402 (1 << forw_packet_aggr->num_packets);
403}
404
405static void bat_ogm_queue_add(struct bat_priv *bat_priv,
406 unsigned char *packet_buff,
407 int packet_len, struct hard_iface *if_incoming,
408 int own_packet, unsigned long send_time)
409{
410 /**
411 * _aggr -> pointer to the packet we want to aggregate with
412 * _pos -> pointer to the position in the queue
413 */
414 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
415 struct hlist_node *tmp_node;
416 struct batman_ogm_packet *batman_ogm_packet;
417 bool direct_link;
418
419 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
420 direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
421
422 /* find position for the packet in the forward queue */
423 spin_lock_bh(&bat_priv->forw_bat_list_lock);
424 /* own packets are not to be aggregated */
425 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
426 hlist_for_each_entry(forw_packet_pos, tmp_node,
427 &bat_priv->forw_bat_list, list) {
428 if (bat_ogm_can_aggregate(batman_ogm_packet,
429 bat_priv, packet_len,
430 send_time, direct_link,
431 if_incoming,
432 forw_packet_pos)) {
433 forw_packet_aggr = forw_packet_pos;
434 break;
435 }
436 }
437 }
438
439 /* nothing to aggregate with - either aggregation disabled or no
440 * suitable aggregation packet found */
441 if (!forw_packet_aggr) {
442 /* the following section can run without the lock */
443 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
444
445 /**
446 * if we could not aggregate this packet with one of the others
447 * we hold it back for a while, so that it might be aggregated
448 * later on
449 */
450 if ((!own_packet) &&
451 (atomic_read(&bat_priv->aggregated_ogms)))
452 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
453
454 bat_ogm_aggregate_new(packet_buff, packet_len,
455 send_time, direct_link,
456 if_incoming, own_packet);
457 } else {
458 bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
459 direct_link);
460 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
461 }
462}
463
464static void bat_ogm_forward(struct orig_node *orig_node,
465 const struct ethhdr *ethhdr,
466 struct batman_ogm_packet *batman_ogm_packet,
467 int directlink, struct hard_iface *if_incoming)
468{
469 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
470 struct neigh_node *router;
471 uint8_t in_tq, in_ttl, tq_avg = 0;
472 uint8_t tt_num_changes;
473
474 if (batman_ogm_packet->ttl <= 1) {
475 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
476 return;
477 }
478
479 router = orig_node_get_router(orig_node);
480
481 in_tq = batman_ogm_packet->tq;
482 in_ttl = batman_ogm_packet->ttl;
483 tt_num_changes = batman_ogm_packet->tt_num_changes;
484
485 batman_ogm_packet->ttl--;
486 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
487
488 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
489 * of our best tq value */
490 if (router && router->tq_avg != 0) {
491
492 /* rebroadcast ogm of best ranking neighbor as is */
493 if (!compare_eth(router->addr, ethhdr->h_source)) {
494 batman_ogm_packet->tq = router->tq_avg;
495
496 if (router->last_ttl)
497 batman_ogm_packet->ttl = router->last_ttl - 1;
498 }
499
500 tq_avg = router->tq_avg;
501 }
502
503 if (router)
504 neigh_node_free_ref(router);
505
506 /* apply hop penalty */
507 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
508
509 bat_dbg(DBG_BATMAN, bat_priv,
510 "Forwarding packet: tq_orig: %i, tq_avg: %i, "
511 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
512 in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
513 batman_ogm_packet->ttl);
514
515 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
516 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
517
518 /* switch off the primaries first hop flag when forwarding */
519 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
520 if (directlink)
521 batman_ogm_packet->flags |= DIRECTLINK;
522 else
523 batman_ogm_packet->flags &= ~DIRECTLINK;
524
525 bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
526 BATMAN_OGM_LEN + tt_len(tt_num_changes),
527 if_incoming, 0, bat_ogm_fwd_send_time());
528}
529
530void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
531{
532 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
533 struct batman_ogm_packet *batman_ogm_packet;
534 struct hard_iface *primary_if;
535 int vis_server;
536
537 vis_server = atomic_read(&bat_priv->vis_mode);
538 primary_if = primary_if_get_selected(bat_priv);
539
540 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
541
542 /* change sequence number to network order */
543 batman_ogm_packet->seqno =
544 htonl((uint32_t)atomic_read(&hard_iface->seqno));
545
546 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
547 batman_ogm_packet->tt_crc = htons((uint16_t)
548 atomic_read(&bat_priv->tt_crc));
549 if (tt_num_changes >= 0)
550 batman_ogm_packet->tt_num_changes = tt_num_changes;
551
552 if (vis_server == VIS_TYPE_SERVER_SYNC)
553 batman_ogm_packet->flags |= VIS_SERVER;
554 else
555 batman_ogm_packet->flags &= ~VIS_SERVER;
556
557 if ((hard_iface == primary_if) &&
558 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
559 batman_ogm_packet->gw_flags =
560 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
561 else
562 batman_ogm_packet->gw_flags = NO_FLAGS;
563
564 atomic_inc(&hard_iface->seqno);
565
566 slide_own_bcast_window(hard_iface);
567 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
568 hard_iface->packet_len, hard_iface, 1,
569 bat_ogm_emit_send_time(bat_priv));
570
571 if (primary_if)
572 hardif_free_ref(primary_if);
573}
574
575static void bat_ogm_orig_update(struct bat_priv *bat_priv,
576 struct orig_node *orig_node,
577 const struct ethhdr *ethhdr,
578 const struct batman_ogm_packet
579 *batman_ogm_packet,
580 struct hard_iface *if_incoming,
581 const unsigned char *tt_buff, int is_duplicate)
582{
583 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
584 struct neigh_node *router = NULL;
585 struct orig_node *orig_node_tmp;
586 struct hlist_node *node;
587 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
588
589 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
590 "Searching and updating originator entry of received packet\n");
591
592 rcu_read_lock();
593 hlist_for_each_entry_rcu(tmp_neigh_node, node,
594 &orig_node->neigh_list, list) {
595 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
596 (tmp_neigh_node->if_incoming == if_incoming) &&
597 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
598 if (neigh_node)
599 neigh_node_free_ref(neigh_node);
600 neigh_node = tmp_neigh_node;
601 continue;
602 }
603
604 if (is_duplicate)
605 continue;
606
607 spin_lock_bh(&tmp_neigh_node->tq_lock);
608 ring_buffer_set(tmp_neigh_node->tq_recv,
609 &tmp_neigh_node->tq_index, 0);
610 tmp_neigh_node->tq_avg =
611 ring_buffer_avg(tmp_neigh_node->tq_recv);
612 spin_unlock_bh(&tmp_neigh_node->tq_lock);
613 }
614
615 if (!neigh_node) {
616 struct orig_node *orig_tmp;
617
618 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
619 if (!orig_tmp)
620 goto unlock;
621
622 neigh_node = create_neighbor(orig_node, orig_tmp,
623 ethhdr->h_source, if_incoming);
624
625 orig_node_free_ref(orig_tmp);
626 if (!neigh_node)
627 goto unlock;
628 } else
629 bat_dbg(DBG_BATMAN, bat_priv,
630 "Updating existing last-hop neighbor of originator\n");
631
632 rcu_read_unlock();
633
634 orig_node->flags = batman_ogm_packet->flags;
635 neigh_node->last_valid = jiffies;
636
637 spin_lock_bh(&neigh_node->tq_lock);
638 ring_buffer_set(neigh_node->tq_recv,
639 &neigh_node->tq_index,
640 batman_ogm_packet->tq);
641 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
642 spin_unlock_bh(&neigh_node->tq_lock);
643
644 if (!is_duplicate) {
645 orig_node->last_ttl = batman_ogm_packet->ttl;
646 neigh_node->last_ttl = batman_ogm_packet->ttl;
647 }
648
649 bonding_candidate_add(orig_node, neigh_node);
650
651 /* if this neighbor already is our next hop there is nothing
652 * to change */
653 router = orig_node_get_router(orig_node);
654 if (router == neigh_node)
655 goto update_tt;
656
657 /* if this neighbor does not offer a better TQ we won't consider it */
658 if (router && (router->tq_avg > neigh_node->tq_avg))
659 goto update_tt;
660
661 /* if the TQ is the same and the link not more symmetric we
662 * won't consider it either */
663 if (router && (neigh_node->tq_avg == router->tq_avg)) {
664 orig_node_tmp = router->orig_node;
665 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
666 bcast_own_sum_orig =
667 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
668 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
669
670 orig_node_tmp = neigh_node->orig_node;
671 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
672 bcast_own_sum_neigh =
673 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
674 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
675
676 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
677 goto update_tt;
678 }
679
680 update_route(bat_priv, orig_node, neigh_node);
681
682update_tt:
683 /* I have to check for transtable changes only if the OGM has been
684 * sent through a primary interface */
685 if (((batman_ogm_packet->orig != ethhdr->h_source) &&
686 (batman_ogm_packet->ttl > 2)) ||
687 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
688 tt_update_orig(bat_priv, orig_node, tt_buff,
689 batman_ogm_packet->tt_num_changes,
690 batman_ogm_packet->ttvn,
691 batman_ogm_packet->tt_crc);
692
693 if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
694 gw_node_update(bat_priv, orig_node,
695 batman_ogm_packet->gw_flags);
696
697 orig_node->gw_flags = batman_ogm_packet->gw_flags;
698
699 /* restart gateway selection if fast or late switching was enabled */
700 if ((orig_node->gw_flags) &&
701 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
702 (atomic_read(&bat_priv->gw_sel_class) > 2))
703 gw_check_election(bat_priv, orig_node);
704
705 goto out;
706
707unlock:
708 rcu_read_unlock();
709out:
710 if (neigh_node)
711 neigh_node_free_ref(neigh_node);
712 if (router)
713 neigh_node_free_ref(router);
714}
715
716static int bat_ogm_calc_tq(struct orig_node *orig_node,
717 struct orig_node *orig_neigh_node,
718 struct batman_ogm_packet *batman_ogm_packet,
719 struct hard_iface *if_incoming)
720{
721 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
722 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
723 struct hlist_node *node;
724 uint8_t total_count;
725 uint8_t orig_eq_count, neigh_rq_count, tq_own;
726 int tq_asym_penalty, ret = 0;
727
728 /* find corresponding one hop neighbor */
729 rcu_read_lock();
730 hlist_for_each_entry_rcu(tmp_neigh_node, node,
731 &orig_neigh_node->neigh_list, list) {
732
733 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
734 continue;
735
736 if (tmp_neigh_node->if_incoming != if_incoming)
737 continue;
738
739 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
740 continue;
741
742 neigh_node = tmp_neigh_node;
743 break;
744 }
745 rcu_read_unlock();
746
747 if (!neigh_node)
748 neigh_node = create_neighbor(orig_neigh_node,
749 orig_neigh_node,
750 orig_neigh_node->orig,
751 if_incoming);
752
753 if (!neigh_node)
754 goto out;
755
756 /* if orig_node is direct neighbor update neigh_node last_valid */
757 if (orig_node == orig_neigh_node)
758 neigh_node->last_valid = jiffies;
759
760 orig_node->last_valid = jiffies;
761
762 /* find packet count of corresponding one hop neighbor */
763 spin_lock_bh(&orig_node->ogm_cnt_lock);
764 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
765 neigh_rq_count = neigh_node->real_packet_count;
766 spin_unlock_bh(&orig_node->ogm_cnt_lock);
767
768 	/* take care not to get a value bigger than 100 % */
769 total_count = (orig_eq_count > neigh_rq_count ?
770 neigh_rq_count : orig_eq_count);
771
772 	/* if we have too few packets (too little data) we set tq_own to zero */
773 /* if we receive too few packets it is not considered bidirectional */
774 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
775 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
776 tq_own = 0;
777 else
778 /* neigh_node->real_packet_count is never zero as we
779 * only purge old information when getting new
780 * information */
781 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
782
783 /*
784 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
785 * affect the nearly-symmetric links only a little, but
786 * punishes asymmetric links more. This will give a value
787 * between 0 and TQ_MAX_VALUE
788 */
789 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
790 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
791 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
792 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
793 (TQ_LOCAL_WINDOW_SIZE *
794 TQ_LOCAL_WINDOW_SIZE *
795 TQ_LOCAL_WINDOW_SIZE);
796
797 batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
798 * tq_asym_penalty) /
799 (TQ_MAX_VALUE * TQ_MAX_VALUE));
800
801 bat_dbg(DBG_BATMAN, bat_priv,
802 "bidirectional: "
803 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
804 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
805 "total tq: %3i\n",
806 orig_node->orig, orig_neigh_node->orig, total_count,
807 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
808
809 /* if link has the minimum required transmission quality
810 * consider it bidirectional */
811 if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
812 ret = 1;
813
814out:
815 if (neigh_node)
816 neigh_node_free_ref(neigh_node);
817 return ret;
818}
819
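For intuition, the cubic penalty computed above is easiest to see with concrete numbers. The following userspace sketch is an illustration only, not code from this patch; it hard-codes the driver's defaults (TQ_MAX_VALUE = 255, TQ_LOCAL_WINDOW_SIZE = 64) locally:

#include <stdio.h>

#define TQ_MAX_VALUE		255
#define TQ_LOCAL_WINDOW_SIZE	64

/* 1 - ((1 - x)^3), scaled to TQ_MAX_VALUE, where
 * x = neigh_rq_count / TQ_LOCAL_WINDOW_SIZE */
static int tq_asym_penalty(int neigh_rq_count)
{
	int miss = TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;

	return TQ_MAX_VALUE - (TQ_MAX_VALUE * miss * miss * miss) /
	       (TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
		TQ_LOCAL_WINDOW_SIZE);
}

int main(void)
{
	/* nearly symmetric link: 60 of 64 OGMs seen -> penalty 255 (none) */
	printf("rq=60: %d\n", tq_asym_penalty(60));
	/* asymmetric link: 16 of 64 OGMs seen -> penalty 148 (~58% of max) */
	printf("rq=16: %d\n", tq_asym_penalty(16));
	return 0;
}

The final OGM TQ is then the product of the received tq, the local tq_own and this penalty, divided by TQ_MAX_VALUE squared, exactly as bat_ogm_calc_tq() computes above.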
820/* processes a batman packet for all interfaces, adjusts the sequence number and
821 * finds out whether it is a duplicate.
822 * returns:
823 * 1 the packet is a duplicate
824 * 0 the packet has not yet been received
825 * -1 the packet is old and has been received while the seqno window
826 * was protected. Caller should drop it.
827 */
828static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
829 const struct batman_ogm_packet
830 *batman_ogm_packet,
831 const struct hard_iface *if_incoming)
832{
833 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
834 struct orig_node *orig_node;
835 struct neigh_node *tmp_neigh_node;
836 struct hlist_node *node;
837 int is_duplicate = 0;
838 int32_t seq_diff;
839 int need_update = 0;
840 int set_mark, ret = -1;
841
842 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
843 if (!orig_node)
844 return 0;
845
846 spin_lock_bh(&orig_node->ogm_cnt_lock);
847 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
848
849 	/* signal the caller that the packet is to be dropped. */
850 if (window_protected(bat_priv, seq_diff,
851 &orig_node->batman_seqno_reset))
852 goto out;
853
854 rcu_read_lock();
855 hlist_for_each_entry_rcu(tmp_neigh_node, node,
856 &orig_node->neigh_list, list) {
857
858 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
859 orig_node->last_real_seqno,
860 batman_ogm_packet->seqno);
861
862 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
863 (tmp_neigh_node->if_incoming == if_incoming))
864 set_mark = 1;
865 else
866 set_mark = 0;
867
868 /* if the window moved, set the update flag. */
869 need_update |= bit_get_packet(bat_priv,
870 tmp_neigh_node->real_bits,
871 seq_diff, set_mark);
872
873 tmp_neigh_node->real_packet_count =
874 bit_packet_count(tmp_neigh_node->real_bits);
875 }
876 rcu_read_unlock();
877
878 if (need_update) {
879 bat_dbg(DBG_BATMAN, bat_priv,
880 "updating last_seqno: old %d, new %d\n",
881 orig_node->last_real_seqno, batman_ogm_packet->seqno);
882 orig_node->last_real_seqno = batman_ogm_packet->seqno;
883 }
884
885 ret = is_duplicate;
886
887out:
888 spin_unlock_bh(&orig_node->ogm_cnt_lock);
889 orig_node_free_ref(orig_node);
890 return ret;
891}
892
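The 1/0/-1 contract documented above bat_ogm_update_seqnos() is easiest to follow on a single-word model. This sketch is a toy under simplifying assumptions (one 64-bit word instead of NUM_WORDS per neighbor, the window reset folded into the range check); it is not the kernel's bitarray implementation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE		64
#define EXPECTED_SEQNO_RANGE	(1 << 16)

struct seqno_window {
	uint64_t bits;		/* bit i set: last_seqno - i was seen */
	int32_t last_seqno;
};

/* returns 1 for a duplicate, 0 for a new packet, -1 when the
 * seqno falls outside the protected window and must be dropped */
static int window_update(struct seqno_window *w, int32_t seqno)
{
	int32_t diff = seqno - w->last_seqno;

	if (diff <= -WINDOW_SIZE || diff >= EXPECTED_SEQNO_RANGE)
		return -1;

	if (diff <= 0) {		/* inside the current window */
		bool dup = (w->bits >> -diff) & 1;

		w->bits |= 1ULL << -diff;
		return dup;
	}

	/* window slides forward; old marks age out of the word */
	w->bits = diff < WINDOW_SIZE ? w->bits << diff : 0;
	w->bits |= 1ULL;
	w->last_seqno = seqno;
	return 0;
}

int main(void)
{
	struct seqno_window w = { 0, 0 };

	printf("%d\n", window_update(&w, 5));	/* 0: new */
	printf("%d\n", window_update(&w, 5));	/* 1: duplicate */
	printf("%d\n", window_update(&w, -100));/* -1: protected, drop */
	return 0;
}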
893static void bat_ogm_process(const struct ethhdr *ethhdr,
894 struct batman_ogm_packet *batman_ogm_packet,
895 const unsigned char *tt_buff,
896 struct hard_iface *if_incoming)
897{
898 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
899 struct hard_iface *hard_iface;
900 struct orig_node *orig_neigh_node, *orig_node;
901 struct neigh_node *router = NULL, *router_router = NULL;
902 struct neigh_node *orig_neigh_router = NULL;
903 int has_directlink_flag;
904 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
905 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
906 int is_duplicate;
907 uint32_t if_incoming_seqno;
908
909 /* Silently drop when the batman packet is actually not a
910 * correct packet.
911 *
912 * This might happen if a packet is padded (e.g. Ethernet has a
913 	 * minimum frame length of 64 bytes) and the aggregation interprets
914 * it as an additional length.
915 *
916 	 * TODO: A saner solution would be to have a bit in the
917 * batman_ogm_packet to detect whether the packet is the last
918 * packet in an aggregation. Here we expect that the padding
919 * is always zero (or not 0x01)
920 */
921 if (batman_ogm_packet->packet_type != BAT_OGM)
922 return;
923
924 /* could be changed by schedule_own_packet() */
925 if_incoming_seqno = atomic_read(&if_incoming->seqno);
926
927 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
928
929 is_single_hop_neigh = (compare_eth(ethhdr->h_source,
930 batman_ogm_packet->orig) ? 1 : 0);
931
932 bat_dbg(DBG_BATMAN, bat_priv,
933 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
934 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
935 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
936 ethhdr->h_source, if_incoming->net_dev->name,
937 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
938 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
939 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
940 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
941 batman_ogm_packet->ttl, batman_ogm_packet->version,
942 has_directlink_flag);
943
944 rcu_read_lock();
945 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
946 if (hard_iface->if_status != IF_ACTIVE)
947 continue;
948
949 if (hard_iface->soft_iface != if_incoming->soft_iface)
950 continue;
951
952 if (compare_eth(ethhdr->h_source,
953 hard_iface->net_dev->dev_addr))
954 is_my_addr = 1;
955
956 if (compare_eth(batman_ogm_packet->orig,
957 hard_iface->net_dev->dev_addr))
958 is_my_orig = 1;
959
960 if (compare_eth(batman_ogm_packet->prev_sender,
961 hard_iface->net_dev->dev_addr))
962 is_my_oldorig = 1;
963
964 if (is_broadcast_ether_addr(ethhdr->h_source))
965 is_broadcast = 1;
966 }
967 rcu_read_unlock();
968
969 if (batman_ogm_packet->version != COMPAT_VERSION) {
970 bat_dbg(DBG_BATMAN, bat_priv,
971 "Drop packet: incompatible batman version (%i)\n",
972 batman_ogm_packet->version);
973 return;
974 }
975
976 if (is_my_addr) {
977 bat_dbg(DBG_BATMAN, bat_priv,
978 "Drop packet: received my own broadcast (sender: %pM"
979 ")\n",
980 ethhdr->h_source);
981 return;
982 }
983
984 if (is_broadcast) {
985 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
986 "ignoring all packets with broadcast source addr (sender: %pM"
987 ")\n", ethhdr->h_source);
988 return;
989 }
990
991 if (is_my_orig) {
992 unsigned long *word;
993 int offset;
994
995 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
996 if (!orig_neigh_node)
997 return;
998
999 /* neighbor has to indicate direct link and it has to
1000 * come via the corresponding interface */
1001 /* save packet seqno for bidirectional check */
1002 if (has_directlink_flag &&
1003 compare_eth(if_incoming->net_dev->dev_addr,
1004 batman_ogm_packet->orig)) {
1005 offset = if_incoming->if_num * NUM_WORDS;
1006
1007 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1008 word = &(orig_neigh_node->bcast_own[offset]);
1009 bit_mark(word,
1010 if_incoming_seqno -
1011 batman_ogm_packet->seqno - 2);
1012 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
1013 bit_packet_count(word);
1014 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1015 }
1016
1017 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
1018 "originator packet from myself (via neighbor)\n");
1019 orig_node_free_ref(orig_neigh_node);
1020 return;
1021 }
1022
1023 if (is_my_oldorig) {
1024 bat_dbg(DBG_BATMAN, bat_priv,
1025 "Drop packet: ignoring all rebroadcast echos (sender: "
1026 "%pM)\n", ethhdr->h_source);
1027 return;
1028 }
1029
1030 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
1031 if (!orig_node)
1032 return;
1033
1034 is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
1035 if_incoming);
1036
1037 if (is_duplicate == -1) {
1038 bat_dbg(DBG_BATMAN, bat_priv,
1039 "Drop packet: packet within seqno protection time "
1040 "(sender: %pM)\n", ethhdr->h_source);
1041 goto out;
1042 }
1043
1044 if (batman_ogm_packet->tq == 0) {
1045 bat_dbg(DBG_BATMAN, bat_priv,
1046 "Drop packet: originator packet with tq equal 0\n");
1047 goto out;
1048 }
1049
1050 router = orig_node_get_router(orig_node);
1051 if (router)
1052 router_router = orig_node_get_router(router->orig_node);
1053
1054 /* avoid temporary routing loops */
1055 if (router && router_router &&
1056 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
1057 !(compare_eth(batman_ogm_packet->orig,
1058 batman_ogm_packet->prev_sender)) &&
1059 (compare_eth(router->addr, router_router->addr))) {
1060 bat_dbg(DBG_BATMAN, bat_priv,
1061 "Drop packet: ignoring all rebroadcast packets that "
1062 "may make me loop (sender: %pM)\n", ethhdr->h_source);
1063 goto out;
1064 }
1065
1066 /* if sender is a direct neighbor the sender mac equals
1067 * originator mac */
1068 orig_neigh_node = (is_single_hop_neigh ?
1069 orig_node :
1070 get_orig_node(bat_priv, ethhdr->h_source));
1071 if (!orig_neigh_node)
1072 goto out;
1073
1074 orig_neigh_router = orig_node_get_router(orig_neigh_node);
1075
1076 /* drop packet if sender is not a direct neighbor and if we
1077 * don't route towards it */
1078 if (!is_single_hop_neigh && (!orig_neigh_router)) {
1079 bat_dbg(DBG_BATMAN, bat_priv,
1080 "Drop packet: OGM via unknown neighbor!\n");
1081 goto out_neigh;
1082 }
1083
1084 is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
1085 batman_ogm_packet, if_incoming);
1086
1087 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
1088
1089 /* update ranking if it is not a duplicate or has the same
1090 * seqno and similar ttl as the non-duplicate */
1091 if (is_bidirectional &&
1092 (!is_duplicate ||
1093 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
1094 (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
1095 bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
1096 batman_ogm_packet, if_incoming,
1097 tt_buff, is_duplicate);
1098
1099 /* is single hop (direct) neighbor */
1100 if (is_single_hop_neigh) {
1101
1102 /* mark direct link on incoming interface */
1103 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1104 1, if_incoming);
1105
1106 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
1107 "rebroadcast neighbor packet with direct link flag\n");
1108 goto out_neigh;
1109 }
1110
1111 /* multihop originator */
1112 if (!is_bidirectional) {
1113 bat_dbg(DBG_BATMAN, bat_priv,
1114 "Drop packet: not received via bidirectional link\n");
1115 goto out_neigh;
1116 }
1117
1118 if (is_duplicate) {
1119 bat_dbg(DBG_BATMAN, bat_priv,
1120 "Drop packet: duplicate packet received\n");
1121 goto out_neigh;
1122 }
1123
1124 bat_dbg(DBG_BATMAN, bat_priv,
1125 "Forwarding packet: rebroadcast originator packet\n");
1126 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
1127
1128out_neigh:
1129 if ((orig_neigh_node) && (!is_single_hop_neigh))
1130 orig_node_free_ref(orig_neigh_node);
1131out:
1132 if (router)
1133 neigh_node_free_ref(router);
1134 if (router_router)
1135 neigh_node_free_ref(router_router);
1136 if (orig_neigh_router)
1137 neigh_node_free_ref(orig_neigh_router);
1138
1139 orig_node_free_ref(orig_node);
1140}
1141
1142void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
1143 int packet_len, struct hard_iface *if_incoming)
1144{
1145 struct batman_ogm_packet *batman_ogm_packet;
1146 int buff_pos = 0;
1147 unsigned char *tt_buff;
1148
1149 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
1150
1151 /* unpack the aggregated packets and process them one by one */
1152 do {
1153 /* network to host order for our 32bit seqno and the
1154 orig_interval */
1155 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1156 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1157
1158 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
1159
1160 bat_ogm_process(ethhdr, batman_ogm_packet,
1161 tt_buff, if_incoming);
1162
1163 buff_pos += BATMAN_OGM_LEN +
1164 tt_len(batman_ogm_packet->tt_num_changes);
1165
1166 batman_ogm_packet = (struct batman_ogm_packet *)
1167 (packet_buff + buff_pos);
1168 } while (bat_ogm_aggr_packet(buff_pos, packet_len,
1169 batman_ogm_packet->tt_num_changes));
1170}
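Note how the loop converts seqno and tt_crc from network to host byte order in place before each bat_ogm_process() call, then advances buff_pos by BATMAN_OGM_LEN plus the length of the attached tt changes, so every aggregated OGM is converted exactly once. A standalone sketch of that fixup (the struct here is a trimmed stand-in, not the real batman_ogm_packet):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* trimmed stand-in: only the multi-byte fields the loop swaps */
struct toy_ogm {
	uint32_t seqno;
	uint16_t tt_crc;
	uint8_t tt_num_changes;
} __attribute__((packed));

int main(void)
{
	struct toy_ogm ogm;

	/* pretend these arrived off the wire in network byte order */
	ogm.seqno = htonl(0x12345678);
	ogm.tt_crc = htons(0xbeef);

	/* the same in-place conversion bat_ogm_receive() performs
	 * before handing each aggregated OGM to bat_ogm_process() */
	ogm.seqno = ntohl(ogm.seqno);
	ogm.tt_crc = ntohs(ogm.tt_crc);

	printf("seqno=0x%x tt_crc=0x%x\n",
	       (unsigned)ogm.seqno, (unsigned)ogm.tt_crc);
	return 0;
}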
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_ogm.h
new file mode 100644
index 000000000000..69329c107e28
--- /dev/null
+++ b/net/batman-adv/bat_ogm.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_OGM_H_
23#define _NET_BATMAN_ADV_OGM_H_
24
25#include "main.h"
26
27void bat_ogm_init(struct hard_iface *hard_iface);
28void bat_ogm_init_primary(struct hard_iface *hard_iface);
29void bat_ogm_update_mac(struct hard_iface *hard_iface);
30void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
31void bat_ogm_emit(struct forw_packet *forw_packet);
32void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
33 int packet_len, struct hard_iface *if_incoming);
34
35#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd15deba60a1..b8a7414c3571 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -380,6 +380,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
380BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); 380BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
381BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); 381BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); 382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
383BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
383static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); 384static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
384static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); 385static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
385BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); 386BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
@@ -396,6 +397,7 @@ static struct bat_attribute *mesh_attrs[] = {
396 &bat_attr_aggregated_ogms, 397 &bat_attr_aggregated_ogms,
397 &bat_attr_bonding, 398 &bat_attr_bonding,
398 &bat_attr_fragmentation, 399 &bat_attr_fragmentation,
400 &bat_attr_ap_isolation,
399 &bat_attr_vis_mode, 401 &bat_attr_vis_mode,
400 &bat_attr_gw_mode, 402 &bat_attr_gw_mode,
401 &bat_attr_orig_interval, 403 &bat_attr_orig_interval,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index c1f4bfc09cc3..0be9ff346fa0 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -97,12 +97,12 @@ static void bit_shift(unsigned long *seq_bits, int32_t n)
97 (seq_bits[i - word_num - 1] >> 97 (seq_bits[i - word_num - 1] >>
98 (WORD_BIT_SIZE-word_offset)); 98 (WORD_BIT_SIZE-word_offset));
99 /* and the upper part of the right half and shift it left to 99 /* and the upper part of the right half and shift it left to
100 * it's position */ 100 * its position */
101 /* for our example that would be: word[0] = 9800 + 0076 = 101 /* for our example that would be: word[0] = 9800 + 0076 =
102 * 9876 */ 102 * 9876 */
103 } 103 }
104 /* now for our last word, i==word_num, we only have the it's "left" 104 /* now for our last word, i==word_num, we only have its "left" half.
105 * half. that's the 1000 word in our example.*/ 105 * that's the 1000 word in our example.*/
106 106
107 seq_bits[i] = (seq_bits[i - word_num] << word_offset); 107 seq_bits[i] = (seq_bits[i - word_num] << word_offset);
108 108
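The "9800 + 0076 = 9876" example in the comments above corresponds to this two-word shift pattern: each word takes its own shifted "left half" plus the overflow from the word below. A minimal demonstration with 16-bit words (word size and values invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t seq_bits[2] = { 0x7654, 0x0001 };
	int n = 4;	/* shift left by n < 16 bits */

	/* upper word: its own left half plus the lower word's overflow */
	seq_bits[1] = (seq_bits[1] << n) | (seq_bits[0] >> (16 - n));
	/* lowest word: only its own left half remains */
	seq_bits[0] <<= n;

	printf("%04x %04x\n", seq_bits[1], seq_bits[0]);
	return 0;
}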
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 056180ef9e1a..619fb73b3b76 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -532,14 +532,14 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; 532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
533 533
534 /* Access the dhcp option lists. Each entry is made up by: 534 /* Access the dhcp option lists. Each entry is made up by:
535 * - octect 1: option type 535 * - octet 1: option type
536 * - octect 2: option data len (only if type != 255 and 0) 536 * - octet 2: option data len (only if type != 255 and 0)
537 * - octect 3: option data */ 537 * - octet 3: option data */
538 while (*p != 255 && !ret) { 538 while (*p != 255 && !ret) {
539 /* p now points to the first octect: option type */ 539 /* p now points to the first octet: option type */
540 if (*p == 53) { 540 if (*p == 53) {
541 /* type 53 is the message type option. 541 /* type 53 is the message type option.
542 * Jump the len octect and go to the data octect */ 542 * Jump the len octet and go to the data octet */
543 if (pkt_len < 2) 543 if (pkt_len < 2)
544 goto out; 544 goto out;
545 p += 2; 545 p += 2;
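For reference, the option walk performed above looks like this in simplified, self-contained form (pad options and the pkt_len bounds checks the real function does are omitted; the buffer contents are hypothetical):

#include <stdio.h>

/* walk a DHCP option list: { type, len, data... } entries,
 * terminated by type 255; type 53 carries the message type */
int main(void)
{
	/* hypothetical buffer: type 53, len 1, DHCPREQUEST (3), end */
	unsigned char opts[] = { 53, 1, 3, 255 };
	unsigned char *p = opts;

	while (*p != 255) {
		if (*p == 53)
			printf("message type: %d\n", p[2]);
		p += 2 + p[1];	/* skip type, len and the data bytes */
	}
	return 0;
}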
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index db7aacf1e095..7704df468e0b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
28#include "bat_sysfs.h" 28#include "bat_sysfs.h"
29#include "originator.h" 29#include "originator.h"
30#include "hash.h" 30#include "hash.h"
31#include "bat_ogm.h"
31 32
32#include <linux/if_arp.h> 33#include <linux/if_arp.h>
33 34
@@ -131,7 +132,6 @@ static void primary_if_select(struct bat_priv *bat_priv,
131 struct hard_iface *new_hard_iface) 132 struct hard_iface *new_hard_iface)
132{ 133{
133 struct hard_iface *curr_hard_iface; 134 struct hard_iface *curr_hard_iface;
134 struct batman_packet *batman_packet;
135 135
136 ASSERT_RTNL(); 136 ASSERT_RTNL();
137 137
@@ -147,10 +147,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
147 if (!new_hard_iface) 147 if (!new_hard_iface)
148 return; 148 return;
149 149
150 batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff); 150 bat_ogm_init_primary(new_hard_iface);
151 batman_packet->flags = PRIMARIES_FIRST_HOP;
152 batman_packet->ttl = TTL;
153
154 primary_if_update_addr(bat_priv); 151 primary_if_update_addr(bat_priv);
155} 152}
156 153
@@ -162,14 +159,6 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
162 return false; 159 return false;
163} 160}
164 161
165static void update_mac_addresses(struct hard_iface *hard_iface)
166{
167 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
168 hard_iface->net_dev->dev_addr, ETH_ALEN);
169 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
170 hard_iface->net_dev->dev_addr, ETH_ALEN);
171}
172
173static void check_known_mac_addr(const struct net_device *net_dev) 162static void check_known_mac_addr(const struct net_device *net_dev)
174{ 163{
175 const struct hard_iface *hard_iface; 164 const struct hard_iface *hard_iface;
@@ -244,12 +233,12 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
244 233
245 bat_priv = netdev_priv(hard_iface->soft_iface); 234 bat_priv = netdev_priv(hard_iface->soft_iface);
246 235
247 update_mac_addresses(hard_iface); 236 bat_ogm_update_mac(hard_iface);
248 hard_iface->if_status = IF_TO_BE_ACTIVATED; 237 hard_iface->if_status = IF_TO_BE_ACTIVATED;
249 238
250 /** 239 /**
251 * the first active interface becomes our primary interface or 240 * the first active interface becomes our primary interface or
252 * the next active interface after the old primay interface was removed 241 * the next active interface after the old primary interface was removed
253 */ 242 */
254 primary_if = primary_if_get_selected(bat_priv); 243 primary_if = primary_if_get_selected(bat_priv);
255 if (!primary_if) 244 if (!primary_if)
@@ -283,7 +272,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
283 const char *iface_name) 272 const char *iface_name)
284{ 273{
285 struct bat_priv *bat_priv; 274 struct bat_priv *bat_priv;
286 struct batman_packet *batman_packet;
287 struct net_device *soft_iface; 275 struct net_device *soft_iface;
288 int ret; 276 int ret;
289 277
@@ -318,8 +306,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
318 306
319 hard_iface->soft_iface = soft_iface; 307 hard_iface->soft_iface = soft_iface;
320 bat_priv = netdev_priv(hard_iface->soft_iface); 308 bat_priv = netdev_priv(hard_iface->soft_iface);
321 hard_iface->packet_len = BAT_PACKET_LEN; 309
322 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 310 bat_ogm_init(hard_iface);
323 311
324 if (!hard_iface->packet_buff) { 312 if (!hard_iface->packet_buff) {
325 bat_err(hard_iface->soft_iface, "Can't add interface packet " 313 bat_err(hard_iface->soft_iface, "Can't add interface packet "
@@ -328,15 +316,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
328 goto err; 316 goto err;
329 } 317 }
330 318
331 batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
332 batman_packet->packet_type = BAT_PACKET;
333 batman_packet->version = COMPAT_VERSION;
334 batman_packet->flags = NO_FLAGS;
335 batman_packet->ttl = 2;
336 batman_packet->tq = TQ_MAX_VALUE;
337 batman_packet->tt_num_changes = 0;
338 batman_packet->ttvn = 0;
339
340 hard_iface->if_num = bat_priv->num_ifaces; 319 hard_iface->if_num = bat_priv->num_ifaces;
341 bat_priv->num_ifaces++; 320 bat_priv->num_ifaces++;
342 hard_iface->if_status = IF_INACTIVE; 321 hard_iface->if_status = IF_INACTIVE;
@@ -381,7 +360,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
381 hard_iface->net_dev->name); 360 hard_iface->net_dev->name);
382 361
383 /* begin scheduling originator messages on that interface */ 362 /* begin scheduling originator messages on that interface */
384 schedule_own_packet(hard_iface); 363 schedule_bat_ogm(hard_iface);
385 364
386out: 365out:
387 return 0; 366 return 0;
@@ -455,11 +434,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
455 dev_hold(net_dev); 434 dev_hold(net_dev);
456 435
457 hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC); 436 hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
458 if (!hard_iface) { 437 if (!hard_iface)
459 pr_err("Can't add interface (%s): out of memory\n",
460 net_dev->name);
461 goto release_dev; 438 goto release_dev;
462 }
463 439
464 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); 440 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
465 if (ret) 441 if (ret)
@@ -551,7 +527,7 @@ static int hard_if_event(struct notifier_block *this,
551 goto hardif_put; 527 goto hardif_put;
552 528
553 check_known_mac_addr(hard_iface->net_dev); 529 check_known_mac_addr(hard_iface->net_dev);
554 update_mac_addresses(hard_iface); 530 bat_ogm_update_mac(hard_iface);
555 531
556 bat_priv = netdev_priv(hard_iface->soft_iface); 532 bat_priv = netdev_priv(hard_iface->soft_iface);
557 primary_if = primary_if_get_selected(bat_priv); 533 primary_if = primary_if_get_selected(bat_priv);
@@ -573,14 +549,14 @@ out:
573 return NOTIFY_DONE; 549 return NOTIFY_DONE;
574} 550}
575 551
576/* receive a packet with the batman ethertype coming on a hard 552/* incoming packets with the batman ethertype received on any active hard
577 * interface */ 553 * interface */
578static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 554static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
579 struct packet_type *ptype, 555 struct packet_type *ptype,
580 struct net_device *orig_dev) 556 struct net_device *orig_dev)
581{ 557{
582 struct bat_priv *bat_priv; 558 struct bat_priv *bat_priv;
583 struct batman_packet *batman_packet; 559 struct batman_ogm_packet *batman_ogm_packet;
584 struct hard_iface *hard_iface; 560 struct hard_iface *hard_iface;
585 int ret; 561 int ret;
586 562
@@ -612,22 +588,22 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
612 if (hard_iface->if_status != IF_ACTIVE) 588 if (hard_iface->if_status != IF_ACTIVE)
613 goto err_free; 589 goto err_free;
614 590
615 batman_packet = (struct batman_packet *)skb->data; 591 batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
616 592
617 if (batman_packet->version != COMPAT_VERSION) { 593 if (batman_ogm_packet->version != COMPAT_VERSION) {
618 bat_dbg(DBG_BATMAN, bat_priv, 594 bat_dbg(DBG_BATMAN, bat_priv,
619 "Drop packet: incompatible batman version (%i)\n", 595 "Drop packet: incompatible batman version (%i)\n",
620 batman_packet->version); 596 batman_ogm_packet->version);
621 goto err_free; 597 goto err_free;
622 } 598 }
623 599
624 /* all receive handlers return whether they received or reused 600 /* all receive handlers return whether they received or reused
625 * the supplied skb. if not, we have to free the skb. */ 601 * the supplied skb. if not, we have to free the skb. */
626 602
627 switch (batman_packet->packet_type) { 603 switch (batman_ogm_packet->packet_type) {
628 /* batman originator packet */ 604 /* batman originator packet */
629 case BAT_PACKET: 605 case BAT_OGM:
630 ret = recv_bat_packet(skb, hard_iface); 606 ret = recv_bat_ogm_packet(skb, hard_iface);
631 break; 607 break;
632 608
633 /* batman icmp packet */ 609 /* batman icmp packet */
@@ -681,6 +657,36 @@ err_out:
681 return NET_RX_DROP; 657 return NET_RX_DROP;
682} 658}
683 659
 660/* This function returns true if the interface represented by ifindex is an
661 * 802.11 wireless device */
662bool is_wifi_iface(int ifindex)
663{
664 struct net_device *net_device = NULL;
665 bool ret = false;
666
667 if (ifindex == NULL_IFINDEX)
668 goto out;
669
670 net_device = dev_get_by_index(&init_net, ifindex);
671 if (!net_device)
672 goto out;
673
674#ifdef CONFIG_WIRELESS_EXT
675 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
676 * check for wireless_handlers != NULL */
677 if (net_device->wireless_handlers)
678 ret = true;
679 else
680#endif
681 /* cfg80211 drivers have to set ieee80211_ptr */
682 if (net_device->ieee80211_ptr)
683 ret = true;
684out:
685 if (net_device)
686 dev_put(net_device);
687 return ret;
688}
689
684struct notifier_block hard_if_notifier = { 690struct notifier_block hard_if_notifier = {
685 .notifier_call = hard_if_event, 691 .notifier_call = hard_if_event,
686}; 692};
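is_wifi_iface() keys off whichever plumbing point a wireless driver fills in: wireless_handlers for pre-cfg80211 (WEXT) drivers, ieee80211_ptr for cfg80211 ones. In this series it ultimately feeds the new TT_CLIENT_WIFI flag from packet.h; the sketch below only illustrates that kind of caller, with the helper stubbed out and an invented ifindex:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TT_CLIENT_WIFI	(1 << 2)	/* matches the packet.h hunk */

/* stand-in for the real helper, which needs a struct net_device */
static bool is_wifi_iface_stub(int ifindex)
{
	return ifindex == 3;	/* pretend ifindex 3 is a wlan device */
}

int main(void)
{
	uint16_t flags = 0;
	int ifindex = 3;

	/* translation-table code can tag clients behind 802.11 links */
	if (is_wifi_iface_stub(ifindex))
		flags |= TT_CLIENT_WIFI;

	printf("flags=0x%x\n", flags);
	return 0;
}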
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 442eacbc9e3a..67f78d1a63b4 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -42,6 +42,7 @@ void hardif_remove_interfaces(void);
42int hardif_min_mtu(struct net_device *soft_iface); 42int hardif_min_mtu(struct net_device *soft_iface);
43void update_min_mtu(struct net_device *soft_iface); 43void update_min_mtu(struct net_device *soft_iface);
44void hardif_free_rcu(struct rcu_head *rcu); 44void hardif_free_rcu(struct rcu_head *rcu);
45bool is_wifi_iface(int ifindex);
45 46
46static inline void hardif_free_ref(struct hard_iface *hard_iface) 47static inline void hardif_free_ref(struct hard_iface *hard_iface)
47{ 48{
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index dd5c9fd7a905..d20aa71ba1e8 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -76,19 +76,30 @@ static inline void hash_delete(struct hashtable_t *hash,
76 hash_destroy(hash); 76 hash_destroy(hash);
77} 77}
78 78
79/* adds data to the hashtable. returns 0 on success, -1 on error */ 79/**
80 * hash_add - adds data to the hashtable
81 * @hash: storage hash table
82 * @compare: callback to determine if 2 hash elements are identical
83 * @choose: callback calculating the hash index
84 * @data: data passed to the aforementioned callbacks as argument
 85 * @data_node: element to be added
86 *
 87 * Returns 0 on success, 1 if the element is already in the hash
88 * and -1 on error.
89 */
90
80static inline int hash_add(struct hashtable_t *hash, 91static inline int hash_add(struct hashtable_t *hash,
81 hashdata_compare_cb compare, 92 hashdata_compare_cb compare,
82 hashdata_choose_cb choose, 93 hashdata_choose_cb choose,
83 const void *data, struct hlist_node *data_node) 94 const void *data, struct hlist_node *data_node)
84{ 95{
85 int index; 96 int index, ret = -1;
86 struct hlist_head *head; 97 struct hlist_head *head;
87 struct hlist_node *node; 98 struct hlist_node *node;
88 spinlock_t *list_lock; /* spinlock to protect write access */ 99 spinlock_t *list_lock; /* spinlock to protect write access */
89 100
90 if (!hash) 101 if (!hash)
91 goto err; 102 goto out;
92 103
93 index = choose(data, hash->size); 104 index = choose(data, hash->size);
94 head = &hash->table[index]; 105 head = &hash->table[index];
@@ -99,6 +110,7 @@ static inline int hash_add(struct hashtable_t *hash,
99 if (!compare(node, data)) 110 if (!compare(node, data))
100 continue; 111 continue;
101 112
113 ret = 1;
102 goto err_unlock; 114 goto err_unlock;
103 } 115 }
104 rcu_read_unlock(); 116 rcu_read_unlock();
@@ -108,12 +120,13 @@ static inline int hash_add(struct hashtable_t *hash,
108 hlist_add_head_rcu(data_node, head); 120 hlist_add_head_rcu(data_node, head);
109 spin_unlock_bh(list_lock); 121 spin_unlock_bh(list_lock);
110 122
111 return 0; 123 ret = 0;
124 goto out;
112 125
113err_unlock: 126err_unlock:
114 rcu_read_unlock(); 127 rcu_read_unlock();
115err: 128out:
116 return -1; 129 return ret;
117} 130}
118 131
119/* removes data from hash, if found. returns pointer do data on success, so you 132/* removes data from hash, if found. returns pointer do data on success, so you
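Callers now have to tell "already present" apart from "error"; the originator.c hunk below switches its check from "hash_added < 0" to "hash_added != 0" for exactly this reason. A stand-alone illustration of the three-way contract, with hash_add() stubbed out:

#include <stdio.h>

/* stub with the same return convention as the real hash_add():
 * 0 = inserted, 1 = element already in the hash, -1 = error */
static int hash_add_stub(int simulate)
{
	return simulate;
}

int main(void)
{
	int ret;

	for (ret = -1; ret <= 1; ret++) {
		switch (hash_add_stub(ret)) {
		case 0:
			printf("inserted\n");
			break;
		case 1:	/* duplicate: caller keeps/frees its own copy */
			printf("already present\n");
			break;
		default: /* allocation or lookup failure: roll back */
			printf("error\n");
			break;
		}
	}
	return 0;
}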
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b0f9068ade57..fb87bdc2ce9b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -107,7 +107,7 @@ int mesh_init(struct net_device *soft_iface)
107 if (tt_init(bat_priv) < 1) 107 if (tt_init(bat_priv) < 1)
108 goto err; 108 goto err;
109 109
110 tt_local_add(soft_iface, soft_iface->dev_addr); 110 tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
111 111
112 if (vis_init(bat_priv) < 1) 112 if (vis_init(bat_priv) < 1)
113 goto err; 113 goto err;
@@ -117,8 +117,6 @@ int mesh_init(struct net_device *soft_iface)
117 goto end; 117 goto end;
118 118
119err: 119err:
120 pr_err("Unable to allocate memory for mesh information structures: "
121 "out of mem ?\n");
122 mesh_free(soft_iface); 120 mesh_free(soft_iface);
123 return -1; 121 return -1;
124 122
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a6df61a6933b..964ad4d8ba33 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
28#define DRIVER_DEVICE "batman-adv" 28#define DRIVER_DEVICE "batman-adv"
29 29
30#ifndef SOURCE_VERSION 30#ifndef SOURCE_VERSION
31#define SOURCE_VERSION "2011.3.0" 31#define SOURCE_VERSION "2011.4.0"
32#endif 32#endif
33 33
34/* B.A.T.M.A.N. parameters */ 34/* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
44#define PURGE_TIMEOUT 200 44#define PURGE_TIMEOUT 200
45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */ 45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600 46#define TT_CLIENT_ROAM_TIMEOUT 600
47/* sliding packet range of received originator messages in squence numbers 47/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 48 * (should be a multiple of our word size) */
49#define TQ_LOCAL_WINDOW_SIZE 64 49#define TQ_LOCAL_WINDOW_SIZE 64
50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */ 50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
@@ -62,6 +62,8 @@
62 62
63#define NO_FLAGS 0 63#define NO_FLAGS 0
64 64
65#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
66
65#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) 67#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
66 68
67#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 69#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -133,7 +135,7 @@ enum dbg_level {
133#include <linux/mutex.h> /* mutex */ 135#include <linux/mutex.h> /* mutex */
134#include <linux/module.h> /* needed by all modules */ 136#include <linux/module.h> /* needed by all modules */
135#include <linux/netdevice.h> /* netdevice */ 137#include <linux/netdevice.h> /* netdevice */
136#include <linux/etherdevice.h> /* ethernet address classifaction */ 138#include <linux/etherdevice.h> /* ethernet address classification */
137#include <linux/if_ether.h> /* ethernet header */ 139#include <linux/if_ether.h> /* ethernet header */
138#include <linux/poll.h> /* poll_table */ 140#include <linux/poll.h> /* poll_table */
139#include <linux/kthread.h> /* kernel threads */ 141#include <linux/kthread.h> /* kernel threads */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f3c3f620d195..0e5b77255d99 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -252,7 +252,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
252 252
253 hash_added = hash_add(bat_priv->orig_hash, compare_orig, 253 hash_added = hash_add(bat_priv->orig_hash, compare_orig,
254 choose_orig, orig_node, &orig_node->hash_entry); 254 choose_orig, orig_node, &orig_node->hash_entry);
255 if (hash_added < 0) 255 if (hash_added != 0)
256 goto free_bcast_own_sum; 256 goto free_bcast_own_sum;
257 257
258 return orig_node; 258 return orig_node;
@@ -336,8 +336,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
336 } else { 336 } else {
337 if (purge_orig_neighbors(bat_priv, orig_node, 337 if (purge_orig_neighbors(bat_priv, orig_node,
338 &best_neigh_node)) { 338 &best_neigh_node)) {
339 update_routes(bat_priv, orig_node, 339 update_route(bat_priv, orig_node, best_neigh_node);
340 best_neigh_node);
341 } 340 }
342 } 341 }
343 342
@@ -493,10 +492,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
493 492
494 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, 493 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
495 GFP_ATOMIC); 494 GFP_ATOMIC);
496 if (!data_ptr) { 495 if (!data_ptr)
497 pr_err("Can't resize orig: out of memory\n");
498 return -1; 496 return -1;
499 }
500 497
501 memcpy(data_ptr, orig_node->bcast_own, 498 memcpy(data_ptr, orig_node->bcast_own,
502 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS); 499 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
@@ -504,10 +501,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
504 orig_node->bcast_own = data_ptr; 501 orig_node->bcast_own = data_ptr;
505 502
506 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 503 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
507 if (!data_ptr) { 504 if (!data_ptr)
508 pr_err("Can't resize orig: out of memory\n");
509 return -1; 505 return -1;
510 }
511 506
512 memcpy(data_ptr, orig_node->bcast_own_sum, 507 memcpy(data_ptr, orig_node->bcast_own_sum,
513 (max_if_num - 1) * sizeof(uint8_t)); 508 (max_if_num - 1) * sizeof(uint8_t));
@@ -562,10 +557,8 @@ static int orig_node_del_if(struct orig_node *orig_node,
562 557
563 chunk_size = sizeof(unsigned long) * NUM_WORDS; 558 chunk_size = sizeof(unsigned long) * NUM_WORDS;
564 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); 559 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
565 if (!data_ptr) { 560 if (!data_ptr)
566 pr_err("Can't resize orig: out of memory\n");
567 return -1; 561 return -1;
568 }
569 562
570 /* copy first part */ 563 /* copy first part */
571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 564 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -583,10 +576,8 @@ free_bcast_own:
583 goto free_own_sum; 576 goto free_own_sum;
584 577
585 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 578 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
586 if (!data_ptr) { 579 if (!data_ptr)
587 pr_err("Can't resize orig: out of memory\n");
588 return -1; 580 return -1;
589 }
590 581
591 memcpy(data_ptr, orig_node->bcast_own_sum, 582 memcpy(data_ptr, orig_node->bcast_own_sum,
592 del_if_num * sizeof(uint8_t)); 583 del_if_num * sizeof(uint8_t));
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b76b4be10b92..4d9e54c57a36 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,14 +25,14 @@
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 26
27enum bat_packettype { 27enum bat_packettype {
28 BAT_PACKET = 0x01, 28 BAT_OGM = 0x01,
29 BAT_ICMP = 0x02, 29 BAT_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 30 BAT_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 31 BAT_BCAST = 0x04,
32 BAT_VIS = 0x05, 32 BAT_VIS = 0x05,
33 BAT_UNICAST_FRAG = 0x06, 33 BAT_UNICAST_FRAG = 0x06,
34 BAT_TT_QUERY = 0x07, 34 BAT_TT_QUERY = 0x07,
35 BAT_ROAM_ADV = 0x08 35 BAT_ROAM_ADV = 0x08
36}; 36};
37 37
38/* this file is included by batctl which needs these defines */ 38/* this file is included by batctl which needs these defines */
@@ -84,12 +84,13 @@ enum tt_query_flags {
84enum tt_client_flags { 84enum tt_client_flags {
85 TT_CLIENT_DEL = 1 << 0, 85 TT_CLIENT_DEL = 1 << 0,
86 TT_CLIENT_ROAM = 1 << 1, 86 TT_CLIENT_ROAM = 1 << 1,
87 TT_CLIENT_WIFI = 1 << 2,
87 TT_CLIENT_NOPURGE = 1 << 8, 88 TT_CLIENT_NOPURGE = 1 << 8,
88 TT_CLIENT_NEW = 1 << 9, 89 TT_CLIENT_NEW = 1 << 9,
89 TT_CLIENT_PENDING = 1 << 10 90 TT_CLIENT_PENDING = 1 << 10
90}; 91};
91 92
92struct batman_packet { 93struct batman_ogm_packet {
93 uint8_t packet_type; 94 uint8_t packet_type;
94 uint8_t version; /* batman version field */ 95 uint8_t version; /* batman version field */
95 uint8_t ttl; 96 uint8_t ttl;
@@ -104,7 +105,7 @@ struct batman_packet {
104 uint16_t tt_crc; 105 uint16_t tt_crc;
105} __packed; 106} __packed;
106 107
107#define BAT_PACKET_LEN sizeof(struct batman_packet) 108#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
108 109
109struct icmp_packet { 110struct icmp_packet {
110 uint8_t packet_type; 111 uint8_t packet_type;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0f32c818874d..f961cc5eade5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -22,18 +22,14 @@
22#include "main.h" 22#include "main.h"
23#include "routing.h" 23#include "routing.h"
24#include "send.h" 24#include "send.h"
25#include "hash.h"
26#include "soft-interface.h" 25#include "soft-interface.h"
27#include "hard-interface.h" 26#include "hard-interface.h"
28#include "icmp_socket.h" 27#include "icmp_socket.h"
29#include "translation-table.h" 28#include "translation-table.h"
30#include "originator.h" 29#include "originator.h"
31#include "ring_buffer.h"
32#include "vis.h" 30#include "vis.h"
33#include "aggregation.h"
34#include "gateway_common.h"
35#include "gateway_client.h"
36#include "unicast.h" 31#include "unicast.h"
32#include "bat_ogm.h"
37 33
38void slide_own_bcast_window(struct hard_iface *hard_iface) 34void slide_own_bcast_window(struct hard_iface *hard_iface)
39{ 35{
@@ -64,69 +60,9 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
64 } 60 }
65} 61}
66 62
67static void update_transtable(struct bat_priv *bat_priv, 63static void _update_route(struct bat_priv *bat_priv,
68 struct orig_node *orig_node, 64 struct orig_node *orig_node,
69 const unsigned char *tt_buff, 65 struct neigh_node *neigh_node)
70 uint8_t tt_num_changes, uint8_t ttvn,
71 uint16_t tt_crc)
72{
73 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
74 bool full_table = true;
75
76 /* the ttvn increased by one -> we can apply the attached changes */
77 if (ttvn - orig_ttvn == 1) {
78 /* the OGM could not contain the changes because they were too
79 * many to fit in one frame or because they have already been
80 * sent TT_OGM_APPEND_MAX times. In this case send a tt
81 * request */
82 if (!tt_num_changes) {
83 full_table = false;
84 goto request_table;
85 }
86
87 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
88 (struct tt_change *)tt_buff);
89
90 /* Even if we received the crc into the OGM, we prefer
91 * to recompute it to spot any possible inconsistency
92 * in the global table */
93 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
94
95 /* The ttvn alone is not enough to guarantee consistency
96 * because a single value could repesent different states
97 * (due to the wrap around). Thus a node has to check whether
98 * the resulting table (after applying the changes) is still
99 * consistent or not. E.g. a node could disconnect while its
100 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
101 * checking the CRC value is mandatory to detect the
102 * inconsistency */
103 if (orig_node->tt_crc != tt_crc)
104 goto request_table;
105
106 /* Roaming phase is over: tables are in sync again. I can
107 * unset the flag */
108 orig_node->tt_poss_change = false;
109 } else {
110 /* if we missed more than one change or our tables are not
111 * in sync anymore -> request fresh tt data */
112 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
113request_table:
114 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
115 "Need to retrieve the correct information "
116 "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
117 "%u num_changes: %u)\n", orig_node->orig, ttvn,
118 orig_ttvn, tt_crc, orig_node->tt_crc,
119 tt_num_changes);
120 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
121 full_table);
122 return;
123 }
124 }
125}
126
127static void update_route(struct bat_priv *bat_priv,
128 struct orig_node *orig_node,
129 struct neigh_node *neigh_node)
130{ 66{
131 struct neigh_node *curr_router; 67 struct neigh_node *curr_router;
132 68
@@ -170,8 +106,8 @@ static void update_route(struct bat_priv *bat_priv,
170 neigh_node_free_ref(curr_router); 106 neigh_node_free_ref(curr_router);
171} 107}
172 108
173void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 109void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
174 struct neigh_node *neigh_node) 110 struct neigh_node *neigh_node)
175{ 111{
176 struct neigh_node *router = NULL; 112 struct neigh_node *router = NULL;
177 113
@@ -181,116 +117,13 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
181 router = orig_node_get_router(orig_node); 117 router = orig_node_get_router(orig_node);
182 118
183 if (router != neigh_node) 119 if (router != neigh_node)
184 update_route(bat_priv, orig_node, neigh_node); 120 _update_route(bat_priv, orig_node, neigh_node);
185 121
186out: 122out:
187 if (router) 123 if (router)
188 neigh_node_free_ref(router); 124 neigh_node_free_ref(router);
189} 125}
190 126
191static int is_bidirectional_neigh(struct orig_node *orig_node,
192 struct orig_node *orig_neigh_node,
193 struct batman_packet *batman_packet,
194 struct hard_iface *if_incoming)
195{
196 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
197 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
198 struct hlist_node *node;
199 uint8_t total_count;
200 uint8_t orig_eq_count, neigh_rq_count, tq_own;
201 int tq_asym_penalty, ret = 0;
202
203 /* find corresponding one hop neighbor */
204 rcu_read_lock();
205 hlist_for_each_entry_rcu(tmp_neigh_node, node,
206 &orig_neigh_node->neigh_list, list) {
207
208 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
209 continue;
210
211 if (tmp_neigh_node->if_incoming != if_incoming)
212 continue;
213
214 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
215 continue;
216
217 neigh_node = tmp_neigh_node;
218 break;
219 }
220 rcu_read_unlock();
221
222 if (!neigh_node)
223 neigh_node = create_neighbor(orig_neigh_node,
224 orig_neigh_node,
225 orig_neigh_node->orig,
226 if_incoming);
227
228 if (!neigh_node)
229 goto out;
230
231 /* if orig_node is direct neighbour update neigh_node last_valid */
232 if (orig_node == orig_neigh_node)
233 neigh_node->last_valid = jiffies;
234
235 orig_node->last_valid = jiffies;
236
237 /* find packet count of corresponding one hop neighbor */
238 spin_lock_bh(&orig_node->ogm_cnt_lock);
239 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
240 neigh_rq_count = neigh_node->real_packet_count;
241 spin_unlock_bh(&orig_node->ogm_cnt_lock);
242
243 /* pay attention to not get a value bigger than 100 % */
244 total_count = (orig_eq_count > neigh_rq_count ?
245 neigh_rq_count : orig_eq_count);
246
247 /* if we have too few packets (too less data) we set tq_own to zero */
248 /* if we receive too few packets it is not considered bidirectional */
249 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
250 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
251 tq_own = 0;
252 else
253 /* neigh_node->real_packet_count is never zero as we
254 * only purge old information when getting new
255 * information */
256 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
257
258 /*
259 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
260 * affect the nearly-symmetric links only a little, but
261 * punishes asymmetric links more. This will give a value
262 * between 0 and TQ_MAX_VALUE
263 */
264 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
265 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
266 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
267 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
268 (TQ_LOCAL_WINDOW_SIZE *
269 TQ_LOCAL_WINDOW_SIZE *
270 TQ_LOCAL_WINDOW_SIZE);
271
272 batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
273 (TQ_MAX_VALUE * TQ_MAX_VALUE));
274
275 bat_dbg(DBG_BATMAN, bat_priv,
276 "bidirectional: "
277 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
278 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
279 "total tq: %3i\n",
280 orig_node->orig, orig_neigh_node->orig, total_count,
281 neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
282
283 /* if link has the minimum required transmission quality
284 * consider it bidirectional */
285 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
286 ret = 1;
287
288out:
289 if (neigh_node)
290 neigh_node_free_ref(neigh_node);
291 return ret;
292}
293
294/* caller must hold the neigh_list_lock */ 127/* caller must hold the neigh_list_lock */
295void bonding_candidate_del(struct orig_node *orig_node, 128void bonding_candidate_del(struct orig_node *orig_node,
296 struct neigh_node *neigh_node) 129 struct neigh_node *neigh_node)
@@ -308,8 +141,8 @@ out:
308 return; 141 return;
309} 142}
310 143
311static void bonding_candidate_add(struct orig_node *orig_node, 144void bonding_candidate_add(struct orig_node *orig_node,
312 struct neigh_node *neigh_node) 145 struct neigh_node *neigh_node)
313{ 146{
314 struct hlist_node *node; 147 struct hlist_node *node;
315 struct neigh_node *tmp_neigh_node, *router = NULL; 148 struct neigh_node *tmp_neigh_node, *router = NULL;
@@ -379,162 +212,23 @@ out:
379} 212}
380 213
381/* copy primary address for bonding */ 214/* copy primary address for bonding */
382static void bonding_save_primary(const struct orig_node *orig_node, 215void bonding_save_primary(const struct orig_node *orig_node,
383 struct orig_node *orig_neigh_node, 216 struct orig_node *orig_neigh_node,
384 const struct batman_packet *batman_packet) 217 const struct batman_ogm_packet *batman_ogm_packet)
385{ 218{
386 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 219 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
387 return; 220 return;
388 221
389 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); 222 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
390} 223}
391 224
392static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
393 const struct ethhdr *ethhdr,
394 const struct batman_packet *batman_packet,
395 struct hard_iface *if_incoming,
396 const unsigned char *tt_buff, int is_duplicate)
397{
398 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
399 struct neigh_node *router = NULL;
400 struct orig_node *orig_node_tmp;
401 struct hlist_node *node;
402 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
403
404 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
405 "Searching and updating originator entry of received packet\n");
406
407 rcu_read_lock();
408 hlist_for_each_entry_rcu(tmp_neigh_node, node,
409 &orig_node->neigh_list, list) {
410 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
411 (tmp_neigh_node->if_incoming == if_incoming) &&
412 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
413 if (neigh_node)
414 neigh_node_free_ref(neigh_node);
415 neigh_node = tmp_neigh_node;
416 continue;
417 }
418
419 if (is_duplicate)
420 continue;
421
422 spin_lock_bh(&tmp_neigh_node->tq_lock);
423 ring_buffer_set(tmp_neigh_node->tq_recv,
424 &tmp_neigh_node->tq_index, 0);
425 tmp_neigh_node->tq_avg =
426 ring_buffer_avg(tmp_neigh_node->tq_recv);
427 spin_unlock_bh(&tmp_neigh_node->tq_lock);
428 }
429
430 if (!neigh_node) {
431 struct orig_node *orig_tmp;
432
433 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
434 if (!orig_tmp)
435 goto unlock;
436
437 neigh_node = create_neighbor(orig_node, orig_tmp,
438 ethhdr->h_source, if_incoming);
439
440 orig_node_free_ref(orig_tmp);
441 if (!neigh_node)
442 goto unlock;
443 } else
444 bat_dbg(DBG_BATMAN, bat_priv,
445 "Updating existing last-hop neighbor of originator\n");
446
447 rcu_read_unlock();
448
449 orig_node->flags = batman_packet->flags;
450 neigh_node->last_valid = jiffies;
451
452 spin_lock_bh(&neigh_node->tq_lock);
453 ring_buffer_set(neigh_node->tq_recv,
454 &neigh_node->tq_index,
455 batman_packet->tq);
456 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
457 spin_unlock_bh(&neigh_node->tq_lock);
458
459 if (!is_duplicate) {
460 orig_node->last_ttl = batman_packet->ttl;
461 neigh_node->last_ttl = batman_packet->ttl;
462 }
463
464 bonding_candidate_add(orig_node, neigh_node);
465
466 /* if this neighbor already is our next hop there is nothing
467 * to change */
468 router = orig_node_get_router(orig_node);
469 if (router == neigh_node)
470 goto update_tt;
471
472 /* if this neighbor does not offer a better TQ we won't consider it */
473 if (router && (router->tq_avg > neigh_node->tq_avg))
474 goto update_tt;
475
476 /* if the TQ is the same and the link not more symetric we
477 * won't consider it either */
478 if (router && (neigh_node->tq_avg == router->tq_avg)) {
479 orig_node_tmp = router->orig_node;
480 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
481 bcast_own_sum_orig =
482 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
483 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
484
485 orig_node_tmp = neigh_node->orig_node;
486 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
487 bcast_own_sum_neigh =
488 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
489 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
490
491 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
492 goto update_tt;
493 }
494
495 update_routes(bat_priv, orig_node, neigh_node);
496
497update_tt:
498 /* I have to check for transtable changes only if the OGM has been
499 * sent through a primary interface */
500 if (((batman_packet->orig != ethhdr->h_source) &&
501 (batman_packet->ttl > 2)) ||
502 (batman_packet->flags & PRIMARIES_FIRST_HOP))
503 update_transtable(bat_priv, orig_node, tt_buff,
504 batman_packet->tt_num_changes,
505 batman_packet->ttvn,
506 batman_packet->tt_crc);
507
508 if (orig_node->gw_flags != batman_packet->gw_flags)
509 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
510
511 orig_node->gw_flags = batman_packet->gw_flags;
512
513 /* restart gateway selection if fast or late switching was enabled */
514 if ((orig_node->gw_flags) &&
515 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
516 (atomic_read(&bat_priv->gw_sel_class) > 2))
517 gw_check_election(bat_priv, orig_node);
518
519 goto out;
520
521unlock:
522 rcu_read_unlock();
523out:
524 if (neigh_node)
525 neigh_node_free_ref(neigh_node);
526 if (router)
527 neigh_node_free_ref(router);
528}
529
530/* checks whether the host restarted and is in the protection time. 225/* checks whether the host restarted and is in the protection time.
531 * returns: 226 * returns:
532 * 0 if the packet is to be accepted 227 * 0 if the packet is to be accepted
533 * 1 if the packet is to be ignored. 228 * 1 if the packet is to be ignored.
534 */ 229 */
535static int window_protected(struct bat_priv *bat_priv, 230int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
536 int32_t seq_num_diff, 231 unsigned long *last_reset)
537 unsigned long *last_reset)
538{ 232{
539 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 233 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
540 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 234 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
@@ -552,330 +246,12 @@ static int window_protected(struct bat_priv *bat_priv,
552 return 0; 246 return 0;
553} 247}
554 248
555/* processes a batman packet for all interfaces, adjusts the sequence number and 249int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
556 * finds out whether it is a duplicate.
557 * returns:
558 * 1 the packet is a duplicate
559 * 0 the packet has not yet been received
560 * -1 the packet is old and has been received while the seqno window
561 * was protected. Caller should drop it.
562 */
563static int count_real_packets(const struct ethhdr *ethhdr,
564 const struct batman_packet *batman_packet,
565 const struct hard_iface *if_incoming)
566{
567 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
568 struct orig_node *orig_node;
569 struct neigh_node *tmp_neigh_node;
570 struct hlist_node *node;
571 int is_duplicate = 0;
572 int32_t seq_diff;
573 int need_update = 0;
574 int set_mark, ret = -1;
575
576 orig_node = get_orig_node(bat_priv, batman_packet->orig);
577 if (!orig_node)
578 return 0;
579
580 spin_lock_bh(&orig_node->ogm_cnt_lock);
581 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
582
583 /* signalize caller that the packet is to be dropped. */
584 if (window_protected(bat_priv, seq_diff,
585 &orig_node->batman_seqno_reset))
586 goto out;
587
588 rcu_read_lock();
589 hlist_for_each_entry_rcu(tmp_neigh_node, node,
590 &orig_node->neigh_list, list) {
591
592 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
593 orig_node->last_real_seqno,
594 batman_packet->seqno);
595
596 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
597 (tmp_neigh_node->if_incoming == if_incoming))
598 set_mark = 1;
599 else
600 set_mark = 0;
601
602 /* if the window moved, set the update flag. */
603 need_update |= bit_get_packet(bat_priv,
604 tmp_neigh_node->real_bits,
605 seq_diff, set_mark);
606
607 tmp_neigh_node->real_packet_count =
608 bit_packet_count(tmp_neigh_node->real_bits);
609 }
610 rcu_read_unlock();
611
612 if (need_update) {
613 bat_dbg(DBG_BATMAN, bat_priv,
614 "updating last_seqno: old %d, new %d\n",
615 orig_node->last_real_seqno, batman_packet->seqno);
616 orig_node->last_real_seqno = batman_packet->seqno;
617 }
618
619 ret = is_duplicate;
620
621out:
622 spin_unlock_bh(&orig_node->ogm_cnt_lock);
623 orig_node_free_ref(orig_node);
624 return ret;
625}
626
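count_real_packets() (moved into bat_iv_ogm.c by this patch) relies on a per-neighbor sliding bitmap of recently seen sequence numbers. The duplicate test can be sketched on its own as below; the 64-entry window stands in for TQ_LOCAL_WINDOW_SIZE, and the kernel's locking and per-neighbor bookkeeping are left out:

    #include <stdbool.h>
    #include <stdint.h>

    #define WINDOW_SIZE 64 /* assumed, like TQ_LOCAL_WINDOW_SIZE */

    struct seqno_window {
            uint64_t bits;      /* bit i set: seqno (last_seqno - i) seen */
            int32_t last_seqno;
    };

    /* record a seqno; returns true if it was already seen (duplicate) */
    static bool window_mark(struct seqno_window *w, int32_t seqno)
    {
            int32_t diff = seqno - w->last_seqno;
            bool dup;

            if (diff > 0) {         /* window slides forward */
                    w->bits = diff < WINDOW_SIZE ? w->bits << diff : 0;
                    w->last_seqno = seqno;
                    diff = 0;
            }
            if (-diff >= WINDOW_SIZE)
                    return false;   /* too old to track */
            dup = w->bits & (1ULL << -diff);
            w->bits |= 1ULL << -diff;
            return dup;
    }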
627void receive_bat_packet(const struct ethhdr *ethhdr,
628 struct batman_packet *batman_packet,
629 const unsigned char *tt_buff,
630 struct hard_iface *if_incoming)
631{
632 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
633 struct hard_iface *hard_iface;
634 struct orig_node *orig_neigh_node, *orig_node;
635 struct neigh_node *router = NULL, *router_router = NULL;
636 struct neigh_node *orig_neigh_router = NULL;
637 int has_directlink_flag;
638 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
639 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
640 int is_duplicate;
641 uint32_t if_incoming_seqno;
642
643 /* Silently drop when the batman packet is actually not a
644 * correct packet.
645 *
646 * This might happen if a packet is padded (e.g. Ethernet has a
647	 * minimum frame length of 64 bytes) and the aggregation interprets
648 * it as an additional length.
649 *
650 * TODO: A more sane solution would be to have a bit in the
651 * batman_packet to detect whether the packet is the last
652 * packet in an aggregation. Here we expect that the padding
653 * is always zero (or not 0x01)
654 */
655 if (batman_packet->packet_type != BAT_PACKET)
656 return;
657
658 /* could be changed by schedule_own_packet() */
659 if_incoming_seqno = atomic_read(&if_incoming->seqno);
660
661 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
662
663 is_single_hop_neigh = (compare_eth(ethhdr->h_source,
664 batman_packet->orig) ? 1 : 0);
665
666 bat_dbg(DBG_BATMAN, bat_priv,
667 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
668 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
669 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
670 ethhdr->h_source, if_incoming->net_dev->name,
671 if_incoming->net_dev->dev_addr, batman_packet->orig,
672 batman_packet->prev_sender, batman_packet->seqno,
673 batman_packet->ttvn, batman_packet->tt_crc,
674 batman_packet->tt_num_changes, batman_packet->tq,
675 batman_packet->ttl, batman_packet->version,
676 has_directlink_flag);
677
678 rcu_read_lock();
679 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
680 if (hard_iface->if_status != IF_ACTIVE)
681 continue;
682
683 if (hard_iface->soft_iface != if_incoming->soft_iface)
684 continue;
685
686 if (compare_eth(ethhdr->h_source,
687 hard_iface->net_dev->dev_addr))
688 is_my_addr = 1;
689
690 if (compare_eth(batman_packet->orig,
691 hard_iface->net_dev->dev_addr))
692 is_my_orig = 1;
693
694 if (compare_eth(batman_packet->prev_sender,
695 hard_iface->net_dev->dev_addr))
696 is_my_oldorig = 1;
697
698 if (is_broadcast_ether_addr(ethhdr->h_source))
699 is_broadcast = 1;
700 }
701 rcu_read_unlock();
702
703 if (batman_packet->version != COMPAT_VERSION) {
704 bat_dbg(DBG_BATMAN, bat_priv,
705 "Drop packet: incompatible batman version (%i)\n",
706 batman_packet->version);
707 return;
708 }
709
710 if (is_my_addr) {
711 bat_dbg(DBG_BATMAN, bat_priv,
712 "Drop packet: received my own broadcast (sender: %pM"
713 ")\n",
714 ethhdr->h_source);
715 return;
716 }
717
718 if (is_broadcast) {
719 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
720 "ignoring all packets with broadcast source addr (sender: %pM"
721 ")\n", ethhdr->h_source);
722 return;
723 }
724
725 if (is_my_orig) {
726 unsigned long *word;
727 int offset;
728
729 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
730 if (!orig_neigh_node)
731 return;
732
733 /* neighbor has to indicate direct link and it has to
734 * come via the corresponding interface */
735 /* save packet seqno for bidirectional check */
736 if (has_directlink_flag &&
737 compare_eth(if_incoming->net_dev->dev_addr,
738 batman_packet->orig)) {
739 offset = if_incoming->if_num * NUM_WORDS;
740
741 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
742 word = &(orig_neigh_node->bcast_own[offset]);
743 bit_mark(word,
744 if_incoming_seqno - batman_packet->seqno - 2);
745 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
746 bit_packet_count(word);
747 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
748 }
749
750 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
751 "originator packet from myself (via neighbor)\n");
752 orig_node_free_ref(orig_neigh_node);
753 return;
754 }
755
756 if (is_my_oldorig) {
757 bat_dbg(DBG_BATMAN, bat_priv,
758 "Drop packet: ignoring all rebroadcast echos (sender: "
759 "%pM)\n", ethhdr->h_source);
760 return;
761 }
762
763 orig_node = get_orig_node(bat_priv, batman_packet->orig);
764 if (!orig_node)
765 return;
766
767 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
768
769 if (is_duplicate == -1) {
770 bat_dbg(DBG_BATMAN, bat_priv,
771 "Drop packet: packet within seqno protection time "
772 "(sender: %pM)\n", ethhdr->h_source);
773 goto out;
774 }
775
776 if (batman_packet->tq == 0) {
777 bat_dbg(DBG_BATMAN, bat_priv,
778 "Drop packet: originator packet with tq equal 0\n");
779 goto out;
780 }
781
782 router = orig_node_get_router(orig_node);
783 if (router)
784 router_router = orig_node_get_router(router->orig_node);
785
786 /* avoid temporary routing loops */
787 if (router && router_router &&
788 (compare_eth(router->addr, batman_packet->prev_sender)) &&
789 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
790 (compare_eth(router->addr, router_router->addr))) {
791 bat_dbg(DBG_BATMAN, bat_priv,
792 "Drop packet: ignoring all rebroadcast packets that "
793 "may make me loop (sender: %pM)\n", ethhdr->h_source);
794 goto out;
795 }
796
797	/* if sender is a direct neighbor, the sender mac equals
798 * originator mac */
799 orig_neigh_node = (is_single_hop_neigh ?
800 orig_node :
801 get_orig_node(bat_priv, ethhdr->h_source));
802 if (!orig_neigh_node)
803 goto out;
804
805 orig_neigh_router = orig_node_get_router(orig_neigh_node);
806
807 /* drop packet if sender is not a direct neighbor and if we
808 * don't route towards it */
809 if (!is_single_hop_neigh && (!orig_neigh_router)) {
810 bat_dbg(DBG_BATMAN, bat_priv,
811 "Drop packet: OGM via unknown neighbor!\n");
812 goto out_neigh;
813 }
814
815 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
816 batman_packet, if_incoming);
817
818 bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
819
820 /* update ranking if it is not a duplicate or has the same
821 * seqno and similar ttl as the non-duplicate */
822 if (is_bidirectional &&
823 (!is_duplicate ||
824 ((orig_node->last_real_seqno == batman_packet->seqno) &&
825 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
826 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
827 if_incoming, tt_buff, is_duplicate);
828
829 /* is single hop (direct) neighbor */
830 if (is_single_hop_neigh) {
831
832 /* mark direct link on incoming interface */
833 schedule_forward_packet(orig_node, ethhdr, batman_packet,
834 1, if_incoming);
835
836 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
837 "rebroadcast neighbor packet with direct link flag\n");
838 goto out_neigh;
839 }
840
841 /* multihop originator */
842 if (!is_bidirectional) {
843 bat_dbg(DBG_BATMAN, bat_priv,
844 "Drop packet: not received via bidirectional link\n");
845 goto out_neigh;
846 }
847
848 if (is_duplicate) {
849 bat_dbg(DBG_BATMAN, bat_priv,
850 "Drop packet: duplicate packet received\n");
851 goto out_neigh;
852 }
853
854 bat_dbg(DBG_BATMAN, bat_priv,
855 "Forwarding packet: rebroadcast originator packet\n");
856 schedule_forward_packet(orig_node, ethhdr, batman_packet,
857 0, if_incoming);
858
859out_neigh:
860 if ((orig_neigh_node) && (!is_single_hop_neigh))
861 orig_node_free_ref(orig_neigh_node);
862out:
863 if (router)
864 neigh_node_free_ref(router);
865 if (router_router)
866 neigh_node_free_ref(router_router);
867 if (orig_neigh_router)
868 neigh_node_free_ref(orig_neigh_router);
869
870 orig_node_free_ref(orig_node);
871}
872
873int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
874{ 250{
875 struct ethhdr *ethhdr; 251 struct ethhdr *ethhdr;
876 252
877	/* drop packet if it does not have the necessary minimum size */	253	/* drop packet if it does not have the necessary minimum size */
878 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet)))) 254 if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
879 return NET_RX_DROP; 255 return NET_RX_DROP;
880 256
881 ethhdr = (struct ethhdr *)skb_mac_header(skb); 257 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -898,10 +274,7 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
898 274
899 ethhdr = (struct ethhdr *)skb_mac_header(skb); 275 ethhdr = (struct ethhdr *)skb_mac_header(skb);
900 276
901 receive_aggr_bat_packet(ethhdr, 277 bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
902 skb->data,
903 skb_headlen(skb),
904 hard_iface);
905 278
906 kfree_skb(skb); 279 kfree_skb(skb);
907 return NET_RX_SUCCESS; 280 return NET_RX_SUCCESS;
@@ -1243,7 +616,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
1243 } 616 }
1244 break; 617 break;
1245 case TT_RESPONSE: 618 case TT_RESPONSE:
1246 /* packet needs to be linearised to access the TT changes */ 619 /* packet needs to be linearized to access the TT changes */
1247 if (skb_linearize(skb) < 0) 620 if (skb_linearize(skb) < 0)
1248 goto out; 621 goto out;
1249 622
@@ -1300,7 +673,7 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
1300 roam_adv_packet->client); 673 roam_adv_packet->client);
1301 674
1302 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 675 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
1303 atomic_read(&orig_node->last_ttvn) + 1, true); 676 atomic_read(&orig_node->last_ttvn) + 1, true, false);
1304 677
1305 /* Roaming phase starts: I have new information but the ttvn has not 678 /* Roaming phase starts: I have new information but the ttvn has not
1306 * been incremented yet. This flag will make me check all the incoming 679 * been incremented yet. This flag will make me check all the incoming
@@ -1536,7 +909,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
1536 909
1537 ethhdr = (struct ethhdr *)(skb->data + 910 ethhdr = (struct ethhdr *)(skb->data +
1538 sizeof(struct unicast_packet)); 911 sizeof(struct unicast_packet));
1539 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 912 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
1540 913
1541 if (!orig_node) { 914 if (!orig_node) {
1542 if (!is_my_client(bat_priv, ethhdr->h_dest)) 915 if (!is_my_client(bat_priv, ethhdr->h_dest))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index fb14e9579b19..7aaee0fb0fdc 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,19 +23,15 @@
23#define _NET_BATMAN_ADV_ROUTING_H_ 23#define _NET_BATMAN_ADV_ROUTING_H_
24 24
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void receive_bat_packet(const struct ethhdr *ethhdr, 26void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
27 struct batman_packet *batman_packet, 27 struct neigh_node *neigh_node);
28 const unsigned char *tt_buff,
29 struct hard_iface *if_incoming);
30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
31 struct neigh_node *neigh_node);
32int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 28int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
33int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 29int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 30int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 31int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
36int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
37int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
38int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); 34int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
39int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); 35int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
40int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); 36int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
41struct neigh_node *find_router(struct bat_priv *bat_priv, 37struct neigh_node *find_router(struct bat_priv *bat_priv,
@@ -43,5 +39,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
43 const struct hard_iface *recv_if); 39 const struct hard_iface *recv_if);
44void bonding_candidate_del(struct orig_node *orig_node, 40void bonding_candidate_del(struct orig_node *orig_node,
45 struct neigh_node *neigh_node); 41 struct neigh_node *neigh_node);
42void bonding_candidate_add(struct orig_node *orig_node,
43 struct neigh_node *neigh_node);
44void bonding_save_primary(const struct orig_node *orig_node,
45 struct orig_node *orig_neigh_node,
46 const struct batman_ogm_packet *batman_ogm_packet);
47int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
48 unsigned long *last_reset);
46 49
47#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ 50#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 58d14472068c..8a684eb738ad 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -26,33 +26,12 @@
26#include "soft-interface.h" 26#include "soft-interface.h"
27#include "hard-interface.h" 27#include "hard-interface.h"
28#include "vis.h" 28#include "vis.h"
29#include "aggregation.h"
30#include "gateway_common.h" 29#include "gateway_common.h"
31#include "originator.h" 30#include "originator.h"
31#include "bat_ogm.h"
32 32
33static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
34 34
35/* apply hop penalty for a normal link */
36static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
37{
38 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
39 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
40}
41
42/* when do we schedule our own packet to be sent */
43static unsigned long own_send_time(const struct bat_priv *bat_priv)
44{
45 return jiffies + msecs_to_jiffies(
46 atomic_read(&bat_priv->orig_interval) -
47		 JITTER + (random32() % (2*JITTER)));
48}
49
50/* when do we schedule a forwarded packet to be sent */
51static unsigned long forward_send_time(void)
52{
53 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
54}
55
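The two removed helpers implement the OGM timing policy that moves behind bat_ogm_schedule(): own OGMs are spread around orig_interval by a random jitter, while forwarded OGMs go out after a short random delay so that neighbors rebroadcasting the same OGM do not collide on the air. In plain C, with JITTER assumed to be the millisecond value from main.h:

    #include <stdlib.h>

    #define JITTER 20 /* ms, assumed to match main.h */

    /* own OGM: fire around orig_interval, +/- JITTER ms of spread */
    static unsigned int own_send_delay_ms(unsigned int orig_interval_ms)
    {
            return orig_interval_ms - JITTER + (rand() % (2 * JITTER));
    }

    /* forwarded OGM: small random delay in [0, JITTER/2) ms */
    static unsigned int forward_send_delay_ms(void)
    {
            return rand() % (JITTER / 2);
    }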
56/* send out an already prepared packet to the given address via the 35/* send out an already prepared packet to the given address via the
57 * specified batman interface */ 36 * specified batman interface */
58int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 37int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
@@ -99,141 +78,17 @@ send_skb_err:
99 return NET_XMIT_DROP; 78 return NET_XMIT_DROP;
100} 79}
101 80
102/* Send a packet to a given interface */
103static void send_packet_to_if(struct forw_packet *forw_packet,
104 struct hard_iface *hard_iface)
105{
106 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
107 char *fwd_str;
108 uint8_t packet_num;
109 int16_t buff_pos;
110 struct batman_packet *batman_packet;
111 struct sk_buff *skb;
112
113 if (hard_iface->if_status != IF_ACTIVE)
114 return;
115
116 packet_num = 0;
117 buff_pos = 0;
118 batman_packet = (struct batman_packet *)forw_packet->skb->data;
119
120 /* adjust all flags and log packets */
121 while (aggregated_packet(buff_pos,
122 forw_packet->packet_len,
123 batman_packet->tt_num_changes)) {
124
125 /* we might have aggregated direct link packets with an
126 * ordinary base packet */
127 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
128 (forw_packet->if_incoming == hard_iface))
129 batman_packet->flags |= DIRECTLINK;
130 else
131 batman_packet->flags &= ~DIRECTLINK;
132
133 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
134 "Sending own" :
135 "Forwarding"));
136 bat_dbg(DBG_BATMAN, bat_priv,
137 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
138 " IDF %s, hvn %d) on interface %s [%pM]\n",
139 fwd_str, (packet_num > 0 ? "aggregated " : ""),
140 batman_packet->orig, ntohl(batman_packet->seqno),
141 batman_packet->tq, batman_packet->ttl,
142 (batman_packet->flags & DIRECTLINK ?
143 "on" : "off"),
144 batman_packet->ttvn, hard_iface->net_dev->name,
145 hard_iface->net_dev->dev_addr);
146
147 buff_pos += sizeof(*batman_packet) +
148 tt_len(batman_packet->tt_num_changes);
149 packet_num++;
150 batman_packet = (struct batman_packet *)
151 (forw_packet->skb->data + buff_pos);
152 }
153
154 /* create clone because function is called more than once */
155 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
156 if (skb)
157 send_skb_packet(skb, hard_iface, broadcast_addr);
158}
159
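send_packet_to_if() (removed here, now part of bat_iv_ogm.c) steps through a buffer that may hold several aggregated OGMs, each a fixed header followed by a variable number of TT change records. The walk can be modeled in isolation as below; struct ogm_hdr and TT_CHANGE_LEN are stand-ins for the real packet structures:

    #include <stddef.h>
    #include <stdint.h>

    /* simplified OGM header: only the field the walk needs */
    struct ogm_hdr {
            uint8_t tt_num_changes;
            /* ... rest of the on-wire header ... */
    } __attribute__((packed));

    #define TT_CHANGE_LEN 14 /* assumed per-change size for the sketch */

    /* visit every OGM aggregated into one buffer */
    static void walk_aggregate(const uint8_t *buff, size_t buff_len,
                               void (*visit)(const struct ogm_hdr *))
    {
            size_t pos = 0, rec_len;

            while (pos + sizeof(struct ogm_hdr) <= buff_len) {
                    const struct ogm_hdr *ogm = (const void *)(buff + pos);

                    rec_len = sizeof(*ogm) +
                              ogm->tt_num_changes * TT_CHANGE_LEN;
                    if (pos + rec_len > buff_len)
                            break; /* truncated trailer, likely padding */
                    visit(ogm);
                    pos += rec_len;
            }
    }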
160/* send a batman packet */
161static void send_packet(struct forw_packet *forw_packet)
162{
163 struct hard_iface *hard_iface;
164 struct net_device *soft_iface;
165 struct bat_priv *bat_priv;
166 struct hard_iface *primary_if = NULL;
167 struct batman_packet *batman_packet =
168 (struct batman_packet *)(forw_packet->skb->data);
169 int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
170
171 if (!forw_packet->if_incoming) {
172 pr_err("Error - can't forward packet: incoming iface not "
173 "specified\n");
174 goto out;
175 }
176
177 soft_iface = forw_packet->if_incoming->soft_iface;
178 bat_priv = netdev_priv(soft_iface);
179
180 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
181 goto out;
182
183 primary_if = primary_if_get_selected(bat_priv);
184 if (!primary_if)
185 goto out;
186
187 /* multihomed peer assumed */
188	/* non-primary OGMs are only broadcast on their interface */
189 if ((directlink && (batman_packet->ttl == 1)) ||
190 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
191
192 /* FIXME: what about aggregated packets ? */
193 bat_dbg(DBG_BATMAN, bat_priv,
194 "%s packet (originator %pM, seqno %d, TTL %d) "
195 "on interface %s [%pM]\n",
196 (forw_packet->own ? "Sending own" : "Forwarding"),
197 batman_packet->orig, ntohl(batman_packet->seqno),
198 batman_packet->ttl,
199 forw_packet->if_incoming->net_dev->name,
200 forw_packet->if_incoming->net_dev->dev_addr);
201
202		/* skb is only used once and then forw_packet is freed */
203 send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
204 broadcast_addr);
205 forw_packet->skb = NULL;
206
207 goto out;
208 }
209
210 /* broadcast on every interface */
211 rcu_read_lock();
212 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
213 if (hard_iface->soft_iface != soft_iface)
214 continue;
215
216 send_packet_to_if(forw_packet, hard_iface);
217 }
218 rcu_read_unlock();
219
220out:
221 if (primary_if)
222 hardif_free_ref(primary_if);
223}
224
225static void realloc_packet_buffer(struct hard_iface *hard_iface, 81static void realloc_packet_buffer(struct hard_iface *hard_iface,
226 int new_len) 82 int new_len)
227{ 83{
228 unsigned char *new_buff; 84 unsigned char *new_buff;
229 struct batman_packet *batman_packet;
230 85
231 new_buff = kmalloc(new_len, GFP_ATOMIC); 86 new_buff = kmalloc(new_len, GFP_ATOMIC);
232 87
233 /* keep old buffer if kmalloc should fail */ 88 /* keep old buffer if kmalloc should fail */
234 if (new_buff) { 89 if (new_buff) {
235 memcpy(new_buff, hard_iface->packet_buff, 90 memcpy(new_buff, hard_iface->packet_buff,
236 sizeof(*batman_packet)); 91 BATMAN_OGM_LEN);
237 92
238 kfree(hard_iface->packet_buff); 93 kfree(hard_iface->packet_buff);
239 hard_iface->packet_buff = new_buff; 94 hard_iface->packet_buff = new_buff;
@@ -242,60 +97,48 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
242} 97}
243 98
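realloc_packet_buffer() deliberately does not fail hard: if the larger allocation cannot be satisfied it keeps the old buffer, so the node still sends an OGM without the appended TT changes rather than none at all. The same pattern in plain C:

    #include <stdlib.h>
    #include <string.h>

    /* Grow (or shrink) a packet buffer but keep the old one if the
     * allocation fails; the caller can still send the old, shorter
     * packet instead of losing it entirely. */
    static void realloc_keep_on_failure(unsigned char **buff,
                                        size_t keep_len, size_t new_len)
    {
            unsigned char *new_buff = malloc(new_len);

            if (!new_buff)
                    return;         /* old buffer stays valid */

            memcpy(new_buff, *buff, keep_len);
            free(*buff);
            *buff = new_buff;
    }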
244/* when calling this function (hard_iface == primary_if) has to be true */ 99/* when calling this function (hard_iface == primary_if) has to be true */
245static void prepare_packet_buffer(struct bat_priv *bat_priv, 100static int prepare_packet_buffer(struct bat_priv *bat_priv,
246 struct hard_iface *hard_iface) 101 struct hard_iface *hard_iface)
247{ 102{
248 int new_len; 103 int new_len;
249 struct batman_packet *batman_packet;
250 104
251 new_len = BAT_PACKET_LEN + 105 new_len = BATMAN_OGM_LEN +
252 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); 106 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
253 107
254 /* if we have too many changes for one packet don't send any 108 /* if we have too many changes for one packet don't send any
255 * and wait for the tt table request which will be fragmented */ 109 * and wait for the tt table request which will be fragmented */
256 if (new_len > hard_iface->soft_iface->mtu) 110 if (new_len > hard_iface->soft_iface->mtu)
257 new_len = BAT_PACKET_LEN; 111 new_len = BATMAN_OGM_LEN;
258 112
259 realloc_packet_buffer(hard_iface, new_len); 113 realloc_packet_buffer(hard_iface, new_len);
260 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
261 114
262 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv)); 115 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
263 116
264 /* reset the sending counter */ 117 /* reset the sending counter */
265 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); 118 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
266 119
267 batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv, 120 return tt_changes_fill_buffer(bat_priv,
268 hard_iface->packet_buff + BAT_PACKET_LEN, 121 hard_iface->packet_buff + BATMAN_OGM_LEN,
269 hard_iface->packet_len - BAT_PACKET_LEN); 122 hard_iface->packet_len - BATMAN_OGM_LEN);
270
271} 123}
272 124
273static void reset_packet_buffer(struct bat_priv *bat_priv, 125static int reset_packet_buffer(struct bat_priv *bat_priv,
274 struct hard_iface *hard_iface) 126 struct hard_iface *hard_iface)
275{ 127{
276 struct batman_packet *batman_packet; 128 realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
277 129 return 0;
278 realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
279
280 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
281 batman_packet->tt_num_changes = 0;
282} 130}
283 131
284void schedule_own_packet(struct hard_iface *hard_iface) 132void schedule_bat_ogm(struct hard_iface *hard_iface)
285{ 133{
286 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 134 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
287 struct hard_iface *primary_if; 135 struct hard_iface *primary_if;
288 unsigned long send_time; 136 int tt_num_changes = -1;
289 struct batman_packet *batman_packet;
290 int vis_server;
291 137
292 if ((hard_iface->if_status == IF_NOT_IN_USE) || 138 if ((hard_iface->if_status == IF_NOT_IN_USE) ||
293 (hard_iface->if_status == IF_TO_BE_REMOVED)) 139 (hard_iface->if_status == IF_TO_BE_REMOVED))
294 return; 140 return;
295 141
296 vis_server = atomic_read(&bat_priv->vis_mode);
297 primary_if = primary_if_get_selected(bat_priv);
298
299 /** 142 /**
300 * the interface gets activated here to avoid race conditions between 143 * the interface gets activated here to avoid race conditions between
301 * the moment of activating the interface in 144 * the moment of activating the interface in
@@ -306,124 +149,26 @@ void schedule_own_packet(struct hard_iface *hard_iface)
306 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 149 if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
307 hard_iface->if_status = IF_ACTIVE; 150 hard_iface->if_status = IF_ACTIVE;
308 151
152 primary_if = primary_if_get_selected(bat_priv);
153
309 if (hard_iface == primary_if) { 154 if (hard_iface == primary_if) {
310 /* if at least one change happened */ 155 /* if at least one change happened */
311 if (atomic_read(&bat_priv->tt_local_changes) > 0) { 156 if (atomic_read(&bat_priv->tt_local_changes) > 0) {
312 tt_commit_changes(bat_priv); 157 tt_commit_changes(bat_priv);
313 prepare_packet_buffer(bat_priv, hard_iface); 158 tt_num_changes = prepare_packet_buffer(bat_priv,
159 hard_iface);
314 } 160 }
315 161
316 /* if the changes have been sent enough times */ 162 /* if the changes have been sent often enough */
317 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt)) 163 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
318 reset_packet_buffer(bat_priv, hard_iface); 164 tt_num_changes = reset_packet_buffer(bat_priv,
165 hard_iface);
319 } 166 }
320 167
321 /**
322 * NOTE: packet_buff might just have been re-allocated in
323 * prepare_packet_buffer() or in reset_packet_buffer()
324 */
325 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
326
327 /* change sequence number to network order */
328 batman_packet->seqno =
329 htonl((uint32_t)atomic_read(&hard_iface->seqno));
330
331 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
332 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
333
334 if (vis_server == VIS_TYPE_SERVER_SYNC)
335 batman_packet->flags |= VIS_SERVER;
336 else
337 batman_packet->flags &= ~VIS_SERVER;
338
339 if ((hard_iface == primary_if) &&
340 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
341 batman_packet->gw_flags =
342 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
343 else
344 batman_packet->gw_flags = NO_FLAGS;
345
346 atomic_inc(&hard_iface->seqno);
347
348 slide_own_bcast_window(hard_iface);
349 send_time = own_send_time(bat_priv);
350 add_bat_packet_to_list(bat_priv,
351 hard_iface->packet_buff,
352 hard_iface->packet_len,
353 hard_iface, 1, send_time);
354
355 if (primary_if) 168 if (primary_if)
356 hardif_free_ref(primary_if); 169 hardif_free_ref(primary_if);
357}
358
359void schedule_forward_packet(struct orig_node *orig_node,
360 const struct ethhdr *ethhdr,
361 struct batman_packet *batman_packet,
362 int directlink,
363 struct hard_iface *if_incoming)
364{
365 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
366 struct neigh_node *router;
367 uint8_t in_tq, in_ttl, tq_avg = 0;
368 unsigned long send_time;
369 uint8_t tt_num_changes;
370
371 if (batman_packet->ttl <= 1) {
372 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
373 return;
374 }
375
376 router = orig_node_get_router(orig_node);
377
378 in_tq = batman_packet->tq;
379 in_ttl = batman_packet->ttl;
380 tt_num_changes = batman_packet->tt_num_changes;
381
382 batman_packet->ttl--;
383 memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
384
385 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
386 * of our best tq value */
387 if (router && router->tq_avg != 0) {
388
389 /* rebroadcast ogm of best ranking neighbor as is */
390 if (!compare_eth(router->addr, ethhdr->h_source)) {
391 batman_packet->tq = router->tq_avg;
392
393 if (router->last_ttl)
394 batman_packet->ttl = router->last_ttl - 1;
395 }
396
397 tq_avg = router->tq_avg;
398 }
399
400 if (router)
401 neigh_node_free_ref(router);
402
403 /* apply hop penalty */
404 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
405
406 bat_dbg(DBG_BATMAN, bat_priv,
407 "Forwarding packet: tq_orig: %i, tq_avg: %i, "
408 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
409 in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
410 batman_packet->ttl);
411
412 batman_packet->seqno = htonl(batman_packet->seqno);
413 batman_packet->tt_crc = htons(batman_packet->tt_crc);
414
415	/* switch off the primaries first hop flag when forwarding */
416 batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
417 if (directlink)
418 batman_packet->flags |= DIRECTLINK;
419 else
420 batman_packet->flags &= ~DIRECTLINK;
421 170
422 send_time = forward_send_time(); 171 bat_ogm_schedule(hard_iface, tt_num_changes);
423 add_bat_packet_to_list(bat_priv,
424 (unsigned char *)batman_packet,
425 sizeof(*batman_packet) + tt_len(tt_num_changes),
426 if_incoming, 0, send_time);
427} 172}
428 173
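The removed schedule_forward_packet() carried the core of the B.A.T.M.A.N. IV path metric propagation: before rebroadcasting an OGM, the node substitutes the averaged TQ of its current best next hop (unless the OGM came from that neighbor), decrements the TTL, and applies the hop penalty. A compact model of those adjustments, with TQ_MAX_VALUE assumed from main.h:

    #include <stdint.h>

    #define TQ_MAX_VALUE 255 /* assumed, mirrors main.h */

    /* transmit-quality decay applied at every hop, so longer
     * paths score lower */
    static uint8_t apply_hop_penalty(uint8_t tq, int hop_penalty)
    {
            return (tq * (TQ_MAX_VALUE - hop_penalty)) / TQ_MAX_VALUE;
    }

    /* sketch of the rebroadcast rule implemented above */
    static void forward_adjust(uint8_t *tq, uint8_t *ttl,
                               uint8_t best_neigh_tq, int from_best_neigh,
                               int hop_penalty)
    {
            if (!from_best_neigh && best_neigh_tq)
                    *tq = best_neigh_tq; /* re-announce best ranking tq */
            (*ttl)--;
            *tq = apply_hop_penalty(*tq, hop_penalty);
    }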
429static void forw_packet_free(struct forw_packet *forw_packet) 174static void forw_packet_free(struct forw_packet *forw_packet)
@@ -454,7 +199,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
454} 199}
455 200
456/* add a broadcast packet to the queue and setup timers. broadcast packets 201/* add a broadcast packet to the queue and setup timers. broadcast packets
457 * are sent multiple times to increase probability for beeing received. 202 * are sent multiple times to increase probability for being received.
458 * 203 *
459 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on 204 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
460 * errors. 205 * errors.
@@ -557,7 +302,7 @@ out:
557 atomic_inc(&bat_priv->bcast_queue_left); 302 atomic_inc(&bat_priv->bcast_queue_left);
558} 303}
559 304
560void send_outstanding_bat_packet(struct work_struct *work) 305void send_outstanding_bat_ogm_packet(struct work_struct *work)
561{ 306{
562 struct delayed_work *delayed_work = 307 struct delayed_work *delayed_work =
563 container_of(work, struct delayed_work, work); 308 container_of(work, struct delayed_work, work);
@@ -573,7 +318,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
573 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 318 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
574 goto out; 319 goto out;
575 320
576 send_packet(forw_packet); 321 bat_ogm_emit(forw_packet);
577 322
578 /** 323 /**
579 * we have to have at least one packet in the queue 324 * we have to have at least one packet in the queue
@@ -581,7 +326,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
581 * shutting down 326 * shutting down
582 */ 327 */
583 if (forw_packet->own) 328 if (forw_packet->own)
584 schedule_own_packet(forw_packet->if_incoming); 329 schedule_bat_ogm(forw_packet->if_incoming);
585 330
586out: 331out:
587 /* don't count own packet */ 332 /* don't count own packet */
@@ -612,7 +357,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
612 &bat_priv->forw_bcast_list, list) { 357 &bat_priv->forw_bcast_list, list) {
613 358
614 /** 359 /**
615 * if purge_outstanding_packets() was called with an argmument 360 * if purge_outstanding_packets() was called with an argument
616 * we delete only packets belonging to the given interface 361 * we delete only packets belonging to the given interface
617 */ 362 */
618 if ((hard_iface) && 363 if ((hard_iface) &&
@@ -641,7 +386,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
641 &bat_priv->forw_bat_list, list) { 386 &bat_priv->forw_bat_list, list) {
642 387
643 /** 388 /**
644 * if purge_outstanding_packets() was called with an argmument 389 * if purge_outstanding_packets() was called with an argument
645 * we delete only packets belonging to the given interface 390 * we delete only packets belonging to the given interface
646 */ 391 */
647 if ((hard_iface) && 392 if ((hard_iface) &&
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 1f2d1e877663..c8ca3ef7385b 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -24,15 +24,10 @@
24 24
25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
26 const uint8_t *dst_addr); 26 const uint8_t *dst_addr);
27void schedule_own_packet(struct hard_iface *hard_iface); 27void schedule_bat_ogm(struct hard_iface *hard_iface);
28void schedule_forward_packet(struct orig_node *orig_node,
29 const struct ethhdr *ethhdr,
30 struct batman_packet *batman_packet,
31 int directlink,
32 struct hard_iface *if_outgoing);
33int add_bcast_packet_to_list(struct bat_priv *bat_priv, 28int add_bcast_packet_to_list(struct bat_priv *bat_priv,
34 const struct sk_buff *skb, unsigned long delay); 29 const struct sk_buff *skb, unsigned long delay);
35void send_outstanding_bat_packet(struct work_struct *work); 30void send_outstanding_bat_ogm_packet(struct work_struct *work);
36void purge_outstanding_packets(struct bat_priv *bat_priv, 31void purge_outstanding_packets(struct bat_priv *bat_priv,
37 const struct hard_iface *hard_iface); 32 const struct hard_iface *hard_iface);
38 33
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 05dd35114a27..f9cc95728989 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -445,30 +445,31 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
445{ 445{
446 struct bat_priv *bat_priv = netdev_priv(dev); 446 struct bat_priv *bat_priv = netdev_priv(dev);
447 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 447 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
448 struct batman_packet *batman_packet; 448 struct batman_ogm_packet *batman_ogm_packet;
449 struct softif_neigh *softif_neigh = NULL; 449 struct softif_neigh *softif_neigh = NULL;
450 struct hard_iface *primary_if = NULL; 450 struct hard_iface *primary_if = NULL;
451 struct softif_neigh *curr_softif_neigh = NULL; 451 struct softif_neigh *curr_softif_neigh = NULL;
452 452
453 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 453 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
454 batman_packet = (struct batman_packet *) 454 batman_ogm_packet = (struct batman_ogm_packet *)
455 (skb->data + ETH_HLEN + VLAN_HLEN); 455 (skb->data + ETH_HLEN + VLAN_HLEN);
456 else 456 else
457 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN); 457 batman_ogm_packet = (struct batman_ogm_packet *)
458 (skb->data + ETH_HLEN);
458 459
459 if (batman_packet->version != COMPAT_VERSION) 460 if (batman_ogm_packet->version != COMPAT_VERSION)
460 goto out; 461 goto out;
461 462
462 if (batman_packet->packet_type != BAT_PACKET) 463 if (batman_ogm_packet->packet_type != BAT_OGM)
463 goto out; 464 goto out;
464 465
465 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 466 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
466 goto out; 467 goto out;
467 468
468 if (is_my_mac(batman_packet->orig)) 469 if (is_my_mac(batman_ogm_packet->orig))
469 goto out; 470 goto out;
470 471
471 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid); 472 softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
472 if (!softif_neigh) 473 if (!softif_neigh)
473 goto out; 474 goto out;
474 475
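softif_batman_recv() locates the OGM right behind the Ethernet header, one VLAN tag further in when the frame is 802.1Q tagged. The offset computation in isolation, as a sketch (header lengths and ethertype as in the kernel headers, h_proto being the big-endian ethertype from the frame):

    #include <stdint.h>
    #include <arpa/inet.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define ETH_P_8021Q 0x8100

    /* return a pointer to the OGM payload within a raw frame */
    static const uint8_t *ogm_payload(const uint8_t *frame, uint16_t h_proto)
    {
            unsigned int off = ETH_HLEN;

            if (ntohs(h_proto) == ETH_P_8021Q)
                    off += VLAN_HLEN; /* skip the 802.1Q tag */
            return frame + off;
    }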
@@ -532,11 +533,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
532 if (!is_valid_ether_addr(addr->sa_data)) 533 if (!is_valid_ether_addr(addr->sa_data))
533 return -EADDRNOTAVAIL; 534 return -EADDRNOTAVAIL;
534 535
535 /* only modify transtable if it has been initialised before */ 536 /* only modify transtable if it has been initialized before */
536 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 537 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
537 tt_local_remove(bat_priv, dev->dev_addr, 538 tt_local_remove(bat_priv, dev->dev_addr,
538 "mac address changed", false); 539 "mac address changed", false);
539 tt_local_add(dev, addr->sa_data); 540 tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
540 } 541 }
541 542
542 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 543 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -595,11 +596,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
595 goto dropped; 596 goto dropped;
596 597
597 /* Register the client MAC in the transtable */ 598 /* Register the client MAC in the transtable */
598 tt_local_add(soft_iface, ethhdr->h_source); 599 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
599 600
600 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 601 orig_node = transtable_search(bat_priv, ethhdr->h_source,
602 ethhdr->h_dest);
601 do_bcast = is_multicast_ether_addr(ethhdr->h_dest); 603 do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
602 if (do_bcast || (orig_node && orig_node->gw_flags)) { 604 if (do_bcast || (orig_node && orig_node->gw_flags)) {
603 ret = gw_is_target(bat_priv, skb, orig_node); 605 ret = gw_is_target(bat_priv, skb, orig_node);
604 606
605 if (ret < 0) 607 if (ret < 0)
@@ -739,6 +741,9 @@ void interface_rx(struct net_device *soft_iface,
739 741
740 soft_iface->last_rx = jiffies; 742 soft_iface->last_rx = jiffies;
741 743
744 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
745 goto dropped;
746
742 netif_rx(skb); 747 netif_rx(skb);
743 goto out; 748 goto out;
744 749
@@ -796,10 +801,8 @@ struct net_device *softif_create(const char *name)
796 801
797 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup); 802 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
798 803
799 if (!soft_iface) { 804 if (!soft_iface)
800 pr_err("Unable to allocate the batman interface: %s\n", name);
801 goto out; 805 goto out;
802 }
803 806
804 ret = register_netdevice(soft_iface); 807 ret = register_netdevice(soft_iface);
805 if (ret < 0) { 808 if (ret < 0) {
@@ -812,6 +815,7 @@ struct net_device *softif_create(const char *name)
812 815
813 atomic_set(&bat_priv->aggregated_ogms, 1); 816 atomic_set(&bat_priv->aggregated_ogms, 1);
814 atomic_set(&bat_priv->bonding, 0); 817 atomic_set(&bat_priv->bonding, 0);
818 atomic_set(&bat_priv->ap_isolation, 0);
815 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); 819 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
816 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); 820 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
817 atomic_set(&bat_priv->gw_sel_class, 20); 821 atomic_set(&bat_priv->gw_sel_class, 20);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index fb6931d00cd7..c7aafc7c5ed4 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -137,10 +137,22 @@ static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
137 kfree_rcu(tt_local_entry, rcu); 137 kfree_rcu(tt_local_entry, rcu);
138} 138}
139 139
140static void tt_global_entry_free_rcu(struct rcu_head *rcu)
141{
142 struct tt_global_entry *tt_global_entry;
143
144 tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
145
146 if (tt_global_entry->orig_node)
147 orig_node_free_ref(tt_global_entry->orig_node);
148
149 kfree(tt_global_entry);
150}
151
140static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) 152static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
141{ 153{
142 if (atomic_dec_and_test(&tt_global_entry->refcount)) 154 if (atomic_dec_and_test(&tt_global_entry->refcount))
143 kfree_rcu(tt_global_entry, rcu); 155 call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
144} 156}
145 157
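The switch from kfree_rcu() to call_rcu() above is forced by the new orig_node back-reference: kfree_rcu() can only free the object itself, whereas a callback can also drop a member reference once the grace period has elapsed. The shape of the pattern, schematically in kernel style (struct foo/bar and bar_put() are hypothetical stand-ins for the counted parent):

    struct foo {
            struct bar *parent;     /* counted reference */
            atomic_t refcount;
            struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *rcu)
    {
            struct foo *foo = container_of(rcu, struct foo, rcu);

            bar_put(foo->parent);   /* safe: all RCU readers are gone */
            kfree(foo);
    }

    static void foo_put(struct foo *foo)
    {
            if (atomic_dec_and_test(&foo->refcount))
                    call_rcu(&foo->rcu, foo_free_rcu);
    }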
146static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, 158static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -183,7 +195,8 @@ static int tt_local_init(struct bat_priv *bat_priv)
183 return 1; 195 return 1;
184} 196}
185 197
186void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) 198void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
199 int ifindex)
187{ 200{
188 struct bat_priv *bat_priv = netdev_priv(soft_iface); 201 struct bat_priv *bat_priv = netdev_priv(soft_iface);
189 struct tt_local_entry *tt_local_entry = NULL; 202 struct tt_local_entry *tt_local_entry = NULL;
@@ -207,6 +220,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
207 memcpy(tt_local_entry->addr, addr, ETH_ALEN); 220 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
208 tt_local_entry->last_seen = jiffies; 221 tt_local_entry->last_seen = jiffies;
209 tt_local_entry->flags = NO_FLAGS; 222 tt_local_entry->flags = NO_FLAGS;
223 if (is_wifi_iface(ifindex))
224 tt_local_entry->flags |= TT_CLIENT_WIFI;
210 atomic_set(&tt_local_entry->refcount, 2); 225 atomic_set(&tt_local_entry->refcount, 2);
211 226
212 /* the batman interface mac address should never be purged */ 227 /* the batman interface mac address should never be purged */
@@ -329,7 +344,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
329 344
330 rcu_read_lock(); 345 rcu_read_lock();
331 __hlist_for_each_rcu(node, head) 346 __hlist_for_each_rcu(node, head)
332 buf_size += 21; 347 buf_size += 29;
333 rcu_read_unlock(); 348 rcu_read_unlock();
334 } 349 }
335 350
@@ -348,8 +363,19 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
348 rcu_read_lock(); 363 rcu_read_lock();
349 hlist_for_each_entry_rcu(tt_local_entry, node, 364 hlist_for_each_entry_rcu(tt_local_entry, node,
350 head, hash_entry) { 365 head, hash_entry) {
351 pos += snprintf(buff + pos, 22, " * %pM\n", 366 pos += snprintf(buff + pos, 30, " * %pM "
352 tt_local_entry->addr); 367 "[%c%c%c%c%c]\n",
368 tt_local_entry->addr,
369 (tt_local_entry->flags &
370 TT_CLIENT_ROAM ? 'R' : '.'),
371 (tt_local_entry->flags &
372 TT_CLIENT_NOPURGE ? 'P' : '.'),
373 (tt_local_entry->flags &
374 TT_CLIENT_NEW ? 'N' : '.'),
375 (tt_local_entry->flags &
376 TT_CLIENT_PENDING ? 'X' : '.'),
377 (tt_local_entry->flags &
378 TT_CLIENT_WIFI ? 'W' : '.'));
353 } 379 }
354 rcu_read_unlock(); 380 rcu_read_unlock();
355 } 381 }
@@ -369,8 +395,8 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
369 tt_local_event(bat_priv, tt_local_entry->addr, 395 tt_local_event(bat_priv, tt_local_entry->addr,
370 tt_local_entry->flags | flags); 396 tt_local_entry->flags | flags);
371 397
372 /* The local client has to be merked as "pending to be removed" but has 398 /* The local client has to be marked as "pending to be removed" but has
373 * to be kept in the table in order to send it in an full tables 399 * to be kept in the table in order to send it in a full table
374	 * to be kept in the table in order to send it in an full tables	400	 * to be kept in the table in order to send it in a full table
375 tt_local_entry->flags |= TT_CLIENT_PENDING; 401 tt_local_entry->flags |= TT_CLIENT_PENDING;
376} 402}
@@ -495,7 +521,8 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
495 521
496/* caller must hold orig_node refcount */ 522/* caller must hold orig_node refcount */
497int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 523int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
498 const unsigned char *tt_addr, uint8_t ttvn, bool roaming) 524 const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
525 bool wifi)
499{ 526{
500 struct tt_global_entry *tt_global_entry; 527 struct tt_global_entry *tt_global_entry;
501 struct orig_node *orig_node_tmp; 528 struct orig_node *orig_node_tmp;
@@ -537,6 +564,9 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
537 tt_global_entry->roam_at = 0; 564 tt_global_entry->roam_at = 0;
538 } 565 }
539 566
567 if (wifi)
568 tt_global_entry->flags |= TT_CLIENT_WIFI;
569
540 bat_dbg(DBG_TT, bat_priv, 570 bat_dbg(DBG_TT, bat_priv,
541 "Creating new global tt entry: %pM (via %pM)\n", 571 "Creating new global tt entry: %pM (via %pM)\n",
542 tt_global_entry->addr, orig_node->orig); 572 tt_global_entry->addr, orig_node->orig);
@@ -582,8 +612,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
582 seq_printf(seq, 612 seq_printf(seq,
583 "Globally announced TT entries received via the mesh %s\n", 613 "Globally announced TT entries received via the mesh %s\n",
584 net_dev->name); 614 net_dev->name);
585 seq_printf(seq, " %-13s %s %-15s %s\n", 615 seq_printf(seq, " %-13s %s %-15s %s %s\n",
586 "Client", "(TTVN)", "Originator", "(Curr TTVN)"); 616 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
587 617
588 buf_size = 1; 618 buf_size = 1;
589 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via 619 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
@@ -593,7 +623,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
593 623
594 rcu_read_lock(); 624 rcu_read_lock();
595 __hlist_for_each_rcu(node, head) 625 __hlist_for_each_rcu(node, head)
596 buf_size += 59; 626 buf_size += 67;
597 rcu_read_unlock(); 627 rcu_read_unlock();
598 } 628 }
599 629
@@ -612,14 +642,20 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
612 rcu_read_lock(); 642 rcu_read_lock();
613 hlist_for_each_entry_rcu(tt_global_entry, node, 643 hlist_for_each_entry_rcu(tt_global_entry, node,
614 head, hash_entry) { 644 head, hash_entry) {
615 pos += snprintf(buff + pos, 61, 645 pos += snprintf(buff + pos, 69,
616 " * %pM (%3u) via %pM (%3u)\n", 646 " * %pM (%3u) via %pM (%3u) "
617 tt_global_entry->addr, 647 "[%c%c%c]\n", tt_global_entry->addr,
618 tt_global_entry->ttvn, 648 tt_global_entry->ttvn,
619 tt_global_entry->orig_node->orig, 649 tt_global_entry->orig_node->orig,
620 (uint8_t) atomic_read( 650 (uint8_t) atomic_read(
621 &tt_global_entry->orig_node-> 651 &tt_global_entry->orig_node->
622 last_ttvn)); 652 last_ttvn),
653 (tt_global_entry->flags &
654 TT_CLIENT_ROAM ? 'R' : '.'),
655 (tt_global_entry->flags &
656 TT_CLIENT_PENDING ? 'X' : '.'),
657 (tt_global_entry->flags &
658 TT_CLIENT_WIFI ? 'W' : '.'));
623 } 659 }
624 rcu_read_unlock(); 660 rcu_read_unlock();
625 } 661 }
@@ -686,6 +722,9 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
686 struct hlist_head *head; 722 struct hlist_head *head;
687 spinlock_t *list_lock; /* protects write access to the hash lists */ 723 spinlock_t *list_lock; /* protects write access to the hash lists */
688 724
725 if (!hash)
726 return;
727
689 for (i = 0; i < hash->size; i++) { 728 for (i = 0; i < hash->size; i++) {
690 head = &hash->table[i]; 729 head = &hash->table[i];
691 list_lock = &hash->list_locks[i]; 730 list_lock = &hash->list_locks[i];
@@ -774,30 +813,56 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
774 bat_priv->tt_global_hash = NULL; 813 bat_priv->tt_global_hash = NULL;
775} 814}
776 815
816static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
817 struct tt_global_entry *tt_global_entry)
818{
819 bool ret = false;
820
821 if (tt_local_entry->flags & TT_CLIENT_WIFI &&
822 tt_global_entry->flags & TT_CLIENT_WIFI)
823 ret = true;
824
825 return ret;
826}
827
777struct orig_node *transtable_search(struct bat_priv *bat_priv, 828struct orig_node *transtable_search(struct bat_priv *bat_priv,
778 const uint8_t *addr) 829 const uint8_t *src, const uint8_t *addr)
779{ 830{
780 struct tt_global_entry *tt_global_entry; 831 struct tt_local_entry *tt_local_entry = NULL;
832 struct tt_global_entry *tt_global_entry = NULL;
781 struct orig_node *orig_node = NULL; 833 struct orig_node *orig_node = NULL;
782 834
783 tt_global_entry = tt_global_hash_find(bat_priv, addr); 835 if (src && atomic_read(&bat_priv->ap_isolation)) {
836 tt_local_entry = tt_local_hash_find(bat_priv, src);
837 if (!tt_local_entry)
838 goto out;
839 }
784 840
841 tt_global_entry = tt_global_hash_find(bat_priv, addr);
785 if (!tt_global_entry) 842 if (!tt_global_entry)
786 goto out; 843 goto out;
787 844
845 /* check whether the clients should not communicate due to AP
846 * isolation */
847 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
848 goto out;
849
788 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 850 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
789 goto free_tt; 851 goto out;
790 852
791 /* A global client marked as PENDING has already moved from that 853 /* A global client marked as PENDING has already moved from that
792 * originator */ 854 * originator */
793 if (tt_global_entry->flags & TT_CLIENT_PENDING) 855 if (tt_global_entry->flags & TT_CLIENT_PENDING)
794 goto free_tt; 856 goto out;
795 857
796 orig_node = tt_global_entry->orig_node; 858 orig_node = tt_global_entry->orig_node;
797 859
798free_tt:
799 tt_global_entry_free_ref(tt_global_entry);
800out: 860out:
861 if (tt_global_entry)
862 tt_global_entry_free_ref(tt_global_entry);
863 if (tt_local_entry)
864 tt_local_entry_free_ref(tt_local_entry);
865
801 return orig_node; 866 return orig_node;
802} 867}
803 868
@@ -999,7 +1064,6 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
999 tt_response = (struct tt_query_packet *)skb_put(skb, 1064 tt_response = (struct tt_query_packet *)skb_put(skb,
1000 tt_query_size + tt_len); 1065 tt_query_size + tt_len);
1001 tt_response->ttvn = ttvn; 1066 tt_response->ttvn = ttvn;
1002 tt_response->tt_data = htons(tt_tot);
1003 1067
1004 tt_change = (struct tt_change *)(skb->data + tt_query_size); 1068 tt_change = (struct tt_change *)(skb->data + tt_query_size);
1005 tt_count = 0; 1069 tt_count = 0;
@@ -1025,12 +1089,17 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1025 } 1089 }
1026 rcu_read_unlock(); 1090 rcu_read_unlock();
1027 1091
1092 /* store in the message the number of entries we have successfully
1093 * copied */
1094 tt_response->tt_data = htons(tt_count);
1095
1028out: 1096out:
1029 return skb; 1097 return skb;
1030} 1098}
1031 1099
1032int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node, 1100static int send_tt_request(struct bat_priv *bat_priv,
1033 uint8_t ttvn, uint16_t tt_crc, bool full_table) 1101 struct orig_node *dst_orig_node,
1102 uint8_t ttvn, uint16_t tt_crc, bool full_table)
1034{ 1103{
1035 struct sk_buff *skb = NULL; 1104 struct sk_buff *skb = NULL;
1036 struct tt_query_packet *tt_request; 1105 struct tt_query_packet *tt_request;
@@ -1137,12 +1206,12 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1137 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1206 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1138 req_ttvn = tt_request->ttvn; 1207 req_ttvn = tt_request->ttvn;
1139 1208
1140 /* I have not the requested data */ 1209 /* I don't have the requested data */
1141 if (orig_ttvn != req_ttvn || 1210 if (orig_ttvn != req_ttvn ||
1142 tt_request->tt_data != req_dst_orig_node->tt_crc) 1211 tt_request->tt_data != req_dst_orig_node->tt_crc)
1143 goto out; 1212 goto out;
1144 1213
1145 /* If it has explicitly been requested the full table */ 1214 /* If the full table has been explicitly requested */
1146 if (tt_request->flags & TT_FULL_TABLE || 1215 if (tt_request->flags & TT_FULL_TABLE ||
1147 !req_dst_orig_node->tt_buff) 1216 !req_dst_orig_node->tt_buff)
1148 full_table = true; 1217 full_table = true;
@@ -1363,7 +1432,9 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
1363 (tt_change + i)->flags & TT_CLIENT_ROAM); 1432 (tt_change + i)->flags & TT_CLIENT_ROAM);
1364 else 1433 else
1365 if (!tt_global_add(bat_priv, orig_node, 1434 if (!tt_global_add(bat_priv, orig_node,
1366 (tt_change + i)->addr, ttvn, false)) 1435 (tt_change + i)->addr, ttvn, false,
1436 (tt_change + i)->flags &
1437 TT_CLIENT_WIFI))
1367 /* In case of problem while storing a 1438 /* In case of problem while storing a
1368 * global_entry, we stop the updating 1439 * global_entry, we stop the updating
1369 * procedure without committing the 1440 * procedure without committing the
@@ -1403,9 +1474,10 @@ out:
1403 orig_node_free_ref(orig_node); 1474 orig_node_free_ref(orig_node);
1404} 1475}
1405 1476
1406void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node, 1477static void tt_update_changes(struct bat_priv *bat_priv,
1407 uint16_t tt_num_changes, uint8_t ttvn, 1478 struct orig_node *orig_node,
1408 struct tt_change *tt_change) 1479 uint16_t tt_num_changes, uint8_t ttvn,
1480 struct tt_change *tt_change)
1409{ 1481{
1410 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, 1482 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1411 ttvn); 1483 ttvn);
@@ -1668,6 +1740,8 @@ static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
1668 rcu_read_lock(); 1740 rcu_read_lock();
1669 hlist_for_each_entry_rcu(tt_local_entry, node, 1741 hlist_for_each_entry_rcu(tt_local_entry, node,
1670 head, hash_entry) { 1742 head, hash_entry) {
1743 if (!(tt_local_entry->flags & flags))
1744 continue;
1671 tt_local_entry->flags &= ~flags; 1745 tt_local_entry->flags &= ~flags;
1672 atomic_inc(&bat_priv->num_local_tt); 1746 atomic_inc(&bat_priv->num_local_tt);
1673 } 1747 }
@@ -1720,3 +1794,90 @@ void tt_commit_changes(struct bat_priv *bat_priv)
1720 atomic_inc(&bat_priv->ttvn); 1794 atomic_inc(&bat_priv->ttvn);
1721 bat_priv->tt_poss_change = false; 1795 bat_priv->tt_poss_change = false;
1722} 1796}
1797
1798bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
1799{
1800 struct tt_local_entry *tt_local_entry = NULL;
1801 struct tt_global_entry *tt_global_entry = NULL;
1802 bool ret = true;
1803
1804 if (!atomic_read(&bat_priv->ap_isolation))
1805 return false;
1806
1807 tt_local_entry = tt_local_hash_find(bat_priv, dst);
1808 if (!tt_local_entry)
1809 goto out;
1810
1811 tt_global_entry = tt_global_hash_find(bat_priv, src);
1812 if (!tt_global_entry)
1813 goto out;
1814
1815 if (_is_ap_isolated(tt_local_entry, tt_global_entry))
1816 goto out;
1817
1818 ret = false;
1819
1820out:
1821 if (tt_global_entry)
1822 tt_global_entry_free_ref(tt_global_entry);
1823 if (tt_local_entry)
1824 tt_local_entry_free_ref(tt_local_entry);
1825 return ret;
1826}
1827
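AP isolation as implemented above reduces to a single predicate: traffic is blocked exactly when both endpoints carry the TT_CLIENT_WIFI flag and the ap_isolation switch is on; wired-to-wireless traffic always passes. As a standalone check (the flag bit value is assumed for the sketch):

    #include <stdbool.h>
    #include <stdint.h>

    #define TT_CLIENT_WIFI (1 << 4) /* assumed flag bit for the sketch */

    /* block the pair only when both clients were learned on wifi */
    static bool ap_isolated(uint16_t src_flags, uint16_t dst_flags,
                            bool ap_isolation_enabled)
    {
            return ap_isolation_enabled &&
                   (src_flags & TT_CLIENT_WIFI) &&
                   (dst_flags & TT_CLIENT_WIFI);
    }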
1828void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1829 const unsigned char *tt_buff, uint8_t tt_num_changes,
1830 uint8_t ttvn, uint16_t tt_crc)
1831{
1832 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1833 bool full_table = true;
1834
1835 /* the ttvn increased by one -> we can apply the attached changes */
1836 if (ttvn - orig_ttvn == 1) {
1837 /* the OGM could not contain the changes due to their size or
1838 * because they have already been sent TT_OGM_APPEND_MAX times.
1839 * In this case send a tt request */
1840 if (!tt_num_changes) {
1841 full_table = false;
1842 goto request_table;
1843 }
1844
1845 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
1846 (struct tt_change *)tt_buff);
1847
1848 /* Even if we received the precomputed crc with the OGM, we
1849 * prefer to recompute it to spot any possible inconsistency
1850 * in the global table */
1851 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1852
1853 /* The ttvn alone is not enough to guarantee consistency
1854 * because a single value could represent different states
1855 * (due to the wrap around). Thus a node has to check whether
1856 * the resulting table (after applying the changes) is still
1857 * consistent or not. E.g. a node could disconnect while its
1858 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
1859 * checking the CRC value is mandatory to detect the
1860 * inconsistency */
1861 if (orig_node->tt_crc != tt_crc)
1862 goto request_table;
1863
1864 /* Roaming phase is over: tables are in sync again. I can
1865 * unset the flag */
1866 orig_node->tt_poss_change = false;
1867 } else {
1868 /* if we missed more than one change or our tables are not
1869 * in sync anymore -> request fresh tt data */
1870 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
1871request_table:
1872 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
1873 "Need to retrieve the correct information "
1874 "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
1875 "%u num_changes: %u)\n", orig_node->orig, ttvn,
1876 orig_ttvn, tt_crc, orig_node->tt_crc,
1877 tt_num_changes);
1878 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
1879 full_table);
1880 return;
1881 }
1882 }
1883}
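tt_update_orig() boils down to a small decision table over the received ttvn, the attached diff and the CRC. The sketch below distills it; it assumes the caller applies the diff and recomputes my_crc before the CRC comparison, as the in-tree code does:

    #include <stdint.h>

    enum tt_action { TT_IN_SYNC, TT_REQUEST_DIFF, TT_REQUEST_FULL };

    static enum tt_action tt_decide(uint8_t ttvn, uint8_t orig_ttvn,
                                    uint8_t tt_num_changes,
                                    uint16_t crc, uint16_t my_crc)
    {
            if ((uint8_t)(ttvn - orig_ttvn) == 1) {
                    if (!tt_num_changes)    /* diff didn't fit in the OGM */
                            return TT_REQUEST_DIFF;
                    /* diff applied, my_crc recomputed by the caller;
                     * a mismatch means wrap-around or corruption */
                    return crc == my_crc ? TT_IN_SYNC : TT_REQUEST_FULL;
            }
            if (ttvn != orig_ttvn || crc != my_crc)
                    return TT_REQUEST_FULL; /* missed more than one step */
            return TT_IN_SYNC;
    }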
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index d4122cba53b8..30efd49881a3 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -26,15 +26,16 @@ int tt_len(int changes_num);
26int tt_changes_fill_buffer(struct bat_priv *bat_priv, 26int tt_changes_fill_buffer(struct bat_priv *bat_priv,
27 unsigned char *buff, int buff_len); 27 unsigned char *buff, int buff_len);
28int tt_init(struct bat_priv *bat_priv); 28int tt_init(struct bat_priv *bat_priv);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr); 29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
30 int ifindex);
30void tt_local_remove(struct bat_priv *bat_priv, 31void tt_local_remove(struct bat_priv *bat_priv,
31 const uint8_t *addr, const char *message, bool roaming); 32 const uint8_t *addr, const char *message, bool roaming);
32int tt_local_seq_print_text(struct seq_file *seq, void *offset); 33int tt_local_seq_print_text(struct seq_file *seq, void *offset);
33void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 34void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
34 const unsigned char *tt_buff, int tt_buff_len); 35 const unsigned char *tt_buff, int tt_buff_len);
35int tt_global_add(struct bat_priv *bat_priv, 36int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
36 struct orig_node *orig_node, const unsigned char *addr, 37 const unsigned char *addr, uint8_t ttvn, bool roaming,
37 uint8_t ttvn, bool roaming); 38 bool wifi);
38int tt_global_seq_print_text(struct seq_file *seq, void *offset); 39int tt_global_seq_print_text(struct seq_file *seq, void *offset);
39void tt_global_del_orig(struct bat_priv *bat_priv, 40void tt_global_del_orig(struct bat_priv *bat_priv,
40 struct orig_node *orig_node, const char *message); 41 struct orig_node *orig_node, const char *message);
@@ -42,25 +43,23 @@ void tt_global_del(struct bat_priv *bat_priv,
42 struct orig_node *orig_node, const unsigned char *addr, 43 struct orig_node *orig_node, const unsigned char *addr,
43 const char *message, bool roaming); 44 const char *message, bool roaming);
44struct orig_node *transtable_search(struct bat_priv *bat_priv, 45struct orig_node *transtable_search(struct bat_priv *bat_priv,
45 const uint8_t *addr); 46 const uint8_t *src, const uint8_t *addr);
46void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, 47void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
47 const unsigned char *tt_buff, uint8_t tt_num_changes); 48 const unsigned char *tt_buff, uint8_t tt_num_changes);
48uint16_t tt_local_crc(struct bat_priv *bat_priv); 49uint16_t tt_local_crc(struct bat_priv *bat_priv);
49uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); 50uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
50void tt_free(struct bat_priv *bat_priv); 51void tt_free(struct bat_priv *bat_priv);
51int send_tt_request(struct bat_priv *bat_priv,
52 struct orig_node *dst_orig_node, uint8_t hvn,
53 uint16_t tt_crc, bool full_table);
54bool send_tt_response(struct bat_priv *bat_priv, 52bool send_tt_response(struct bat_priv *bat_priv,
55 struct tt_query_packet *tt_request); 53 struct tt_query_packet *tt_request);
56void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
57 uint16_t tt_num_changes, uint8_t ttvn,
58 struct tt_change *tt_change);
59bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 54bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
60void handle_tt_response(struct bat_priv *bat_priv, 55void handle_tt_response(struct bat_priv *bat_priv,
61 struct tt_query_packet *tt_response); 56 struct tt_query_packet *tt_response);
62void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 57void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
63 struct orig_node *orig_node); 58 struct orig_node *orig_node);
64void tt_commit_changes(struct bat_priv *bat_priv); 59void tt_commit_changes(struct bat_priv *bat_priv);
60bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
61void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
62 const unsigned char *tt_buff, uint8_t tt_num_changes,
63 uint8_t ttvn, uint16_t tt_crc);
65 64
66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 65#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 25bd1db35370..ab8d0fe6df5a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -57,7 +57,7 @@ struct hard_iface {
57 * @batman_seqno_reset: time when the batman seqno window was reset 57 * @batman_seqno_reset: time when the batman seqno window was reset
58 * @gw_flags: flags related to gateway class 58 * @gw_flags: flags related to gateway class
59 * @flags: for now only VIS_SERVER flag 59 * @flags: for now only VIS_SERVER flag
60 * @last_real_seqno: last and best known squence number 60 * @last_real_seqno: last and best known sequence number
61 * @last_ttl: ttl of last received packet 61 * @last_ttl: ttl of last received packet
62 * @last_bcast_seqno: last broadcast sequence number received by this host 62 * @last_bcast_seqno: last broadcast sequence number received by this host
63 * 63 *
@@ -146,6 +146,7 @@ struct bat_priv {
146 atomic_t aggregated_ogms; /* boolean */ 146 atomic_t aggregated_ogms; /* boolean */
147 atomic_t bonding; /* boolean */ 147 atomic_t bonding; /* boolean */
148 atomic_t fragmentation; /* boolean */ 148 atomic_t fragmentation; /* boolean */
149 atomic_t ap_isolation; /* boolean */
149 atomic_t vis_mode; /* VIS_TYPE_* */ 150 atomic_t vis_mode; /* VIS_TYPE_* */
150 atomic_t gw_mode; /* GW_MODE_* */ 151 atomic_t gw_mode; /* GW_MODE_* */
151 atomic_t gw_sel_class; /* uint */ 152 atomic_t gw_sel_class; /* uint */
@@ -156,7 +157,7 @@ struct bat_priv {
156 atomic_t bcast_seqno; 157 atomic_t bcast_seqno;
157 atomic_t bcast_queue_left; 158 atomic_t bcast_queue_left;
158 atomic_t batman_queue_left; 159 atomic_t batman_queue_left;
159 atomic_t ttvn; /* tranlation table version number */ 160 atomic_t ttvn; /* translation table version number */
160 atomic_t tt_ogm_append_cnt; 161 atomic_t tt_ogm_append_cnt;
161 atomic_t tt_local_changes; /* changes registered in a OGM interval */ 162 atomic_t tt_local_changes; /* changes registered in a OGM interval */
162 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 163 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
@@ -223,22 +224,22 @@ struct socket_packet {
223 224
224struct tt_local_entry { 225struct tt_local_entry {
225 uint8_t addr[ETH_ALEN]; 226 uint8_t addr[ETH_ALEN];
227 struct hlist_node hash_entry;
226 unsigned long last_seen; 228 unsigned long last_seen;
227 uint16_t flags; 229 uint16_t flags;
228 atomic_t refcount; 230 atomic_t refcount;
229 struct rcu_head rcu; 231 struct rcu_head rcu;
230 struct hlist_node hash_entry;
231}; 232};
232 233
233struct tt_global_entry { 234struct tt_global_entry {
234 uint8_t addr[ETH_ALEN]; 235 uint8_t addr[ETH_ALEN];
236 struct hlist_node hash_entry; /* entry in the global table */
235 struct orig_node *orig_node; 237 struct orig_node *orig_node;
236 uint8_t ttvn; 238 uint8_t ttvn;
237 uint16_t flags; /* only TT_GLOBAL_ROAM is used */ 239 uint16_t flags; /* only TT_GLOBAL_ROAM is used */
238 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 240 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
239 atomic_t refcount; 241 atomic_t refcount;
240 struct rcu_head rcu; 242 struct rcu_head rcu;
241 struct hlist_node hash_entry; /* entry in the global table */
242}; 243};
243 244
244struct tt_change_node { 245struct tt_change_node {
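
One plausible reading of the hash_entry moves above (the commit message is not shown here, so treat the rationale as an assumption): hash bucket walks dereference the hlist_node and immediately container_of() back to the entry to compare addr, so co-locating the two at the top of the struct keeps the hot compare within one cache line. Schematically:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hlist_node { struct hlist_node *next, **pprev; };

    struct entry {
        unsigned char addr[6];         /* the compare key... */
        struct hlist_node hash_entry;  /* ...adjacent to the list linkage */
        /* colder fields (refcount, rcu head, timestamps) follow */
    };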
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 32b125fb3d3b..07d1c1da89dd 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -299,8 +299,10 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
299 goto find_router; 299 goto find_router;
300 } 300 }
301 301
302 /* check for tt host - increases orig_node refcount */ 302 /* check for tt host - increases orig_node refcount.
303 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 303 * returns NULL in case of AP isolation */
304 orig_node = transtable_search(bat_priv, ethhdr->h_source,
305 ethhdr->h_dest);
304 306
305find_router: 307find_router:
306 /** 308 /**
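
The extra src argument exists so the lookup itself can veto wifi-to-wifi forwarding, which is why the caller no longer needs a separate isolation check. Reduced to its rule, as a hedged sketch (the struct and helper here are invented; the kernel consults the wifi flags on the tt_local and tt_global entries instead):

    #include <stdbool.h>
    #include <stddef.h>

    struct tt_client { bool is_wifi; };

    /* Drop unicast traffic between two wireless clients when the
     * ap_isolation knob is enabled; otherwise let the lookup proceed. */
    static bool ap_isolated(bool ap_isolation, const struct tt_client *src,
                            const struct tt_client *dst)
    {
        return ap_isolation && src && dst && src->is_wifi && dst->is_wifi;
    }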
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 62f54b954625..8fd5535544b9 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -24,7 +24,7 @@
24 24
25#include "packet.h" 25#include "packet.h"
26 26
27#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */ 27#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
28#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */ 28#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
29 29
30int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 30int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 8a1b98589d76..f81a6b668b0c 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -131,7 +131,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
131 return; 131 return;
132 } 132 }
133 133
134 /* its a new address, add it to the list */ 134 /* it's a new address, add it to the list */
135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
136 if (!entry) 136 if (!entry)
137 return; 137 return;
@@ -465,7 +465,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
465 /* try to add it */ 465 /* try to add it */
466 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 466 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
467 info, &info->hash_entry); 467 info, &info->hash_entry);
468 if (hash_added < 0) { 468 if (hash_added != 0) {
469 /* did not work (for some reason) */ 469 /* did not work (for some reason) */
470 kref_put(&info->refcount, free_info); 470 kref_put(&info->refcount, free_info);
471 info = NULL; 471 info = NULL;
@@ -887,10 +887,8 @@ int vis_init(struct bat_priv *bat_priv)
887 } 887 }
888 888
889 bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 889 bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
890 if (!bat_priv->my_vis_info) { 890 if (!bat_priv->my_vis_info)
891 pr_err("Can't initialize vis packet\n");
892 goto err; 891 goto err;
893 }
894 892
895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + 893 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
896 MAX_VIS_PACKET_SIZE + 894 MAX_VIS_PACKET_SIZE +
@@ -920,7 +918,7 @@ int vis_init(struct bat_priv *bat_priv)
920 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 918 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
921 bat_priv->my_vis_info, 919 bat_priv->my_vis_info,
922 &bat_priv->my_vis_info->hash_entry); 920 &bat_priv->my_vis_info->hash_entry);
923 if (hash_added < 0) { 921 if (hash_added != 0) {
924 pr_err("Can't add own vis packet into hash\n"); 922 pr_err("Can't add own vis packet into hash\n");
925 /* not in hash, need to remove it manually. */ 923 /* not in hash, need to remove it manually. */
926 kref_put(&bat_priv->my_vis_info->refcount, free_info); 924 kref_put(&bat_priv->my_vis_info->refcount, free_info);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d161780..062124cd89cf 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -349,7 +349,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
349 } 349 }
350 350
351 chunk = min_t(unsigned int, skb->len, size); 351 chunk = min_t(unsigned int, skb->len, size);
352 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 352 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
353 skb_queue_head(&sk->sk_receive_queue, skb); 353 skb_queue_head(&sk->sk_receive_queue, skb);
354 if (!copied) 354 if (!copied)
355 copied = -EFAULT; 355 copied = -EFAULT;
@@ -361,7 +361,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
361 sock_recv_ts_and_drops(msg, sk, skb); 361 sock_recv_ts_and_drops(msg, sk, skb);
362 362
363 if (!(flags & MSG_PEEK)) { 363 if (!(flags & MSG_PEEK)) {
364 skb_pull(skb, chunk); 364 int skb_len = skb_headlen(skb);
365
366 if (chunk <= skb_len) {
367 __skb_pull(skb, chunk);
368 } else {
369 struct sk_buff *frag;
370
371 __skb_pull(skb, skb_len);
372 chunk -= skb_len;
373
374 skb_walk_frags(skb, frag) {
375 if (chunk <= frag->len) {
376 /* Pulling partial data */
377 skb->len -= chunk;
378 skb->data_len -= chunk;
379 __skb_pull(frag, chunk);
380 break;
381 } else if (frag->len) {
382 /* Pulling all frag data */
383 chunk -= frag->len;
384 skb->len -= frag->len;
385 skb->data_len -= frag->len;
386 __skb_pull(frag, frag->len);
387 }
388 }
389 }
390
365 if (skb->len) { 391 if (skb->len) {
366 skb_queue_head(&sk->sk_receive_queue, skb); 392 skb_queue_head(&sk->sk_receive_queue, skb);
367 break; 393 break;
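
Two things change in this recvmsg path. First, skb_copy_datagram_iovec() can copy out of paged and fragmented skbs, which the old memcpy_toiovec() on skb->data could not. Second, the non-PEEK consume path must now pull the copied bytes across the linear head plus the frag list, which is what the skb_walk_frags() loop above does. A user-space model of that walk, with plain buffers standing in for sk_buffs (note this model unlinks emptied fragments, where the kernel loop leaves zero-length ones chained):

    #include <stddef.h>

    struct frag { unsigned char *data; size_t len; struct frag *next; };

    /* Consume n bytes from the head of a fragment chain, trimming a
     * partial fragment in place, mirroring the pull logic above. */
    static void chain_pull(struct frag **head, size_t n)
    {
        while (*head && n) {
            struct frag *f = *head;

            if (n < f->len) {          /* partial pull: shrink in place */
                f->data += n;
                f->len  -= n;
                return;
            }
            n -= f->len;               /* whole fragment consumed */
            *head = f->next;
        }
    }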
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index d9edfe8bf9d6..91bcd3a961ec 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -492,7 +492,10 @@ static int bnep_session(void *arg)
492 /* RX */ 492 /* RX */
493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
494 skb_orphan(skb); 494 skb_orphan(skb);
495 bnep_rx_frame(s, skb); 495 if (!skb_linearize(skb))
496 bnep_rx_frame(s, skb);
497 else
498 kfree_skb(skb);
496 } 499 }
497 500
498 if (sk->sk_state != BT_CONNECTED) 501 if (sk->sk_state != BT_CONNECTED)
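
This is the first of four identical conversions in this series (bnep, cmtp, hidp and rfcomm all share the shape): the frame parsers index skb->data directly, so paged skbs must be flattened before parsing, and an allocation failure inside skb_linearize() means the frame is dropped rather than read past the linear area. The pattern, with parse_frame() as a hypothetical stand-in for the per-protocol handler:

    while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
        skb_orphan(skb);
        if (!skb_linearize(skb))
            parse_frame(session, skb);  /* contiguous data guaranteed */
        else
            kfree_skb(skb);             /* -ENOMEM: drop, never parse */
    }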
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d4f5dff7c955..bc4086480d97 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = {
217 .ndo_stop = bnep_net_close, 217 .ndo_stop = bnep_net_close,
218 .ndo_start_xmit = bnep_net_xmit, 218 .ndo_start_xmit = bnep_net_xmit,
219 .ndo_validate_addr = eth_validate_addr, 219 .ndo_validate_addr = eth_validate_addr,
220 .ndo_set_multicast_list = bnep_net_set_mc_list, 220 .ndo_set_rx_mode = bnep_net_set_mc_list,
221 .ndo_set_mac_address = bnep_net_set_mac_addr, 221 .ndo_set_mac_address = bnep_net_set_mac_addr,
222 .ndo_tx_timeout = bnep_net_timeout, 222 .ndo_tx_timeout = bnep_net_timeout,
223 .ndo_change_mtu = eth_change_mtu, 223 .ndo_change_mtu = eth_change_mtu,
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 521baa4fe835..7d00ddf9e9dc 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -302,7 +302,10 @@ static int cmtp_session(void *arg)
302 302
303 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 303 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
304 skb_orphan(skb); 304 skb_orphan(skb);
305 cmtp_recv_frame(session, skb); 305 if (!skb_linearize(skb))
306 cmtp_recv_frame(session, skb);
307 else
308 kfree_skb(skb);
306 } 309 }
307 310
308 cmtp_process_transmit(session); 311 cmtp_process_transmit(session);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ea7f031f3b04..c1c597e3e198 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -56,15 +56,15 @@ static void hci_le_connect(struct hci_conn *conn)
56 conn->sec_level = BT_SECURITY_LOW; 56 conn->sec_level = BT_SECURITY_LOW;
57 57
58 memset(&cp, 0, sizeof(cp)); 58 memset(&cp, 0, sizeof(cp));
59 cp.scan_interval = cpu_to_le16(0x0004); 59 cp.scan_interval = cpu_to_le16(0x0060);
60 cp.scan_window = cpu_to_le16(0x0004); 60 cp.scan_window = cpu_to_le16(0x0030);
61 bacpy(&cp.peer_addr, &conn->dst); 61 bacpy(&cp.peer_addr, &conn->dst);
62 cp.peer_addr_type = conn->dst_type; 62 cp.peer_addr_type = conn->dst_type;
63 cp.conn_interval_min = cpu_to_le16(0x0008); 63 cp.conn_interval_min = cpu_to_le16(0x0028);
64 cp.conn_interval_max = cpu_to_le16(0x0100); 64 cp.conn_interval_max = cpu_to_le16(0x0038);
65 cp.supervision_timeout = cpu_to_le16(0x0064); 65 cp.supervision_timeout = cpu_to_le16(0x002a);
66 cp.min_ce_len = cpu_to_le16(0x0001); 66 cp.min_ce_len = cpu_to_le16(0x0000);
67 cp.max_ce_len = cpu_to_le16(0x0001); 67 cp.max_ce_len = cpu_to_le16(0x0000);
68 68
69 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 69 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
70} 70}
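
The new LE connection parameters decode as follows, using the Bluetooth core spec units (0.625 ms ticks for scan parameters, 1.25 ms for connection intervals, 10 ms for the supervision timeout). For comparison, the old 0x0004 scan interval and window both decoded to a mere 2.5 ms. A check you can compile and run:

    #include <stdio.h>

    int main(void)
    {
        printf("scan interval: %.1f ms\n", 0x0060 * 0.625); /* 60.0 */
        printf("scan window:   %.1f ms\n", 0x0030 * 0.625); /* 30.0 */
        printf("conn int min:  %.1f ms\n", 0x0028 * 1.25);  /* 50.0 */
        printf("conn int max:  %.1f ms\n", 0x0038 * 1.25);  /* 70.0 */
        printf("superv. tmo:   %d ms\n",   0x002a * 10);    /* 420  */
        return 0;
    }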
@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
218 cp.handle = cpu_to_le16(conn->handle); 218 cp.handle = cpu_to_le16(conn->handle);
219 memcpy(cp.ltk, ltk, sizeof(cp.ltk)); 219 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
220 cp.ediv = ediv; 220 cp.ediv = ediv;
221 memcpy(cp.rand, rand, sizeof(rand)); 221 memcpy(cp.rand, rand, sizeof(cp.rand));
222 222
223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
224} 224}
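
The sizeof(rand) fix just above is the classic array-parameter decay bug: rand is declared as __u8 rand[8] in the prototype, but inside the function it is a pointer, so sizeof() yields the pointer size. On 64-bit that happens to equal 8 and the bug is invisible; on 32-bit only 4 of the 8 random bytes were copied. sizeof(cp.rand) names a field whose type really is an 8-byte array, so it is always correct. A standalone demonstration:

    #include <stdio.h>

    /* Inside f(), the parameter has pointer type despite the [8]. */
    static void f(unsigned char a[8])
    {
        printf("sizeof(a)       = %zu (pointer size!)\n", sizeof(a));
    }

    struct cmd { unsigned char rand[8]; };

    int main(void)
    {
        struct cmd cp;
        unsigned char r[8] = {0};

        f(r);
        printf("sizeof(cp.rand) = %zu\n", sizeof(cp.rand)); /* always 8 */
        return 0;
    }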
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 56943add45cc..b84458dcc226 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1312,59 +1312,41 @@ int hci_blacklist_clear(struct hci_dev *hdev)
1312int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) 1312int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1313{ 1313{
1314 struct bdaddr_list *entry; 1314 struct bdaddr_list *entry;
1315 int err;
1316 1315
1317 if (bacmp(bdaddr, BDADDR_ANY) == 0) 1316 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1318 return -EBADF; 1317 return -EBADF;
1319 1318
1320 hci_dev_lock_bh(hdev); 1319 if (hci_blacklist_lookup(hdev, bdaddr))
1321 1320 return -EEXIST;
1322 if (hci_blacklist_lookup(hdev, bdaddr)) {
1323 err = -EEXIST;
1324 goto err;
1325 }
1326 1321
1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 1322 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1328 if (!entry) { 1323 if (!entry)
1329 err = -ENOMEM; 1324 return -ENOMEM;
1330 goto err;
1331 }
1332 1325
1333 bacpy(&entry->bdaddr, bdaddr); 1326 bacpy(&entry->bdaddr, bdaddr);
1334 1327
1335 list_add(&entry->list, &hdev->blacklist); 1328 list_add(&entry->list, &hdev->blacklist);
1336 1329
1337 err = 0; 1330 return mgmt_device_blocked(hdev->id, bdaddr);
1338
1339err:
1340 hci_dev_unlock_bh(hdev);
1341 return err;
1342} 1331}
1343 1332
1344int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) 1333int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{ 1334{
1346 struct bdaddr_list *entry; 1335 struct bdaddr_list *entry;
1347 int err = 0;
1348
1349 hci_dev_lock_bh(hdev);
1350 1336
1351 if (bacmp(bdaddr, BDADDR_ANY) == 0) { 1337 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1352 hci_blacklist_clear(hdev); 1338 return hci_blacklist_clear(hdev);
1353 goto done;
1354 } 1339 }
1355 1340
1356 entry = hci_blacklist_lookup(hdev, bdaddr); 1341 entry = hci_blacklist_lookup(hdev, bdaddr);
1357 if (!entry) { 1342 if (!entry) {
1358 err = -ENOENT; 1343 return -ENOENT;
1359 goto done;
1360 } 1344 }
1361 1345
1362 list_del(&entry->list); 1346 list_del(&entry->list);
1363 kfree(entry); 1347 kfree(entry);
1364 1348
1365done: 1349 return mgmt_device_unblocked(hdev->id, bdaddr);
1366 hci_dev_unlock_bh(hdev);
1367 return err;
1368} 1350}
1369 1351
1370static void hci_clear_adv_cache(unsigned long arg) 1352static void hci_clear_adv_cache(unsigned long arg)
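
hci_blacklist_add() and hci_blacklist_del() shed their internal locking here because they gained a second caller: the mgmt path wants to hold the device lock across more than just the list update, while the ioctl path (see the hci_sock.c hunk further down) now takes the lock itself. The resulting ownership rule, as a kernel-flavored sketch rather than verbatim code:

    int blacklist_add_ioctl(struct hci_dev *hdev, bdaddr_t *bdaddr)
    {
        int err;

        hci_dev_lock_bh(hdev);
        err = hci_blacklist_add(hdev, bdaddr); /* assumes lock is held */
        hci_dev_unlock_bh(hdev);

        return err;
    }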
@@ -1523,11 +1505,6 @@ int hci_register_dev(struct hci_dev *hdev)
1523 if (!hdev->workqueue) 1505 if (!hdev->workqueue)
1524 goto nomem; 1506 goto nomem;
1525 1507
1526 hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1527 if (IS_ERR(hdev->tfm))
1528 BT_INFO("Failed to load transform for ecb(aes): %ld",
1529 PTR_ERR(hdev->tfm));
1530
1531 hci_register_sysfs(hdev); 1508 hci_register_sysfs(hdev);
1532 1509
1533 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1510 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1576,9 +1553,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
1576 !test_bit(HCI_SETUP, &hdev->flags)) 1553 !test_bit(HCI_SETUP, &hdev->flags))
1577 mgmt_index_removed(hdev->id); 1554 mgmt_index_removed(hdev->id);
1578 1555
1579 if (!IS_ERR(hdev->tfm))
1580 crypto_free_blkcipher(hdev->tfm);
1581
1582 hci_notify(hdev, HCI_DEV_UNREG); 1556 hci_notify(hdev, HCI_DEV_UNREG);
1583 1557
1584 if (hdev->rfkill) { 1558 if (hdev->rfkill) {
@@ -2074,6 +2048,9 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2074 min = c->sent; 2048 min = c->sent;
2075 conn = c; 2049 conn = c;
2076 } 2050 }
2051
2052 if (hci_conn_num(hdev, type) == num)
2053 break;
2077 } 2054 }
2078 2055
2079 if (conn) { 2056 if (conn) {
@@ -2131,6 +2108,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2131 2108
2132 BT_DBG("%s", hdev->name); 2109 BT_DBG("%s", hdev->name);
2133 2110
2111 if (!hci_conn_num(hdev, ACL_LINK))
2112 return;
2113
2134 if (!test_bit(HCI_RAW, &hdev->flags)) { 2114 if (!test_bit(HCI_RAW, &hdev->flags)) {
2135 /* ACL tx timeout must be longer than maximum 2115 /* ACL tx timeout must be longer than maximum
2136 * link supervision timeout (40.9 seconds) */ 2116 * link supervision timeout (40.9 seconds) */
@@ -2162,6 +2142,9 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2162 2142
2163 BT_DBG("%s", hdev->name); 2143 BT_DBG("%s", hdev->name);
2164 2144
2145 if (!hci_conn_num(hdev, SCO_LINK))
2146 return;
2147
2165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { 2148 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2149 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2167 BT_DBG("skb %p len %d", skb, skb->len); 2150 BT_DBG("skb %p len %d", skb, skb->len);
@@ -2182,6 +2165,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2182 2165
2183 BT_DBG("%s", hdev->name); 2166 BT_DBG("%s", hdev->name);
2184 2167
2168 if (!hci_conn_num(hdev, ESCO_LINK))
2169 return;
2170
2185 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2171 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2186 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2172 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2187 BT_DBG("skb %p len %d", skb, skb->len); 2173 BT_DBG("skb %p len %d", skb, skb->len);
@@ -2202,6 +2188,9 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2202 2188
2203 BT_DBG("%s", hdev->name); 2189 BT_DBG("%s", hdev->name);
2204 2190
2191 if (!hci_conn_num(hdev, LE_LINK))
2192 return;
2193
2205 if (!test_bit(HCI_RAW, &hdev->flags)) { 2194 if (!test_bit(HCI_RAW, &hdev->flags)) {
2206 /* LE tx timeout must be longer than maximum 2195 /* LE tx timeout must be longer than maximum
2207 * link supervision timeout (40.9 seconds) */ 2196 * link supervision timeout (40.9 seconds) */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7ef4eb4435fb..d7d96b6b1f0d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -898,16 +898,15 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
898 if (!cp) 898 if (!cp)
899 return; 899 return;
900 900
901 hci_dev_lock(hdev);
902
903 if (cp->enable == 0x01) { 901 if (cp->enable == 0x01) {
904 del_timer(&hdev->adv_timer); 902 del_timer(&hdev->adv_timer);
903
904 hci_dev_lock(hdev);
905 hci_adv_entries_clear(hdev); 905 hci_adv_entries_clear(hdev);
906 hci_dev_unlock(hdev);
906 } else if (cp->enable == 0x00) { 907 } else if (cp->enable == 0x00) {
907 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT); 908 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
908 } 909 }
909
910 hci_dev_unlock(hdev);
911} 910}
912 911
913static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 912static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1103,9 +1102,10 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1103 return 0; 1102 return 0;
1104 1103
1105 /* Only request authentication for SSP connections or non-SSP 1104 /* Only request authentication for SSP connections or non-SSP
1106 * devices with sec_level HIGH */ 1105 * devices with sec_level HIGH or if MITM protection is requested */
1107 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 1106 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1108 conn->pending_sec_level != BT_SECURITY_HIGH) 1107 conn->pending_sec_level != BT_SECURITY_HIGH &&
1108 !(conn->auth_type & 0x01))
1109 return 0; 1109 return 0;
1110 1110
1111 return 1; 1111 return 1;
@@ -1412,7 +1412,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1412 conn->state = BT_CONFIG; 1412 conn->state = BT_CONFIG;
1413 hci_conn_hold(conn); 1413 hci_conn_hold(conn);
1414 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1414 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1415 mgmt_connected(hdev->id, &ev->bdaddr); 1415 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
1416 } else 1416 } else
1417 conn->state = BT_CONNECTED; 1417 conn->state = BT_CONNECTED;
1418 1418
@@ -2174,7 +2174,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2174 hci_dev_lock(hdev); 2174 hci_dev_lock(hdev);
2175 2175
2176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2177 if (conn && conn->state == BT_CONNECTED) { 2177 if (!conn)
2178 goto unlock;
2179
2180 if (conn->state == BT_CONNECTED) {
2178 hci_conn_hold(conn); 2181 hci_conn_hold(conn);
2179 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2182 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2180 hci_conn_put(conn); 2183 hci_conn_put(conn);
@@ -2194,6 +2197,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2194 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); 2197 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2195 } 2198 }
2196 2199
2200unlock:
2197 hci_dev_unlock(hdev); 2201 hci_dev_unlock(hdev);
2198} 2202}
2199 2203
@@ -2816,7 +2820,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2816 goto unlock; 2820 goto unlock;
2817 } 2821 }
2818 2822
2819 mgmt_connected(hdev->id, &ev->bdaddr); 2823 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
2820 2824
2821 conn->sec_level = BT_SECURITY_LOW; 2825 conn->sec_level = BT_SECURITY_LOW;
2822 conn->handle = __le16_to_cpu(ev->handle); 2826 conn->handle = __le16_to_cpu(ev->handle);
@@ -2834,19 +2838,17 @@ unlock:
2834static inline void hci_le_adv_report_evt(struct hci_dev *hdev, 2838static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2835 struct sk_buff *skb) 2839 struct sk_buff *skb)
2836{ 2840{
2837 struct hci_ev_le_advertising_info *ev; 2841 u8 num_reports = skb->data[0];
2838 u8 num_reports; 2842 void *ptr = &skb->data[1];
2839
2840 num_reports = skb->data[0];
2841 ev = (void *) &skb->data[1];
2842 2843
2843 hci_dev_lock(hdev); 2844 hci_dev_lock(hdev);
2844 2845
2845 hci_add_adv_entry(hdev, ev); 2846 while (num_reports--) {
2847 struct hci_ev_le_advertising_info *ev = ptr;
2846 2848
2847 while (--num_reports) {
2848 ev = (void *) (ev->data + ev->length + 1);
2849 hci_add_adv_entry(hdev, ev); 2849 hci_add_adv_entry(hdev, ev);
2850
2851 ptr += sizeof(*ev) + ev->length + 1;
2850 } 2852 }
2851 2853
2852 hci_dev_unlock(hdev); 2854 hci_dev_unlock(hdev);
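
The rewritten report loop fixes two things at once. The old shape processed the first report before testing the count, so a malformed event with num_reports == 0 was still parsed, and the u8 pre-decrement then wrapped to 255 and kept walking. The new advance is also explicit about the wire layout: fixed header, length bytes of payload, one trailing RSSI byte. A user-space model of the walk (the struct mirrors the event layout; treat the field names as illustrative):

    #include <stdint.h>

    struct adv_info {            /* header, then length bytes, then RSSI */
        uint8_t evt_type;
        uint8_t bdaddr_type;
        uint8_t bdaddr[6];
        uint8_t length;
        uint8_t data[];
    };

    static void for_each_report(uint8_t num_reports, const uint8_t *payload,
                                void (*cb)(const struct adv_info *))
    {
        const uint8_t *ptr = payload;

        while (num_reports--) {   /* a count of 0 falls straight through */
            const struct adv_info *ev = (const void *)ptr;

            cb(ev);
            ptr += sizeof(*ev) + ev->length + 1; /* +1 skips the RSSI byte */
        }
    }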
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ff02cf5e77cc..f6afe3d76a66 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -183,21 +183,35 @@ static int hci_sock_release(struct socket *sock)
183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) 183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
184{ 184{
185 bdaddr_t bdaddr; 185 bdaddr_t bdaddr;
186 int err;
186 187
187 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 188 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
188 return -EFAULT; 189 return -EFAULT;
189 190
190 return hci_blacklist_add(hdev, &bdaddr); 191 hci_dev_lock_bh(hdev);
192
193 err = hci_blacklist_add(hdev, &bdaddr);
194
195 hci_dev_unlock_bh(hdev);
196
197 return err;
191} 198}
192 199
193static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) 200static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
194{ 201{
195 bdaddr_t bdaddr; 202 bdaddr_t bdaddr;
203 int err;
196 204
197 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 205 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
198 return -EFAULT; 206 return -EFAULT;
199 207
200 return hci_blacklist_del(hdev, &bdaddr); 208 hci_dev_lock_bh(hdev);
209
210 err = hci_blacklist_del(hdev, &bdaddr);
211
212 hci_dev_unlock_bh(hdev);
213
214 return err;
201} 215}
202 216
203/* Ioctls that require bound socket */ 217/* Ioctls that require bound socket */
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a6c3aa8be1f7..22f1a6c87035 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -23,6 +23,8 @@ static inline char *link_typetostr(int type)
23 return "SCO"; 23 return "SCO";
24 case ESCO_LINK: 24 case ESCO_LINK:
25 return "eSCO"; 25 return "eSCO";
26 case LE_LINK:
27 return "LE";
26 default: 28 default:
27 return "UNKNOWN"; 29 return "UNKNOWN";
28 } 30 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fb68f344c34a..075a3e920caf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -716,12 +716,18 @@ static int hidp_session(void *arg)
716 716
717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
718 skb_orphan(skb); 718 skb_orphan(skb);
719 hidp_recv_ctrl_frame(session, skb); 719 if (!skb_linearize(skb))
720 hidp_recv_ctrl_frame(session, skb);
721 else
722 kfree_skb(skb);
720 } 723 }
721 724
722 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { 725 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
723 skb_orphan(skb); 726 skb_orphan(skb);
724 hidp_recv_intr_frame(session, skb); 727 if (!skb_linearize(skb))
728 hidp_recv_intr_frame(session, skb);
729 else
730 kfree_skb(skb);
725 } 731 }
726 732
727 hidp_process_transmit(session); 733 hidp_process_transmit(session);
@@ -872,6 +878,9 @@ static int hidp_start(struct hid_device *hid)
872 struct hidp_session *session = hid->driver_data; 878 struct hidp_session *session = hid->driver_data;
873 struct hid_report *report; 879 struct hid_report *report;
874 880
881 if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
882 return 0;
883
875 list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. 884 list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
876 report_list, list) 885 report_list, list)
877 hidp_send_report(session, report); 886 hidp_send_report(session, report);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bdb482bbe6..8cd12917733b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -907,6 +907,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK) 907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn); 908 l2cap_le_conn_ready(conn);
909 909
910 if (conn->hcon->out && conn->hcon->type == LE_LINK)
911 smp_conn_security(conn, conn->hcon->pending_sec_level);
912
910 read_lock(&conn->chan_lock); 913 read_lock(&conn->chan_lock);
911 914
912 list_for_each_entry(chan, &conn->chan_l, list) { 915 list_for_each_entry(chan, &conn->chan_l, list) {
@@ -986,8 +989,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 989 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer); 990 del_timer_sync(&conn->info_timer);
988 991
989 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) 992 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
990 del_timer(&conn->security_timer); 993 del_timer(&conn->security_timer);
994 smp_chan_destroy(conn);
995 }
991 996
992 hcon->l2cap_data = NULL; 997 hcon->l2cap_data = NULL;
993 kfree(conn); 998 kfree(conn);
@@ -1240,7 +1245,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1240 __clear_retrans_timer(chan); 1245 __clear_retrans_timer(chan);
1241} 1246}
1242 1247
1243void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 1248static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1244{ 1249{
1245 struct hci_conn *hcon = chan->conn->hcon; 1250 struct hci_conn *hcon = chan->conn->hcon;
1246 u16 flags; 1251 u16 flags;
@@ -1256,7 +1261,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1256 hci_send_acl(hcon, skb, flags); 1261 hci_send_acl(hcon, skb, flags);
1257} 1262}
1258 1263
1259void l2cap_streaming_send(struct l2cap_chan *chan) 1264static void l2cap_streaming_send(struct l2cap_chan *chan)
1260{ 1265{
1261 struct sk_buff *skb; 1266 struct sk_buff *skb;
1262 u16 control, fcs; 1267 u16 control, fcs;
@@ -1322,7 +1327,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1322 l2cap_do_send(chan, tx_skb); 1327 l2cap_do_send(chan, tx_skb);
1323} 1328}
1324 1329
1325int l2cap_ertm_send(struct l2cap_chan *chan) 1330static int l2cap_ertm_send(struct l2cap_chan *chan)
1326{ 1331{
1327 struct sk_buff *skb, *tx_skb; 1332 struct sk_buff *skb, *tx_skb;
1328 u16 control, fcs; 1333 u16 control, fcs;
@@ -1460,7 +1465,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1460 return sent; 1465 return sent;
1461} 1466}
1462 1467
1463struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1468static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1464{ 1469{
1465 struct sock *sk = chan->sk; 1470 struct sock *sk = chan->sk;
1466 struct l2cap_conn *conn = chan->conn; 1471 struct l2cap_conn *conn = chan->conn;
@@ -1490,7 +1495,7 @@ struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr
1490 return skb; 1495 return skb;
1491} 1496}
1492 1497
1493struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1498static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1494{ 1499{
1495 struct sock *sk = chan->sk; 1500 struct sock *sk = chan->sk;
1496 struct l2cap_conn *conn = chan->conn; 1501 struct l2cap_conn *conn = chan->conn;
@@ -1519,7 +1524,9 @@ struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *m
1519 return skb; 1524 return skb;
1520} 1525}
1521 1526
1522struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1527static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1528 struct msghdr *msg, size_t len,
1529 u16 control, u16 sdulen)
1523{ 1530{
1524 struct sock *sk = chan->sk; 1531 struct sock *sk = chan->sk;
1525 struct l2cap_conn *conn = chan->conn; 1532 struct l2cap_conn *conn = chan->conn;
@@ -1565,7 +1572,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *
1565 return skb; 1572 return skb;
1566} 1573}
1567 1574
1568int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1575static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1569{ 1576{
1570 struct sk_buff *skb; 1577 struct sk_buff *skb;
1571 struct sk_buff_head sar_queue; 1578 struct sk_buff_head sar_queue;
@@ -3121,102 +3128,104 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
3121 return 0; 3128 return 0;
3122} 3129}
3123 3130
3124static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3131static void append_skb_frag(struct sk_buff *skb,
3132 struct sk_buff *new_frag, struct sk_buff **last_frag)
3125{ 3133{
3126 struct sk_buff *_skb; 3134 /* skb->len reflects data in skb as well as all fragments
3127 int err; 3135 * skb->data_len reflects only data in fragments
3136 */
3137 if (!skb_has_frag_list(skb))
3138 skb_shinfo(skb)->frag_list = new_frag;
3139
3140 new_frag->next = NULL;
3141
3142 (*last_frag)->next = new_frag;
3143 *last_frag = new_frag;
3144
3145 skb->len += new_frag->len;
3146 skb->data_len += new_frag->len;
3147 skb->truesize += new_frag->truesize;
3148}
3149
3150static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3151{
3152 int err = -EINVAL;
3128 3153
3129 switch (control & L2CAP_CTRL_SAR) { 3154 switch (control & L2CAP_CTRL_SAR) {
3130 case L2CAP_SDU_UNSEGMENTED: 3155 case L2CAP_SDU_UNSEGMENTED:
3131 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) 3156 if (chan->sdu)
3132 goto drop; 3157 break;
3133 3158
3134 return chan->ops->recv(chan->data, skb); 3159 err = chan->ops->recv(chan->data, skb);
3160 break;
3135 3161
3136 case L2CAP_SDU_START: 3162 case L2CAP_SDU_START:
3137 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) 3163 if (chan->sdu)
3138 goto drop; 3164 break;
3139 3165
3140 chan->sdu_len = get_unaligned_le16(skb->data); 3166 chan->sdu_len = get_unaligned_le16(skb->data);
3167 skb_pull(skb, 2);
3141 3168
3142 if (chan->sdu_len > chan->imtu) 3169 if (chan->sdu_len > chan->imtu) {
3143 goto disconnect; 3170 err = -EMSGSIZE;
3144 3171 break;
3145 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC); 3172 }
3146 if (!chan->sdu)
3147 return -ENOMEM;
3148 3173
3149 /* pull sdu_len bytes only after alloc, because of Local Busy 3174 if (skb->len >= chan->sdu_len)
3150 * condition we have to be sure that this will be executed 3175 break;
3151 * only once, i.e., when alloc does not fail */
3152 skb_pull(skb, 2);
3153 3176
3154 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3177 chan->sdu = skb;
3178 chan->sdu_last_frag = skb;
3155 3179
3156 set_bit(CONN_SAR_SDU, &chan->conn_state); 3180 skb = NULL;
3157 chan->partial_sdu_len = skb->len; 3181 err = 0;
3158 break; 3182 break;
3159 3183
3160 case L2CAP_SDU_CONTINUE: 3184 case L2CAP_SDU_CONTINUE:
3161 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3162 goto disconnect;
3163
3164 if (!chan->sdu) 3185 if (!chan->sdu)
3165 goto disconnect; 3186 break;
3166 3187
3167 chan->partial_sdu_len += skb->len; 3188 append_skb_frag(chan->sdu, skb,
3168 if (chan->partial_sdu_len > chan->sdu_len) 3189 &chan->sdu_last_frag);
3169 goto drop; 3190 skb = NULL;
3170 3191
3171 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3192 if (chan->sdu->len >= chan->sdu_len)
3193 break;
3172 3194
3195 err = 0;
3173 break; 3196 break;
3174 3197
3175 case L2CAP_SDU_END: 3198 case L2CAP_SDU_END:
3176 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3177 goto disconnect;
3178
3179 if (!chan->sdu) 3199 if (!chan->sdu)
3180 goto disconnect; 3200 break;
3181
3182 chan->partial_sdu_len += skb->len;
3183
3184 if (chan->partial_sdu_len > chan->imtu)
3185 goto drop;
3186 3201
3187 if (chan->partial_sdu_len != chan->sdu_len) 3202 append_skb_frag(chan->sdu, skb,
3188 goto drop; 3203 &chan->sdu_last_frag);
3204 skb = NULL;
3189 3205
3190 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3206 if (chan->sdu->len != chan->sdu_len)
3207 break;
3191 3208
3192 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3209 err = chan->ops->recv(chan->data, chan->sdu);
3193 if (!_skb) {
3194 return -ENOMEM;
3195 }
3196 3210
3197 err = chan->ops->recv(chan->data, _skb); 3211 if (!err) {
3198 if (err < 0) { 3212 /* Reassembly complete */
3199 kfree_skb(_skb); 3213 chan->sdu = NULL;
3200 return err; 3214 chan->sdu_last_frag = NULL;
3215 chan->sdu_len = 0;
3201 } 3216 }
3202
3203 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3204
3205 kfree_skb(chan->sdu);
3206 break; 3217 break;
3207 } 3218 }
3208 3219
3209 kfree_skb(skb); 3220 if (err) {
3210 return 0; 3221 kfree_skb(skb);
3211 3222 kfree_skb(chan->sdu);
3212drop: 3223 chan->sdu = NULL;
3213 kfree_skb(chan->sdu); 3224 chan->sdu_last_frag = NULL;
3214 chan->sdu = NULL; 3225 chan->sdu_len = 0;
3226 }
3215 3227
3216disconnect: 3228 return err;
3217 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3218 kfree_skb(skb);
3219 return 0;
3220} 3229}
3221 3230
3222static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 3231static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
@@ -3270,99 +3279,6 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3270 } 3279 }
3271} 3280}
3272 3281
3273static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3274{
3275 struct sk_buff *_skb;
3276 int err = -EINVAL;
3277
3278 /*
3279 * TODO: We have to notify the userland if some data is lost with the
3280 * Streaming Mode.
3281 */
3282
3283 switch (control & L2CAP_CTRL_SAR) {
3284 case L2CAP_SDU_UNSEGMENTED:
3285 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3286 kfree_skb(chan->sdu);
3287 break;
3288 }
3289
3290 err = chan->ops->recv(chan->data, skb);
3291 if (!err)
3292 return 0;
3293
3294 break;
3295
3296 case L2CAP_SDU_START:
3297 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3298 kfree_skb(chan->sdu);
3299 break;
3300 }
3301
3302 chan->sdu_len = get_unaligned_le16(skb->data);
3303 skb_pull(skb, 2);
3304
3305 if (chan->sdu_len > chan->imtu) {
3306 err = -EMSGSIZE;
3307 break;
3308 }
3309
3310 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3311 if (!chan->sdu) {
3312 err = -ENOMEM;
3313 break;
3314 }
3315
3316 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3317
3318 set_bit(CONN_SAR_SDU, &chan->conn_state);
3319 chan->partial_sdu_len = skb->len;
3320 err = 0;
3321 break;
3322
3323 case L2CAP_SDU_CONTINUE:
3324 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3325 break;
3326
3327 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3328
3329 chan->partial_sdu_len += skb->len;
3330 if (chan->partial_sdu_len > chan->sdu_len)
3331 kfree_skb(chan->sdu);
3332 else
3333 err = 0;
3334
3335 break;
3336
3337 case L2CAP_SDU_END:
3338 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3339 break;
3340
3341 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3342
3343 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3344 chan->partial_sdu_len += skb->len;
3345
3346 if (chan->partial_sdu_len > chan->imtu)
3347 goto drop;
3348
3349 if (chan->partial_sdu_len == chan->sdu_len) {
3350 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3351 err = chan->ops->recv(chan->data, _skb);
3352 if (err < 0)
3353 kfree_skb(_skb);
3354 }
3355 err = 0;
3356
3357drop:
3358 kfree_skb(chan->sdu);
3359 break;
3360 }
3361
3362 kfree_skb(skb);
3363 return err;
3364}
3365
3366static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) 3282static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3367{ 3283{
3368 struct sk_buff *skb; 3284 struct sk_buff *skb;
@@ -3377,7 +3293,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3377 3293
3378 skb = skb_dequeue(&chan->srej_q); 3294 skb = skb_dequeue(&chan->srej_q);
3379 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3295 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3380 err = l2cap_ertm_reassembly_sdu(chan, skb, control); 3296 err = l2cap_reassemble_sdu(chan, skb, control);
3381 3297
3382 if (err < 0) { 3298 if (err < 0) {
3383 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3299 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3537,7 +3453,7 @@ expected:
3537 return 0; 3453 return 0;
3538 } 3454 }
3539 3455
3540 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control); 3456 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3541 chan->buffer_seq = (chan->buffer_seq + 1) % 64; 3457 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3542 if (err < 0) { 3458 if (err < 0) {
3543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3459 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3853,12 +3769,20 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3853 3769
3854 tx_seq = __get_txseq(control); 3770 tx_seq = __get_txseq(control);
3855 3771
3856 if (chan->expected_tx_seq == tx_seq) 3772 if (chan->expected_tx_seq != tx_seq) {
3857 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3773 /* Frame(s) missing - must discard partial SDU */
3858 else 3774 kfree_skb(chan->sdu);
3859 chan->expected_tx_seq = (tx_seq + 1) % 64; 3775 chan->sdu = NULL;
3776 chan->sdu_last_frag = NULL;
3777 chan->sdu_len = 0;
3778
3779 /* TODO: Notify userland of missing data */
3780 }
3781
3782 chan->expected_tx_seq = (tx_seq + 1) % 64;
3860 3783
3861 l2cap_streaming_reassembly_sdu(chan, skb, control); 3784 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3785 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3862 3786
3863 goto done; 3787 goto done;
3864 3788
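
Streaming mode, unlike ERTM, never retransmits, so the new gap handling above is all it can do: discard the partial SDU and realign the window to the frame that did arrive. In outline (pseudocode; drop_partial_sdu() stands in for the four-line cleanup in the hunk):

    /* 6-bit sequence space: any gap loses data, so resync cheaply. */
    if (chan->expected_tx_seq != tx_seq)
        drop_partial_sdu(chan);               /* hypothetical helper */
    chan->expected_tx_seq = (tx_seq + 1) % 64;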
@@ -4093,6 +4017,11 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4093 4017
4094 BT_DBG("conn %p", conn); 4018 BT_DBG("conn %p", conn);
4095 4019
4020 if (hcon->type == LE_LINK) {
4021 smp_distribute_keys(conn, 0);
4022 del_timer(&conn->security_timer);
4023 }
4024
4096 read_lock(&conn->chan_lock); 4025 read_lock(&conn->chan_lock);
4097 4026
4098 list_for_each_entry(chan, &conn->chan_l, list) { 4027 list_for_each_entry(chan, &conn->chan_l, list) {
@@ -4105,9 +4034,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4105 if (chan->scid == L2CAP_CID_LE_DATA) { 4034 if (chan->scid == L2CAP_CID_LE_DATA) {
4106 if (!status && encrypt) { 4035 if (!status && encrypt) {
4107 chan->sec_level = hcon->sec_level; 4036 chan->sec_level = hcon->sec_level;
4108 del_timer(&conn->security_timer);
4109 l2cap_chan_ready(sk); 4037 l2cap_chan_ready(sk);
4110 smp_distribute_keys(conn, 0);
4111 } 4038 }
4112 4039
4113 bh_unlock_sock(sk); 4040 bh_unlock_sock(sk);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f623091d..e8292369cdcf 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -26,6 +26,8 @@
26 26
27/* Bluetooth L2CAP sockets. */ 27/* Bluetooth L2CAP sockets. */
28 28
29#include <linux/security.h>
30
29#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 33#include <net/bluetooth/l2cap.h>
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
933 chan->force_reliable = pchan->force_reliable; 935 chan->force_reliable = pchan->force_reliable;
934 chan->flushable = pchan->flushable; 936 chan->flushable = pchan->flushable;
935 chan->force_active = pchan->force_active; 937 chan->force_active = pchan->force_active;
938
939 security_sk_clone(parent, sk);
936 } else { 940 } else {
937 941
938 switch (sk->sk_type) { 942 switch (sk->sk_type) {
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 53e109eb043e..5a94eec06caa 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
908 struct hci_dev *hdev; 908 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp; 909 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len; 910 u16 key_count, expected_len;
911 int i, err; 911 int i;
912 912
913 cp = (void *) data; 913 cp = (void *) data;
914 914
@@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
918 key_count = get_unaligned_le16(&cp->key_count); 918 key_count = get_unaligned_le16(&cp->key_count);
919 919
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); 920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len > len) { 921 if (expected_len != len) {
922 BT_ERR("load_keys: expected at least %u bytes, got %u bytes", 922 BT_ERR("load_keys: expected %u bytes, got %u bytes",
923 expected_len, len); 923 len, expected_len);
924 return -EINVAL; 924 return -EINVAL;
925 } 925 }
926 926
@@ -942,36 +942,17 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
942 else 942 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944 944
945 len -= sizeof(*cp); 945 for (i = 0; i < key_count; i++) {
946 i = 0; 946 struct mgmt_key_info *key = &cp->keys[i];
947
948 while (i < len) {
949 struct mgmt_key_info *key = (void *) cp->keys + i;
950
951 i += sizeof(*key) + key->dlen;
952
953 if (key->type == HCI_LK_SMP_LTK) {
954 struct key_master_id *id = (void *) key->data;
955
956 if (key->dlen != sizeof(struct key_master_id))
957 continue;
958
959 hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
960 id->ediv, id->rand, key->val);
961
962 continue;
963 }
964 947
965 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 948 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
966 key->pin_len); 949 key->pin_len);
967 } 950 }
968 951
969 err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
970
971 hci_dev_unlock_bh(hdev); 952 hci_dev_unlock_bh(hdev);
972 hci_dev_put(hdev); 953 hci_dev_put(hdev);
973 954
974 return err; 955 return 0;
975} 956}
976 957
977static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 958static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
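
Dropping the SMP LTK parsing makes every key record fixed-size, which is what allows the length check to become exact instead of a lower bound; an exact match also stops a crafted message from smuggling trailing bytes past the parser. (A cosmetic slip worth noting: the BT_ERR() arguments appear in the opposite order from the format string's wording.) The validation, in isolation:

    #include <stddef.h>
    #include <stdint.h>

    /* Exact-length check for a counted array of fixed-size records. */
    static int check_len(uint16_t len, uint16_t key_count,
                         size_t hdr_size, size_t key_size)
    {
        size_t expected = hdr_size + (size_t)key_count * key_size;

        return (len == expected) ? 0 : -22;  /* -EINVAL */
    }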
@@ -1347,6 +1328,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1347 struct hci_dev *hdev; 1328 struct hci_dev *hdev;
1348 struct mgmt_cp_pair_device *cp; 1329 struct mgmt_cp_pair_device *cp;
1349 struct pending_cmd *cmd; 1330 struct pending_cmd *cmd;
1331 struct adv_entry *entry;
1350 u8 sec_level, auth_type; 1332 u8 sec_level, auth_type;
1351 struct hci_conn *conn; 1333 struct hci_conn *conn;
1352 int err; 1334 int err;
@@ -1364,15 +1346,20 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1364 1346
1365 hci_dev_lock_bh(hdev); 1347 hci_dev_lock_bh(hdev);
1366 1348
1367 if (cp->io_cap == 0x03) { 1349 sec_level = BT_SECURITY_MEDIUM;
1368 sec_level = BT_SECURITY_MEDIUM; 1350 if (cp->io_cap == 0x03)
1369 auth_type = HCI_AT_DEDICATED_BONDING; 1351 auth_type = HCI_AT_DEDICATED_BONDING;
1370 } else { 1352 else
1371 sec_level = BT_SECURITY_HIGH;
1372 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1353 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1373 }
1374 1354
1375 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); 1355 entry = hci_find_adv_entry(hdev, &cp->bdaddr);
1356 if (entry)
1357 conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
1358 auth_type);
1359 else
1360 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
1361 auth_type);
1362
1376 if (IS_ERR(conn)) { 1363 if (IS_ERR(conn)) {
1377 err = PTR_ERR(conn); 1364 err = PTR_ERR(conn);
1378 goto unlock; 1365 goto unlock;
@@ -1391,7 +1378,10 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1391 goto unlock; 1378 goto unlock;
1392 } 1379 }
1393 1380
1394 conn->connect_cfm_cb = pairing_complete_cb; 1381 /* For LE, just connecting isn't a proof that the pairing finished */
1382 if (!entry)
1383 conn->connect_cfm_cb = pairing_complete_cb;
1384
1395 conn->security_cfm_cb = pairing_complete_cb; 1385 conn->security_cfm_cb = pairing_complete_cb;
1396 conn->disconn_cfm_cb = pairing_complete_cb; 1386 conn->disconn_cfm_cb = pairing_complete_cb;
1397 conn->io_capability = cp->io_cap; 1387 conn->io_capability = cp->io_cap;
@@ -1689,13 +1679,12 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1689 u16 len) 1679 u16 len)
1690{ 1680{
1691 struct hci_dev *hdev; 1681 struct hci_dev *hdev;
1692 struct mgmt_cp_block_device *cp; 1682 struct pending_cmd *cmd;
1683 struct mgmt_cp_block_device *cp = (void *) data;
1693 int err; 1684 int err;
1694 1685
1695 BT_DBG("hci%u", index); 1686 BT_DBG("hci%u", index);
1696 1687
1697 cp = (void *) data;
1698
1699 if (len != sizeof(*cp)) 1688 if (len != sizeof(*cp))
1700 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 1689 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1701 EINVAL); 1690 EINVAL);
@@ -1705,6 +1694,14 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1705 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 1694 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1706 ENODEV); 1695 ENODEV);
1707 1696
1697 hci_dev_lock_bh(hdev);
1698
1699 cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
1700 if (!cmd) {
1701 err = -ENOMEM;
1702 goto failed;
1703 }
1704
1708 err = hci_blacklist_add(hdev, &cp->bdaddr); 1705 err = hci_blacklist_add(hdev, &cp->bdaddr);
1709 1706
1710 if (err < 0) 1707 if (err < 0)
@@ -1712,6 +1709,11 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1712 else 1709 else
1713 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 1710 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1714 NULL, 0); 1711 NULL, 0);
1712
1713 mgmt_pending_remove(cmd);
1714
1715failed:
1716 hci_dev_unlock_bh(hdev);
1715 hci_dev_put(hdev); 1717 hci_dev_put(hdev);
1716 1718
1717 return err; 1719 return err;
@@ -1721,13 +1723,12 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len) 1723 u16 len)
1722{ 1724{
1723 struct hci_dev *hdev; 1725 struct hci_dev *hdev;
1724 struct mgmt_cp_unblock_device *cp; 1726 struct pending_cmd *cmd;
1727 struct mgmt_cp_unblock_device *cp = (void *) data;
1725 int err; 1728 int err;
1726 1729
1727 BT_DBG("hci%u", index); 1730 BT_DBG("hci%u", index);
1728 1731
1729 cp = (void *) data;
1730
1731 if (len != sizeof(*cp)) 1732 if (len != sizeof(*cp))
1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1733 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1733 EINVAL); 1734 EINVAL);
@@ -1737,6 +1738,14 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1738 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1738 ENODEV); 1739 ENODEV);
1739 1740
1741 hci_dev_lock_bh(hdev);
1742
1743 cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
1744 if (!cmd) {
1745 err = -ENOMEM;
1746 goto failed;
1747 }
1748
1740 err = hci_blacklist_del(hdev, &cp->bdaddr); 1749 err = hci_blacklist_del(hdev, &cp->bdaddr);
1741 1750
1742 if (err < 0) 1751 if (err < 0)
@@ -1744,6 +1753,67 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1744 else 1753 else
1745 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1754 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1746 NULL, 0); 1755 NULL, 0);
1756
1757 mgmt_pending_remove(cmd);
1758
1759failed:
1760 hci_dev_unlock_bh(hdev);
1761 hci_dev_put(hdev);
1762
1763 return err;
1764}
1765
1766static int set_fast_connectable(struct sock *sk, u16 index,
1767 unsigned char *data, u16 len)
1768{
1769 struct hci_dev *hdev;
1770 struct mgmt_cp_set_fast_connectable *cp = (void *) data;
1771 struct hci_cp_write_page_scan_activity acp;
1772 u8 type;
1773 int err;
1774
1775 BT_DBG("hci%u", index);
1776
1777 if (len != sizeof(*cp))
1778 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1779 EINVAL);
1780
1781 hdev = hci_dev_get(index);
1782 if (!hdev)
1783 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1784 ENODEV);
1785
1786 hci_dev_lock(hdev);
1787
1788 if (cp->enable) {
1789 type = PAGE_SCAN_TYPE_INTERLACED;
1790 acp.interval = 0x0024; /* 22.5 msec page scan interval */
1791 } else {
1792 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1793 acp.interval = 0x0800; /* default 1.28 sec page scan */
1794 }
1795
1796 acp.window = 0x0012; /* default 11.25 msec page scan window */
1797
1798 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1799 sizeof(acp), &acp);
1800 if (err < 0) {
1801 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1802 -err);
1803 goto done;
1804 }
1805
1806 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1807 if (err < 0) {
1808 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1809 -err);
1810 goto done;
1811 }
1812
1813 err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1814 NULL, 0);
1815done:
1816 hci_dev_unlock(hdev);
1747 hci_dev_put(hdev); 1817 hci_dev_put(hdev);
1748 1818
1749 return err; 1819 return err;
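
Page scan parameters use the same 0.625 ms tick as the LE scan values earlier in this series, which is where the figures in the set_fast_connectable() comments come from:

    #include <stdio.h>

    int main(void)
    {
        printf("fast interval:    %.2f ms\n", 0x0024 * 0.625); /* 22.50  */
        printf("default interval: %.2f ms\n", 0x0800 * 0.625); /* 1280.0 */
        printf("window:           %.2f ms\n", 0x0012 * 0.625); /* 11.25  */
        return 0;
    }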
@@ -1869,6 +1939,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1869 case MGMT_OP_UNBLOCK_DEVICE: 1939 case MGMT_OP_UNBLOCK_DEVICE:
1870 err = unblock_device(sk, index, buf + sizeof(*hdr), len); 1940 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
1871 break; 1941 break;
1942 case MGMT_OP_SET_FAST_CONNECTABLE:
1943 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
1944 len);
1945 break;
1872 default: 1946 default:
1873 BT_DBG("Unknown op %u", opcode); 1947 BT_DBG("Unknown op %u", opcode);
1874 err = cmd_status(sk, index, opcode, 0x01); 1948 err = cmd_status(sk, index, opcode, 0x01);
@@ -1977,35 +2051,25 @@ int mgmt_connectable(u16 index, u8 connectable)
1977 2051
1978int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 2052int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1979{ 2053{
1980 struct mgmt_ev_new_key *ev; 2054 struct mgmt_ev_new_key ev;
1981 int err, total;
1982
1983 total = sizeof(struct mgmt_ev_new_key) + key->dlen;
1984 ev = kzalloc(total, GFP_ATOMIC);
1985 if (!ev)
1986 return -ENOMEM;
1987
1988 bacpy(&ev->key.bdaddr, &key->bdaddr);
1989 ev->key.type = key->type;
1990 memcpy(ev->key.val, key->val, 16);
1991 ev->key.pin_len = key->pin_len;
1992 ev->key.dlen = key->dlen;
1993 ev->store_hint = persistent;
1994 2055
1995 memcpy(ev->key.data, key->data, key->dlen); 2056 memset(&ev, 0, sizeof(ev));
1996
1997 err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
1998 2057
1999 kfree(ev); 2058 ev.store_hint = persistent;
2059 bacpy(&ev.key.bdaddr, &key->bdaddr);
2060 ev.key.type = key->type;
2061 memcpy(ev.key.val, key->val, 16);
2062 ev.key.pin_len = key->pin_len;
2000 2063
2001 return err; 2064 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
2002} 2065}
2003 2066
2004int mgmt_connected(u16 index, bdaddr_t *bdaddr) 2067int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
2005{ 2068{
2006 struct mgmt_ev_connected ev; 2069 struct mgmt_ev_connected ev;
2007 2070
2008 bacpy(&ev.bdaddr, bdaddr); 2071 bacpy(&ev.bdaddr, bdaddr);
2072 ev.link_type = link_type;
2009 2073
2010 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); 2074 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
2011} 2075}
@@ -2260,12 +2324,14 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2260 memset(&ev, 0, sizeof(ev)); 2324 memset(&ev, 0, sizeof(ev));
2261 2325
2262 bacpy(&ev.bdaddr, bdaddr); 2326 bacpy(&ev.bdaddr, bdaddr);
2263 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2264 ev.rssi = rssi; 2327 ev.rssi = rssi;
2265 2328
2266 if (eir) 2329 if (eir)
2267 memcpy(ev.eir, eir, sizeof(ev.eir)); 2330 memcpy(ev.eir, eir, sizeof(ev.eir));
2268 2331
2332 if (dev_class)
2333 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2334
2269 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); 2335 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
2270} 2336}
2271 2337
@@ -2286,3 +2352,29 @@ int mgmt_discovering(u16 index, u8 discovering)
2286 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, 2352 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
2287 sizeof(discovering), NULL); 2353 sizeof(discovering), NULL);
2288} 2354}
2355
2356int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
2357{
2358 struct pending_cmd *cmd;
2359 struct mgmt_ev_device_blocked ev;
2360
2361 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
2362
2363 bacpy(&ev.bdaddr, bdaddr);
2364
2365 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
2366 cmd ? cmd->sk : NULL);
2367}
2368
2369int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
2370{
2371 struct pending_cmd *cmd;
2372 struct mgmt_ev_device_unblocked ev;
2373
2374 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
2375
2376 bacpy(&ev.bdaddr, bdaddr);
2377
2378 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
2379 cmd ? cmd->sk : NULL);
2380}
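
Editor's note: both new events look up the pending command so the socket that issued BLOCK_DEVICE or UNBLOCK_DEVICE can be excluded from the broadcast; that socket already receives its own command reply. A toy model of the skip-one broadcast implied by the cmd ? cmd->sk : NULL argument (names hypothetical):

/* Toy model of "broadcast to everyone except the requester". */
#include <stdio.h>

#define NSOCKS 3

static void broadcast(const int socks[], int n, int skip)
{
	for (int i = 0; i < n; i++) {
		if (socks[i] == skip)
			continue;	/* requester already got its reply */
		printf("deliver event to sock %d\n", socks[i]);
	}
}

int main(void)
{
	int socks[NSOCKS] = { 10, 11, 12 };

	broadcast(socks, NSOCKS, 11);	/* pending command found: skip 11 */
	broadcast(socks, NSOCKS, -1);	/* none found: deliver to all */
	return 0;
}
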
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5ba3f6df665c..38b618c96de6 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1853,7 +1853,10 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1853 /* Get data directly from socket receive queue without copying it. */ 1853 /* Get data directly from socket receive queue without copying it. */
1854 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 1854 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1855 skb_orphan(skb); 1855 skb_orphan(skb);
1856 rfcomm_recv_frame(s, skb); 1856 if (!skb_linearize(skb))
1857 rfcomm_recv_frame(s, skb);
1858 else
1859 kfree_skb(skb);
1857 } 1860 }
1858 1861
1859 if (sk->sk_state == BT_CLOSED) { 1862 if (sk->sk_state == BT_CLOSED) {
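
Editor's note: rfcomm_recv_frame() reads header fields at fixed offsets, which assumes the payload is contiguous; skb_linearize() pulls a fragmented skb into its linear area and can fail on allocation, in which case the frame is now dropped instead of parsed from non-linear data. A standalone model of the linearize-or-drop pattern, with toy types standing in for the skb API:

/* Toy model of the linearize-or-drop pattern; buf and linearize() are
 * stand-ins for skb and skb_linearize(), which may fail on allocation. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf {
	const char *frag[2];	/* payload split across two fragments */
	size_t len[2];
	char *linear;		/* contiguous copy, NULL until linearized */
	size_t total;
};

static int linearize(struct buf *b)
{
	b->total = b->len[0] + b->len[1];
	b->linear = malloc(b->total);
	if (!b->linear)
		return -1;	/* allocation failure: caller must drop */
	memcpy(b->linear, b->frag[0], b->len[0]);
	memcpy(b->linear + b->len[0], b->frag[1], b->len[1]);
	return 0;
}

static void recv_frame(const struct buf *b)
{
	/* Safe to parse at fixed offsets now that the data is contiguous. */
	printf("parsing %zu contiguous bytes: %.*s\n",
	       b->total, (int)b->total, b->linear);
}

int main(void)
{
	struct buf b = { { "hel", "lo" }, { 3, 2 }, NULL, 0 };

	if (!linearize(&b)) {
		recv_frame(&b);
		free(b.linear);
	} else {
		puts("drop frame");	/* mirrors the kfree_skb() branch */
	}
	return 0;
}
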
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 482722bbc7a0..5417f6127323 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,6 +42,7 @@
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45#include <linux/security.h>
45#include <net/sock.h> 46#include <net/sock.h>
46 47
47#include <asm/system.h> 48#include <asm/system.h>
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
264 265
265 pi->sec_level = rfcomm_pi(parent)->sec_level; 266 pi->sec_level = rfcomm_pi(parent)->sec_level;
266 pi->role_switch = rfcomm_pi(parent)->role_switch; 267 pi->role_switch = rfcomm_pi(parent)->role_switch;
268
269 security_sk_clone(parent, sk);
267 } else { 270 } else {
268 pi->dlc->defer_setup = 0; 271 pi->dlc->defer_setup = 0;
269 272
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8270f05e3f1f..a324b009e34b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -41,6 +41,7 @@
41#include <linux/debugfs.h> 41#include <linux/debugfs.h>
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/list.h> 43#include <linux/list.h>
44#include <linux/security.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
403{ 404{
404 BT_DBG("sk %p", sk); 405 BT_DBG("sk %p", sk);
405 406
406 if (parent) 407 if (parent) {
407 sk->sk_type = parent->sk_type; 408 sk->sk_type = parent->sk_type;
409 security_sk_clone(parent, sk);
410 }
408} 411}
409 412
410static struct proto sco_proto = { 413static struct proto sco_proto = {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b88a92..759b63572641 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -182,18 +182,9 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
182 return; 182 return;
183 183
184 hci_send_acl(conn->hcon, skb, 0); 184 hci_send_acl(conn->hcon, skb, 0);
185}
186
187static __u8 seclevel_to_authreq(__u8 level)
188{
189 switch (level) {
190 case BT_SECURITY_HIGH:
191 /* Right now we don't support bonding */
192 return SMP_AUTH_MITM;
193 185
194 default: 186 mod_timer(&conn->security_timer, jiffies +
195 return SMP_AUTH_NONE; 187 msecs_to_jiffies(SMP_TIMEOUT));
196 }
197} 188}
198 189
199static void build_pairing_cmd(struct l2cap_conn *conn, 190static void build_pairing_cmd(struct l2cap_conn *conn,
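
Editor's note: with the seclevel_to_authreq() helper gone, smp_send_cmd() also re-arms the SMP security timer on every outgoing command, so the timeout always measures time since the last SMP activity instead of being armed ad hoc at a few call sites. A plain-C sketch of the mod_timer() semantics being relied on, where each send pushes the deadline forward (not the kernel timer API):

/* Sketch of the re-armed-deadline behavior of mod_timer() above. */
#include <stdio.h>

static long now;		/* fake jiffies counter */
static long expires = -1;	/* pending deadline, -1 = not armed */

static void mod_timer_model(long deadline)
{
	expires = deadline;	/* arms the timer or moves an armed one */
}

static void send_cmd(const char *name, long timeout)
{
	printf("t=%ld: send %s\n", now, name);
	mod_timer_model(now + timeout);
}

int main(void)
{
	const long SMP_TIMEOUT = 30;

	send_cmd("PAIRING_REQ", SMP_TIMEOUT);	  /* expires at t=30 */
	now = 10;
	send_cmd("PAIRING_CONFIRM", SMP_TIMEOUT); /* pushed to t=40 */
	printf("timer now expires at t=%ld\n", expires);
	return 0;
}
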
@@ -205,7 +196,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
205 196
206 dist_keys = 0; 197 dist_keys = 0;
207 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { 198 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
208 dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN; 199 dist_keys = SMP_DIST_ENC_KEY;
209 authreq |= SMP_AUTH_BONDING; 200 authreq |= SMP_AUTH_BONDING;
210 } 201 }
211 202
@@ -229,24 +220,184 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
229 220
230static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) 221static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
231{ 222{
223 struct smp_chan *smp = conn->smp_chan;
224
232 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || 225 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
233 (max_key_size < SMP_MIN_ENC_KEY_SIZE)) 226 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
234 return SMP_ENC_KEY_SIZE; 227 return SMP_ENC_KEY_SIZE;
235 228
236 conn->smp_key_size = max_key_size; 229 smp->smp_key_size = max_key_size;
237 230
238 return 0; 231 return 0;
239} 232}
240 233
234static void confirm_work(struct work_struct *work)
235{
236 struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
237 struct l2cap_conn *conn = smp->conn;
238 struct crypto_blkcipher *tfm;
239 struct smp_cmd_pairing_confirm cp;
240 int ret;
241 u8 res[16], reason;
242
243 BT_DBG("conn %p", conn);
244
245 tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
246 if (IS_ERR(tfm)) {
247 reason = SMP_UNSPECIFIED;
248 goto error;
249 }
250
251 smp->tfm = tfm;
252
253 if (conn->hcon->out)
254 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
255 conn->src, conn->hcon->dst_type, conn->dst,
256 res);
257 else
258 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
259 conn->hcon->dst_type, conn->dst, 0, conn->src,
260 res);
261 if (ret) {
262 reason = SMP_UNSPECIFIED;
263 goto error;
264 }
265
266 swap128(res, cp.confirm_val);
267 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
268
269 return;
270
271error:
272 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
273 smp_chan_destroy(conn);
274}
275
276static void random_work(struct work_struct *work)
277{
278 struct smp_chan *smp = container_of(work, struct smp_chan, random);
279 struct l2cap_conn *conn = smp->conn;
280 struct hci_conn *hcon = conn->hcon;
281 struct crypto_blkcipher *tfm = smp->tfm;
282 u8 reason, confirm[16], res[16], key[16];
283 int ret;
284
285 if (IS_ERR_OR_NULL(tfm)) {
286 reason = SMP_UNSPECIFIED;
287 goto error;
288 }
289
290 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
291
292 if (hcon->out)
293 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
294 conn->src, hcon->dst_type, conn->dst,
295 res);
296 else
297 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
298 hcon->dst_type, conn->dst, 0, conn->src,
299 res);
300 if (ret) {
301 reason = SMP_UNSPECIFIED;
302 goto error;
303 }
304
305 swap128(res, confirm);
306
307 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
308 BT_ERR("Pairing failed (confirmation values mismatch)");
309 reason = SMP_CONFIRM_FAILED;
310 goto error;
311 }
312
313 if (hcon->out) {
314 u8 stk[16], rand[8];
315 __le16 ediv;
316
317 memset(rand, 0, sizeof(rand));
318 ediv = 0;
319
320 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
321 swap128(key, stk);
322
323 memset(stk + smp->smp_key_size, 0,
324 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
325
326 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) {
327 reason = SMP_UNSPECIFIED;
328 goto error;
329 }
330
331 hci_le_start_enc(hcon, ediv, rand, stk);
332 hcon->enc_key_size = smp->smp_key_size;
333 } else {
334 u8 stk[16], r[16], rand[8];
335 __le16 ediv;
336
337 memset(rand, 0, sizeof(rand));
338 ediv = 0;
339
340 swap128(smp->prnd, r);
341 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
342
343 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
344 swap128(key, stk);
345
346 memset(stk + smp->smp_key_size, 0,
347 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
348
349 hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size,
350 ediv, rand, stk);
351 }
352
353 return;
354
355error:
356 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
357 smp_chan_destroy(conn);
358}
359
360static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
361{
362 struct smp_chan *smp;
363
364 smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
365 if (!smp)
366 return NULL;
367
368 INIT_WORK(&smp->confirm, confirm_work);
369 INIT_WORK(&smp->random, random_work);
370
371 smp->conn = conn;
372 conn->smp_chan = smp;
373
374 hci_conn_hold(conn->hcon);
375
376 return smp;
377}
378
379void smp_chan_destroy(struct l2cap_conn *conn)
380{
381 kfree(conn->smp_chan);
382 hci_conn_put(conn->hcon);
383}
384
241static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) 385static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
242{ 386{
243 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 387 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
388 struct smp_chan *smp;
244 u8 key_size; 389 u8 key_size;
390 int ret;
245 391
246 BT_DBG("conn %p", conn); 392 BT_DBG("conn %p", conn);
247 393
248 conn->preq[0] = SMP_CMD_PAIRING_REQ; 394 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
249 memcpy(&conn->preq[1], req, sizeof(*req)); 395 smp = smp_chan_create(conn);
396
397 smp = conn->smp_chan;
398
399 smp->preq[0] = SMP_CMD_PAIRING_REQ;
400 memcpy(&smp->preq[1], req, sizeof(*req));
250 skb_pull(skb, sizeof(*req)); 401 skb_pull(skb, sizeof(*req));
251 402
252 if (req->oob_flag) 403 if (req->oob_flag)
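
Editor's note: confirm_work() and random_work() added above carry the SMP confirm exchange: each side first commits by sending confirm = c1(TK, rand, preq, prsp, addresses), then reveals its random value, and the peer recomputes the confirm to verify it; queueing the work keeps the AES computation out of the L2CAP receive path. A toy model of the commit-then-reveal structure, with a non-cryptographic mix() standing in for c1():

/* Toy model of the confirm exchange. mix() is a stand-in for the real
 * c1()/AES-128 function; only the protocol structure is illustrated. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix(uint32_t tk, uint32_t rnd, uint32_t req, uint32_t rsp)
{
	uint32_t x = tk ^ rnd;		/* NOT cryptographic */

	x = x * 2654435761u + req;
	x = x * 2654435761u + rsp;
	return x;
}

int main(void)
{
	uint32_t tk = 0;		/* Just Works: TK is all zeroes */
	uint32_t preq = 0x01, prsp = 0x02;
	uint32_t rrnd = 0xdeadbeef;	/* peer's secret random value */

	/* Step 1: peer commits by sending its confirm value first. */
	uint32_t pcnf = mix(tk, rrnd, preq, prsp);

	/* Step 2: peer reveals rrnd; we recompute and compare. */
	uint32_t confirm = mix(tk, rrnd, preq, prsp);
	if (confirm == pcnf)
		puts("confirm values match, continue pairing");
	else
		puts("SMP_CONFIRM_FAILED");
	return 0;
}
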
@@ -260,32 +411,33 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
260 return SMP_ENC_KEY_SIZE; 411 return SMP_ENC_KEY_SIZE;
261 412
262 /* Just works */ 413 /* Just works */
263 memset(conn->tk, 0, sizeof(conn->tk)); 414 memset(smp->tk, 0, sizeof(smp->tk));
415
416 ret = smp_rand(smp->prnd);
417 if (ret)
418 return SMP_UNSPECIFIED;
264 419
265 conn->prsp[0] = SMP_CMD_PAIRING_RSP; 420 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
266 memcpy(&conn->prsp[1], &rsp, sizeof(rsp)); 421 memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
267 422
268 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); 423 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
269 424
270 mod_timer(&conn->security_timer, jiffies +
271 msecs_to_jiffies(SMP_TIMEOUT));
272
273 return 0; 425 return 0;
274} 426}
275 427
276static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) 428static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
277{ 429{
278 struct smp_cmd_pairing *req, *rsp = (void *) skb->data; 430 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
279 struct smp_cmd_pairing_confirm cp; 431 struct smp_chan *smp = conn->smp_chan;
280 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; 432 struct hci_dev *hdev = conn->hcon->hdev;
433 u8 key_size;
281 int ret; 434 int ret;
282 u8 res[16], key_size;
283 435
284 BT_DBG("conn %p", conn); 436 BT_DBG("conn %p", conn);
285 437
286 skb_pull(skb, sizeof(*rsp)); 438 skb_pull(skb, sizeof(*rsp));
287 439
288 req = (void *) &conn->preq[1]; 440 req = (void *) &smp->preq[1];
289 441
290 key_size = min(req->max_key_size, rsp->max_key_size); 442 key_size = min(req->max_key_size, rsp->max_key_size);
291 if (check_enc_key_size(conn, key_size)) 443 if (check_enc_key_size(conn, key_size))
@@ -295,222 +447,154 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
295 return SMP_OOB_NOT_AVAIL; 447 return SMP_OOB_NOT_AVAIL;
296 448
297 /* Just works */ 449 /* Just works */
298 memset(conn->tk, 0, sizeof(conn->tk)); 450 memset(smp->tk, 0, sizeof(smp->tk));
299
300 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
301 memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
302
303 ret = smp_rand(conn->prnd);
304 if (ret)
305 return SMP_UNSPECIFIED;
306 451
307 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0, 452 ret = smp_rand(smp->prnd);
308 conn->src, conn->hcon->dst_type, conn->dst, res);
309 if (ret) 453 if (ret)
310 return SMP_UNSPECIFIED; 454 return SMP_UNSPECIFIED;
311 455
312 swap128(res, cp.confirm_val); 456 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
457 memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
313 458
314 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); 459 queue_work(hdev->workqueue, &smp->confirm);
315 460
316 return 0; 461 return 0;
317} 462}
318 463
319static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) 464static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
320{ 465{
321 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; 466 struct smp_chan *smp = conn->smp_chan;
467 struct hci_dev *hdev = conn->hcon->hdev;
322 468
323 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 469 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
324 470
325 memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf)); 471 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
326 skb_pull(skb, sizeof(conn->pcnf)); 472 skb_pull(skb, sizeof(smp->pcnf));
327 473
328 if (conn->hcon->out) { 474 if (conn->hcon->out) {
329 u8 random[16]; 475 u8 random[16];
330 476
331 swap128(conn->prnd, random); 477 swap128(smp->prnd, random);
332 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), 478 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
333 random); 479 random);
334 } else { 480 } else {
335 struct smp_cmd_pairing_confirm cp; 481 queue_work(hdev->workqueue, &smp->confirm);
336 int ret;
337 u8 res[16];
338
339 ret = smp_rand(conn->prnd);
340 if (ret)
341 return SMP_UNSPECIFIED;
342
343 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
344 conn->hcon->dst_type, conn->dst,
345 0, conn->src, res);
346 if (ret)
347 return SMP_CONFIRM_FAILED;
348
349 swap128(res, cp.confirm_val);
350
351 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
352 } 482 }
353 483
354 mod_timer(&conn->security_timer, jiffies +
355 msecs_to_jiffies(SMP_TIMEOUT));
356
357 return 0; 484 return 0;
358} 485}
359 486
360static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) 487static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
361{ 488{
362 struct hci_conn *hcon = conn->hcon; 489 struct smp_chan *smp = conn->smp_chan;
363 struct crypto_blkcipher *tfm = hcon->hdev->tfm; 490 struct hci_dev *hdev = conn->hcon->hdev;
364 int ret;
365 u8 key[16], res[16], random[16], confirm[16];
366 491
367 swap128(skb->data, random); 492 BT_DBG("conn %p", conn);
368 skb_pull(skb, sizeof(random));
369
370 if (conn->hcon->out)
371 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
372 conn->src, conn->hcon->dst_type, conn->dst,
373 res);
374 else
375 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
376 conn->hcon->dst_type, conn->dst, 0, conn->src,
377 res);
378 if (ret)
379 return SMP_UNSPECIFIED;
380
381 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
382
383 swap128(res, confirm);
384
385 if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
386 BT_ERR("Pairing failed (confirmation values mismatch)");
387 return SMP_CONFIRM_FAILED;
388 }
389
390 if (conn->hcon->out) {
391 u8 stk[16], rand[8];
392 __le16 ediv;
393
394 memset(rand, 0, sizeof(rand));
395 ediv = 0;
396 493
397 smp_s1(tfm, conn->tk, random, conn->prnd, key); 494 swap128(skb->data, smp->rrnd);
398 swap128(key, stk); 495 skb_pull(skb, sizeof(smp->rrnd));
399 496
400 memset(stk + conn->smp_key_size, 0, 497 queue_work(hdev->workqueue, &smp->random);
401 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
402 498
403 hci_le_start_enc(hcon, ediv, rand, stk); 499 return 0;
404 hcon->enc_key_size = conn->smp_key_size; 500}
405 } else {
406 u8 stk[16], r[16], rand[8];
407 __le16 ediv;
408 501
409 memset(rand, 0, sizeof(rand)); 502static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
410 ediv = 0; 503{
504 struct link_key *key;
505 struct key_master_id *master;
506 struct hci_conn *hcon = conn->hcon;
411 507
412 swap128(conn->prnd, r); 508 key = hci_find_link_key_type(hcon->hdev, conn->dst,
413 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); 509 HCI_LK_SMP_LTK);
510 if (!key)
511 return 0;
414 512
415 smp_s1(tfm, conn->tk, conn->prnd, random, key); 513 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND,
416 swap128(key, stk); 514 &hcon->pend))
515 return 1;
417 516
418 memset(stk + conn->smp_key_size, 0, 517 master = (void *) key->data;
419 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size); 518 hci_le_start_enc(hcon, master->ediv, master->rand,
519 key->val);
520 hcon->enc_key_size = key->pin_len;
420 521
421 hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size, 522 return 1;
422 ediv, rand, stk);
423 }
424 523
425 return 0;
426} 524}
427
428static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) 525static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
429{ 526{
430 struct smp_cmd_security_req *rp = (void *) skb->data; 527 struct smp_cmd_security_req *rp = (void *) skb->data;
431 struct smp_cmd_pairing cp; 528 struct smp_cmd_pairing cp;
432 struct hci_conn *hcon = conn->hcon; 529 struct hci_conn *hcon = conn->hcon;
530 struct smp_chan *smp;
433 531
434 BT_DBG("conn %p", conn); 532 BT_DBG("conn %p", conn);
435 533
436 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) 534 hcon->pending_sec_level = BT_SECURITY_MEDIUM;
535
536 if (smp_ltk_encrypt(conn))
437 return 0; 537 return 0;
438 538
539 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
540 return 0;
541
542 smp = smp_chan_create(conn);
543
439 skb_pull(skb, sizeof(*rp)); 544 skb_pull(skb, sizeof(*rp));
440 545
441 memset(&cp, 0, sizeof(cp)); 546 memset(&cp, 0, sizeof(cp));
442 build_pairing_cmd(conn, &cp, NULL, rp->auth_req); 547 build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
443 548
444 conn->preq[0] = SMP_CMD_PAIRING_REQ; 549 smp->preq[0] = SMP_CMD_PAIRING_REQ;
445 memcpy(&conn->preq[1], &cp, sizeof(cp)); 550 memcpy(&smp->preq[1], &cp, sizeof(cp));
446 551
447 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 552 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
448 553
449 mod_timer(&conn->security_timer, jiffies +
450 msecs_to_jiffies(SMP_TIMEOUT));
451
452 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
453
454 return 0; 554 return 0;
455} 555}
456 556
457int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) 557int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
458{ 558{
459 struct hci_conn *hcon = conn->hcon; 559 struct hci_conn *hcon = conn->hcon;
460 __u8 authreq; 560 struct smp_chan *smp = conn->smp_chan;
461 561
462 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); 562 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
463 563
464 if (!lmp_host_le_capable(hcon->hdev)) 564 if (!lmp_host_le_capable(hcon->hdev))
465 return 1; 565 return 1;
466 566
467 if (IS_ERR(hcon->hdev->tfm))
468 return 1;
469
470 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
471 return 0;
472
473 if (sec_level == BT_SECURITY_LOW) 567 if (sec_level == BT_SECURITY_LOW)
474 return 1; 568 return 1;
475 569
476 if (hcon->sec_level >= sec_level) 570 if (hcon->sec_level >= sec_level)
477 return 1; 571 return 1;
478 572
479 authreq = seclevel_to_authreq(sec_level); 573 if (hcon->link_mode & HCI_LM_MASTER)
480 574 if (smp_ltk_encrypt(conn))
481 if (hcon->link_mode & HCI_LM_MASTER) { 575 goto done;
482 struct smp_cmd_pairing cp;
483 struct link_key *key;
484 576
485 key = hci_find_link_key_type(hcon->hdev, conn->dst, 577 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
486 HCI_LK_SMP_LTK); 578 return 0;
487 if (key) {
488 struct key_master_id *master = (void *) key->data;
489 579
490 hci_le_start_enc(hcon, master->ediv, master->rand, 580 smp = smp_chan_create(conn);
491 key->val);
492 hcon->enc_key_size = key->pin_len;
493 581
494 goto done; 582 if (hcon->link_mode & HCI_LM_MASTER) {
495 } 583 struct smp_cmd_pairing cp;
496
497 build_pairing_cmd(conn, &cp, NULL, authreq);
498 conn->preq[0] = SMP_CMD_PAIRING_REQ;
499 memcpy(&conn->preq[1], &cp, sizeof(cp));
500 584
501 mod_timer(&conn->security_timer, jiffies + 585 build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
502 msecs_to_jiffies(SMP_TIMEOUT)); 586 smp->preq[0] = SMP_CMD_PAIRING_REQ;
587 memcpy(&smp->preq[1], &cp, sizeof(cp));
503 588
504 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 589 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
505 } else { 590 } else {
506 struct smp_cmd_security_req cp; 591 struct smp_cmd_security_req cp;
507 cp.auth_req = authreq; 592 cp.auth_req = SMP_AUTH_NONE;
508 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); 593 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
509 } 594 }
510 595
511done: 596done:
512 hcon->pending_sec_level = sec_level; 597 hcon->pending_sec_level = sec_level;
513 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
514 598
515 return 0; 599 return 0;
516} 600}
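
Editor's note: smp_ltk_encrypt() is the short-circuit split out above: when a stored long-term key exists for the peer, encryption is started from it directly and no fresh pairing is needed, with the ENCRYPT_PEND bit preventing a double start. A toy lookup-or-pair model (the key store here is a stand-in, not the hci API):

/* Toy model of the LTK short-circuit: reuse a stored key when one
 * exists, otherwise fall back to a fresh pairing request. */
#include <stdio.h>
#include <string.h>

struct ltk_entry {
	const char *peer;
	unsigned char key[16];
};

static struct ltk_entry store[] = {
	{ "AA:BB:CC:DD:EE:FF", { 1, 2, 3 } },
};

static const struct ltk_entry *find_ltk(const char *peer)
{
	for (size_t i = 0; i < sizeof(store) / sizeof(store[0]); i++)
		if (!strcmp(store[i].peer, peer))
			return &store[i];
	return NULL;
}

static void secure_link(const char *peer)
{
	if (find_ltk(peer))
		printf("%s: start encryption with stored LTK\n", peer);
	else
		printf("%s: no LTK, send pairing request\n", peer);
}

int main(void)
{
	secure_link("AA:BB:CC:DD:EE:FF");	/* reuses the key */
	secure_link("11:22:33:44:55:66");	/* pairs from scratch */
	return 0;
}
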
@@ -518,10 +602,11 @@ done:
518static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) 602static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
519{ 603{
520 struct smp_cmd_encrypt_info *rp = (void *) skb->data; 604 struct smp_cmd_encrypt_info *rp = (void *) skb->data;
605 struct smp_chan *smp = conn->smp_chan;
521 606
522 skb_pull(skb, sizeof(*rp)); 607 skb_pull(skb, sizeof(*rp));
523 608
524 memcpy(conn->tk, rp->ltk, sizeof(conn->tk)); 609 memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
525 610
526 return 0; 611 return 0;
527} 612}
@@ -529,11 +614,12 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
529static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) 614static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
530{ 615{
531 struct smp_cmd_master_ident *rp = (void *) skb->data; 616 struct smp_cmd_master_ident *rp = (void *) skb->data;
617 struct smp_chan *smp = conn->smp_chan;
532 618
533 skb_pull(skb, sizeof(*rp)); 619 skb_pull(skb, sizeof(*rp));
534 620
535 hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size, 621 hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
536 rp->ediv, rp->rand, conn->tk); 622 rp->ediv, rp->rand, smp->tk);
537 623
538 smp_distribute_keys(conn, 1); 624 smp_distribute_keys(conn, 1);
539 625
@@ -552,12 +638,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
552 goto done; 638 goto done;
553 } 639 }
554 640
555 if (IS_ERR(conn->hcon->hdev->tfm)) {
556 err = PTR_ERR(conn->hcon->hdev->tfm);
557 reason = SMP_PAIRING_NOTSUPP;
558 goto done;
559 }
560
561 skb_pull(skb, sizeof(code)); 641 skb_pull(skb, sizeof(code));
562 642
563 switch (code) { 643 switch (code) {
@@ -621,20 +701,21 @@ done:
621int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) 701int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
622{ 702{
623 struct smp_cmd_pairing *req, *rsp; 703 struct smp_cmd_pairing *req, *rsp;
704 struct smp_chan *smp = conn->smp_chan;
624 __u8 *keydist; 705 __u8 *keydist;
625 706
626 BT_DBG("conn %p force %d", conn, force); 707 BT_DBG("conn %p force %d", conn, force);
627 708
628 if (IS_ERR(conn->hcon->hdev->tfm)) 709 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
629 return PTR_ERR(conn->hcon->hdev->tfm); 710 return 0;
630 711
631 rsp = (void *) &conn->prsp[1]; 712 rsp = (void *) &smp->prsp[1];
632 713
633 /* The responder sends its keys first */ 714 /* The responder sends its keys first */
634 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07)) 715 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
635 return 0; 716 return 0;
636 717
637 req = (void *) &conn->preq[1]; 718 req = (void *) &smp->preq[1];
638 719
639 if (conn->hcon->out) { 720 if (conn->hcon->out) {
640 keydist = &rsp->init_key_dist; 721 keydist = &rsp->init_key_dist;
@@ -658,7 +739,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
658 739
659 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); 740 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
660 741
661 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size, 742 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
662 ediv, ident.rand, enc.ltk); 743 ediv, ident.rand, enc.ltk);
663 744
664 ident.ediv = cpu_to_le16(ediv); 745 ident.ediv = cpu_to_le16(ediv);
@@ -698,5 +779,11 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
698 *keydist &= ~SMP_DIST_SIGN; 779 *keydist &= ~SMP_DIST_SIGN;
699 } 780 }
700 781
782 if (conn->hcon->out || force) {
783 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
784 del_timer(&conn->security_timer);
785 smp_chan_destroy(conn);
786 }
787
701 return 0; 788 return 0;
702} 789}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ff3ed6086ce1..feb77ea7b58e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -301,7 +301,7 @@ static const struct net_device_ops br_netdev_ops = {
301 .ndo_start_xmit = br_dev_xmit, 301 .ndo_start_xmit = br_dev_xmit,
302 .ndo_get_stats64 = br_get_stats64, 302 .ndo_get_stats64 = br_get_stats64,
303 .ndo_set_mac_address = br_set_mac_address, 303 .ndo_set_mac_address = br_set_mac_address,
304 .ndo_set_multicast_list = br_dev_set_multicast_list, 304 .ndo_set_rx_mode = br_dev_set_multicast_list,
305 .ndo_change_mtu = br_change_mtu, 305 .ndo_change_mtu = br_change_mtu,
306 .ndo_do_ioctl = br_dev_ioctl, 306 .ndo_do_ioctl = br_dev_ioctl,
307#ifdef CONFIG_NET_POLL_CONTROLLER 307#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -358,6 +358,8 @@ void br_dev_setup(struct net_device *dev)
358 memcpy(br->group_addr, br_group_address, ETH_ALEN); 358 memcpy(br->group_addr, br_group_address, ETH_ALEN);
359 359
360 br->stp_enabled = BR_NO_STP; 360 br->stp_enabled = BR_NO_STP;
361 br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
362
361 br->designated_root = br->bridge_id; 363 br->designated_root = br->bridge_id;
362 br->bridge_max_age = br->max_age = 20 * HZ; 364 br->bridge_max_age = br->max_age = 20 * HZ;
363 br->bridge_hello_time = br->hello_time = 2 * HZ; 365 br->bridge_hello_time = br->hello_time = 2 * HZ;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 68def3b7fb49..c8e7861b88b0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -558,19 +558,28 @@ skip:
558 558
559/* Create new static fdb entry */ 559/* Create new static fdb entry */
560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
561 __u16 state) 561 __u16 state, __u16 flags)
562{ 562{
563 struct net_bridge *br = source->br; 563 struct net_bridge *br = source->br;
564 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 564 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
565 struct net_bridge_fdb_entry *fdb; 565 struct net_bridge_fdb_entry *fdb;
566 566
567 fdb = fdb_find(head, addr); 567 fdb = fdb_find(head, addr);
568 if (fdb) 568 if (fdb == NULL) {
569 return -EEXIST; 569 if (!(flags & NLM_F_CREATE))
570 return -ENOENT;
570 571
571 fdb = fdb_create(head, source, addr); 572 fdb = fdb_create(head, source, addr);
572 if (!fdb) 573 if (!fdb)
573 return -ENOMEM; 574 return -ENOMEM;
575 } else {
576 if (flags & NLM_F_EXCL)
577 return -EEXIST;
578
579 if (flags & NLM_F_REPLACE)
580 fdb->updated = fdb->used = jiffies;
581 fdb->is_local = fdb->is_static = 0;
582 }
574 583
575 if (state & NUD_PERMANENT) 584 if (state & NUD_PERMANENT)
576 fdb->is_local = fdb->is_static = 1; 585 fdb->is_local = fdb->is_static = 1;
@@ -626,7 +635,7 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
626 } 635 }
627 636
628 spin_lock_bh(&p->br->hash_lock); 637 spin_lock_bh(&p->br->hash_lock);
629 err = fdb_add_entry(p, addr, ndm->ndm_state); 638 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
630 spin_unlock_bh(&p->br->hash_lock); 639 spin_unlock_bh(&p->br->hash_lock);
631 640
632 return err; 641 return err;
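
Editor's note: fdb_add_entry() now implements the standard netlink creation semantics: NLM_F_CREATE is required to add a missing entry, NLM_F_EXCL turns an existing entry into an error, and NLM_F_REPLACE refreshes its timestamps. A standalone model of that flag logic over a toy table (error values mirror -ENOENT, -ENOMEM and -EEXIST):

/* Standalone model of the NLM_F_* handling added to fdb_add_entry();
 * the table is a toy, the flag logic mirrors the patch. */
#include <stdio.h>
#include <string.h>

#define NLM_F_REPLACE 0x100
#define NLM_F_EXCL    0x200
#define NLM_F_CREATE  0x400

struct entry { char addr[18]; long updated, used; };

static struct entry table[8];
static int nentries;
static long jiffies = 1000;

static struct entry *find(const char *addr)
{
	for (int i = 0; i < nentries; i++)
		if (!strcmp(table[i].addr, addr))
			return &table[i];
	return NULL;
}

static int add_entry(const char *addr, unsigned int flags)
{
	struct entry *e = find(addr);

	if (!e) {
		if (!(flags & NLM_F_CREATE))
			return -2;	/* -ENOENT: must ask to create */
		if (nentries == 8)
			return -12;	/* -ENOMEM */
		e = &table[nentries++];
		snprintf(e->addr, sizeof(e->addr), "%s", addr);
	} else {
		if (flags & NLM_F_EXCL)
			return -17;	/* -EEXIST: caller wanted a new one */
		if (flags & NLM_F_REPLACE)
			e->updated = e->used = jiffies;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", add_entry("a", 0));			/* -2  */
	printf("%d\n", add_entry("a", NLM_F_CREATE));		/*  0  */
	printf("%d\n", add_entry("a", NLM_F_CREATE | NLM_F_EXCL)); /* -17 */
	printf("%d\n", add_entry("a", NLM_F_REPLACE));		/*  0  */
	return 0;
}
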
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e73815456adf..f603e5b0b930 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
16#include <linux/netpoll.h> 17#include <linux/netpoll.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
18#include <linux/if_arp.h> 19#include <linux/if_arp.h>
@@ -33,20 +34,18 @@
33 */ 34 */
34static int port_cost(struct net_device *dev) 35static int port_cost(struct net_device *dev)
35{ 36{
36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { 37 struct ethtool_cmd ecmd;
37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; 38
38 39 if (!__ethtool_get_settings(dev, &ecmd)) {
39 if (!dev_ethtool_get_settings(dev, &ecmd)) { 40 switch (ethtool_cmd_speed(&ecmd)) {
40 switch (ethtool_cmd_speed(&ecmd)) { 41 case SPEED_10000:
41 case SPEED_10000: 42 return 2;
42 return 2; 43 case SPEED_1000:
43 case SPEED_1000: 44 return 4;
44 return 4; 45 case SPEED_100:
45 case SPEED_100: 46 return 19;
46 return 19; 47 case SPEED_10:
47 case SPEED_10: 48 return 100;
48 return 100;
49 }
50 } 49 }
51 } 50 }
52 51
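
Editor's note: port_cost() keeps the classic 802.1D-style mapping from link speed to STP path cost, with __ethtool_get_settings() now doing the ops-pointer checking the caller used to open-code. A standalone version of the table (the fallback for unknown speeds is simplified here; the kernel falls through to further heuristics):

/* Standalone version of the speed-to-cost mapping used by port_cost(). */
#include <stdio.h>

static int port_cost(int speed_mbps)
{
	switch (speed_mbps) {
	case 10000: return 2;
	case 1000:  return 4;
	case 100:   return 19;
	case 10:    return 100;
	default:    return 100;	/* simplified: unknown speed, be pessimistic */
	}
}

int main(void)
{
	int speeds[] = { 10, 100, 1000, 10000 };

	for (int i = 0; i < 4; i++)
		printf("%5d Mb/s -> cost %d\n", speeds[i], port_cost(speeds[i]));
	return 0;
}
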
@@ -161,9 +160,10 @@ static void del_nbp(struct net_bridge_port *p)
161 call_rcu(&p->rcu, destroy_nbp_rcu); 160 call_rcu(&p->rcu, destroy_nbp_rcu);
162} 161}
163 162
164/* called with RTNL */ 163/* Delete bridge device */
165static void del_br(struct net_bridge *br, struct list_head *head) 164void br_dev_delete(struct net_device *dev, struct list_head *head)
166{ 165{
166 struct net_bridge *br = netdev_priv(dev);
167 struct net_bridge_port *p, *n; 167 struct net_bridge_port *p, *n;
168 168
169 list_for_each_entry_safe(p, n, &br->port_list, list) { 169 list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +268,7 @@ int br_del_bridge(struct net *net, const char *name)
268 } 268 }
269 269
270 else 270 else
271 del_br(netdev_priv(dev), NULL); 271 br_dev_delete(dev, NULL);
272 272
273 rtnl_unlock(); 273 rtnl_unlock();
274 return ret; 274 return ret;
@@ -324,7 +324,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
324 324
325 /* Don't allow bridging non-ethernet like devices */ 325 /* Don't allow bridging non-ethernet like devices */
326 if ((dev->flags & IFF_LOOPBACK) || 326 if ((dev->flags & IFF_LOOPBACK) ||
327 dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN) 327 dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
328 !is_valid_ether_addr(dev->dev_addr))
328 return -EINVAL; 329 return -EINVAL;
329 330
330 /* No bridging of bridges */ 331 /* No bridging of bridges */
@@ -352,10 +353,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
352 err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), 353 err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
353 SYSFS_BRIDGE_PORT_ATTR); 354 SYSFS_BRIDGE_PORT_ATTR);
354 if (err) 355 if (err)
355 goto err0;
356
357 err = br_fdb_insert(br, p, dev->dev_addr);
358 if (err)
359 goto err1; 356 goto err1;
360 357
361 err = br_sysfs_addif(p); 358 err = br_sysfs_addif(p);
@@ -396,6 +393,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
396 393
397 dev_set_mtu(br->dev, br_min_mtu(br)); 394 dev_set_mtu(br->dev, br_min_mtu(br));
398 395
396 if (br_fdb_insert(br, p, dev->dev_addr))
397 netdev_err(dev, "failed insert local address bridge forwarding table\n");
398
399 kobject_uevent(&p->kobj, KOBJ_ADD); 399 kobject_uevent(&p->kobj, KOBJ_ADD);
400 400
401 return 0; 401 return 0;
@@ -405,11 +405,9 @@ err4:
405err3: 405err3:
406 sysfs_remove_link(br->ifobj, p->dev->name); 406 sysfs_remove_link(br->ifobj, p->dev->name);
407err2: 407err2:
408 br_fdb_delete_by_port(br, p, 1);
409err1:
410 kobject_put(&p->kobj); 408 kobject_put(&p->kobj);
411 p = NULL; /* kobject_put frees */ 409 p = NULL; /* kobject_put frees */
412err0: 410err1:
413 dev_set_promiscuity(dev, -1); 411 dev_set_promiscuity(dev, -1);
414put_back: 412put_back:
415 dev_put(dev); 413 dev_put(dev);
@@ -449,7 +447,7 @@ void __net_exit br_net_exit(struct net *net)
449 rtnl_lock(); 447 rtnl_lock();
450 for_each_netdev(net, dev) 448 for_each_netdev(net, dev)
451 if (dev->priv_flags & IFF_EBRIDGE) 449 if (dev->priv_flags & IFF_EBRIDGE)
452 del_br(netdev_priv(dev), &list); 450 br_dev_delete(dev, &list);
453 451
454 unregister_netdevice_many(&list); 452 unregister_netdevice_many(&list);
455 rtnl_unlock(); 453 rtnl_unlock();
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f06ee39c73fd..6f9f8c014725 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -162,14 +162,37 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
162 p = br_port_get_rcu(skb->dev); 162 p = br_port_get_rcu(skb->dev);
163 163
164 if (unlikely(is_link_local(dest))) { 164 if (unlikely(is_link_local(dest))) {
165 /* Pause frames shouldn't be passed up by driver anyway */ 165 /*
166 if (skb->protocol == htons(ETH_P_PAUSE)) 166 * See IEEE 802.1D Table 7-10 Reserved addresses
167 *
168 * Assignment Value
169 * Bridge Group Address 01-80-C2-00-00-00
170 * (MAC Control) 802.3 01-80-C2-00-00-01
171 * (Link Aggregation) 802.3 01-80-C2-00-00-02
172 * 802.1X PAE address 01-80-C2-00-00-03
173 *
174 * 802.1AB LLDP 01-80-C2-00-00-0E
175 *
176 * Others reserved for future standardization
177 */
178 switch (dest[5]) {
179 case 0x00: /* Bridge Group Address */
180 /* If STP is turned off,
181 then must forward to keep loop detection */
182 if (p->br->stp_enabled == BR_NO_STP)
183 goto forward;
184 break;
185
186 case 0x01: /* IEEE MAC (Pause) */
167 goto drop; 187 goto drop;
168 188
169 /* If STP is turned off, then forward */ 189 default:
170 if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) 190 /* Allow selective forwarding for most other protocols */
171 goto forward; 191 if (p->br->group_fwd_mask & (1u << dest[5]))
192 goto forward;
193 }
172 194
195 /* Deliver packet to local host only */
173 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, 196 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
174 NULL, br_handle_local_finish)) { 197 NULL, br_handle_local_finish)) {
175 return RX_HANDLER_CONSUMED; /* consumed by filter */ 198 return RX_HANDLER_CONSUMED; /* consumed by filter */
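
Editor's note: link-local frames in the reserved 01-80-C2-00-00-XX block are now dispatched on the last address octet: the bridge group address still follows STP state, pause frames are always dropped, and any other reserved address is forwarded only if the administrator set the matching bit in group_fwd_mask. A standalone model of the decision:

/* Standalone model of the link-local dispatch added above: given the
 * last octet XX of 01-80-C2-00-00-XX, decide what the bridge does. */
#include <stdio.h>

enum action { FORWARD, DROP, LOCAL_ONLY };

static enum action classify(unsigned char last, int stp_enabled,
			     unsigned short group_fwd_mask)
{
	switch (last) {
	case 0x00:	/* Bridge Group Address (STP) */
		if (!stp_enabled)
			return FORWARD;	/* keep loop detection working */
		break;
	case 0x01:	/* 802.3 MAC Control (pause) */
		return DROP;
	default:
		if (group_fwd_mask & (1u << last))
			return FORWARD;
	}
	return LOCAL_ONLY;	/* deliver to the local host only */
}

int main(void)
{
	/* 802.1X PAE (0x03) with bit 3 set in the mask: forwarded. */
	printf("%d\n", classify(0x03, 1, 1u << 0x03));	/* 0 = FORWARD */
	/* Pause frames never leave the port. */
	printf("%d\n", classify(0x01, 1, 0xffff));	/* 1 = DROP */
	/* STP group address with STP off: forwarded for loop detection. */
	printf("%d\n", classify(0x00, 0, 0));		/* 0 = FORWARD */
	return 0;
}
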
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5b1ed1ba9aa7..e5f9ece3c9a0 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
210 .priv_size = sizeof(struct net_bridge), 210 .priv_size = sizeof(struct net_bridge),
211 .setup = br_dev_setup, 211 .setup = br_dev_setup,
212 .validate = br_validate, 212 .validate = br_validate,
213 .dellink = br_dev_delete,
213}; 214};
214 215
215int __init br_netlink_init(void) 216int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 78cc364997d9..d7d6fb05411f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -29,6 +29,11 @@
29 29
30#define BR_VERSION "2.3" 30#define BR_VERSION "2.3"
31 31
32/* Control of forwarding link local multicast */
33#define BR_GROUPFWD_DEFAULT 0
34/* Don't allow forwarding control protocols like STP and LLDP */
35#define BR_GROUPFWD_RESTRICTED 0x4007u
36
32/* Path to usermode spanning tree program */ 37/* Path to usermode spanning tree program */
33#define BR_STP_PROG "/sbin/bridge-stp" 38#define BR_STP_PROG "/sbin/bridge-stp"
34 39
@@ -193,6 +198,8 @@ struct net_bridge
193 unsigned long flags; 198 unsigned long flags;
194#define BR_SET_MAC_ADDR 0x00000001 199#define BR_SET_MAC_ADDR 0x00000001
195 200
201 u16 group_fwd_mask;
202
196 /* STP */ 203 /* STP */
197 bridge_id designated_root; 204 bridge_id designated_root;
198 bridge_id bridge_id; 205 bridge_id bridge_id;
@@ -294,6 +301,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
294 301
295/* br_device.c */ 302/* br_device.c */
296extern void br_dev_setup(struct net_device *dev); 303extern void br_dev_setup(struct net_device *dev);
304extern void br_dev_delete(struct net_device *dev, struct list_head *list);
297extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, 305extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
298 struct net_device *dev); 306 struct net_device *dev);
299#ifdef CONFIG_NET_POLL_CONTROLLER 307#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 68b893ea8c3a..c236c0e43984 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -149,6 +149,39 @@ static ssize_t store_stp_state(struct device *d,
149static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state, 149static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
150 store_stp_state); 150 store_stp_state);
151 151
152static ssize_t show_group_fwd_mask(struct device *d,
153 struct device_attribute *attr, char *buf)
154{
155 struct net_bridge *br = to_bridge(d);
156 return sprintf(buf, "%#x\n", br->group_fwd_mask);
157}
158
159
160static ssize_t store_group_fwd_mask(struct device *d,
161 struct device_attribute *attr, const char *buf,
162 size_t len)
163{
164 struct net_bridge *br = to_bridge(d);
165 char *endp;
166 unsigned long val;
167
168 if (!capable(CAP_NET_ADMIN))
169 return -EPERM;
170
171 val = simple_strtoul(buf, &endp, 0);
172 if (endp == buf)
173 return -EINVAL;
174
175 if (val & BR_GROUPFWD_RESTRICTED)
176 return -EINVAL;
177
178 br->group_fwd_mask = val;
179
180 return len;
181}
182static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
183 store_group_fwd_mask);
184
152static ssize_t show_priority(struct device *d, struct device_attribute *attr, 185static ssize_t show_priority(struct device *d, struct device_attribute *attr,
153 char *buf) 186 char *buf)
154{ 187{
@@ -652,6 +685,7 @@ static struct attribute *bridge_attrs[] = {
652 &dev_attr_max_age.attr, 685 &dev_attr_max_age.attr,
653 &dev_attr_ageing_time.attr, 686 &dev_attr_ageing_time.attr,
654 &dev_attr_stp_state.attr, 687 &dev_attr_stp_state.attr,
688 &dev_attr_group_fwd_mask.attr,
655 &dev_attr_priority.attr, 689 &dev_attr_priority.attr,
656 &dev_attr_bridge_id.attr, 690 &dev_attr_bridge_id.attr,
657 &dev_attr_root_id.attr, 691 &dev_attr_root_id.attr,
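
Editor's note: store_group_fwd_mask() follows the usual sysfs store shape: capability check, strtoul parse with a did-anything-parse test, then a policy check rejecting masks that touch the restricted bits. A userspace model of the same validation:

/* Userspace model of the validation in store_group_fwd_mask(): parse,
 * reject empty input, reject masks overlapping the restricted bits. */
#include <stdio.h>
#include <stdlib.h>

#define BR_GROUPFWD_RESTRICTED 0x4007u

static int parse_mask(const char *buf, unsigned long *out)
{
	char *endp;
	unsigned long val = strtoul(buf, &endp, 0);

	if (endp == buf)
		return -22;	/* -EINVAL: nothing parsed */
	if (val & BR_GROUPFWD_RESTRICTED)
		return -22;	/* -EINVAL: touches protected bits */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long mask;

	printf("%d\n", parse_mask("0x8", &mask));	/* 0: 802.1X bit ok */
	printf("%d\n", parse_mask("0x4000", &mask));	/* -22: LLDP bit */
	printf("%d\n", parse_mask("bogus", &mask));	/* -22: no digits */
	return 0;
}
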
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1bcaf36ad612..40d8258bf74f 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void)
87 if (ret < 0) 87 if (ret < 0)
88 return ret; 88 return ret;
89 /* see br_input.c */ 89 /* see br_input.c */
90 rcu_assign_pointer(br_should_route_hook, 90 RCU_INIT_POINTER(br_should_route_hook,
91 (br_should_route_hook_t *)ebt_broute); 91 (br_should_route_hook_t *)ebt_broute);
92 return 0; 92 return 0;
93} 93}
94 94
95static void __exit ebtable_broute_fini(void) 95static void __exit ebtable_broute_fini(void)
96{ 96{
97 rcu_assign_pointer(br_should_route_hook, NULL); 97 RCU_INIT_POINTER(br_should_route_hook, NULL);
98 synchronize_net(); 98 synchronize_net();
99 unregister_pernet_subsys(&broute_net_ops); 99 unregister_pernet_subsys(&broute_net_ops);
100} 100}
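
Editor's note: RCU_INIT_POINTER() is a plain store, while rcu_assign_pointer() is a release-ordered store that publishes prior initialization; the relaxed form is fine when assigning NULL, or, as with the function pointer here, when there is no freshly initialized data to order. A C11-atomics model of the distinction (an approximation, not the kernel's implementation):

/* C11-atomics approximation: rcu_assign_pointer() ~ release store,
 * RCU_INIT_POINTER() ~ plain/relaxed store. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) hook;
static int payload;

static void assign_pointer(int *p)	/* ~rcu_assign_pointer() */
{
	atomic_store_explicit(&hook, p, memory_order_release);
}

static void init_pointer(int *p)	/* ~RCU_INIT_POINTER() */
{
	atomic_store_explicit(&hook, p, memory_order_relaxed);
}

int main(void)
{
	payload = 42;			/* must be visible before publication */
	assign_pointer(&payload);	/* so use the release store */

	init_pointer(NULL);		/* storing NULL needs no ordering */
	printf("hook=%p\n", (void *)atomic_load(&hook));
	return 0;
}
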
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7f9ac0742d19..47fc8f3a47cf 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -212,8 +212,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
212 enum cfcnfg_phy_preference pref; 212 enum cfcnfg_phy_preference pref;
213 enum cfcnfg_phy_type phy_type; 213 enum cfcnfg_phy_type phy_type;
214 struct cfcnfg *cfg; 214 struct cfcnfg *cfg;
215 struct caif_device_entry_list *caifdevs = 215 struct caif_device_entry_list *caifdevs;
216 caif_device_list(dev_net(dev));
217 216
218 if (dev->type != ARPHRD_CAIF) 217 if (dev->type != ARPHRD_CAIF)
219 return 0; 218 return 0;
@@ -222,6 +221,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
222 if (cfg == NULL) 221 if (cfg == NULL)
223 return 0; 222 return 0;
224 223
224 caifdevs = caif_device_list(dev_net(dev));
225
225 switch (what) { 226 switch (what) {
226 case NETDEV_REGISTER: 227 case NETDEV_REGISTER:
227 caifd = caif_device_alloc(dev); 228 caifd = caif_device_alloc(dev);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 52fe33bee029..00523ecc4ced 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -78,10 +78,8 @@ struct cfcnfg *cfcnfg_create(void)
78 78
79 /* Initiate this layer */ 79 /* Initiate this layer */
80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); 80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
81 if (!this) { 81 if (!this)
82 pr_warn("Out of memory\n");
83 return NULL; 82 return NULL;
84 }
85 this->mux = cfmuxl_create(); 83 this->mux = cfmuxl_create();
86 if (!this->mux) 84 if (!this->mux)
87 goto out_of_mem; 85 goto out_of_mem;
@@ -108,8 +106,6 @@ struct cfcnfg *cfcnfg_create(void)
108 106
109 return this; 107 return this;
110out_of_mem: 108out_of_mem:
111 pr_warn("Out of memory\n");
112
113 synchronize_rcu(); 109 synchronize_rcu();
114 110
115 kfree(this->mux); 111 kfree(this->mux);
@@ -448,10 +444,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
448 "- unknown channel type\n"); 444 "- unknown channel type\n");
449 goto unlock; 445 goto unlock;
450 } 446 }
451 if (!servicel) { 447 if (!servicel)
452 pr_warn("Out of memory\n");
453 goto unlock; 448 goto unlock;
454 }
455 layer_set_dn(servicel, cnfg->mux); 449 layer_set_dn(servicel, cnfg->mux);
456 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); 450 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
457 layer_set_up(servicel, adapt_layer); 451 layer_set_up(servicel, adapt_layer);
@@ -473,7 +467,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
473{ 467{
474 struct cflayer *frml; 468 struct cflayer *frml;
475 struct cflayer *phy_driver = NULL; 469 struct cflayer *phy_driver = NULL;
476 struct cfcnfg_phyinfo *phyinfo; 470 struct cfcnfg_phyinfo *phyinfo = NULL;
477 int i; 471 int i;
478 u8 phyid; 472 u8 phyid;
479 473
@@ -488,25 +482,25 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
488 goto got_phyid; 482 goto got_phyid;
489 } 483 }
490 pr_warn("Too many CAIF Link Layers (max 6)\n"); 484 pr_warn("Too many CAIF Link Layers (max 6)\n");
491 goto out; 485 goto out_err;
492 486
493got_phyid: 487got_phyid:
494 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); 488 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
489 if (!phyinfo)
490 goto out_err;
495 491
496 switch (phy_type) { 492 switch (phy_type) {
497 case CFPHYTYPE_FRAG: 493 case CFPHYTYPE_FRAG:
498 phy_driver = 494 phy_driver =
499 cfserl_create(CFPHYTYPE_FRAG, phyid, stx); 495 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
500 if (!phy_driver) { 496 if (!phy_driver)
501 pr_warn("Out of memory\n"); 497 goto out_err;
502 goto out;
503 }
504 break; 498 break;
505 case CFPHYTYPE_CAIF: 499 case CFPHYTYPE_CAIF:
506 phy_driver = NULL; 500 phy_driver = NULL;
507 break; 501 break;
508 default: 502 default:
509 goto out; 503 goto out_err;
510 } 504 }
511 phy_layer->id = phyid; 505 phy_layer->id = phyid;
512 phyinfo->pref = pref; 506 phyinfo->pref = pref;
@@ -520,11 +514,8 @@ got_phyid:
520 514
521 frml = cffrml_create(phyid, fcs); 515 frml = cffrml_create(phyid, fcs);
522 516
523 if (!frml) { 517 if (!frml)
524 pr_warn("Out of memory\n"); 518 goto out_err;
525 kfree(phyinfo);
526 goto out;
527 }
528 phyinfo->frm_layer = frml; 519 phyinfo->frm_layer = frml;
529 layer_set_up(frml, cnfg->mux); 520 layer_set_up(frml, cnfg->mux);
530 521
@@ -540,7 +531,12 @@ got_phyid:
540 } 531 }
541 532
542 list_add_rcu(&phyinfo->node, &cnfg->phys); 533 list_add_rcu(&phyinfo->node, &cnfg->phys);
543out: 534 mutex_unlock(&cnfg->lock);
535 return;
536
537out_err:
538 kfree(phy_driver);
539 kfree(phyinfo);
544 mutex_unlock(&cnfg->lock); 540 mutex_unlock(&cnfg->lock);
545} 541}
546EXPORT_SYMBOL(cfcnfg_add_phy_layer); 542EXPORT_SYMBOL(cfcnfg_add_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index e22671bed669..5cf52225692e 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -35,15 +35,12 @@ struct cflayer *cfctrl_create(void)
35{ 35{
36 struct dev_info dev_info; 36 struct dev_info dev_info;
37 struct cfctrl *this = 37 struct cfctrl *this =
38 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 38 kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
39 if (!this) { 39 if (!this)
40 pr_warn("Out of memory\n");
41 return NULL; 40 return NULL;
42 }
43 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 41 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
44 memset(&dev_info, 0, sizeof(dev_info)); 42 memset(&dev_info, 0, sizeof(dev_info));
45 dev_info.id = 0xff; 43 dev_info.id = 0xff;
46 memset(this, 0, sizeof(*this));
47 cfsrvl_init(&this->serv, 0, &dev_info, false); 44 cfsrvl_init(&this->serv, 0, &dev_info, false);
48 atomic_set(&this->req_seq_no, 1); 45 atomic_set(&this->req_seq_no, 1);
49 atomic_set(&this->rsp_seq_no, 1); 46 atomic_set(&this->rsp_seq_no, 1);
@@ -180,10 +177,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
180 struct cfctrl *cfctrl = container_obj(layer); 177 struct cfctrl *cfctrl = container_obj(layer);
181 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
182 struct cflayer *dn = cfctrl->serv.layer.dn; 179 struct cflayer *dn = cfctrl->serv.layer.dn;
183 if (!pkt) { 180 if (!pkt)
184 pr_warn("Out of memory\n");
185 return; 181 return;
186 }
187 if (!dn) { 182 if (!dn) {
188 pr_debug("not able to send enum request\n"); 183 pr_debug("not able to send enum request\n");
189 return; 184 return;
@@ -224,10 +219,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
224 } 219 }
225 220
226 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 221 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
227 if (!pkt) { 222 if (!pkt)
228 pr_warn("Out of memory\n");
229 return -ENOMEM; 223 return -ENOMEM;
230 }
231 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 224 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
232 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype); 225 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
233 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid); 226 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
@@ -275,10 +268,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
275 return -EINVAL; 268 return -EINVAL;
276 } 269 }
277 req = kzalloc(sizeof(*req), GFP_KERNEL); 270 req = kzalloc(sizeof(*req), GFP_KERNEL);
278 if (!req) { 271 if (!req)
279 pr_warn("Out of memory\n");
280 return -ENOMEM; 272 return -ENOMEM;
281 }
282 req->client_layer = user_layer; 273 req->client_layer = user_layer;
283 req->cmd = CFCTRL_CMD_LINK_SETUP; 274 req->cmd = CFCTRL_CMD_LINK_SETUP;
284 req->param = *param; 275 req->param = *param;
@@ -312,10 +303,8 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
312 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 303 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
313 struct cflayer *dn = cfctrl->serv.layer.dn; 304 struct cflayer *dn = cfctrl->serv.layer.dn;
314 305
315 if (!pkt) { 306 if (!pkt)
316 pr_warn("Out of memory\n");
317 return -ENOMEM; 307 return -ENOMEM;
318 }
319 308
320 if (!dn) { 309 if (!dn) {
321 pr_debug("not able to send link-down request\n"); 310 pr_debug("not able to send link-down request\n");
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 11a2af4c162a..65d6ef3cf9aa 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -19,13 +19,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
19 19
20struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) 20struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
21{ 21{
22 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 22 struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!dbg) { 23 if (!dbg)
24 pr_warn("Out of memory\n");
25 return NULL; 24 return NULL;
26 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); 25 caif_assert(offsetof(struct cfsrvl, layer) == 0);
28 memset(dbg, 0, sizeof(struct cfsrvl));
29 cfsrvl_init(dbg, channel_id, dev_info, false); 26 cfsrvl_init(dbg, channel_id, dev_info, false);
30 dbg->layer.receive = cfdbgl_receive; 27 dbg->layer.receive = cfdbgl_receive;
31 dbg->layer.transmit = cfdbgl_transmit; 28 dbg->layer.transmit = cfdbgl_transmit;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0382dec84fdc..0f5ff27aa41c 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -26,13 +26,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26 26
27struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) 27struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
28{ 28{
29 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *dgm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
30 if (!dgm) { 30 if (!dgm)
31 pr_warn("Out of memory\n");
32 return NULL; 31 return NULL;
33 }
34 caif_assert(offsetof(struct cfsrvl, layer) == 0); 32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
35 memset(dgm, 0, sizeof(struct cfsrvl));
36 cfsrvl_init(dgm, channel_id, dev_info, true); 33 cfsrvl_init(dgm, channel_id, dev_info, true);
37 dgm->layer.receive = cfdgml_receive; 34 dgm->layer.receive = cfdgml_receive;
38 dgm->layer.transmit = cfdgml_transmit; 35 dgm->layer.transmit = cfdgml_transmit;
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 04204b202718..f39921171d0d 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -34,11 +34,9 @@ static u32 cffrml_rcv_error;
34static u32 cffrml_rcv_checsum_error; 34static u32 cffrml_rcv_checsum_error;
35struct cflayer *cffrml_create(u16 phyid, bool use_fcs) 35struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
36{ 36{
37 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); 37 struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC);
38 if (!this) { 38 if (!this)
39 pr_warn("Out of memory\n");
40 return NULL; 39 return NULL;
41 }
42 this->pcpu_refcnt = alloc_percpu(int); 40 this->pcpu_refcnt = alloc_percpu(int);
43 if (this->pcpu_refcnt == NULL) { 41 if (this->pcpu_refcnt == NULL) {
44 kfree(this); 42 kfree(this);
@@ -47,7 +45,6 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
47 45
48 caif_assert(offsetof(struct cffrml, layer) == 0); 46 caif_assert(offsetof(struct cffrml, layer) == 0);
49 47
50 memset(this, 0, sizeof(struct cflayer));
51 this->layer.receive = cffrml_receive; 48 this->layer.receive = cffrml_receive;
52 this->layer.transmit = cffrml_transmit; 49 this->layer.transmit = cffrml_transmit;
53 this->layer.ctrlcmd = cffrml_ctrlcmd; 50 this->layer.ctrlcmd = cffrml_ctrlcmd;
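
Editor's note: across the caif files the pattern is kzalloc() replacing kmalloc()-plus-memset() and the per-call "Out of memory" warnings going away (the allocator already complains). In cffrml_create() the conversion also fixes a latent bug visible above: the old memset cleared only sizeof(struct cflayer), leaving the tail of the larger struct cffrml uninitialized, whereas kzalloc zeroes the whole allocation. A minimal illustration of that size mismatch, using calloc as the userspace analogue:

/* Minimal illustration of the bug fixed above: zeroing sizeof(inner)
 * instead of sizeof(outer) leaves the outer struct's tail dirty;
 * calloc (like kzalloc) zeroes the full allocation. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct cflayer_like { void *receive, *transmit, *ctrlcmd; };
struct cffrml_like  { struct cflayer_like layer; int dofcs; void *refcnt; };

int main(void)
{
	/* Old pattern: partial memset over the embedded struct only. */
	struct cffrml_like *a = malloc(sizeof(*a));
	if (!a)
		return 1;
	memset(a, 0, sizeof(struct cflayer_like));	/* tail left dirty */

	/* New pattern: the allocator zeroes everything. */
	struct cffrml_like *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;

	printf("zeroed %zu of %zu bytes vs all %zu bytes\n",
	       sizeof(struct cflayer_like), sizeof(*a), sizeof(*b));
	free(a);
	free(b);
	return 0;
}
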
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index c23979e79dfa..b36f24a4c8e7 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
108 int idx = phyid % DN_CACHE_SIZE; 108 int idx = phyid % DN_CACHE_SIZE;
109 109
110 spin_lock_bh(&muxl->transmit_lock); 110 spin_lock_bh(&muxl->transmit_lock);
111 rcu_assign_pointer(muxl->dn_cache[idx], NULL); 111 RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
112 dn = get_from_id(&muxl->frml_list, phyid); 112 dn = get_from_id(&muxl->frml_list, phyid);
113 if (dn == NULL) 113 if (dn == NULL)
114 goto out; 114 goto out;
@@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
164 if (up == NULL) 164 if (up == NULL)
165 goto out; 165 goto out;
166 166
167 rcu_assign_pointer(muxl->up_cache[idx], NULL); 167 RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
168 list_del_rcu(&up->node); 168 list_del_rcu(&up->node);
169out: 169out:
170 spin_unlock_bh(&muxl->receive_lock); 170 spin_unlock_bh(&muxl->receive_lock);
@@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
261 261
262 idx = layer->id % UP_CACHE_SIZE; 262 idx = layer->id % UP_CACHE_SIZE;
263 spin_lock_bh(&muxl->receive_lock); 263 spin_lock_bh(&muxl->receive_lock);
264 rcu_assign_pointer(muxl->up_cache[idx], NULL); 264 RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
265 list_del_rcu(&layer->node); 265 list_del_rcu(&layer->node);
266 spin_unlock_bh(&muxl->receive_lock); 266 spin_unlock_bh(&muxl->receive_lock);
267 } 267 }
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 0deabb440051..81660f809713 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -46,13 +46,10 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
46 int mtu_size) 46 int mtu_size)
47{ 47{
48 int tmp; 48 int tmp;
49 struct cfrfml *this = 49 struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
50 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
51 50
52 if (!this) { 51 if (!this)
53 pr_warn("Out of memory\n");
54 return NULL; 52 return NULL;
55 }
56 53
57 cfsrvl_init(&this->serv, channel_id, dev_info, false); 54 cfsrvl_init(&this->serv, channel_id, dev_info, false);
58 this->serv.release = cfrfml_release; 55 this->serv.release = cfrfml_release;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 2715c84cfa87..797c8d165993 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -33,13 +33,10 @@ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
33 33
34struct cflayer *cfserl_create(int type, int instance, bool use_stx) 34struct cflayer *cfserl_create(int type, int instance, bool use_stx)
35{ 35{
36 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); 36 struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
37 if (!this) { 37 if (!this)
38 pr_warn("Out of memory\n");
39 return NULL; 38 return NULL;
40 }
41 caif_assert(offsetof(struct cfserl, layer) == 0); 39 caif_assert(offsetof(struct cfserl, layer) == 0);
42 memset(this, 0, sizeof(struct cfserl));
43 this->layer.receive = cfserl_receive; 40 this->layer.receive = cfserl_receive;
44 this->layer.transmit = cfserl_transmit; 41 this->layer.transmit = cfserl_transmit;
45 this->layer.ctrlcmd = cfserl_ctrlcmd; 42 this->layer.ctrlcmd = cfserl_ctrlcmd;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 535a1e72b366..b99f5b22689d 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -108,10 +108,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
108 struct caif_payload_info *info; 108 struct caif_payload_info *info;
109 u8 flow_on = SRVL_FLOW_ON; 109 u8 flow_on = SRVL_FLOW_ON;
110 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 110 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
111 if (!pkt) { 111 if (!pkt)
112 pr_warn("Out of memory\n");
113 return -ENOMEM; 112 return -ENOMEM;
114 }
115 113
116 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { 114 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
117 pr_err("Packet is erroneous!\n"); 115 pr_err("Packet is erroneous!\n");
@@ -130,10 +128,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
130 struct caif_payload_info *info; 128 struct caif_payload_info *info;
131 u8 flow_off = SRVL_FLOW_OFF; 129 u8 flow_off = SRVL_FLOW_OFF;
132 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 130 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
133 if (!pkt) { 131 if (!pkt)
134 pr_warn("Out of memory\n");
135 return -ENOMEM; 132 return -ENOMEM;
136 }
137 133
138 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 134 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
139 pr_err("Packet is erroneous!\n"); 135 pr_err("Packet is erroneous!\n");
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 98e027db18ed..53e49f3e3af3 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -26,13 +26,10 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
26 26
27struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) 27struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
28{ 28{
29 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
30 if (!util) { 30 if (!util)
31 pr_warn("Out of memory\n");
32 return NULL; 31 return NULL;
33 }
34 caif_assert(offsetof(struct cfsrvl, layer) == 0); 32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
35 memset(util, 0, sizeof(struct cfsrvl));
36 cfsrvl_init(util, channel_id, dev_info, true); 33 cfsrvl_init(util, channel_id, dev_info, true);
37 util->layer.receive = cfutill_receive; 34 util->layer.receive = cfutill_receive;
38 util->layer.transmit = cfutill_transmit; 35 util->layer.transmit = cfutill_transmit;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3ec83fbc2887..910ab0661f66 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -25,13 +25,10 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
25 25
26struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) 26struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
27{ 27{
28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 28 struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
29 if (!vei) { 29 if (!vei)
30 pr_warn("Out of memory\n");
31 return NULL; 30 return NULL;
32 }
33 caif_assert(offsetof(struct cfsrvl, layer) == 0); 31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
34 memset(vei, 0, sizeof(struct cfsrvl));
35 cfsrvl_init(vei, channel_id, dev_info, true); 32 cfsrvl_init(vei, channel_id, dev_info, true);
36 vei->layer.receive = cfvei_receive; 33 vei->layer.receive = cfvei_receive;
37 vei->layer.transmit = cfvei_transmit; 34 vei->layer.transmit = cfvei_transmit;
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index b2f5989ad455..e3f37db40ac3 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -21,14 +21,11 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
21 21
22struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) 22struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
23{ 23{
24 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 24 struct cfsrvl *vid = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
25 if (!vid) { 25 if (!vid)
26 pr_warn("Out of memory\n");
27 return NULL; 26 return NULL;
28 }
29 caif_assert(offsetof(struct cfsrvl, layer) == 0); 27 caif_assert(offsetof(struct cfsrvl, layer) == 0);
30 28
31 memset(vid, 0, sizeof(struct cfsrvl));
32 cfsrvl_init(vid, channel_id, dev_info, false); 29 cfsrvl_init(vid, channel_id, dev_info, false);
33 vid->layer.receive = cfvidl_receive; 30 vid->layer.receive = cfvidl_receive;
34 vid->layer.transmit = cfvidl_transmit; 31 vid->layer.transmit = cfvidl_transmit;
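
All the CAIF service layers above receive the same two simplifications: kmalloc() plus memset() collapses into kzalloc(), and the explicit "Out of memory" message is dropped because the allocator already warns on failed allocations. The pattern, shown once (cfsrvl stands in for each layer):

/* before */
struct cfsrvl *svc = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!svc) {
	pr_warn("Out of memory\n");	/* redundant: the allocator warns */
	return NULL;
}
memset(svc, 0, sizeof(struct cfsrvl));

/* after */
struct cfsrvl *svc = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!svc)
	return NULL;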
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 89395b2c8bca..03200699d274 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -40,5 +40,16 @@ config CAN_BCM
40 CAN messages are used on the bus (e.g. in automotive environments). 40 CAN messages are used on the bus (e.g. in automotive environments).
41 To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM. 41 To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM.
42 42
43config CAN_GW
44 tristate "CAN Gateway/Router (with netlink configuration)"
45 depends on CAN
46	default n
47 ---help---
48 The CAN Gateway/Router is used to route (and modify) CAN frames.
49 It is based on the PF_CAN core infrastructure for msg filtering and
50 msg sending and can optionally modify routed CAN frames on the fly.
51 CAN frames can be routed between CAN network interfaces (one hop).
52 They can be modified with AND/OR/XOR/SET operations as configured
53	  by a netlink configuration interface, as known e.g. from iptables.
43 54
44source "drivers/net/can/Kconfig" 55source "drivers/net/can/Kconfig"
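
For context, a gateway job is configured from userspace through PF_CAN rtnetlink messages, matching the parsing in cgw_create_job()/cgw_parse_attr() further down: an RTM_NEWROUTE carrying a struct rtcanmsg plus the mandatory CGW_SRC_IF and CGW_DST_IF attributes. A hedged sketch with error handling trimmed; the interface names can0/can1 and the add_u32() helper are local to this example:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/can/gw.h>

static void add_u32(struct nlmsghdr *nh, int type, __u32 val)
{
	struct rtattr *rta = (void *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(val));
	memcpy(RTA_DATA(rta), &val, sizeof(val));
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct rtcanmsg rtcan;
		char buf[128];		/* room for attributes */
	} req;
	int s = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtcanmsg));
	req.nh.nlmsg_type = RTM_NEWROUTE;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.rtcan.can_family = AF_CAN;
	req.rtcan.gwtype = CGW_TYPE_CAN_CAN;

	add_u32(&req.nh, CGW_SRC_IF, if_nametoindex("can0"));
	add_u32(&req.nh, CGW_DST_IF, if_nametoindex("can1"));

	send(s, &req, req.nh.nlmsg_len, 0);	/* route can0 -> can1 */
	close(s);
	return 0;
}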
diff --git a/net/can/Makefile b/net/can/Makefile
index 2d3894b32742..cef49eb1f5c7 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -10,3 +10,6 @@ can-raw-y := raw.o
10 10
11obj-$(CONFIG_CAN_BCM) += can-bcm.o 11obj-$(CONFIG_CAN_BCM) += can-bcm.o
12can-bcm-y := bcm.o 12can-bcm-y := bcm.o
13
14obj-$(CONFIG_CAN_GW) += can-gw.o
15can-gw-y := gw.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 9b0c32a2690c..0ce2ad0696da 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -38,8 +38,6 @@
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE. 39 * DAMAGE.
40 * 40 *
41 * Send feedback to <socketcan-users@lists.berlios.de>
42 *
43 */ 41 */
44 42
45#include <linux/module.h> 43#include <linux/module.h>
@@ -719,7 +717,7 @@ int can_proto_register(const struct can_proto *cp)
719 proto); 717 proto);
720 err = -EBUSY; 718 err = -EBUSY;
721 } else 719 } else
722 rcu_assign_pointer(proto_tab[proto], cp); 720 RCU_INIT_POINTER(proto_tab[proto], cp);
723 721
724 mutex_unlock(&proto_tab_lock); 722 mutex_unlock(&proto_tab_lock);
725 723
@@ -740,7 +738,7 @@ void can_proto_unregister(const struct can_proto *cp)
740 738
741 mutex_lock(&proto_tab_lock); 739 mutex_lock(&proto_tab_lock);
742 BUG_ON(proto_tab[proto] != cp); 740 BUG_ON(proto_tab[proto] != cp);
743 rcu_assign_pointer(proto_tab[proto], NULL); 741 RCU_INIT_POINTER(proto_tab[proto], NULL);
744 mutex_unlock(&proto_tab_lock); 742 mutex_unlock(&proto_tab_lock);
745 743
746 synchronize_rcu(); 744 synchronize_rcu();
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 34253b84e30f..fd882dbadad3 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -35,8 +35,6 @@
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
36 * DAMAGE. 36 * DAMAGE.
37 * 37 *
38 * Send feedback to <socketcan-users@lists.berlios.de>
39 *
40 */ 38 */
41 39
42#ifndef AF_CAN_H 40#ifndef AF_CAN_H
diff --git a/net/can/bcm.c b/net/can/bcm.c
index c84963d2dee6..151b7730c12c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/can/gw.c b/net/can/gw.c
new file mode 100644
index 000000000000..3d79b127881e
--- /dev/null
+++ b/net/can/gw.c
@@ -0,0 +1,957 @@
1/*
2 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
3 *
4 * Copyright (c) 2011 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/types.h>
45#include <linux/list.h>
46#include <linux/spinlock.h>
47#include <linux/rcupdate.h>
48#include <linux/rculist.h>
49#include <linux/net.h>
50#include <linux/netdevice.h>
51#include <linux/if_arp.h>
52#include <linux/skbuff.h>
53#include <linux/can.h>
54#include <linux/can/core.h>
55#include <linux/can/gw.h>
56#include <net/rtnetlink.h>
57#include <net/net_namespace.h>
58#include <net/sock.h>
59
60#define CAN_GW_VERSION "20101209"
61static __initdata const char banner[] =
62 KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
63
64MODULE_DESCRIPTION("PF_CAN netlink gateway");
65MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
67MODULE_ALIAS("can-gw");
68
69HLIST_HEAD(cgw_list);
70static struct notifier_block notifier;
71
72static struct kmem_cache *cgw_cache __read_mostly;
73
74/* structure that contains the (on-the-fly) CAN frame modifications */
75struct cf_mod {
76 struct {
77 struct can_frame and;
78 struct can_frame or;
79 struct can_frame xor;
80 struct can_frame set;
81 } modframe;
82 struct {
83 u8 and;
84 u8 or;
85 u8 xor;
86 u8 set;
87 } modtype;
88 void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
89 struct cf_mod *mod);
90
91 /* CAN frame checksum calculation after CAN frame modifications */
92 struct {
93 struct cgw_csum_xor xor;
94 struct cgw_csum_crc8 crc8;
95 } csum;
96 struct {
97 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
98 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
99 } csumfunc;
100};
101
102
103/*
104 * So far we just support CAN -> CAN routing and frame modifications.
105 *
106 * The internal can_can_gw structure contains data and attributes for
107 * a CAN -> CAN gateway job.
108 */
109struct can_can_gw {
110 struct can_filter filter;
111 int src_idx;
112 int dst_idx;
113};
114
115/* list entry for CAN gateway jobs */
116struct cgw_job {
117 struct hlist_node list;
118 struct rcu_head rcu;
119 u32 handled_frames;
120 u32 dropped_frames;
121 struct cf_mod mod;
122 union {
123 /* CAN frame data source */
124 struct net_device *dev;
125 } src;
126 union {
127 /* CAN frame data destination */
128 struct net_device *dev;
129 } dst;
130 union {
131 struct can_can_gw ccgw;
132 /* tbc */
133 };
134 u8 gwtype;
135 u16 flags;
136};
137
138/* modification functions that are invoked in the hot path in can_can_gw_rcv */
139
140#define MODFUNC(func, op) static void func(struct can_frame *cf, \
141 struct cf_mod *mod) { op ; }
142
143MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
144MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
145MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
146MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
147MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
148MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
149MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
150MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
151MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
152MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
153MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
154MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
155
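
Each MODFUNC() line generates one single-purpose modifier function; the first, reformatted, expands to the sketch below. cgw_parse_attr() later fills mod->modfunc[] with only the handlers a job actually requested, so the receive path iterates preselected function pointers instead of testing flags per frame.

static void mod_and_id(struct can_frame *cf, struct cf_mod *mod)
{
	cf->can_id &= mod->modframe.and.can_id;
}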
156static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
157{
158 /*
159	 * Copy the struct members separately to ensure that no uninitialized
160	 * data is copied into the 3-byte hole of the struct. This is needed
161	 * to allow simple memcmp() compares of the data in struct cf_mod.
162 */
163
164 dst->can_id = src->can_id;
165 dst->can_dlc = src->can_dlc;
166 *(u64 *)dst->data = *(u64 *)src->data;
167}
168
169static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
170{
171 /*
172	 * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data[0]
173	 * relative to received dlc -1 .. -8:
174 * e.g. for received dlc = 8
175 * -1 => index = 7 (data[7])
176 * -3 => index = 5 (data[5])
177 * -8 => index = 0 (data[0])
178 */
179
180 if (fr > -9 && fr < 8 &&
181 to > -9 && to < 8 &&
182 re > -9 && re < 8)
183 return 0;
184 else
185 return -EINVAL;
186}
187
188static inline int calc_idx(int idx, int rx_dlc)
189{
190 if (idx < 0)
191 return rx_dlc + idx;
192 else
193 return idx;
194}
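
The mapping is easiest to verify with concrete values; a minimal userspace test of the same logic (body copied from calc_idx() above):

#include <assert.h>

static int calc_idx(int idx, int rx_dlc)
{
	return idx < 0 ? rx_dlc + idx : idx;
}

int main(void)
{
	assert(calc_idx(-1, 8) == 7);	/* last data byte, data[7] */
	assert(calc_idx(-8, 8) == 0);	/* first data byte, data[0] */
	assert(calc_idx(5, 8) == 5);	/* absolute index passes through */
	return 0;
}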
195
196static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
197{
198 int from = calc_idx(xor->from_idx, cf->can_dlc);
199 int to = calc_idx(xor->to_idx, cf->can_dlc);
200 int res = calc_idx(xor->result_idx, cf->can_dlc);
201 u8 val = xor->init_xor_val;
202 int i;
203
204 if (from < 0 || to < 0 || res < 0)
205 return;
206
207 if (from <= to) {
208 for (i = from; i <= to; i++)
209 val ^= cf->data[i];
210 } else {
211 for (i = from; i >= to; i--)
212 val ^= cf->data[i];
213 }
214
215 cf->data[res] = val;
216}
217
218static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
219{
220 u8 val = xor->init_xor_val;
221 int i;
222
223 for (i = xor->from_idx; i <= xor->to_idx; i++)
224 val ^= cf->data[i];
225
226 cf->data[xor->result_idx] = val;
227}
228
229static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
230{
231 u8 val = xor->init_xor_val;
232 int i;
233
234 for (i = xor->from_idx; i >= xor->to_idx; i--)
235 val ^= cf->data[i];
236
237 cf->data[xor->result_idx] = val;
238}
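
A worked example of the relative variant: for a frame received with can_dlc == 8, the parameters from_idx == -4, to_idx == -2, result_idx == -1 and init_xor_val == 0 resolve to XOR-ing data[4]..data[6] and storing the result in data[7]. The same computation in plain userspace C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t data[8] = { 0, 0, 0, 0, 0x11, 0x22, 0x33, 0x00 };
	uint8_t val = 0x00;		/* init_xor_val */
	int i;

	for (i = 4; i <= 6; i++)	/* calc_idx(-4, 8) .. calc_idx(-2, 8) */
		val ^= data[i];
	data[7] = val;			/* calc_idx(-1, 8) == 7 */

	assert(data[7] == (0x11 ^ 0x22 ^ 0x33));
	return 0;
}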
239
240static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
241{
242 int from = calc_idx(crc8->from_idx, cf->can_dlc);
243 int to = calc_idx(crc8->to_idx, cf->can_dlc);
244 int res = calc_idx(crc8->result_idx, cf->can_dlc);
245 u8 crc = crc8->init_crc_val;
246 int i;
247
248 if (from < 0 || to < 0 || res < 0)
249 return;
250
251 if (from <= to) {
252 for (i = crc8->from_idx; i <= crc8->to_idx; i++)
253 crc = crc8->crctab[crc^cf->data[i]];
254 } else {
255 for (i = crc8->from_idx; i >= crc8->to_idx; i--)
256 crc = crc8->crctab[crc^cf->data[i]];
257 }
258
259 switch (crc8->profile) {
260
261 case CGW_CRC8PRF_1U8:
262 crc = crc8->crctab[crc^crc8->profile_data[0]];
263 break;
264
265 case CGW_CRC8PRF_16U8:
266 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
267 break;
268
269 case CGW_CRC8PRF_SFFID_XOR:
270 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
271 (cf->can_id >> 8 & 0xFF)];
272 break;
273
274 }
275
276 cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
277}
278
279static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
280{
281 u8 crc = crc8->init_crc_val;
282 int i;
283
284 for (i = crc8->from_idx; i <= crc8->to_idx; i++)
285 crc = crc8->crctab[crc^cf->data[i]];
286
287 switch (crc8->profile) {
288
289 case CGW_CRC8PRF_1U8:
290 crc = crc8->crctab[crc^crc8->profile_data[0]];
291 break;
292
293 case CGW_CRC8PRF_16U8:
294 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
295 break;
296
297 case CGW_CRC8PRF_SFFID_XOR:
298 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
299 (cf->can_id >> 8 & 0xFF)];
300 break;
301 }
302
303 cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
304}
305
306static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
307{
308 u8 crc = crc8->init_crc_val;
309 int i;
310
311 for (i = crc8->from_idx; i >= crc8->to_idx; i--)
312 crc = crc8->crctab[crc^cf->data[i]];
313
314 switch (crc8->profile) {
315
316 case CGW_CRC8PRF_1U8:
317 crc = crc8->crctab[crc^crc8->profile_data[0]];
318 break;
319
320 case CGW_CRC8PRF_16U8:
321 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
322 break;
323
324 case CGW_CRC8PRF_SFFID_XOR:
325 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
326 (cf->can_id >> 8 & 0xFF)];
327 break;
328 }
329
330 cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
331}
332
333/* the receive & process & send function */
334static void can_can_gw_rcv(struct sk_buff *skb, void *data)
335{
336 struct cgw_job *gwj = (struct cgw_job *)data;
337 struct can_frame *cf;
338 struct sk_buff *nskb;
339 int modidx = 0;
340
341 /* do not handle already routed frames - see comment below */
342 if (skb_mac_header_was_set(skb))
343 return;
344
345 if (!(gwj->dst.dev->flags & IFF_UP)) {
346 gwj->dropped_frames++;
347 return;
348 }
349
350 /*
351 * clone the given skb, which has not been done in can_rcv()
352 *
353 * When there is at least one modification function activated,
354 * we need to copy the skb as we want to modify skb->data.
355 */
356 if (gwj->mod.modfunc[0])
357 nskb = skb_copy(skb, GFP_ATOMIC);
358 else
359 nskb = skb_clone(skb, GFP_ATOMIC);
360
361 if (!nskb) {
362 gwj->dropped_frames++;
363 return;
364 }
365
366 /*
367 * Mark routed frames by setting some mac header length which is
368 * not relevant for the CAN frames located in the skb->data section.
369 *
370	 * As dev->header_ops is not set for CAN netdevices, nothing ever
371	 * accesses the various header offsets in CAN skbuffs anyway,
372	 * e.g. reading CAN frames via a packet socket still works.
373 */
374 skb_set_mac_header(nskb, 8);
375 nskb->dev = gwj->dst.dev;
376
377 /* pointer to modifiable CAN frame */
378 cf = (struct can_frame *)nskb->data;
379
380 /* perform preprocessed modification functions if there are any */
381 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
382 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
383
384 /* check for checksum updates when the CAN frame has been modified */
385 if (modidx) {
386 if (gwj->mod.csumfunc.crc8)
387 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
388
389 if (gwj->mod.csumfunc.xor)
390 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
391 }
392
393 /* clear the skb timestamp if not configured the other way */
394 if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
395 nskb->tstamp.tv64 = 0;
396
397 /* send to netdevice */
398 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
399 gwj->dropped_frames++;
400 else
401 gwj->handled_frames++;
402}
403
404static inline int cgw_register_filter(struct cgw_job *gwj)
405{
406 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
407 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
408 gwj, "gw");
409}
410
411static inline void cgw_unregister_filter(struct cgw_job *gwj)
412{
413 can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
414 gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
415}
416
417static int cgw_notifier(struct notifier_block *nb,
418 unsigned long msg, void *data)
419{
420 struct net_device *dev = (struct net_device *)data;
421
422 if (!net_eq(dev_net(dev), &init_net))
423 return NOTIFY_DONE;
424 if (dev->type != ARPHRD_CAN)
425 return NOTIFY_DONE;
426
427 if (msg == NETDEV_UNREGISTER) {
428
429 struct cgw_job *gwj = NULL;
430 struct hlist_node *n, *nx;
431
432 ASSERT_RTNL();
433
434 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
435
436 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
437 hlist_del(&gwj->list);
438 cgw_unregister_filter(gwj);
439 kfree(gwj);
440 }
441 }
442 }
443
444 return NOTIFY_DONE;
445}
446
447static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
448{
449 struct cgw_frame_mod mb;
450 struct rtcanmsg *rtcan;
451 struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
452 if (!nlh)
453 return -EMSGSIZE;
454
455 rtcan = nlmsg_data(nlh);
456 rtcan->can_family = AF_CAN;
457 rtcan->gwtype = gwj->gwtype;
458 rtcan->flags = gwj->flags;
459
460 /* add statistics if available */
461
462 if (gwj->handled_frames) {
463 if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
464 goto cancel;
465 else
466 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
467 }
468
469 if (gwj->dropped_frames) {
470 if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
471 goto cancel;
472 else
473 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
474 }
475
476 /* check non default settings of attributes */
477
478 if (gwj->mod.modtype.and) {
479 memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
480 mb.modtype = gwj->mod.modtype.and;
481 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
482 goto cancel;
483 else
484 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
485 }
486
487 if (gwj->mod.modtype.or) {
488 memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
489 mb.modtype = gwj->mod.modtype.or;
490 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
491 goto cancel;
492 else
493 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
494 }
495
496 if (gwj->mod.modtype.xor) {
497 memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
498 mb.modtype = gwj->mod.modtype.xor;
499 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
500 goto cancel;
501 else
502 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
503 }
504
505 if (gwj->mod.modtype.set) {
506 memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
507 mb.modtype = gwj->mod.modtype.set;
508 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
509 goto cancel;
510 else
511 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
512 }
513
514 if (gwj->mod.csumfunc.crc8) {
515 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
516 &gwj->mod.csum.crc8) < 0)
517 goto cancel;
518 else
519 nlh->nlmsg_len += NLA_HDRLEN + \
520 NLA_ALIGN(CGW_CS_CRC8_LEN);
521 }
522
523 if (gwj->mod.csumfunc.xor) {
524 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
525 &gwj->mod.csum.xor) < 0)
526 goto cancel;
527 else
528 nlh->nlmsg_len += NLA_HDRLEN + \
529 NLA_ALIGN(CGW_CS_XOR_LEN);
530 }
531
532 if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
533
534 if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
535 if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
536 &gwj->ccgw.filter) < 0)
537 goto cancel;
538 else
539 nlh->nlmsg_len += NLA_HDRLEN +
540 NLA_ALIGN(sizeof(struct can_filter));
541 }
542
543 if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
544 goto cancel;
545 else
546 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
547
548 if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
549 goto cancel;
550 else
551 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
552 }
553
554 return skb->len;
555
556cancel:
557 nlmsg_cancel(skb, nlh);
558 return -EMSGSIZE;
559}
560
561/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
562static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
563{
564 struct cgw_job *gwj = NULL;
565 struct hlist_node *n;
566 int idx = 0;
567 int s_idx = cb->args[0];
568
569 rcu_read_lock();
570 hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
571 if (idx < s_idx)
572 goto cont;
573
574 if (cgw_put_job(skb, gwj) < 0)
575 break;
576cont:
577 idx++;
578 }
579 rcu_read_unlock();
580
581 cb->args[0] = idx;
582
583 return skb->len;
584}
585
586/* check for common and gwtype specific attributes */
587static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
588 u8 gwtype, void *gwtypeattr)
589{
590 struct nlattr *tb[CGW_MAX+1];
591 struct cgw_frame_mod mb;
592 int modidx = 0;
593 int err = 0;
594
595 /* initialize modification & checksum data space */
596 memset(mod, 0, sizeof(*mod));
597
598 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
599 if (err < 0)
600 return err;
601
602 /* check for AND/OR/XOR/SET modifications */
603
604 if (tb[CGW_MOD_AND] &&
605 nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
606 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
607
608 canframecpy(&mod->modframe.and, &mb.cf);
609 mod->modtype.and = mb.modtype;
610
611 if (mb.modtype & CGW_MOD_ID)
612 mod->modfunc[modidx++] = mod_and_id;
613
614 if (mb.modtype & CGW_MOD_DLC)
615 mod->modfunc[modidx++] = mod_and_dlc;
616
617 if (mb.modtype & CGW_MOD_DATA)
618 mod->modfunc[modidx++] = mod_and_data;
619 }
620
621 if (tb[CGW_MOD_OR] &&
622 nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
623 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
624
625 canframecpy(&mod->modframe.or, &mb.cf);
626 mod->modtype.or = mb.modtype;
627
628 if (mb.modtype & CGW_MOD_ID)
629 mod->modfunc[modidx++] = mod_or_id;
630
631 if (mb.modtype & CGW_MOD_DLC)
632 mod->modfunc[modidx++] = mod_or_dlc;
633
634 if (mb.modtype & CGW_MOD_DATA)
635 mod->modfunc[modidx++] = mod_or_data;
636 }
637
638 if (tb[CGW_MOD_XOR] &&
639 nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
640 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
641
642 canframecpy(&mod->modframe.xor, &mb.cf);
643 mod->modtype.xor = mb.modtype;
644
645 if (mb.modtype & CGW_MOD_ID)
646 mod->modfunc[modidx++] = mod_xor_id;
647
648 if (mb.modtype & CGW_MOD_DLC)
649 mod->modfunc[modidx++] = mod_xor_dlc;
650
651 if (mb.modtype & CGW_MOD_DATA)
652 mod->modfunc[modidx++] = mod_xor_data;
653 }
654
655 if (tb[CGW_MOD_SET] &&
656 nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
657 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
658
659 canframecpy(&mod->modframe.set, &mb.cf);
660 mod->modtype.set = mb.modtype;
661
662 if (mb.modtype & CGW_MOD_ID)
663 mod->modfunc[modidx++] = mod_set_id;
664
665 if (mb.modtype & CGW_MOD_DLC)
666 mod->modfunc[modidx++] = mod_set_dlc;
667
668 if (mb.modtype & CGW_MOD_DATA)
669 mod->modfunc[modidx++] = mod_set_data;
670 }
671
672 /* check for checksum operations after CAN frame modifications */
673 if (modidx) {
674
675 if (tb[CGW_CS_CRC8] &&
676 nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
677
678 struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
679 nla_data(tb[CGW_CS_CRC8]);
680
681 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
682 c->result_idx);
683 if (err)
684 return err;
685
686 nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
687 CGW_CS_CRC8_LEN);
688
689 /*
690 * select dedicated processing function to reduce
691	 * runtime operations in the receive hot path.
692 */
693 if (c->from_idx < 0 || c->to_idx < 0 ||
694 c->result_idx < 0)
695 mod->csumfunc.crc8 = cgw_csum_crc8_rel;
696 else if (c->from_idx <= c->to_idx)
697 mod->csumfunc.crc8 = cgw_csum_crc8_pos;
698 else
699 mod->csumfunc.crc8 = cgw_csum_crc8_neg;
700 }
701
702 if (tb[CGW_CS_XOR] &&
703 nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
704
705 struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
706 nla_data(tb[CGW_CS_XOR]);
707
708 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
709 c->result_idx);
710 if (err)
711 return err;
712
713 nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
714 CGW_CS_XOR_LEN);
715
716 /*
717 * select dedicated processing function to reduce
718	 * runtime operations in the receive hot path.
719 */
720 if (c->from_idx < 0 || c->to_idx < 0 ||
721 c->result_idx < 0)
722 mod->csumfunc.xor = cgw_csum_xor_rel;
723 else if (c->from_idx <= c->to_idx)
724 mod->csumfunc.xor = cgw_csum_xor_pos;
725 else
726 mod->csumfunc.xor = cgw_csum_xor_neg;
727 }
728 }
729
730 if (gwtype == CGW_TYPE_CAN_CAN) {
731
732 /* check CGW_TYPE_CAN_CAN specific attributes */
733
734 struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
735 memset(ccgw, 0, sizeof(*ccgw));
736
737 /* check for can_filter in attributes */
738 if (tb[CGW_FILTER] &&
739 nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
740 nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
741 sizeof(struct can_filter));
742
743 err = -ENODEV;
744
745 /* specifying two interfaces is mandatory */
746 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
747 return err;
748
749 if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
750 nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
751 sizeof(u32));
752
753 if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
754 nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
755 sizeof(u32));
756
757 /* both indices set to 0 for flushing all routing entries */
758 if (!ccgw->src_idx && !ccgw->dst_idx)
759 return 0;
760
761 /* only one index set to 0 is an error */
762 if (!ccgw->src_idx || !ccgw->dst_idx)
763 return err;
764 }
765
766 /* add the checks for other gwtypes here */
767
768 return 0;
769}
770
771static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
772 void *arg)
773{
774 struct rtcanmsg *r;
775 struct cgw_job *gwj;
776 int err = 0;
777
778 if (nlmsg_len(nlh) < sizeof(*r))
779 return -EINVAL;
780
781 r = nlmsg_data(nlh);
782 if (r->can_family != AF_CAN)
783 return -EPFNOSUPPORT;
784
785	/* so far we only support CAN -> CAN routing */
786 if (r->gwtype != CGW_TYPE_CAN_CAN)
787 return -EINVAL;
788
789 gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
790 if (!gwj)
791 return -ENOMEM;
792
793 gwj->handled_frames = 0;
794 gwj->dropped_frames = 0;
795 gwj->flags = r->flags;
796 gwj->gwtype = r->gwtype;
797
798 err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
799 if (err < 0)
800 goto out;
801
802 err = -ENODEV;
803
804 /* ifindex == 0 is not allowed for job creation */
805 if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
806 goto out;
807
808 gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);
809
810 if (!gwj->src.dev)
811 goto out;
812
813	/* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
814 if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
815 goto put_src_out;
816
817 gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
818
819 if (!gwj->dst.dev)
820 goto put_src_out;
821
822	/* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
823 if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
824 goto put_src_dst_out;
825
826 ASSERT_RTNL();
827
828 err = cgw_register_filter(gwj);
829 if (!err)
830 hlist_add_head_rcu(&gwj->list, &cgw_list);
831
832put_src_dst_out:
833 dev_put(gwj->dst.dev);
834put_src_out:
835 dev_put(gwj->src.dev);
836out:
837 if (err)
838 kmem_cache_free(cgw_cache, gwj);
839
840 return err;
841}
842
843static void cgw_remove_all_jobs(void)
844{
845 struct cgw_job *gwj = NULL;
846 struct hlist_node *n, *nx;
847
848 ASSERT_RTNL();
849
850 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
851 hlist_del(&gwj->list);
852 cgw_unregister_filter(gwj);
853 kfree(gwj);
854 }
855}
856
857static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
858{
859 struct cgw_job *gwj = NULL;
860 struct hlist_node *n, *nx;
861 struct rtcanmsg *r;
862 struct cf_mod mod;
863 struct can_can_gw ccgw;
864 int err = 0;
865
866 if (nlmsg_len(nlh) < sizeof(*r))
867 return -EINVAL;
868
869 r = nlmsg_data(nlh);
870 if (r->can_family != AF_CAN)
871 return -EPFNOSUPPORT;
872
873	/* so far we only support CAN -> CAN routing */
874 if (r->gwtype != CGW_TYPE_CAN_CAN)
875 return -EINVAL;
876
877 err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
878 if (err < 0)
879 return err;
880
881 /* two interface indices both set to 0 => remove all entries */
882 if (!ccgw.src_idx && !ccgw.dst_idx) {
883 cgw_remove_all_jobs();
884 return 0;
885 }
886
887 err = -EINVAL;
888
889 ASSERT_RTNL();
890
891 /* remove only the first matching entry */
892 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
893
894 if (gwj->flags != r->flags)
895 continue;
896
897 if (memcmp(&gwj->mod, &mod, sizeof(mod)))
898 continue;
899
900	/* r->gwtype == CGW_TYPE_CAN_CAN was already checked above */
901 if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
902 continue;
903
904 hlist_del(&gwj->list);
905 cgw_unregister_filter(gwj);
906 kfree(gwj);
907 err = 0;
908 break;
909 }
910
911 return err;
912}
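
Note the flush semantics this gives userspace: an RTM_DELROUTE whose CGW_SRC_IF and CGW_DST_IF attributes are both present but zero removes every job, while nonzero indices (with matching flags, modifications and filter) remove only the first matching entry. Reusing the hypothetical add_u32() helper and request buffer from the earlier netlink sketch:

req.nh.nlmsg_type = RTM_DELROUTE;
add_u32(&req.nh, CGW_SRC_IF, 0);	/* both zero ... */
add_u32(&req.nh, CGW_DST_IF, 0);	/* ... triggers cgw_remove_all_jobs() */
send(s, &req, req.nh.nlmsg_len, 0);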
913
914static __init int cgw_module_init(void)
915{
916 printk(banner);
917
918 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
919 0, 0, NULL);
920
921 if (!cgw_cache)
922 return -ENOMEM;
923
924 /* set notifier */
925 notifier.notifier_call = cgw_notifier;
926 register_netdevice_notifier(&notifier);
927
928 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
929 unregister_netdevice_notifier(&notifier);
930 kmem_cache_destroy(cgw_cache);
931 return -ENOBUFS;
932 }
933
934 /* Only the first call to __rtnl_register can fail */
935 __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
936 __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);
937
938 return 0;
939}
940
941static __exit void cgw_module_exit(void)
942{
943 rtnl_unregister_all(PF_CAN);
944
945 unregister_netdevice_notifier(&notifier);
946
947 rtnl_lock();
948 cgw_remove_all_jobs();
949 rtnl_unlock();
950
951 rcu_barrier(); /* Wait for completion of call_rcu()'s */
952
953 kmem_cache_destroy(cgw_cache);
954}
955
956module_init(cgw_module_init);
957module_exit(cgw_module_exit);
diff --git a/net/can/proc.c b/net/can/proc.c
index 0016f7339699..ba873c36d2fd 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/can/raw.c b/net/can/raw.c
index dea99a6e596c..cde1b4a20f75 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index be683f2d401f..cc04dd667a10 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -27,3 +27,17 @@ config CEPH_LIB_PRETTYDEBUG
27 27
28 If unsure, say N. 28 If unsure, say N.
29 29
30config CEPH_LIB_USE_DNS_RESOLVER
31 bool "Use in-kernel support for DNS lookup"
32 depends on CEPH_LIB
33 select DNS_RESOLVER
34 default n
35 help
36 If you say Y here, hostnames (e.g. monitor addresses) will
37 be resolved using the CONFIG_DNS_RESOLVER facility.
38
39 For information on how to use CONFIG_DNS_RESOLVER consult
40 Documentation/networking/dns_resolver.txt
41
42 If unsure, say N.
43
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 2883ea01e680..97f70e50ad3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -432,9 +432,12 @@ EXPORT_SYMBOL(ceph_client_id);
432/* 432/*
433 * create a fresh client instance 433 * create a fresh client instance
434 */ 434 */
435struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private) 435struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
436 unsigned supported_features,
437 unsigned required_features)
436{ 438{
437 struct ceph_client *client; 439 struct ceph_client *client;
440 struct ceph_entity_addr *myaddr = NULL;
438 int err = -ENOMEM; 441 int err = -ENOMEM;
439 442
440 client = kzalloc(sizeof(*client), GFP_KERNEL); 443 client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -449,15 +452,27 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
449 client->auth_err = 0; 452 client->auth_err = 0;
450 453
451 client->extra_mon_dispatch = NULL; 454 client->extra_mon_dispatch = NULL;
452 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT; 455 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT |
453 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT; 456 supported_features;
454 457 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT |
455 client->msgr = NULL; 458 required_features;
459
460 /* msgr */
461 if (ceph_test_opt(client, MYIP))
462 myaddr = &client->options->my_addr;
463 client->msgr = ceph_messenger_create(myaddr,
464 client->supported_features,
465 client->required_features);
466 if (IS_ERR(client->msgr)) {
467 err = PTR_ERR(client->msgr);
468 goto fail;
469 }
470 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
456 471
457 /* subsystems */ 472 /* subsystems */
458 err = ceph_monc_init(&client->monc, client); 473 err = ceph_monc_init(&client->monc, client);
459 if (err < 0) 474 if (err < 0)
460 goto fail; 475 goto fail_msgr;
461 err = ceph_osdc_init(&client->osdc, client); 476 err = ceph_osdc_init(&client->osdc, client);
462 if (err < 0) 477 if (err < 0)
463 goto fail_monc; 478 goto fail_monc;
@@ -466,6 +481,8 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
466 481
467fail_monc: 482fail_monc:
468 ceph_monc_stop(&client->monc); 483 ceph_monc_stop(&client->monc);
484fail_msgr:
485 ceph_messenger_destroy(client->msgr);
469fail: 486fail:
470 kfree(client); 487 kfree(client);
471 return ERR_PTR(err); 488 return ERR_PTR(err);
@@ -490,8 +507,7 @@ void ceph_destroy_client(struct ceph_client *client)
490 507
491 ceph_debugfs_client_cleanup(client); 508 ceph_debugfs_client_cleanup(client);
492 509
493 if (client->msgr) 510 ceph_messenger_destroy(client->msgr);
494 ceph_messenger_destroy(client->msgr);
495 511
496 ceph_destroy_options(client->options); 512 ceph_destroy_options(client->options);
497 513
@@ -514,24 +530,9 @@ static int have_mon_and_osd_map(struct ceph_client *client)
514 */ 530 */
515int __ceph_open_session(struct ceph_client *client, unsigned long started) 531int __ceph_open_session(struct ceph_client *client, unsigned long started)
516{ 532{
517 struct ceph_entity_addr *myaddr = NULL;
518 int err; 533 int err;
519 unsigned long timeout = client->options->mount_timeout * HZ; 534 unsigned long timeout = client->options->mount_timeout * HZ;
520 535
521 /* initialize the messenger */
522 if (client->msgr == NULL) {
523 if (ceph_test_opt(client, MYIP))
524 myaddr = &client->options->my_addr;
525 client->msgr = ceph_messenger_create(myaddr,
526 client->supported_features,
527 client->required_features);
528 if (IS_ERR(client->msgr)) {
529 client->msgr = NULL;
530 return PTR_ERR(client->msgr);
531 }
532 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
533 }
534
535 /* open session, and wait for mon and osd maps */ 536 /* open session, and wait for mon and osd maps */
536 err = ceph_monc_open_session(&client->monc); 537 err = ceph_monc_open_session(&client->monc);
537 if (err < 0) 538 if (err < 0)
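
Callers change in step with this: the messenger is now created eagerly inside ceph_create_client(), and per-module feature bits are ORed in at create time instead of being patched into the client afterwards. A hedged sketch of an updated call site (opt and private_data stand in for the caller's own values; pass 0 when no extra feature bits are needed):

struct ceph_client *client;

client = ceph_create_client(opt, private_data,
			    0,	/* extra supported features */
			    0);	/* extra required features */
if (IS_ERR(client))
	return PTR_ERR(client);

/* client->msgr is valid immediately; __ceph_open_session() no longer
 * creates it lazily. */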
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9918e9eb276e..f466930e26fa 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/blkdev.h> 13#include <linux/blkdev.h>
14#include <linux/dns_resolver.h>
14#include <net/tcp.h> 15#include <net/tcp.h>
15 16
16#include <linux/ceph/libceph.h> 17#include <linux/ceph/libceph.h>
@@ -1078,6 +1079,101 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
1078} 1079}
1079 1080
1080/* 1081/*
1082 * Unlike other *_pton function semantics, zero indicates success.
1083 */
1084static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1085 char delim, const char **ipend)
1086{
1087 struct sockaddr_in *in4 = (void *)ss;
1088 struct sockaddr_in6 *in6 = (void *)ss;
1089
1090 memset(ss, 0, sizeof(*ss));
1091
1092 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1093 ss->ss_family = AF_INET;
1094 return 0;
1095 }
1096
1097 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1098 ss->ss_family = AF_INET6;
1099 return 0;
1100 }
1101
1102 return -EINVAL;
1103}
1104
1105/*
1106 * Extract hostname string and resolve using kernel DNS facility.
1107 */
1108#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1109static int ceph_dns_resolve_name(const char *name, size_t namelen,
1110 struct sockaddr_storage *ss, char delim, const char **ipend)
1111{
1112 const char *end, *delim_p;
1113 char *colon_p, *ip_addr = NULL;
1114 int ip_len, ret;
1115
1116 /*
1117 * The hostname ends immediately before the delimiter or the port
1118 * marker (':'), whichever of the two comes first.
1119 */
1120 delim_p = memchr(name, delim, namelen);
1121 colon_p = memchr(name, ':', namelen);
1122
1123 if (delim_p && colon_p)
1124 end = delim_p < colon_p ? delim_p : colon_p;
1125 else if (!delim_p && colon_p)
1126 end = colon_p;
1127 else {
1128 end = delim_p;
1129 if (!end) /* case: hostname:/ */
1130 end = name + namelen;
1131 }
1132
1133 if (end <= name)
1134 return -EINVAL;
1135
1136 /* do dns_resolve upcall */
1137 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1138 if (ip_len > 0)
1139 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1140 else
1141 ret = -ESRCH;
1142
1143 kfree(ip_addr);
1144
1145 *ipend = end;
1146
1147 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1148 ret, ret ? "failed" : ceph_pr_addr(ss));
1149
1150 return ret;
1151}
1152#else
1153static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1154 struct sockaddr_storage *ss, char delim, const char **ipend)
1155{
1156 return -EINVAL;
1157}
1158#endif
1159
1160/*
1161 * Parse a server name (IP or hostname). If a valid IP address is not found
1162 * then try to extract a hostname to resolve using userspace DNS upcall.
1163 */
1164static int ceph_parse_server_name(const char *name, size_t namelen,
1165 struct sockaddr_storage *ss, char delim, const char **ipend)
1166{
1167 int ret;
1168
1169 ret = ceph_pton(name, namelen, ss, delim, ipend);
1170 if (ret)
1171 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1172
1173 return ret;
1174}
1175
1176/*
1081 * Parse an ip[:port] list into an addr array. Use the default 1177 * Parse an ip[:port] list into an addr array. Use the default
1082 * monitor port if a port isn't specified. 1178 * monitor port if a port isn't specified.
1083 */ 1179 */
@@ -1085,15 +1181,13 @@ int ceph_parse_ips(const char *c, const char *end,
1085 struct ceph_entity_addr *addr, 1181 struct ceph_entity_addr *addr,
1086 int max_count, int *count) 1182 int max_count, int *count)
1087{ 1183{
1088 int i; 1184 int i, ret = -EINVAL;
1089 const char *p = c; 1185 const char *p = c;
1090 1186
1091 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1187 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1092 for (i = 0; i < max_count; i++) { 1188 for (i = 0; i < max_count; i++) {
1093 const char *ipend; 1189 const char *ipend;
1094 struct sockaddr_storage *ss = &addr[i].in_addr; 1190 struct sockaddr_storage *ss = &addr[i].in_addr;
1095 struct sockaddr_in *in4 = (void *)ss;
1096 struct sockaddr_in6 *in6 = (void *)ss;
1097 int port; 1191 int port;
1098 char delim = ','; 1192 char delim = ',';
1099 1193
@@ -1102,15 +1196,11 @@ int ceph_parse_ips(const char *c, const char *end,
1102 p++; 1196 p++;
1103 } 1197 }
1104 1198
1105 memset(ss, 0, sizeof(*ss)); 1199 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1106 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, 1200 if (ret)
1107 delim, &ipend))
1108 ss->ss_family = AF_INET;
1109 else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1110 delim, &ipend))
1111 ss->ss_family = AF_INET6;
1112 else
1113 goto bad; 1201 goto bad;
1202 ret = -EINVAL;
1203
1114 p = ipend; 1204 p = ipend;
1115 1205
1116 if (delim == ']') { 1206 if (delim == ']') {
@@ -1155,7 +1245,7 @@ int ceph_parse_ips(const char *c, const char *end,
1155 1245
1156bad: 1246bad:
1157 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1247 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1158 return -EINVAL; 1248 return ret;
1159} 1249}
1160EXPORT_SYMBOL(ceph_parse_ips); 1250EXPORT_SYMBOL(ceph_parse_ips);
1161 1251
@@ -2281,7 +2371,8 @@ EXPORT_SYMBOL(ceph_con_keepalive);
2281 * construct a new message with given type, size 2371 * construct a new message with given type, size
2282 * the new msg has a ref count of 1. 2372 * the new msg has a ref count of 1.
2283 */ 2373 */
2284struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) 2374struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2375 bool can_fail)
2285{ 2376{
2286 struct ceph_msg *m; 2377 struct ceph_msg *m;
2287 2378
@@ -2333,7 +2424,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2333 m->front.iov_base = kmalloc(front_len, flags); 2424 m->front.iov_base = kmalloc(front_len, flags);
2334 } 2425 }
2335 if (m->front.iov_base == NULL) { 2426 if (m->front.iov_base == NULL) {
2336 pr_err("msg_new can't allocate %d bytes\n", 2427 dout("ceph_msg_new can't allocate %d bytes\n",
2337 front_len); 2428 front_len);
2338 goto out2; 2429 goto out2;
2339 } 2430 }
@@ -2348,7 +2439,14 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2348out2: 2439out2:
2349 ceph_msg_put(m); 2440 ceph_msg_put(m);
2350out: 2441out:
2351 pr_err("msg_new can't create type %d front %d\n", type, front_len); 2442 if (!can_fail) {
2443 pr_err("msg_new can't create type %d front %d\n", type,
2444 front_len);
2445 WARN_ON(1);
2446 } else {
2447 dout("msg_new can't create type %d front %d\n", type,
2448 front_len);
2449 }
2352 return NULL; 2450 return NULL;
2353} 2451}
2354EXPORT_SYMBOL(ceph_msg_new); 2452EXPORT_SYMBOL(ceph_msg_new);
@@ -2398,7 +2496,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2398 } 2496 }
2399 if (!msg) { 2497 if (!msg) {
2400 *skip = 0; 2498 *skip = 0;
2401 msg = ceph_msg_new(type, front_len, GFP_NOFS); 2499 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
2402 if (!msg) { 2500 if (!msg) {
2403 pr_err("unable to allocate msg type %d len %d\n", 2501 pr_err("unable to allocate msg type %d len %d\n",
2404 type, front_len); 2502 type, front_len);
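
The new can_fail flag splits ceph_msg_new() callers into two classes, as the mon_client and msgpool hunks below illustrate: true means an allocation failure is tolerated and reported quietly as NULL, false means a failure indicates a bug and earns a pr_err() plus WARN_ON(1). Typical usage after this change:

/* tolerated failure: just propagate -ENOMEM */
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, true);
if (!req->request)
	goto out;

/* unexpected failure: ceph_msg_new() itself warns loudly */
msg = ceph_msg_new(type, front_len, GFP_NOFS, false);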
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index cbe31fa45508..0b62deae42bd 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -116,14 +116,12 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
116 */ 116 */
117static void __close_session(struct ceph_mon_client *monc) 117static void __close_session(struct ceph_mon_client *monc)
118{ 118{
119 if (monc->con) { 119 dout("__close_session closing mon%d\n", monc->cur_mon);
120 dout("__close_session closing mon%d\n", monc->cur_mon); 120 ceph_con_revoke(monc->con, monc->m_auth);
121 ceph_con_revoke(monc->con, monc->m_auth); 121 ceph_con_close(monc->con);
122 ceph_con_close(monc->con); 122 monc->cur_mon = -1;
123 monc->cur_mon = -1; 123 monc->pending_auth = 0;
124 monc->pending_auth = 0; 124 ceph_auth_reset(monc->auth);
125 ceph_auth_reset(monc->auth);
126 }
127} 125}
128 126
129/* 127/*
@@ -302,15 +300,6 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
302 */ 300 */
303int ceph_monc_open_session(struct ceph_mon_client *monc) 301int ceph_monc_open_session(struct ceph_mon_client *monc)
304{ 302{
305 if (!monc->con) {
306 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
307 if (!monc->con)
308 return -ENOMEM;
309 ceph_con_init(monc->client->msgr, monc->con);
310 monc->con->private = monc;
311 monc->con->ops = &mon_con_ops;
312 }
313
314 mutex_lock(&monc->mutex); 303 mutex_lock(&monc->mutex);
315 __open_session(monc); 304 __open_session(monc);
316 __schedule_delayed(monc); 305 __schedule_delayed(monc);
@@ -528,10 +517,12 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
528 init_completion(&req->completion); 517 init_completion(&req->completion);
529 518
530 err = -ENOMEM; 519 err = -ENOMEM;
531 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS); 520 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
521 true);
532 if (!req->request) 522 if (!req->request)
533 goto out; 523 goto out;
534 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS); 524 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
525 true);
535 if (!req->reply) 526 if (!req->reply)
536 goto out; 527 goto out;
537 528
@@ -626,10 +617,12 @@ int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
626 init_completion(&req->completion); 617 init_completion(&req->completion);
627 618
628 err = -ENOMEM; 619 err = -ENOMEM;
629 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS); 620 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
621 true);
630 if (!req->request) 622 if (!req->request)
631 goto out; 623 goto out;
632 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS); 624 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
625 true);
633 if (!req->reply) 626 if (!req->reply)
634 goto out; 627 goto out;
635 628
@@ -755,13 +748,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
755 if (err) 748 if (err)
756 goto out; 749 goto out;
757 750
758 monc->con = NULL; 751 /* connection */
752 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
753 if (!monc->con)
754 goto out_monmap;
755 ceph_con_init(monc->client->msgr, monc->con);
756 monc->con->private = monc;
757 monc->con->ops = &mon_con_ops;
759 758
760 /* authentication */ 759 /* authentication */
761 monc->auth = ceph_auth_init(cl->options->name, 760 monc->auth = ceph_auth_init(cl->options->name,
762 cl->options->key); 761 cl->options->key);
763 if (IS_ERR(monc->auth)) 762 if (IS_ERR(monc->auth)) {
764 return PTR_ERR(monc->auth); 763 err = PTR_ERR(monc->auth);
764 goto out_con;
765 }
765 monc->auth->want_keys = 766 monc->auth->want_keys =
766 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | 767 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
767 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; 768 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
@@ -770,19 +771,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
770 err = -ENOMEM; 771 err = -ENOMEM;
771 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, 772 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
772 sizeof(struct ceph_mon_subscribe_ack), 773 sizeof(struct ceph_mon_subscribe_ack),
773 GFP_NOFS); 774 GFP_NOFS, true);
774 if (!monc->m_subscribe_ack) 775 if (!monc->m_subscribe_ack)
775 goto out_monmap; 776 goto out_auth;
776 777
777 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS); 778 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
779 true);
778 if (!monc->m_subscribe) 780 if (!monc->m_subscribe)
779 goto out_subscribe_ack; 781 goto out_subscribe_ack;
780 782
781 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS); 783 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
784 true);
782 if (!monc->m_auth_reply) 785 if (!monc->m_auth_reply)
783 goto out_subscribe; 786 goto out_subscribe;
784 787
785 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS); 788 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
786 monc->pending_auth = 0; 789 monc->pending_auth = 0;
787 if (!monc->m_auth) 790 if (!monc->m_auth)
788 goto out_auth_reply; 791 goto out_auth_reply;
@@ -808,6 +811,10 @@ out_subscribe:
808 ceph_msg_put(monc->m_subscribe); 811 ceph_msg_put(monc->m_subscribe);
809out_subscribe_ack: 812out_subscribe_ack:
810 ceph_msg_put(monc->m_subscribe_ack); 813 ceph_msg_put(monc->m_subscribe_ack);
814out_auth:
815 ceph_auth_destroy(monc->auth);
816out_con:
817 monc->con->ops->put(monc->con);
811out_monmap: 818out_monmap:
812 kfree(monc->monmap); 819 kfree(monc->monmap);
813out: 820out:
@@ -822,11 +829,11 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
822 829
823 mutex_lock(&monc->mutex); 830 mutex_lock(&monc->mutex);
824 __close_session(monc); 831 __close_session(monc);
825 if (monc->con) { 832
826 monc->con->private = NULL; 833 monc->con->private = NULL;
827 monc->con->ops->put(monc->con); 834 monc->con->ops->put(monc->con);
828 monc->con = NULL; 835 monc->con = NULL;
829 } 836
830 mutex_unlock(&monc->mutex); 837 mutex_unlock(&monc->mutex);
831 838
832 ceph_auth_destroy(monc->auth); 839 ceph_auth_destroy(monc->auth);
@@ -973,7 +980,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
973 case CEPH_MSG_MON_MAP: 980 case CEPH_MSG_MON_MAP:
974 case CEPH_MSG_MDS_MAP: 981 case CEPH_MSG_MDS_MAP:
975 case CEPH_MSG_OSD_MAP: 982 case CEPH_MSG_OSD_MAP:
976 m = ceph_msg_new(type, front_len, GFP_NOFS); 983 m = ceph_msg_new(type, front_len, GFP_NOFS, false);
977 break; 984 break;
978 } 985 }
979 986
@@ -1000,7 +1007,7 @@ static void mon_fault(struct ceph_connection *con)
1000 if (!con->private) 1007 if (!con->private)
1001 goto out; 1008 goto out;
1002 1009
1003 if (monc->con && !monc->hunting) 1010 if (!monc->hunting)
1004 pr_info("mon%d %s session lost, " 1011 pr_info("mon%d %s session lost, "
1005 "hunting for new mon\n", monc->cur_mon, 1012 "hunting for new mon\n", monc->cur_mon,
1006 ceph_pr_addr(&monc->con->peer_addr.in_addr)); 1013 ceph_pr_addr(&monc->con->peer_addr.in_addr));
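
With the connection allocated up front in ceph_monc_init(), __close_session() and ceph_monc_stop() can drop their NULL checks on monc->con, and initialization becomes a conventional goto-unwind chain. Condensed from the hunks above (allocation order forward, cleanup order reversed):

monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
if (!monc->con)
	goto out_monmap;

monc->auth = ceph_auth_init(cl->options->name, cl->options->key);
if (IS_ERR(monc->auth)) {
	err = PTR_ERR(monc->auth);
	goto out_con;
}
/* ... message buffer allocations, each jumping to out_auth on failure ... */

out_auth:
	ceph_auth_destroy(monc->auth);
out_con:
	monc->con->ops->put(monc->con);
out_monmap:
	kfree(monc->monmap);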
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 1f4cb30a42c5..11d5f4196a73 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
12 struct ceph_msgpool *pool = arg; 12 struct ceph_msgpool *pool = arg;
13 struct ceph_msg *msg; 13 struct ceph_msg *msg;
14 14
15 msg = ceph_msg_new(0, pool->front_len, gfp_mask); 15 msg = ceph_msg_new(0, pool->front_len, gfp_mask, true);
16 if (!msg) { 16 if (!msg) {
17 dout("msgpool_alloc %s failed\n", pool->name); 17 dout("msgpool_alloc %s failed\n", pool->name);
18 } else { 18 } else {
@@ -61,7 +61,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
61 WARN_ON(1); 61 WARN_ON(1);
62 62
63 /* try to alloc a fresh message */ 63 /* try to alloc a fresh message */
64 return ceph_msg_new(0, front_len, GFP_NOFS); 64 return ceph_msg_new(0, front_len, GFP_NOFS, false);
65 } 65 }
66 66
67 msg = mempool_alloc(pool->pool, GFP_NOFS); 67 msg = mempool_alloc(pool->pool, GFP_NOFS);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 88ad8a2501b5..733e46008b89 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -227,7 +227,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
227 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); 227 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
228 else 228 else
229 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, 229 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
230 OSD_OPREPLY_FRONT_LEN, gfp_flags); 230 OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
231 if (!msg) { 231 if (!msg) {
232 ceph_osdc_put_request(req); 232 ceph_osdc_put_request(req);
233 return NULL; 233 return NULL;
@@ -250,7 +250,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
250 if (use_mempool) 250 if (use_mempool)
251 msg = ceph_msgpool_get(&osdc->msgpool_op, 0); 251 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
252 else 252 else
253 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags); 253 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
254 if (!msg) { 254 if (!msg) {
255 ceph_osdc_put_request(req); 255 ceph_osdc_put_request(req);
256 return NULL; 256 return NULL;
@@ -943,7 +943,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
943 * Caller should hold map_sem for read and request_mutex. 943 * Caller should hold map_sem for read and request_mutex.
944 */ 944 */
945static int __map_request(struct ceph_osd_client *osdc, 945static int __map_request(struct ceph_osd_client *osdc,
946 struct ceph_osd_request *req) 946 struct ceph_osd_request *req, int force_resend)
947{ 947{
948 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; 948 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
949 struct ceph_pg pgid; 949 struct ceph_pg pgid;
@@ -967,7 +967,8 @@ static int __map_request(struct ceph_osd_client *osdc,
967 num = err; 967 num = err;
968 } 968 }
969 969
970 if ((req->r_osd && req->r_osd->o_osd == o && 970 if ((!force_resend &&
971 req->r_osd && req->r_osd->o_osd == o &&
971 req->r_sent >= req->r_osd->o_incarnation && 972 req->r_sent >= req->r_osd->o_incarnation &&
972 req->r_num_pg_osds == num && 973 req->r_num_pg_osds == num &&
973 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || 974 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
@@ -1289,18 +1290,18 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
1289 * 1290 *
1290 * Caller should hold map_sem for read and request_mutex. 1291 * Caller should hold map_sem for read and request_mutex.
1291 */ 1292 */
1292static void kick_requests(struct ceph_osd_client *osdc) 1293static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1293{ 1294{
1294 struct ceph_osd_request *req, *nreq; 1295 struct ceph_osd_request *req, *nreq;
1295 struct rb_node *p; 1296 struct rb_node *p;
1296 int needmap = 0; 1297 int needmap = 0;
1297 int err; 1298 int err;
1298 1299
1299 dout("kick_requests\n"); 1300 dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
1300 mutex_lock(&osdc->request_mutex); 1301 mutex_lock(&osdc->request_mutex);
1301 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { 1302 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
1302 req = rb_entry(p, struct ceph_osd_request, r_node); 1303 req = rb_entry(p, struct ceph_osd_request, r_node);
1303 err = __map_request(osdc, req); 1304 err = __map_request(osdc, req, force_resend);
1304 if (err < 0) 1305 if (err < 0)
1305 continue; /* error */ 1306 continue; /* error */
1306 if (req->r_osd == NULL) { 1307 if (req->r_osd == NULL) {
@@ -1318,7 +1319,7 @@ static void kick_requests(struct ceph_osd_client *osdc)
1318 r_linger_item) { 1319 r_linger_item) {
1319 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); 1320 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1320 1321
1321 err = __map_request(osdc, req); 1322 err = __map_request(osdc, req, force_resend);
1322 if (err == 0) 1323 if (err == 0)
1323 continue; /* no change and no osd was specified */ 1324 continue; /* no change and no osd was specified */
1324 if (err < 0) 1325 if (err < 0)
@@ -1395,7 +1396,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1395 ceph_osdmap_destroy(osdc->osdmap); 1396 ceph_osdmap_destroy(osdc->osdmap);
1396 osdc->osdmap = newmap; 1397 osdc->osdmap = newmap;
1397 } 1398 }
1398 kick_requests(osdc); 1399 kick_requests(osdc, 0);
1399 reset_changed_osds(osdc); 1400 reset_changed_osds(osdc);
1400 } else { 1401 } else {
1401 dout("ignoring incremental map %u len %d\n", 1402 dout("ignoring incremental map %u len %d\n",
@@ -1423,6 +1424,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1423 "older than our %u\n", epoch, maplen, 1424 "older than our %u\n", epoch, maplen,
1424 osdc->osdmap->epoch); 1425 osdc->osdmap->epoch);
1425 } else { 1426 } else {
1427 int skipped_map = 0;
1428
1426 dout("taking full map %u len %d\n", epoch, maplen); 1429 dout("taking full map %u len %d\n", epoch, maplen);
1427 newmap = osdmap_decode(&p, p+maplen); 1430 newmap = osdmap_decode(&p, p+maplen);
1428 if (IS_ERR(newmap)) { 1431 if (IS_ERR(newmap)) {
@@ -1432,9 +1435,12 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1432 BUG_ON(!newmap); 1435 BUG_ON(!newmap);
1433 oldmap = osdc->osdmap; 1436 oldmap = osdc->osdmap;
1434 osdc->osdmap = newmap; 1437 osdc->osdmap = newmap;
1435 if (oldmap) 1438 if (oldmap) {
1439 if (oldmap->epoch + 1 < newmap->epoch)
1440 skipped_map = 1;
1436 ceph_osdmap_destroy(oldmap); 1441 ceph_osdmap_destroy(oldmap);
1437 kick_requests(osdc); 1442 }
1443 kick_requests(osdc, skipped_map);
1438 } 1444 }
1439 p += maplen; 1445 p += maplen;
1440 nr_maps--; 1446 nr_maps--;
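The skipped_map bookkeeping above closes a correctness hole: __map_request() normally skips a resend when the computed placement looks unchanged, but that comparison is only valid if the client saw every intervening osdmap epoch. When a full map arrives more than one epoch ahead of the previous one, the gap means requests may have moved and moved back unseen, so kick_requests() is told to force a resend of everything. A runnable model of the gap test alone:

#include <stdio.h>

/* Model of the check at osdmap swap time: a gap between consecutive
 * epochs means intermediate maps were skipped. */
static int must_force_resend(unsigned int old_epoch, unsigned int new_epoch)
{
	return old_epoch + 1 < new_epoch;
}

int main(void)
{
	printf("%d\n", must_force_resend(10, 11)); /* 0: no epoch skipped */
	printf("%d\n", must_force_resend(10, 13)); /* 1: epochs 11 and 12 skipped */
	return 0;
}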
@@ -1707,7 +1713,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 1707 * the request still hasn't been touched yet. 1713 * the request still hasn't been touched yet.
1708 */ 1714 */
1709 if (req->r_sent == 0) { 1715 if (req->r_sent == 0) {
1710 rc = __map_request(osdc, req); 1716 rc = __map_request(osdc, req, 0);
1711 if (rc < 0) { 1717 if (rc < 0) {
1712 if (nofail) { 1718 if (nofail) {
1713 dout("osdc_start_request failed map, " 1719 dout("osdc_start_request failed map, "
@@ -2032,7 +2038,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2032 if (front > req->r_reply->front.iov_len) { 2038 if (front > req->r_reply->front.iov_len) {
2033 pr_warning("get_reply front %d > preallocated %d\n", 2039 pr_warning("get_reply front %d > preallocated %d\n",
2034 front, (int)req->r_reply->front.iov_len); 2040 front, (int)req->r_reply->front.iov_len);
2035 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS); 2041 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
2036 if (!m) 2042 if (!m)
2037 goto out; 2043 goto out;
2038 ceph_msg_put(req->r_reply); 2044 ceph_msg_put(req->r_reply);
@@ -2080,7 +2086,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2080 switch (type) { 2086 switch (type) {
2081 case CEPH_MSG_OSD_MAP: 2087 case CEPH_MSG_OSD_MAP:
2082 case CEPH_MSG_WATCH_NOTIFY: 2088 case CEPH_MSG_WATCH_NOTIFY:
2083 return ceph_msg_new(type, front, GFP_NOFS); 2089 return ceph_msg_new(type, front, GFP_NOFS, false);
2084 case CEPH_MSG_OSD_OPREPLY: 2090 case CEPH_MSG_OSD_OPREPLY:
2085 return get_reply(con, hdr, skip); 2091 return get_reply(con, hdr, skip);
2086 default: 2092 default:
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 18ac112ea7ae..68bbf9f65cb0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,15 +324,15 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
324 /* Copy paged appendix. Hmm... why does this look so complicated? */ 324 /* Copy paged appendix. Hmm... why does this look so complicated? */
325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
326 int end; 326 int end;
327 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
327 328
328 WARN_ON(start > offset + len); 329 WARN_ON(start > offset + len);
329 330
330 end = start + skb_shinfo(skb)->frags[i].size; 331 end = start + skb_frag_size(frag);
331 if ((copy = end - offset) > 0) { 332 if ((copy = end - offset) > 0) {
332 int err; 333 int err;
333 u8 *vaddr; 334 u8 *vaddr;
334 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 335 struct page *page = skb_frag_page(frag);
335 struct page *page = frag->page;
336 336
337 if (copy > len) 337 if (copy > len)
338 copy = len; 338 copy = len;
@@ -410,15 +410,15 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
410 /* Copy paged appendix. Hmm... why does this look so complicated? */ 410 /* Copy paged appendix. Hmm... why does this look so complicated? */
411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
412 int end; 412 int end;
413 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
413 414
414 WARN_ON(start > offset + len); 415 WARN_ON(start > offset + len);
415 416
416 end = start + skb_shinfo(skb)->frags[i].size; 417 end = start + skb_frag_size(frag);
417 if ((copy = end - offset) > 0) { 418 if ((copy = end - offset) > 0) {
418 int err; 419 int err;
419 u8 *vaddr; 420 u8 *vaddr;
420 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 421 struct page *page = skb_frag_page(frag);
421 struct page *page = frag->page;
422 422
423 if (copy > len) 423 if (copy > len)
424 copy = len; 424 copy = len;
@@ -500,15 +500,15 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
500 /* Copy paged appendix. Hmm... why does this look so complicated? */ 500 /* Copy paged appendix. Hmm... why does this look so complicated? */
501 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 501 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
502 int end; 502 int end;
503 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
503 504
504 WARN_ON(start > offset + len); 505 WARN_ON(start > offset + len);
505 506
506 end = start + skb_shinfo(skb)->frags[i].size; 507 end = start + skb_frag_size(frag);
507 if ((copy = end - offset) > 0) { 508 if ((copy = end - offset) > 0) {
508 int err; 509 int err;
509 u8 *vaddr; 510 u8 *vaddr;
510 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 511 struct page *page = skb_frag_page(frag);
511 struct page *page = frag->page;
512 512
513 if (copy > len) 513 if (copy > len)
514 copy = len; 514 copy = len;
@@ -585,16 +585,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
585 585
586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
587 int end; 587 int end;
588 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
588 589
589 WARN_ON(start > offset + len); 590 WARN_ON(start > offset + len);
590 591
591 end = start + skb_shinfo(skb)->frags[i].size; 592 end = start + skb_frag_size(frag);
592 if ((copy = end - offset) > 0) { 593 if ((copy = end - offset) > 0) {
593 __wsum csum2; 594 __wsum csum2;
594 int err = 0; 595 int err = 0;
595 u8 *vaddr; 596 u8 *vaddr;
596 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 597 struct page *page = skb_frag_page(frag);
597 struct page *page = frag->page;
598 598
599 if (copy > len) 599 if (copy > len)
600 copy = len; 600 copy = len;
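All four datagram.c hunks above are the same mechanical conversion: instead of reaching into skb_frag_t fields directly (frag->page, frag->size), code now goes through accessor helpers, so the fragment representation can change later without touching every call site. A short sketch of the resulting idiom, assuming a kernel-module context:

/* Sketch (kernel context): summing paged-fragment bytes through the
 * skb_frag_page()/skb_frag_size() accessors used above. */
#include <linux/skbuff.h>

static unsigned int frag_bytes(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag); /* was frag->page */

		(void)page; /* a real user would kmap or DMA-map it */
		total += skb_frag_size(frag);            /* was frag->size */
	}
	return total;
}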
diff --git a/net/core/dev.c b/net/core/dev.c
index b10ff0a71855..6ba50a1e404c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,10 @@
133#include <linux/pci.h> 133#include <linux/pci.h>
134#include <linux/inetdevice.h> 134#include <linux/inetdevice.h>
135#include <linux/cpu_rmap.h> 135#include <linux/cpu_rmap.h>
136#include <linux/if_tunnel.h>
137#include <linux/if_pppox.h>
138#include <linux/ppp_defs.h>
139#include <linux/net_tstamp.h>
136 140
137#include "net-sysfs.h" 141#include "net-sysfs.h"
138 142
@@ -1474,6 +1478,57 @@ static inline void net_timestamp_check(struct sk_buff *skb)
1474 __net_timestamp(skb); 1478 __net_timestamp(skb);
1475} 1479}
1476 1480
1481static int net_hwtstamp_validate(struct ifreq *ifr)
1482{
1483 struct hwtstamp_config cfg;
1484 enum hwtstamp_tx_types tx_type;
1485 enum hwtstamp_rx_filters rx_filter;
1486 int tx_type_valid = 0;
1487 int rx_filter_valid = 0;
1488
1489 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1490 return -EFAULT;
1491
1492 if (cfg.flags) /* reserved for future extensions */
1493 return -EINVAL;
1494
1495 tx_type = cfg.tx_type;
1496 rx_filter = cfg.rx_filter;
1497
1498 switch (tx_type) {
1499 case HWTSTAMP_TX_OFF:
1500 case HWTSTAMP_TX_ON:
1501 case HWTSTAMP_TX_ONESTEP_SYNC:
1502 tx_type_valid = 1;
1503 break;
1504 }
1505
1506 switch (rx_filter) {
1507 case HWTSTAMP_FILTER_NONE:
1508 case HWTSTAMP_FILTER_ALL:
1509 case HWTSTAMP_FILTER_SOME:
1510 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1513 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1514 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1515 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1516 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1518 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1519 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1520 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1521 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1522 rx_filter_valid = 1;
1523 break;
1524 }
1525
1526 if (!tx_type_valid || !rx_filter_valid)
1527 return -ERANGE;
1528
1529 return 0;
1530}
1531
1477static inline bool is_skb_forwardable(struct net_device *dev, 1532static inline bool is_skb_forwardable(struct net_device *dev,
1478 struct sk_buff *skb) 1533 struct sk_buff *skb)
1479{ 1534{
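net_hwtstamp_validate() centralizes the sanity checking of SIOCSHWTSTAMP requests that every driver previously had to repeat: the reserved flags field must be zero, and tx_type/rx_filter must be known enumerators, otherwise the ioctl fails with -EFAULT, -EINVAL or -ERANGE before the driver ever sees it. A userspace sketch of the call being validated (device name purely illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.flags = 0,                               /* reserved, must be 0 */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);      /* example device */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");                  /* -ERANGE on bad enum values */
	close(fd);
	return 0;
}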
@@ -1955,9 +2010,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1955#ifdef CONFIG_HIGHMEM 2010#ifdef CONFIG_HIGHMEM
1956 int i; 2011 int i;
1957 if (!(dev->features & NETIF_F_HIGHDMA)) { 2012 if (!(dev->features & NETIF_F_HIGHDMA)) {
1958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2013 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1959 if (PageHighMem(skb_shinfo(skb)->frags[i].page)) 2014 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2015 if (PageHighMem(skb_frag_page(frag)))
1960 return 1; 2016 return 1;
2017 }
1961 } 2018 }
1962 2019
1963 if (PCI_DMA_BUS_IS_PHYS) { 2020 if (PCI_DMA_BUS_IS_PHYS) {
@@ -1966,7 +2023,8 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1966 if (!pdev) 2023 if (!pdev)
1967 return 0; 2024 return 0;
1968 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1969 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); 2026 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2027 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
1970 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2028 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1971 return 1; 2029 return 1;
1972 } 2030 }
@@ -2527,25 +2585,31 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2527 2585
2528/* 2586/*
2529 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses 2587 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2530 * and src/dst port numbers. Returns a non-zero hash number on success 2588 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2531 * and 0 on failure. 2589 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2590 * if hash is a canonical 4-tuple hash over transport ports.
2532 */ 2591 */
2533__u32 __skb_get_rxhash(struct sk_buff *skb) 2592void __skb_get_rxhash(struct sk_buff *skb)
2534{ 2593{
2535 int nhoff, hash = 0, poff; 2594 int nhoff, hash = 0, poff;
2536 const struct ipv6hdr *ip6; 2595 const struct ipv6hdr *ip6;
2537 const struct iphdr *ip; 2596 const struct iphdr *ip;
2597 const struct vlan_hdr *vlan;
2538 u8 ip_proto; 2598 u8 ip_proto;
2539 u32 addr1, addr2, ihl; 2599 u32 addr1, addr2;
2600 u16 proto;
2540 union { 2601 union {
2541 u32 v32; 2602 u32 v32;
2542 u16 v16[2]; 2603 u16 v16[2];
2543 } ports; 2604 } ports;
2544 2605
2545 nhoff = skb_network_offset(skb); 2606 nhoff = skb_network_offset(skb);
2607 proto = skb->protocol;
2546 2608
2547 switch (skb->protocol) { 2609again:
2610 switch (proto) {
2548 case __constant_htons(ETH_P_IP): 2611 case __constant_htons(ETH_P_IP):
2612ip:
2549 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2613 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2550 goto done; 2614 goto done;
2551 2615
@@ -2556,9 +2620,10 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2556 ip_proto = ip->protocol; 2620 ip_proto = ip->protocol;
2557 addr1 = (__force u32) ip->saddr; 2621 addr1 = (__force u32) ip->saddr;
2558 addr2 = (__force u32) ip->daddr; 2622 addr2 = (__force u32) ip->daddr;
2559 ihl = ip->ihl; 2623 nhoff += ip->ihl * 4;
2560 break; 2624 break;
2561 case __constant_htons(ETH_P_IPV6): 2625 case __constant_htons(ETH_P_IPV6):
2626ipv6:
2562 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2627 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2563 goto done; 2628 goto done;
2564 2629
@@ -2566,20 +2631,71 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2566 ip_proto = ip6->nexthdr; 2631 ip_proto = ip6->nexthdr;
2567 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2632 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2568 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2633 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2569 ihl = (40 >> 2); 2634 nhoff += 40;
2570 break; 2635 break;
2636 case __constant_htons(ETH_P_8021Q):
2637 if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
2638 goto done;
2639 vlan = (const struct vlan_hdr *) (skb->data + nhoff);
2640 proto = vlan->h_vlan_encapsulated_proto;
2641 nhoff += sizeof(*vlan);
2642 goto again;
2643 case __constant_htons(ETH_P_PPP_SES):
2644 if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
2645 goto done;
2646 proto = *((__be16 *) (skb->data + nhoff +
2647 sizeof(struct pppoe_hdr)));
2648 nhoff += PPPOE_SES_HLEN;
2649 switch (proto) {
2650 case __constant_htons(PPP_IP):
2651 goto ip;
2652 case __constant_htons(PPP_IPV6):
2653 goto ipv6;
2654 default:
2655 goto done;
2656 }
2571 default: 2657 default:
2572 goto done; 2658 goto done;
2573 } 2659 }
2574 2660
2661 switch (ip_proto) {
2662 case IPPROTO_GRE:
2663 if (pskb_may_pull(skb, nhoff + 16)) {
2664 u8 *h = skb->data + nhoff;
2665 __be16 flags = *(__be16 *)h;
2666
2667 /*
2668 * Only look inside GRE if version zero and no
2669 * routing
2670 */
2671 if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
2672 proto = *(__be16 *)(h + 2);
2673 nhoff += 4;
2674 if (flags & GRE_CSUM)
2675 nhoff += 4;
2676 if (flags & GRE_KEY)
2677 nhoff += 4;
2678 if (flags & GRE_SEQ)
2679 nhoff += 4;
2680 goto again;
2681 }
2682 }
2683 break;
2684 case IPPROTO_IPIP:
2685 goto again;
2686 default:
2687 break;
2688 }
2689
2575 ports.v32 = 0; 2690 ports.v32 = 0;
2576 poff = proto_ports_offset(ip_proto); 2691 poff = proto_ports_offset(ip_proto);
2577 if (poff >= 0) { 2692 if (poff >= 0) {
2578 nhoff += ihl * 4 + poff; 2693 nhoff += poff;
2579 if (pskb_may_pull(skb, nhoff + 4)) { 2694 if (pskb_may_pull(skb, nhoff + 4)) {
2580 ports.v32 = * (__force u32 *) (skb->data + nhoff); 2695 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2581 if (ports.v16[1] < ports.v16[0]) 2696 if (ports.v16[1] < ports.v16[0])
2582 swap(ports.v16[0], ports.v16[1]); 2697 swap(ports.v16[0], ports.v16[1]);
2698 skb->l4_rxhash = 1;
2583 } 2699 }
2584 } 2700 }
2585 2701
@@ -2592,7 +2708,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2592 hash = 1; 2708 hash = 1;
2593 2709
2594done: 2710done:
2595 return hash; 2711 skb->rxhash = hash;
2596} 2712}
2597EXPORT_SYMBOL(__skb_get_rxhash); 2713EXPORT_SYMBOL(__skb_get_rxhash);
2598 2714
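Besides growing VLAN, PPPoE-session and GRE decapsulation, the rewritten __skb_get_rxhash() changes its contract: the hash is written into skb->rxhash rather than returned, and l4_rxhash records whether transport ports contributed (a canonical 4-tuple hash). The port handling is worth seeing in isolation: the two 16-bit ports are ordered before hashing so both directions of a flow feed identical input to the hash. A runnable model:

#include <stdio.h>
#include <stdint.h>

union ports {
	uint32_t v32;
	uint16_t v16[2];
};

/* Mirror of the swap above: order the ports so that (80, 40000) and
 * (40000, 80) canonicalize to the same 32-bit value. */
static uint32_t canonical(uint16_t sport, uint16_t dport)
{
	union ports p = { .v16 = { sport, dport } };

	if (p.v16[1] < p.v16[0]) {
		uint16_t tmp = p.v16[0];
		p.v16[0] = p.v16[1];
		p.v16[1] = tmp;
	}
	return p.v32;
}

int main(void)
{
	printf("%u %u\n", (unsigned)canonical(80, 40000),
	       (unsigned)canonical(40000, 80)); /* prints the same value twice */
	return 0;
}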
@@ -2606,10 +2722,7 @@ static struct rps_dev_flow *
2606set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2722set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2607 struct rps_dev_flow *rflow, u16 next_cpu) 2723 struct rps_dev_flow *rflow, u16 next_cpu)
2608{ 2724{
2609 u16 tcpu; 2725 if (next_cpu != RPS_NO_CPU) {
2610
2611 tcpu = rflow->cpu = next_cpu;
2612 if (tcpu != RPS_NO_CPU) {
2613#ifdef CONFIG_RFS_ACCEL 2726#ifdef CONFIG_RFS_ACCEL
2614 struct netdev_rx_queue *rxqueue; 2727 struct netdev_rx_queue *rxqueue;
2615 struct rps_dev_flow_table *flow_table; 2728 struct rps_dev_flow_table *flow_table;
@@ -2637,16 +2750,16 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2637 goto out; 2750 goto out;
2638 old_rflow = rflow; 2751 old_rflow = rflow;
2639 rflow = &flow_table->flows[flow_id]; 2752 rflow = &flow_table->flows[flow_id];
2640 rflow->cpu = next_cpu;
2641 rflow->filter = rc; 2753 rflow->filter = rc;
2642 if (old_rflow->filter == rflow->filter) 2754 if (old_rflow->filter == rflow->filter)
2643 old_rflow->filter = RPS_NO_FILTER; 2755 old_rflow->filter = RPS_NO_FILTER;
2644 out: 2756 out:
2645#endif 2757#endif
2646 rflow->last_qtail = 2758 rflow->last_qtail =
2647 per_cpu(softnet_data, tcpu).input_queue_head; 2759 per_cpu(softnet_data, next_cpu).input_queue_head;
2648 } 2760 }
2649 2761
2762 rflow->cpu = next_cpu;
2650 return rflow; 2763 return rflow;
2651} 2764}
2652 2765
@@ -2681,13 +2794,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2681 map = rcu_dereference(rxqueue->rps_map); 2794 map = rcu_dereference(rxqueue->rps_map);
2682 if (map) { 2795 if (map) {
2683 if (map->len == 1 && 2796 if (map->len == 1 &&
2684 !rcu_dereference_raw(rxqueue->rps_flow_table)) { 2797 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2685 tcpu = map->cpus[0]; 2798 tcpu = map->cpus[0];
2686 if (cpu_online(tcpu)) 2799 if (cpu_online(tcpu))
2687 cpu = tcpu; 2800 cpu = tcpu;
2688 goto done; 2801 goto done;
2689 } 2802 }
2690 } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { 2803 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2691 goto done; 2804 goto done;
2692 } 2805 }
2693 2806
@@ -3102,8 +3215,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3102{ 3215{
3103 3216
3104 ASSERT_RTNL(); 3217 ASSERT_RTNL();
3105 rcu_assign_pointer(dev->rx_handler, NULL); 3218 RCU_INIT_POINTER(dev->rx_handler, NULL);
3106 rcu_assign_pointer(dev->rx_handler_data, NULL); 3219 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3107} 3220}
3108EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3221EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3109 3222
@@ -3171,6 +3284,17 @@ ncls:
3171#endif 3284#endif
3172 3285
3173 rx_handler = rcu_dereference(skb->dev->rx_handler); 3286 rx_handler = rcu_dereference(skb->dev->rx_handler);
3287 if (vlan_tx_tag_present(skb)) {
3288 if (pt_prev) {
3289 ret = deliver_skb(skb, pt_prev, orig_dev);
3290 pt_prev = NULL;
3291 }
3292 if (vlan_do_receive(&skb, !rx_handler))
3293 goto another_round;
3294 else if (unlikely(!skb))
3295 goto out;
3296 }
3297
3174 if (rx_handler) { 3298 if (rx_handler) {
3175 if (pt_prev) { 3299 if (pt_prev) {
3176 ret = deliver_skb(skb, pt_prev, orig_dev); 3300 ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3190,18 +3314,6 @@ ncls:
3190 } 3314 }
3191 } 3315 }
3192 3316
3193 if (vlan_tx_tag_present(skb)) {
3194 if (pt_prev) {
3195 ret = deliver_skb(skb, pt_prev, orig_dev);
3196 pt_prev = NULL;
3197 }
3198 if (vlan_do_receive(&skb)) {
3199 ret = __netif_receive_skb(skb);
3200 goto out;
3201 } else if (unlikely(!skb))
3202 goto out;
3203 }
3204
3205 /* deliver only exact match when indicated */ 3317 /* deliver only exact match when indicated */
3206 null_or_dev = deliver_exact ? skb->dev : NULL; 3318 null_or_dev = deliver_exact ? skb->dev : NULL;
3207 3319
@@ -3429,10 +3541,10 @@ pull:
3429 skb->data_len -= grow; 3541 skb->data_len -= grow;
3430 3542
3431 skb_shinfo(skb)->frags[0].page_offset += grow; 3543 skb_shinfo(skb)->frags[0].page_offset += grow;
3432 skb_shinfo(skb)->frags[0].size -= grow; 3544 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3433 3545
3434 if (unlikely(!skb_shinfo(skb)->frags[0].size)) { 3546 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3435 put_page(skb_shinfo(skb)->frags[0].page); 3547 skb_frag_unref(skb, 0);
3436 memmove(skb_shinfo(skb)->frags, 3548 memmove(skb_shinfo(skb)->frags,
3437 skb_shinfo(skb)->frags + 1, 3549 skb_shinfo(skb)->frags + 1,
3438 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3550 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -3496,11 +3608,10 @@ void skb_gro_reset_offset(struct sk_buff *skb)
3496 NAPI_GRO_CB(skb)->frag0_len = 0; 3608 NAPI_GRO_CB(skb)->frag0_len = 0;
3497 3609
3498 if (skb->mac_header == skb->tail && 3610 if (skb->mac_header == skb->tail &&
3499 !PageHighMem(skb_shinfo(skb)->frags[0].page)) { 3611 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3500 NAPI_GRO_CB(skb)->frag0 = 3612 NAPI_GRO_CB(skb)->frag0 =
3501 page_address(skb_shinfo(skb)->frags[0].page) + 3613 skb_frag_address(&skb_shinfo(skb)->frags[0]);
3502 skb_shinfo(skb)->frags[0].page_offset; 3614 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3503 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3504 } 3615 }
3505} 3616}
3506EXPORT_SYMBOL(skb_gro_reset_offset); 3617EXPORT_SYMBOL(skb_gro_reset_offset);
@@ -3982,6 +4093,60 @@ static int dev_ifconf(struct net *net, char __user *arg)
3982} 4093}
3983 4094
3984#ifdef CONFIG_PROC_FS 4095#ifdef CONFIG_PROC_FS
4096
4097#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
4098
4099struct dev_iter_state {
4100 struct seq_net_private p;
4101 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4102};
4103
4104#define get_bucket(x) ((x) >> BUCKET_SPACE)
4105#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4106#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4107
4108static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
4109{
4110 struct dev_iter_state *state = seq->private;
4111 struct net *net = seq_file_net(seq);
4112 struct net_device *dev;
4113 struct hlist_node *p;
4114 struct hlist_head *h;
4115 unsigned int count, bucket, offset;
4116
4117 bucket = get_bucket(state->pos);
4118 offset = get_offset(state->pos);
4119 h = &net->dev_name_head[bucket];
4120 count = 0;
4121 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4122 if (count++ == offset) {
4123 state->pos = set_bucket_offset(bucket, count);
4124 return dev;
4125 }
4126 }
4127
4128 return NULL;
4129}
4130
4131static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4132{
4133 struct dev_iter_state *state = seq->private;
4134 struct net_device *dev;
4135 unsigned int bucket;
4136
4137 bucket = get_bucket(state->pos);
4138 do {
4139 dev = dev_from_same_bucket(seq);
4140 if (dev)
4141 return dev;
4142
4143 bucket++;
4144 state->pos = set_bucket_offset(bucket, 0);
4145 } while (bucket < NETDEV_HASHENTRIES);
4146
4147 return NULL;
4148}
4149
3985/* 4150/*
3986 * This is invoked by the /proc filesystem handler to display a device 4151 * This is invoked by the /proc filesystem handler to display a device
3987 * in detail. 4152 * in detail.
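The new iterator state turns the former O(n) restart of /proc/net/dev into a bucket walk: the saved pos packs a name-hash bucket into the top bits and an offset within that bucket into the low BUCKET_SPACE bits. A runnable model of the packing (NETDEV_HASHBITS is 8 in this era's net/core/dev.c; assumed here):

#include <stdio.h>

#define NETDEV_HASHBITS 8
#define BUCKET_SPACE (32 - NETDEV_HASHBITS)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	unsigned int pos = set_bucket_offset(5, 3);

	/* round-trips: bucket=5 offset=3 */
	printf("bucket=%u offset=%u\n", get_bucket(pos), get_offset(pos));
	return 0;
}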
@@ -3989,33 +4154,33 @@ static int dev_ifconf(struct net *net, char __user *arg)
3989void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4154void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3990 __acquires(RCU) 4155 __acquires(RCU)
3991{ 4156{
3992 struct net *net = seq_file_net(seq); 4157 struct dev_iter_state *state = seq->private;
3993 loff_t off;
3994 struct net_device *dev;
3995 4158
3996 rcu_read_lock(); 4159 rcu_read_lock();
3997 if (!*pos) 4160 if (!*pos)
3998 return SEQ_START_TOKEN; 4161 return SEQ_START_TOKEN;
3999 4162
4000 off = 1; 4163 /* check for end of the hash */
4001 for_each_netdev_rcu(net, dev) 4164 if (state->pos == 0 && *pos > 1)
4002 if (off++ == *pos) 4165 return NULL;
4003 return dev;
4004 4166
4005 return NULL; 4167 return dev_from_new_bucket(seq);
4006} 4168}
4007 4169
4008void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4170void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4009{ 4171{
4010 struct net_device *dev = v; 4172 struct net_device *dev;
4173
4174 ++*pos;
4011 4175
4012 if (v == SEQ_START_TOKEN) 4176 if (v == SEQ_START_TOKEN)
4013 dev = first_net_device_rcu(seq_file_net(seq)); 4177 return dev_from_new_bucket(seq);
4014 else
4015 dev = next_net_device_rcu(dev);
4016 4178
4017 ++*pos; 4179 dev = dev_from_same_bucket(seq);
4018 return dev; 4180 if (dev)
4181 return dev;
4182
4183 return dev_from_new_bucket(seq);
4019} 4184}
4020 4185
4021void dev_seq_stop(struct seq_file *seq, void *v) 4186void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4114,7 +4279,7 @@ static const struct seq_operations dev_seq_ops = {
4114static int dev_seq_open(struct inode *inode, struct file *file) 4279static int dev_seq_open(struct inode *inode, struct file *file)
4115{ 4280{
4116 return seq_open_net(inode, file, &dev_seq_ops, 4281 return seq_open_net(inode, file, &dev_seq_ops,
4117 sizeof(struct seq_net_private)); 4282 sizeof(struct dev_iter_state));
4118} 4283}
4119 4284
4120static const struct file_operations dev_seq_fops = { 4285static const struct file_operations dev_seq_fops = {
@@ -4497,9 +4662,7 @@ void __dev_set_rx_mode(struct net_device *dev)
4497 if (!netif_device_present(dev)) 4662 if (!netif_device_present(dev))
4498 return; 4663 return;
4499 4664
4500 if (ops->ndo_set_rx_mode) 4665 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4501 ops->ndo_set_rx_mode(dev);
4502 else {
 4503 /* Unicast address changes may only happen under the rtnl, 4666 /* Unicast address changes may only happen under the rtnl,
4504 * therefore calling __dev_set_promiscuity here is safe. 4667 * therefore calling __dev_set_promiscuity here is safe.
4505 */ 4668 */
@@ -4510,10 +4673,10 @@ void __dev_set_rx_mode(struct net_device *dev)
4510 __dev_set_promiscuity(dev, -1); 4673 __dev_set_promiscuity(dev, -1);
4511 dev->uc_promisc = false; 4674 dev->uc_promisc = false;
4512 } 4675 }
4513
4514 if (ops->ndo_set_multicast_list)
4515 ops->ndo_set_multicast_list(dev);
4516 } 4676 }
4677
4678 if (ops->ndo_set_rx_mode)
4679 ops->ndo_set_rx_mode(dev);
4517} 4680}
4518 4681
4519void dev_set_rx_mode(struct net_device *dev) 4682void dev_set_rx_mode(struct net_device *dev)
@@ -4524,30 +4687,6 @@ void dev_set_rx_mode(struct net_device *dev)
4524} 4687}
4525 4688
4526/** 4689/**
4527 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4528 * @dev: device
4529 * @cmd: memory area for ethtool_ops::get_settings() result
4530 *
4531 * The cmd arg is initialized properly (cleared and
4532 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4533 *
4534 * Return device's ethtool_ops::get_settings() result value or
4535 * -EOPNOTSUPP when device doesn't expose
4536 * ethtool_ops::get_settings() operation.
4537 */
4538int dev_ethtool_get_settings(struct net_device *dev,
4539 struct ethtool_cmd *cmd)
4540{
4541 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4542 return -EOPNOTSUPP;
4543
4544 memset(cmd, 0, sizeof(struct ethtool_cmd));
4545 cmd->cmd = ETHTOOL_GSET;
4546 return dev->ethtool_ops->get_settings(dev, cmd);
4547}
4548EXPORT_SYMBOL(dev_ethtool_get_settings);
4549
4550/**
4551 * dev_get_flags - get flags reported to userspace 4690 * dev_get_flags - get flags reported to userspace
4552 * @dev: device 4691 * @dev: device
4553 * 4692 *
@@ -4863,7 +5002,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4863 return -EOPNOTSUPP; 5002 return -EOPNOTSUPP;
4864 5003
4865 case SIOCADDMULTI: 5004 case SIOCADDMULTI:
4866 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 5005 if (!ops->ndo_set_rx_mode ||
4867 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5006 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4868 return -EINVAL; 5007 return -EINVAL;
4869 if (!netif_device_present(dev)) 5008 if (!netif_device_present(dev))
@@ -4871,7 +5010,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4871 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); 5010 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4872 5011
4873 case SIOCDELMULTI: 5012 case SIOCDELMULTI:
4874 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 5013 if (!ops->ndo_set_rx_mode ||
4875 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5014 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4876 return -EINVAL; 5015 return -EINVAL;
4877 if (!netif_device_present(dev)) 5016 if (!netif_device_present(dev))
@@ -4888,6 +5027,12 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4888 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 5027 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4889 return dev_change_name(dev, ifr->ifr_newname); 5028 return dev_change_name(dev, ifr->ifr_newname);
4890 5029
5030 case SIOCSHWTSTAMP:
5031 err = net_hwtstamp_validate(ifr);
5032 if (err)
5033 return err;
5034 /* fall through */
5035
4891 /* 5036 /*
4892 * Unknown or private ioctl 5037 * Unknown or private ioctl
4893 */ 5038 */
@@ -5202,7 +5347,7 @@ static void rollback_registered_many(struct list_head *head)
5202 dev = list_first_entry(head, struct net_device, unreg_list); 5347 dev = list_first_entry(head, struct net_device, unreg_list);
5203 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 5348 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5204 5349
5205 rcu_barrier(); 5350 synchronize_net();
5206 5351
5207 list_for_each_entry(dev, head, unreg_list) 5352 list_for_each_entry(dev, head, unreg_list)
5208 dev_put(dev); 5353 dev_put(dev);
@@ -5715,6 +5860,12 @@ void netdev_run_todo(void)
5715 5860
5716 __rtnl_unlock(); 5861 __rtnl_unlock();
5717 5862
5863 /* Wait for rcu callbacks to finish before attempting to drain
5864 * the device list. This usually avoids a 250ms wait.
5865 */
5866 if (!list_empty(&list))
5867 rcu_barrier();
5868
5718 while (!list_empty(&list)) { 5869 while (!list_empty(&list)) {
5719 struct net_device *dev 5870 struct net_device *dev
5720 = list_first_entry(&list, struct net_device, todo_list); 5871 = list_first_entry(&list, struct net_device, todo_list);
@@ -5735,8 +5886,8 @@ void netdev_run_todo(void)
5735 5886
5736 /* paranoia */ 5887 /* paranoia */
5737 BUG_ON(netdev_refcnt_read(dev)); 5888 BUG_ON(netdev_refcnt_read(dev));
5738 WARN_ON(rcu_dereference_raw(dev->ip_ptr)); 5889 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5739 WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); 5890 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5740 WARN_ON(dev->dn_ptr); 5891 WARN_ON(dev->dn_ptr);
5741 5892
5742 if (dev->destructor) 5893 if (dev->destructor)
@@ -5940,7 +6091,7 @@ void free_netdev(struct net_device *dev)
5940 kfree(dev->_rx); 6091 kfree(dev->_rx);
5941#endif 6092#endif
5942 6093
5943 kfree(rcu_dereference_raw(dev->ingress_queue)); 6094 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5944 6095
5945 /* Flush device addresses */ 6096 /* Flush device addresses */
5946 dev_addr_flush(dev); 6097 dev_addr_flush(dev);
@@ -6115,6 +6266,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6115 */ 6266 */
6116 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6267 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6117 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6268 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6269 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6118 6270
6119 /* 6271 /*
6120 * Flush the unicast and multicast chains 6272 * Flush the unicast and multicast chains
@@ -6298,7 +6450,7 @@ const char *netdev_drivername(const struct net_device *dev)
6298 return empty; 6450 return empty;
6299} 6451}
6300 6452
6301static int __netdev_printk(const char *level, const struct net_device *dev, 6453int __netdev_printk(const char *level, const struct net_device *dev,
6302 struct va_format *vaf) 6454 struct va_format *vaf)
6303{ 6455{
6304 int r; 6456 int r;
@@ -6313,6 +6465,7 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
6313 6465
6314 return r; 6466 return r;
6315} 6467}
6468EXPORT_SYMBOL(__netdev_printk);
6316 6469
6317int netdev_printk(const char *level, const struct net_device *dev, 6470int netdev_printk(const char *level, const struct net_device *dev,
6318 const char *format, ...) 6471 const char *format, ...)
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e66939ed00..283d1b863876 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -591,8 +591,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
591 * addresses that have no users left. The source device must be 591 * addresses that have no users left. The source device must be
592 * locked by netif_tx_lock_bh. 592 * locked by netif_tx_lock_bh.
593 * 593 *
594 * This function is intended to be called from the dev->set_multicast_list 594 * This function is intended to be called from the ndo_set_rx_mode
595 * or dev->set_rx_mode function of layered software devices. 595 * function of layered software devices.
596 */ 596 */
597int dev_mc_sync(struct net_device *to, struct net_device *from) 597int dev_mc_sync(struct net_device *to, struct net_device *from)
598{ 598{
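The comment fix above tracks the ops consolidation elsewhere in this series: ndo_set_multicast_list is gone, and ndo_set_rx_mode is the single rx-mode callback. For a layered software device that callback usually just mirrors the upper device's address lists onto the lower one, along these lines (hypothetical driver, kernel context):

#include <linux/netdevice.h>

struct stacked_priv {
	struct net_device *lowerdev;
};

/* Sketch: an ndo_set_rx_mode for a stacked device, syncing both the
 * unicast and multicast lists down, as the rewritten comment suggests. */
static void stacked_set_rx_mode(struct net_device *dev)
{
	struct stacked_priv *priv = netdev_priv(dev);

	dev_uc_sync(priv->lowerdev, dev);
	dev_mc_sync(priv->lowerdev, dev);
}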
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf0733..d5e2c4c09107 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
171 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 dst->_neighbour = NULL; 174 RCU_INIT_POINTER(dst->_neighbour, NULL);
175#ifdef CONFIG_XFRM 175#ifdef CONFIG_XFRM
176 dst->xfrm = NULL; 176 dst->xfrm = NULL;
177#endif 177#endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
229 smp_rmb(); 229 smp_rmb();
230 230
231again: 231again:
232 neigh = dst->_neighbour; 232 neigh = rcu_dereference_protected(dst->_neighbour, 1);
233 child = dst->child; 233 child = dst->child;
234 234
235 if (neigh) { 235 if (neigh) {
236 dst->_neighbour = NULL; 236 RCU_INIT_POINTER(dst->_neighbour, NULL);
237 neigh_release(neigh); 237 neigh_release(neigh);
238 } 238 }
239 239
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
360 if (!unregister) { 360 if (!unregister) {
361 dst->input = dst->output = dst_discard; 361 dst->input = dst->output = dst_discard;
362 } else { 362 } else {
363 struct neighbour *neigh;
364
363 dst->dev = dev_net(dst->dev)->loopback_dev; 365 dst->dev = dev_net(dst->dev)->loopback_dev;
364 dev_hold(dst->dev); 366 dev_hold(dst->dev);
365 dev_put(dev); 367 dev_put(dev);
366 if (dst->_neighbour && dst->_neighbour->dev == dev) { 368 rcu_read_lock();
367 dst->_neighbour->dev = dst->dev; 369 neigh = dst_get_neighbour(dst);
370 if (neigh && neigh->dev == dev) {
371 neigh->dev = dst->dev;
368 dev_hold(dst->dev); 372 dev_hold(dst->dev);
369 dev_put(dev); 373 dev_put(dev);
370 } 374 }
375 rcu_read_unlock();
371 } 376 }
372} 377}
373 378
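The dst.c hunks belong to a sweep visible all through this diff: NULL stores move from rcu_assign_pointer() to RCU_INIT_POINTER(), since publishing a NULL needs no memory barrier, and reads on paths with provably no concurrent updaters are annotated rcu_dereference_protected(ptr, 1) instead of the opaque rcu_dereference_raw(). A minimal sketch of the teardown idiom, kernel context assumed:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	int payload;
	struct rcu_head rcu;
};

struct holder {
	struct blob __rcu *ptr;
};

/* Sketch: the '1' documents that no updater can race with us here
 * (e.g. last reference at teardown), so the plain load is safe. */
static void holder_teardown(struct holder *h)
{
	struct blob *b = rcu_dereference_protected(h->ptr, 1);

	RCU_INIT_POINTER(h->ptr, NULL); /* NULL store: no barrier required */
	if (b)
		kfree_rcu(b, rcu);
}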
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6cdba5fc2bed..f44481707124 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -569,15 +569,25 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
569 return 0; 569 return 0;
570} 570}
571 571
572static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 572int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
573{ 573{
574 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; 574 ASSERT_RTNL();
575 int err;
576 575
577 if (!dev->ethtool_ops->get_settings) 576 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
578 return -EOPNOTSUPP; 577 return -EOPNOTSUPP;
579 578
580 err = dev->ethtool_ops->get_settings(dev, &cmd); 579 memset(cmd, 0, sizeof(struct ethtool_cmd));
580 cmd->cmd = ETHTOOL_GSET;
581 return dev->ethtool_ops->get_settings(dev, cmd);
582}
583EXPORT_SYMBOL(__ethtool_get_settings);
584
585static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
586{
587 int err;
588 struct ethtool_cmd cmd;
589
590 err = __ethtool_get_settings(dev, &cmd);
581 if (err < 0) 591 if (err < 0)
582 return err; 592 return err;
583 593
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3231b468bb72..57e8f95110e6 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
475 475
476 list_del_rcu(&rule->list); 476 list_del_rcu(&rule->list);
477 477
478 if (rule->action == FR_ACT_GOTO) 478 if (rule->action == FR_ACT_GOTO) {
479 ops->nr_goto_rules--; 479 ops->nr_goto_rules--;
480 if (rtnl_dereference(rule->ctarget) == NULL)
481 ops->unresolved_rules--;
482 }
480 483
481 /* 484 /*
482 * Check if this rule is a target to any of them. If so, 485 * Check if this rule is a target to any of them. If so,
@@ -487,7 +490,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
487 if (ops->nr_goto_rules > 0) { 490 if (ops->nr_goto_rules > 0) {
488 list_for_each_entry(tmp, &ops->rules_list, list) { 491 list_for_each_entry(tmp, &ops->rules_list, list) {
489 if (rtnl_dereference(tmp->ctarget) == rule) { 492 if (rtnl_dereference(tmp->ctarget) == rule) {
490 rcu_assign_pointer(tmp->ctarget, NULL); 493 RCU_INIT_POINTER(tmp->ctarget, NULL);
491 ops->unresolved_rules++; 494 ops->unresolved_rules++;
492 } 495 }
493 } 496 }
@@ -545,7 +548,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
545 frh->flags = rule->flags; 548 frh->flags = rule->flags;
546 549
547 if (rule->action == FR_ACT_GOTO && 550 if (rule->action == FR_ACT_GOTO &&
548 rcu_dereference_raw(rule->ctarget) == NULL) 551 rcu_access_pointer(rule->ctarget) == NULL)
549 frh->flags |= FIB_RULE_UNRESOLVED; 552 frh->flags |= FIB_RULE_UNRESOLVED;
550 553
551 if (rule->iifname[0]) { 554 if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975fa87cb..5dea45279215 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -436,7 +436,7 @@ error:
436 * 436 *
437 * Returns 0 if the rule set is legal or -EINVAL if not. 437 * Returns 0 if the rule set is legal or -EINVAL if not.
438 */ 438 */
439int sk_chk_filter(struct sock_filter *filter, int flen) 439int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
440{ 440{
441 /* 441 /*
442 * Valid instructions are initialized to non-0. 442 * Valid instructions are initialized to non-0.
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
645 filter = rcu_dereference_protected(sk->sk_filter, 645 filter = rcu_dereference_protected(sk->sk_filter,
646 sock_owned_by_user(sk)); 646 sock_owned_by_user(sk));
647 if (filter) { 647 if (filter) {
648 rcu_assign_pointer(sk->sk_filter, NULL); 648 RCU_INIT_POINTER(sk->sk_filter, NULL);
649 sk_filter_uncharge(sk, filter); 649 sk_filter_uncharge(sk, filter);
650 ret = 0; 650 ret = 0;
651 } 651 }
diff --git a/net/core/flow.c b/net/core/flow.c
index 555a456efb07..8ae42de9c79e 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -413,7 +413,7 @@ static int __init flow_cache_init(struct flow_cache *fc)
413 413
414 for_each_online_cpu(i) { 414 for_each_online_cpu(i) {
415 if (flow_cache_cpu_prepare(fc, i)) 415 if (flow_cache_cpu_prepare(fc, i))
416 return -ENOMEM; 416 goto err;
417 } 417 }
418 fc->hotcpu_notifier = (struct notifier_block){ 418 fc->hotcpu_notifier = (struct notifier_block){
419 .notifier_call = flow_cache_cpu, 419 .notifier_call = flow_cache_cpu,
@@ -426,6 +426,18 @@ static int __init flow_cache_init(struct flow_cache *fc)
426 add_timer(&fc->rnd_timer); 426 add_timer(&fc->rnd_timer);
427 427
428 return 0; 428 return 0;
429
430err:
431 for_each_possible_cpu(i) {
432 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
433 kfree(fcp->hash_table);
434 fcp->hash_table = NULL;
435 }
436
437 free_percpu(fc->percpu);
438 fc->percpu = NULL;
439
440 return -ENOMEM;
429} 441}
430 442
431static int __init flow_cache_init_global(void) 443static int __init flow_cache_init_global(void)
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
index 283c2b993fb8..81e1ed7c8383 100644
--- a/net/core/kmap_skb.h
+++ b/net/core/kmap_skb.h
@@ -7,7 +7,7 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
7 7
8 local_bh_disable(); 8 local_bh_disable();
9#endif 9#endif
10 return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ); 10 return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
11} 11}
12 12
13static inline void kunmap_skb_frag(void *vaddr) 13static inline void kunmap_skb_frag(void *vaddr)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 357bd4ee4baa..c3519c6d1b16 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -78,8 +78,13 @@ static void rfc2863_policy(struct net_device *dev)
78 78
79static bool linkwatch_urgent_event(struct net_device *dev) 79static bool linkwatch_urgent_event(struct net_device *dev)
80{ 80{
81 return netif_running(dev) && netif_carrier_ok(dev) && 81 if (!netif_running(dev))
82 qdisc_tx_changing(dev); 82 return false;
83
84 if (dev->ifindex != dev->iflink)
85 return true;
86
87 return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
83} 88}
84 89
85 90
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1334d7e56f02..909ecb3c2a33 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
844 skb_queue_purge(&neigh->arp_queue); 844 skb_queue_purge(&neigh->arp_queue);
845} 845}
846 846
847static void neigh_probe(struct neighbour *neigh)
848 __releases(neigh->lock)
849{
850 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
851 /* keep skb alive even if arp_queue overflows */
852 if (skb)
853 skb = skb_copy(skb, GFP_ATOMIC);
854 write_unlock(&neigh->lock);
855 neigh->ops->solicit(neigh, skb);
856 atomic_inc(&neigh->probes);
857 kfree_skb(skb);
858}
859
847/* Called when a timer expires for a neighbour entry. */ 860/* Called when a timer expires for a neighbour entry. */
848 861
849static void neigh_timer_handler(unsigned long arg) 862static void neigh_timer_handler(unsigned long arg)
@@ -920,14 +933,7 @@ static void neigh_timer_handler(unsigned long arg)
920 neigh_hold(neigh); 933 neigh_hold(neigh);
921 } 934 }
922 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 935 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
923 struct sk_buff *skb = skb_peek(&neigh->arp_queue); 936 neigh_probe(neigh);
924 /* keep skb alive even if arp_queue overflows */
925 if (skb)
926 skb = skb_copy(skb, GFP_ATOMIC);
927 write_unlock(&neigh->lock);
928 neigh->ops->solicit(neigh, skb);
929 atomic_inc(&neigh->probes);
930 kfree_skb(skb);
931 } else { 937 } else {
932out: 938out:
933 write_unlock(&neigh->lock); 939 write_unlock(&neigh->lock);
@@ -942,7 +948,7 @@ out:
942int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) 948int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
943{ 949{
944 int rc; 950 int rc;
945 unsigned long now; 951 bool immediate_probe = false;
946 952
947 write_lock_bh(&neigh->lock); 953 write_lock_bh(&neigh->lock);
948 954
@@ -950,14 +956,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
950 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) 956 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
951 goto out_unlock_bh; 957 goto out_unlock_bh;
952 958
953 now = jiffies;
954
955 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { 959 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
956 if (neigh->parms->mcast_probes + neigh->parms->app_probes) { 960 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
961 unsigned long next, now = jiffies;
962
957 atomic_set(&neigh->probes, neigh->parms->ucast_probes); 963 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
958 neigh->nud_state = NUD_INCOMPLETE; 964 neigh->nud_state = NUD_INCOMPLETE;
959 neigh->updated = jiffies; 965 neigh->updated = now;
960 neigh_add_timer(neigh, now + 1); 966 next = now + max(neigh->parms->retrans_time, HZ/2);
967 neigh_add_timer(neigh, next);
968 immediate_probe = true;
961 } else { 969 } else {
962 neigh->nud_state = NUD_FAILED; 970 neigh->nud_state = NUD_FAILED;
963 neigh->updated = jiffies; 971 neigh->updated = jiffies;
@@ -989,7 +997,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
989 rc = 1; 997 rc = 1;
990 } 998 }
991out_unlock_bh: 999out_unlock_bh:
992 write_unlock_bh(&neigh->lock); 1000 if (immediate_probe)
1001 neigh_probe(neigh);
1002 else
1003 write_unlock(&neigh->lock);
1004 local_bh_enable();
993 return rc; 1005 return rc;
994} 1006}
995EXPORT_SYMBOL(__neigh_event_send); 1007EXPORT_SYMBOL(__neigh_event_send);
@@ -1156,10 +1168,14 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1156 struct dst_entry *dst = skb_dst(skb); 1168 struct dst_entry *dst = skb_dst(skb);
1157 struct neighbour *n2, *n1 = neigh; 1169 struct neighbour *n2, *n1 = neigh;
1158 write_unlock_bh(&neigh->lock); 1170 write_unlock_bh(&neigh->lock);
1171
1172 rcu_read_lock();
1159 /* On shaper/eql skb->dst->neighbour != neigh :( */ 1173 /* On shaper/eql skb->dst->neighbour != neigh :( */
1160 if (dst && (n2 = dst_get_neighbour(dst)) != NULL) 1174 if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1161 n1 = n2; 1175 n1 = n2;
1162 n1->output(n1, skb); 1176 n1->output(n1, skb);
1177 rcu_read_unlock();
1178
1163 write_lock_bh(&neigh->lock); 1179 write_lock_bh(&neigh->lock);
1164 } 1180 }
1165 skb_queue_purge(&neigh->arp_queue); 1181 skb_queue_purge(&neigh->arp_queue);
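The __neigh_event_send() rework depends on write_lock_bh() being separable into its halves: neigh_probe() releases only the spinlock (hence its __releases annotation) so ops->solicit() runs unlocked but with bottom halves still disabled, and the caller issues the deferred local_bh_enable() itself. The shape of the split, as a sketch (kernel context):

#include <linux/spinlock.h>

/* Sketch: write_lock_bh() is a lock plus BH disable, so the unlock can
 * be split, letting work run after the lock drops but before BHs are
 * re-enabled; that is what the immediate_probe path above does. */
static void locked_then_probe(rwlock_t *lock)
{
	write_lock_bh(lock);      /* take lock, disable bottom halves */
	/* ... update neighbour state under the lock ... */
	write_unlock(lock);       /* drop only the lock ... */
	/* ... probe/solicit work runs here, BHs still off ... */
	local_bh_enable();        /* ... then re-enable bottom halves */
}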
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5db2f27..7604a635376b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -147,7 +147,7 @@ static ssize_t show_speed(struct device *dev,
147 147
148 if (netif_running(netdev)) { 148 if (netif_running(netdev)) {
149 struct ethtool_cmd cmd; 149 struct ethtool_cmd cmd;
150 if (!dev_ethtool_get_settings(netdev, &cmd)) 150 if (!__ethtool_get_settings(netdev, &cmd))
151 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); 151 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
152 } 152 }
153 rtnl_unlock(); 153 rtnl_unlock();
@@ -165,7 +165,7 @@ static ssize_t show_duplex(struct device *dev,
165 165
166 if (netif_running(netdev)) { 166 if (netif_running(netdev)) {
167 struct ethtool_cmd cmd; 167 struct ethtool_cmd cmd;
168 if (!dev_ethtool_get_settings(netdev, &cmd)) 168 if (!__ethtool_get_settings(netdev, &cmd))
169 ret = sprintf(buf, "%s\n", 169 ret = sprintf(buf, "%s\n",
170 cmd.duplex ? "full" : "half"); 170 cmd.duplex ? "full" : "half");
171 } 171 }
@@ -712,13 +712,13 @@ static void rx_queue_release(struct kobject *kobj)
712 struct rps_dev_flow_table *flow_table; 712 struct rps_dev_flow_table *flow_table;
713 713
714 714
715 map = rcu_dereference_raw(queue->rps_map); 715 map = rcu_dereference_protected(queue->rps_map, 1);
716 if (map) { 716 if (map) {
717 RCU_INIT_POINTER(queue->rps_map, NULL); 717 RCU_INIT_POINTER(queue->rps_map, NULL);
718 kfree_rcu(map, rcu); 718 kfree_rcu(map, rcu);
719 } 719 }
720 720
721 flow_table = rcu_dereference_raw(queue->rps_flow_table); 721 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
722 if (flow_table) { 722 if (flow_table) {
723 RCU_INIT_POINTER(queue->rps_flow_table, NULL); 723 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
724 call_rcu(&flow_table->rcu, rps_dev_flow_table_release); 724 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
@@ -987,10 +987,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
987 } 987 }
988 988
989 if (nonempty) 989 if (nonempty)
990 rcu_assign_pointer(dev->xps_maps, new_dev_maps); 990 RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
991 else { 991 else {
992 kfree(new_dev_maps); 992 kfree(new_dev_maps);
993 rcu_assign_pointer(dev->xps_maps, NULL); 993 RCU_INIT_POINTER(dev->xps_maps, NULL);
994 } 994 }
995 995
996 if (dev_maps) 996 if (dev_maps)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 52622517e0d8..f57d94627a2a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -762,7 +762,7 @@ int __netpoll_setup(struct netpoll *np)
762 } 762 }
763 763
764 /* last thing to do is link it to the net device structure */ 764 /* last thing to do is link it to the net device structure */
765 rcu_assign_pointer(ndev->npinfo, npinfo); 765 RCU_INIT_POINTER(ndev->npinfo, npinfo);
766 766
767 return 0; 767 return 0;
768 768
@@ -903,7 +903,7 @@ void __netpoll_cleanup(struct netpoll *np)
903 if (ops->ndo_netpoll_cleanup) 903 if (ops->ndo_netpoll_cleanup)
904 ops->ndo_netpoll_cleanup(np->dev); 904 ops->ndo_netpoll_cleanup(np->dev);
905 905
906 rcu_assign_pointer(np->dev->npinfo, NULL); 906 RCU_INIT_POINTER(np->dev->npinfo, NULL);
907 907
908 /* avoid racing with NAPI reading npinfo */ 908 /* avoid racing with NAPI reading npinfo */
909 synchronize_rcu_bh(); 909 synchronize_rcu_bh();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index e35a6fbb8110..0001c243b35c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2145,9 +2145,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2145 } 2145 }
2146 2146
2147 start_time = ktime_now(); 2147 start_time = ktime_now();
2148 if (remaining < 100000) 2148 if (remaining < 100000) {
2149 ndelay(remaining); /* really small just spin */ 2149 /* for small delays (<100us), just loop until limit is reached */
2150 else { 2150 do {
2151 end_time = ktime_now();
2152 } while (ktime_lt(end_time, spin_until));
2153 } else {
2151 /* see do_nanosleep */ 2154 /* see do_nanosleep */
2152 hrtimer_init_sleeper(&t, current); 2155 hrtimer_init_sleeper(&t, current);
2153 do { 2156 do {
@@ -2162,8 +2165,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2162 hrtimer_cancel(&t.timer); 2165 hrtimer_cancel(&t.timer);
2163 } while (t.task && pkt_dev->running && !signal_pending(current)); 2166 } while (t.task && pkt_dev->running && !signal_pending(current));
2164 __set_current_state(TASK_RUNNING); 2167 __set_current_state(TASK_RUNNING);
2168 end_time = ktime_now();
2165 } 2169 }
2166 end_time = ktime_now();
2167 2170
2168 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); 2171 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2169 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); 2172 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
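The pktgen hunk fixes two things at once: end_time is now sampled inside each branch, so the timestamp is taken right when the wait actually ends, and delays under 100us busy-poll the clock instead of arming an hrtimer whose setup would cost more than the wait itself. A runnable userspace analogue of the short-delay path:

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long spin_until = now_ns() + 50000; /* 50us: below the threshold */

	while (now_ns() < spin_until)
		; /* really small: just spin until the deadline */
	puts("deadline reached");
	return 0;
}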
@@ -2602,18 +2605,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2602 if (!pkt_dev->page) 2605 if (!pkt_dev->page)
2603 break; 2606 break;
2604 } 2607 }
2605 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2606 get_page(pkt_dev->page); 2608 get_page(pkt_dev->page);
2609 skb_frag_set_page(skb, i, pkt_dev->page);
2607 skb_shinfo(skb)->frags[i].page_offset = 0; 2610 skb_shinfo(skb)->frags[i].page_offset = 0;
2608 /*last fragment, fill rest of data*/ 2611 /*last fragment, fill rest of data*/
2609 if (i == (frags - 1)) 2612 if (i == (frags - 1))
2610 skb_shinfo(skb)->frags[i].size = 2613 skb_frag_size_set(&skb_shinfo(skb)->frags[i],
2611 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2614 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
2612 else 2615 else
2613 skb_shinfo(skb)->frags[i].size = frag_len; 2616 skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
2614 datalen -= skb_shinfo(skb)->frags[i].size; 2617 datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
2615 skb->len += skb_shinfo(skb)->frags[i].size; 2618 skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2616 skb->data_len += skb_shinfo(skb)->frags[i].size; 2619 skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2617 i++; 2620 i++;
2618 skb_shinfo(skb)->nr_frags = i; 2621 skb_shinfo(skb)->nr_frags = i;
2619 } 2622 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e953fe39..9083e82bdae5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -731,7 +731,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
731 size += num_vfs * 731 size += num_vfs *
732 (nla_total_size(sizeof(struct ifla_vf_mac)) + 732 (nla_total_size(sizeof(struct ifla_vf_mac)) +
733 nla_total_size(sizeof(struct ifla_vf_vlan)) + 733 nla_total_size(sizeof(struct ifla_vf_vlan)) +
734 nla_total_size(sizeof(struct ifla_vf_tx_rate))); 734 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
735 nla_total_size(sizeof(struct ifla_vf_spoofchk)));
735 return size; 736 return size;
736 } else 737 } else
737 return 0; 738 return 0;
@@ -954,13 +955,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
954 struct ifla_vf_mac vf_mac; 955 struct ifla_vf_mac vf_mac;
955 struct ifla_vf_vlan vf_vlan; 956 struct ifla_vf_vlan vf_vlan;
956 struct ifla_vf_tx_rate vf_tx_rate; 957 struct ifla_vf_tx_rate vf_tx_rate;
958 struct ifla_vf_spoofchk vf_spoofchk;
959
960 /*
961 * Not all SR-IOV capable drivers support the
962 * spoofcheck query. Preset to -1 so the user
963 * space tool can detect that the driver didn't
964 * report anything.
965 */
966 ivi.spoofchk = -1;
957 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) 967 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
958 break; 968 break;
959 vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; 969 vf_mac.vf =
970 vf_vlan.vf =
971 vf_tx_rate.vf =
972 vf_spoofchk.vf = ivi.vf;
973
960 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 974 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
961 vf_vlan.vlan = ivi.vlan; 975 vf_vlan.vlan = ivi.vlan;
962 vf_vlan.qos = ivi.qos; 976 vf_vlan.qos = ivi.qos;
963 vf_tx_rate.rate = ivi.tx_rate; 977 vf_tx_rate.rate = ivi.tx_rate;
978 vf_spoofchk.setting = ivi.spoofchk;
964 vf = nla_nest_start(skb, IFLA_VF_INFO); 979 vf = nla_nest_start(skb, IFLA_VF_INFO);
965 if (!vf) { 980 if (!vf) {
966 nla_nest_cancel(skb, vfinfo); 981 nla_nest_cancel(skb, vfinfo);
@@ -968,7 +983,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
968 } 983 }
969 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); 984 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
970 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); 985 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
971 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); 986 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
987 &vf_tx_rate);
988 NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
989 &vf_spoofchk);
972 nla_nest_end(skb, vf); 990 nla_nest_end(skb, vf);
973 } 991 }
974 nla_nest_end(skb, vfinfo); 992 nla_nest_end(skb, vfinfo);
@@ -1202,6 +1220,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
1202 ivt->rate); 1220 ivt->rate);
1203 break; 1221 break;
1204 } 1222 }
1223 case IFLA_VF_SPOOFCHK: {
1224 struct ifla_vf_spoofchk *ivs;
1225 ivs = nla_data(vf);
1226 err = -EOPNOTSUPP;
1227 if (ops->ndo_set_vf_spoofchk)
1228 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1229 ivs->setting);
1230 break;
1231 }
1205 default: 1232 default:
1206 err = -EINVAL; 1233 err = -EINVAL;
1207 break; 1234 break;
@@ -1604,7 +1631,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1604 dev_net_set(dev, net); 1631 dev_net_set(dev, net);
1605 dev->rtnl_link_ops = ops; 1632 dev->rtnl_link_ops = ops;
1606 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1633 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1607 dev->real_num_tx_queues = real_num_queues;
1608 1634
1609 if (tb[IFLA_MTU]) 1635 if (tb[IFLA_MTU])
1610 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1636 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
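On the query side, presetting ivi.spoofchk to -1 before ndo_get_vf_config gives user space a three-state answer: enabled, disabled, or not reported by the driver. A small sketch of a consumer reading the attribute payload (assuming the uapi struct from include/linux/if_link.h):

#include <stdio.h>
#include <linux/if_link.h>

/* Sketch: interpret an IFLA_VF_SPOOFCHK payload; -1 (stored in the
 * unsigned field) marks a driver that did not fill the value in. */
static void print_spoofchk(const struct ifla_vf_spoofchk *vs)
{
	if (vs->setting == (__u32)-1)
		printf("vf %u: spoof checking not reported\n", vs->vf);
	else
		printf("vf %u: spoof checking %s\n", vs->vf,
		       vs->setting ? "on" : "off");
}

int main(void)
{
	struct ifla_vf_spoofchk vs = { .vf = 0, .setting = (__u32)-1 };

	print_spoofchk(&vs);
	return 0;
}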
diff --git a/net/core/scm.c b/net/core/scm.c
index 811b53fb330e..ff52ad0a5150 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -173,7 +173,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
173 if (err) 173 if (err)
174 goto error; 174 goto error;
175 175
176 if (pid_vnr(p->pid) != p->creds.pid) { 176 if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
177 struct pid *pid; 177 struct pid *pid;
178 err = -ESRCH; 178 err = -ESRCH;
179 pid = find_get_pid(p->creds.pid); 179 pid = find_get_pid(p->creds.pid);
@@ -183,8 +183,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
183 p->pid = pid; 183 p->pid = pid;
184 } 184 }
185 185
186 if ((p->cred->euid != p->creds.uid) || 186 if (!p->cred ||
187 (p->cred->egid != p->creds.gid)) { 187 (p->cred->euid != p->creds.uid) ||
188 (p->cred->egid != p->creds.gid)) {
188 struct cred *cred; 189 struct cred *cred;
189 err = -ENOMEM; 190 err = -ENOMEM;
190 cred = prepare_creds(); 191 cred = prepare_creds();
@@ -193,7 +194,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
193 194
194 cred->uid = cred->euid = p->creds.uid; 195 cred->uid = cred->euid = p->creds.uid;
195 cred->gid = cred->egid = p->creds.gid; 196 cred->gid = cred->egid = p->creds.gid;
196 put_cred(p->cred); 197 if (p->cred)
198 put_cred(p->cred);
197 p->cred = cred; 199 p->cred = cred;
198 } 200 }
199 break; 201 break;
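
[For context, __scm_send() runs when a sender attaches explicit SCM_CREDENTIALS to an AF_UNIX message; the new NULL checks cover sockets that reach this point without a pid or cred reference already installed. A minimal userspace sketch of the path that exercises this code (illustrative, error handling trimmed):

	#define _GNU_SOURCE
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	static ssize_t send_with_creds(int fd, const char *buf, size_t len)
	{
		struct ucred cred = {
			.pid = getpid(), .uid = getuid(), .gid = getgid(),
		};
		char cbuf[CMSG_SPACE(sizeof(cred))];
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		memset(cbuf, 0, sizeof(cbuf));
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_CREDENTIALS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(cred));
		memcpy(CMSG_DATA(cmsg), &cred, sizeof(cred));

		return sendmsg(fd, &msg, 0);
	}
]
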
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 45329d7c9dd9..025233de25f9 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq)
35} 35}
36 36
37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
38__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 38__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
39 __be16 sport, __be16 dport) 39 __be16 sport, __be16 dport)
40{ 40{
41 u32 secret[MD5_MESSAGE_BYTES / 4]; 41 u32 secret[MD5_MESSAGE_BYTES / 4];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f56fce..ca4db40e75b8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
184 goto out; 184 goto out;
185 prefetchw(skb); 185 prefetchw(skb);
186 186
187 size = SKB_DATA_ALIGN(size); 187 /* We do our best to align skb_shared_info on a separate cache
188 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), 188 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
189 gfp_mask, node); 189 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
190 * Both skb->head and skb_shared_info are cache line aligned.
191 */
192 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
193 data = kmalloc_node_track_caller(size, gfp_mask, node);
190 if (!data) 194 if (!data)
191 goto nodata; 195 goto nodata;
196 /* kmalloc(size) might give us more room than requested.
 197 * Put skb_shared_info exactly at the end of the allocated zone,
198 * to allow max possible filling before reallocation.
199 */
200 size = SKB_WITH_OVERHEAD(ksize(data));
192 prefetchw(data + size); 201 prefetchw(data + size);
193 202
194 /* 203 /*
@@ -197,7 +206,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
197 * the tail pointer in struct sk_buff! 206 * the tail pointer in struct sk_buff!
198 */ 207 */
199 memset(skb, 0, offsetof(struct sk_buff, tail)); 208 memset(skb, 0, offsetof(struct sk_buff, tail));
200 skb->truesize = size + sizeof(struct sk_buff); 209 /* Account for allocated memory : skb + skb->head */
210 skb->truesize = SKB_TRUESIZE(size);
201 atomic_set(&skb->users, 1); 211 atomic_set(&skb->users, 1);
202 skb->head = data; 212 skb->head = data;
203 skb->data = data; 213 skb->data = data;
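
[The resizing above leans on three skbuff.h helpers, one of them (SKB_TRUESIZE) new in this series. Reconstructed here for reference; verify the exact definitions against include/linux/skbuff.h in your tree:

	#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
					 ~(SMP_CACHE_BYTES - 1))
	/* usable payload once skb_shared_info is carved off the end */
	#define SKB_WITH_OVERHEAD(X)	\
		((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
	/* total memory charged for an skb whose data buffer holds X bytes */
	#define SKB_TRUESIZE(X) ((X) +					   \
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
]
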
@@ -326,7 +336,7 @@ static void skb_release_data(struct sk_buff *skb)
326 if (skb_shinfo(skb)->nr_frags) { 336 if (skb_shinfo(skb)->nr_frags) {
327 int i; 337 int i;
328 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 338 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
329 put_page(skb_shinfo(skb)->frags[i].page); 339 skb_frag_unref(skb, i);
330 } 340 }
331 341
332 /* 342 /*
@@ -475,6 +485,30 @@ void consume_skb(struct sk_buff *skb)
475EXPORT_SYMBOL(consume_skb); 485EXPORT_SYMBOL(consume_skb);
476 486
477/** 487/**
488 * skb_recycle - clean up an skb for reuse
489 * @skb: buffer
490 *
491 * Recycles the skb to be reused as a receive buffer. This
492 * function does any necessary reference count dropping, and
493 * cleans up the skbuff as if it just came from __alloc_skb().
494 */
495void skb_recycle(struct sk_buff *skb)
496{
497 struct skb_shared_info *shinfo;
498
499 skb_release_head_state(skb);
500
501 shinfo = skb_shinfo(skb);
502 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
503 atomic_set(&shinfo->dataref, 1);
504
505 memset(skb, 0, offsetof(struct sk_buff, tail));
506 skb->data = skb->head + NET_SKB_PAD;
507 skb_reset_tail_pointer(skb);
508}
509EXPORT_SYMBOL(skb_recycle);
510
511/**
478 * skb_recycle_check - check if skb can be reused for receive 512 * skb_recycle_check - check if skb can be reused for receive
479 * @skb: buffer 513 * @skb: buffer
480 * @skb_size: minimum receive buffer size 514 * @skb_size: minimum receive buffer size
@@ -488,33 +522,10 @@ EXPORT_SYMBOL(consume_skb);
488 */ 522 */
489bool skb_recycle_check(struct sk_buff *skb, int skb_size) 523bool skb_recycle_check(struct sk_buff *skb, int skb_size)
490{ 524{
491 struct skb_shared_info *shinfo; 525 if (!skb_is_recycleable(skb, skb_size))
492
493 if (irqs_disabled())
494 return false;
495
496 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
497 return false;
498
499 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
500 return false; 526 return false;
501 527
502 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 528 skb_recycle(skb);
503 if (skb_end_pointer(skb) - skb->head < skb_size)
504 return false;
505
506 if (skb_shared(skb) || skb_cloned(skb))
507 return false;
508
509 skb_release_head_state(skb);
510
511 shinfo = skb_shinfo(skb);
512 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
513 atomic_set(&shinfo->dataref, 1);
514
515 memset(skb, 0, offsetof(struct sk_buff, tail));
516 skb->data = skb->head + NET_SKB_PAD;
517 skb_reset_tail_pointer(skb);
518 529
519 return true; 530 return true;
520} 531}
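
[skb_recycle_check() is now a thin wrapper: skb_is_recycleable(), moved out to skbuff.h, performs the tests that used to live here, and the new skb_recycle() does the cleanup. A hypothetical rx-refill helper using the pair; foo_reuse_or_alloc() and FOO_RX_BUF_SIZE are illustrative:

	static struct sk_buff *foo_reuse_or_alloc(struct net_device *dev,
						  struct sk_buff *skb)
	{
		/* reuse the old buffer if it is still private, linear
		 * and large enough; otherwise fall back to a fresh one */
		if (skb && skb_recycle_check(skb, FOO_RX_BUF_SIZE))
			return skb;

		return netdev_alloc_skb(dev, FOO_RX_BUF_SIZE);
	}
]
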
@@ -529,6 +540,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
529 new->mac_header = old->mac_header; 540 new->mac_header = old->mac_header;
530 skb_dst_copy(new, old); 541 skb_dst_copy(new, old);
531 new->rxhash = old->rxhash; 542 new->rxhash = old->rxhash;
543 new->ooo_okay = old->ooo_okay;
544 new->l4_rxhash = old->l4_rxhash;
532#ifdef CONFIG_XFRM 545#ifdef CONFIG_XFRM
533 new->sp = secpath_get(old->sp); 546 new->sp = secpath_get(old->sp);
534#endif 547#endif
@@ -647,7 +660,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
647 } 660 }
648 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 661 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
649 memcpy(page_address(page), 662 memcpy(page_address(page),
650 vaddr + f->page_offset, f->size); 663 vaddr + f->page_offset, skb_frag_size(f));
651 kunmap_skb_frag(vaddr); 664 kunmap_skb_frag(vaddr);
652 page->private = (unsigned long)head; 665 page->private = (unsigned long)head;
653 head = page; 666 head = page;
@@ -655,14 +668,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
655 668
656 /* skb frags release userspace buffers */ 669 /* skb frags release userspace buffers */
657 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 670 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
658 put_page(skb_shinfo(skb)->frags[i].page); 671 skb_frag_unref(skb, i);
659 672
660 uarg->callback(uarg); 673 uarg->callback(uarg);
661 674
662 /* skb frags point to kernel buffers */ 675 /* skb frags point to kernel buffers */
663 for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) { 676 for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
664 skb_shinfo(skb)->frags[i - 1].page_offset = 0; 677 __skb_fill_page_desc(skb, i-1, head, 0,
665 skb_shinfo(skb)->frags[i - 1].page = head; 678 skb_shinfo(skb)->frags[i - 1].size);
666 head = (struct page *)head->private; 679 head = (struct page *)head->private;
667 } 680 }
668 681
@@ -820,7 +833,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
820 } 833 }
821 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 834 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
822 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 835 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
823 get_page(skb_shinfo(n)->frags[i].page); 836 skb_frag_ref(skb, i);
824 } 837 }
825 skb_shinfo(n)->nr_frags = i; 838 skb_shinfo(n)->nr_frags = i;
826 } 839 }
@@ -911,7 +924,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
911 goto nofrags; 924 goto nofrags;
912 } 925 }
913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 926 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
914 get_page(skb_shinfo(skb)->frags[i].page); 927 skb_frag_ref(skb, i);
915 928
916 if (skb_has_frag_list(skb)) 929 if (skb_has_frag_list(skb))
917 skb_clone_fraglist(skb); 930 skb_clone_fraglist(skb);
@@ -1178,20 +1191,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1178 goto drop_pages; 1191 goto drop_pages;
1179 1192
1180 for (; i < nfrags; i++) { 1193 for (; i < nfrags; i++) {
1181 int end = offset + skb_shinfo(skb)->frags[i].size; 1194 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1182 1195
1183 if (end < len) { 1196 if (end < len) {
1184 offset = end; 1197 offset = end;
1185 continue; 1198 continue;
1186 } 1199 }
1187 1200
1188 skb_shinfo(skb)->frags[i++].size = len - offset; 1201 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1189 1202
1190drop_pages: 1203drop_pages:
1191 skb_shinfo(skb)->nr_frags = i; 1204 skb_shinfo(skb)->nr_frags = i;
1192 1205
1193 for (; i < nfrags; i++) 1206 for (; i < nfrags; i++)
1194 put_page(skb_shinfo(skb)->frags[i].page); 1207 skb_frag_unref(skb, i);
1195 1208
1196 if (skb_has_frag_list(skb)) 1209 if (skb_has_frag_list(skb))
1197 skb_drop_fraglist(skb); 1210 skb_drop_fraglist(skb);
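
[The pattern repeated through the rest of this file, frag->size reads turning into skb_frag_size() and raw get_page()/put_page() on frag->page turning into skb_frag_ref()/skb_frag_unref(), relies on accessor helpers added alongside this series. A reconstruction of the size accessors (verify against include/linux/skbuff.h):

	static inline unsigned int skb_frag_size(const skb_frag_t *frag)
	{
		return frag->size;
	}

	static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
	{
		frag->size = size;
	}

	static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
	{
		frag->size += delta;
	}

	static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
	{
		frag->size -= delta;
	}
]
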
@@ -1294,9 +1307,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1294 /* Estimate size of pulled pages. */ 1307 /* Estimate size of pulled pages. */
1295 eat = delta; 1308 eat = delta;
1296 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1309 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1297 if (skb_shinfo(skb)->frags[i].size >= eat) 1310 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1311
1312 if (size >= eat)
1298 goto pull_pages; 1313 goto pull_pages;
1299 eat -= skb_shinfo(skb)->frags[i].size; 1314 eat -= size;
1300 } 1315 }
1301 1316
1302 /* If we need to update the frag list, we are in trouble. 1317
@@ -1359,14 +1374,16 @@ pull_pages:
1359 eat = delta; 1374 eat = delta;
1360 k = 0; 1375 k = 0;
1361 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1376 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1362 if (skb_shinfo(skb)->frags[i].size <= eat) { 1377 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1363 put_page(skb_shinfo(skb)->frags[i].page); 1378
1364 eat -= skb_shinfo(skb)->frags[i].size; 1379 if (size <= eat) {
1380 skb_frag_unref(skb, i);
1381 eat -= size;
1365 } else { 1382 } else {
1366 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1383 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1367 if (eat) { 1384 if (eat) {
1368 skb_shinfo(skb)->frags[k].page_offset += eat; 1385 skb_shinfo(skb)->frags[k].page_offset += eat;
1369 skb_shinfo(skb)->frags[k].size -= eat; 1386 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1370 eat = 0; 1387 eat = 0;
1371 } 1388 }
1372 k++; 1389 k++;
@@ -1421,7 +1438,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1421 1438
1422 WARN_ON(start > offset + len); 1439 WARN_ON(start > offset + len);
1423 1440
1424 end = start + skb_shinfo(skb)->frags[i].size; 1441 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1425 if ((copy = end - offset) > 0) { 1442 if ((copy = end - offset) > 0) {
1426 u8 *vaddr; 1443 u8 *vaddr;
1427 1444
@@ -1619,7 +1636,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1619 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1636 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1620 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1637 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1621 1638
1622 if (__splice_segment(f->page, f->page_offset, f->size, 1639 if (__splice_segment(skb_frag_page(f),
1640 f->page_offset, skb_frag_size(f),
1623 offset, len, skb, spd, 0, sk, pipe)) 1641 offset, len, skb, spd, 0, sk, pipe))
1624 return 1; 1642 return 1;
1625 } 1643 }
@@ -1729,7 +1747,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1729 1747
1730 WARN_ON(start > offset + len); 1748 WARN_ON(start > offset + len);
1731 1749
1732 end = start + frag->size; 1750 end = start + skb_frag_size(frag);
1733 if ((copy = end - offset) > 0) { 1751 if ((copy = end - offset) > 0) {
1734 u8 *vaddr; 1752 u8 *vaddr;
1735 1753
@@ -1802,7 +1820,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1802 1820
1803 WARN_ON(start > offset + len); 1821 WARN_ON(start > offset + len);
1804 1822
1805 end = start + skb_shinfo(skb)->frags[i].size; 1823 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1806 if ((copy = end - offset) > 0) { 1824 if ((copy = end - offset) > 0) {
1807 __wsum csum2; 1825 __wsum csum2;
1808 u8 *vaddr; 1826 u8 *vaddr;
@@ -1877,7 +1895,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1877 1895
1878 WARN_ON(start > offset + len); 1896 WARN_ON(start > offset + len);
1879 1897
1880 end = start + skb_shinfo(skb)->frags[i].size; 1898 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1881 if ((copy = end - offset) > 0) { 1899 if ((copy = end - offset) > 0) {
1882 __wsum csum2; 1900 __wsum csum2;
1883 u8 *vaddr; 1901 u8 *vaddr;
@@ -2150,7 +2168,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
2150 skb->data_len = len - pos; 2168 skb->data_len = len - pos;
2151 2169
2152 for (i = 0; i < nfrags; i++) { 2170 for (i = 0; i < nfrags; i++) {
2153 int size = skb_shinfo(skb)->frags[i].size; 2171 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2154 2172
2155 if (pos + size > len) { 2173 if (pos + size > len) {
2156 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2174 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2182,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
2164 * where splitting is expensive. 2182 * where splitting is expensive.
2165 * 2. Split is accurate. We ensure this. 2183
2166 */ 2184 */
2167 get_page(skb_shinfo(skb)->frags[i].page); 2185 skb_frag_ref(skb, i);
2168 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2186 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2169 skb_shinfo(skb1)->frags[0].size -= len - pos; 2187 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2170 skb_shinfo(skb)->frags[i].size = len - pos; 2188 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2171 skb_shinfo(skb)->nr_frags++; 2189 skb_shinfo(skb)->nr_frags++;
2172 } 2190 }
2173 k++; 2191 k++;
@@ -2239,12 +2257,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2239 * commit all, so that we don't have to undo partial changes 2257 * commit all, so that we don't have to undo partial changes
2240 */ 2258 */
2241 if (!to || 2259 if (!to ||
2242 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) { 2260 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2261 fragfrom->page_offset)) {
2243 merge = -1; 2262 merge = -1;
2244 } else { 2263 } else {
2245 merge = to - 1; 2264 merge = to - 1;
2246 2265
2247 todo -= fragfrom->size; 2266 todo -= skb_frag_size(fragfrom);
2248 if (todo < 0) { 2267 if (todo < 0) {
2249 if (skb_prepare_for_shift(skb) || 2268 if (skb_prepare_for_shift(skb) ||
2250 skb_prepare_for_shift(tgt)) 2269 skb_prepare_for_shift(tgt))
@@ -2254,8 +2273,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2254 fragfrom = &skb_shinfo(skb)->frags[from]; 2273 fragfrom = &skb_shinfo(skb)->frags[from];
2255 fragto = &skb_shinfo(tgt)->frags[merge]; 2274 fragto = &skb_shinfo(tgt)->frags[merge];
2256 2275
2257 fragto->size += shiftlen; 2276 skb_frag_size_add(fragto, shiftlen);
2258 fragfrom->size -= shiftlen; 2277 skb_frag_size_sub(fragfrom, shiftlen);
2259 fragfrom->page_offset += shiftlen; 2278 fragfrom->page_offset += shiftlen;
2260 2279
2261 goto onlymerged; 2280 goto onlymerged;
@@ -2279,20 +2298,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2279 fragfrom = &skb_shinfo(skb)->frags[from]; 2298 fragfrom = &skb_shinfo(skb)->frags[from];
2280 fragto = &skb_shinfo(tgt)->frags[to]; 2299 fragto = &skb_shinfo(tgt)->frags[to];
2281 2300
2282 if (todo >= fragfrom->size) { 2301 if (todo >= skb_frag_size(fragfrom)) {
2283 *fragto = *fragfrom; 2302 *fragto = *fragfrom;
2284 todo -= fragfrom->size; 2303 todo -= skb_frag_size(fragfrom);
2285 from++; 2304 from++;
2286 to++; 2305 to++;
2287 2306
2288 } else { 2307 } else {
2289 get_page(fragfrom->page); 2308 __skb_frag_ref(fragfrom);
2290 fragto->page = fragfrom->page; 2309 fragto->page = fragfrom->page;
2291 fragto->page_offset = fragfrom->page_offset; 2310 fragto->page_offset = fragfrom->page_offset;
2292 fragto->size = todo; 2311 skb_frag_size_set(fragto, todo);
2293 2312
2294 fragfrom->page_offset += todo; 2313 fragfrom->page_offset += todo;
2295 fragfrom->size -= todo; 2314 skb_frag_size_sub(fragfrom, todo);
2296 todo = 0; 2315 todo = 0;
2297 2316
2298 to++; 2317 to++;
@@ -2307,8 +2326,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2307 fragfrom = &skb_shinfo(skb)->frags[0]; 2326 fragfrom = &skb_shinfo(skb)->frags[0];
2308 fragto = &skb_shinfo(tgt)->frags[merge]; 2327 fragto = &skb_shinfo(tgt)->frags[merge];
2309 2328
2310 fragto->size += fragfrom->size; 2329 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2311 put_page(fragfrom->page); 2330 __skb_frag_unref(fragfrom);
2312 } 2331 }
2313 2332
2314 /* Reposition in the original skb */ 2333 /* Reposition in the original skb */
@@ -2405,7 +2424,7 @@ next_skb:
2405 2424
2406 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2425 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2407 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2426 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2408 block_limit = frag->size + st->stepped_offset; 2427 block_limit = skb_frag_size(frag) + st->stepped_offset;
2409 2428
2410 if (abs_offset < block_limit) { 2429 if (abs_offset < block_limit) {
2411 if (!st->frag_data) 2430 if (!st->frag_data)
@@ -2423,7 +2442,7 @@ next_skb:
2423 } 2442 }
2424 2443
2425 st->frag_idx++; 2444 st->frag_idx++;
2426 st->stepped_offset += frag->size; 2445 st->stepped_offset += skb_frag_size(frag);
2427 } 2446 }
2428 2447
2429 if (st->frag_data) { 2448 if (st->frag_data) {
@@ -2553,14 +2572,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2553 left = PAGE_SIZE - frag->page_offset; 2572 left = PAGE_SIZE - frag->page_offset;
2554 copy = (length > left)? left : length; 2573 copy = (length > left)? left : length;
2555 2574
2556 ret = getfrag(from, (page_address(frag->page) + 2575 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2557 frag->page_offset + frag->size),
2558 offset, copy, 0, skb); 2576 offset, copy, 0, skb);
2559 if (ret < 0) 2577 if (ret < 0)
2560 return -EFAULT; 2578 return -EFAULT;
2561 2579
2562 /* copy was successful so update the size parameters */ 2580 /* copy was successful so update the size parameters */
2563 frag->size += copy; 2581 skb_frag_size_add(frag, copy);
2564 skb->len += copy; 2582 skb->len += copy;
2565 skb->data_len += copy; 2583 skb->data_len += copy;
2566 offset += copy; 2584 offset += copy;
@@ -2706,12 +2724,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
2706 2724
2707 while (pos < offset + len && i < nfrags) { 2725 while (pos < offset + len && i < nfrags) {
2708 *frag = skb_shinfo(skb)->frags[i]; 2726 *frag = skb_shinfo(skb)->frags[i];
2709 get_page(frag->page); 2727 __skb_frag_ref(frag);
2710 size = frag->size; 2728 size = skb_frag_size(frag);
2711 2729
2712 if (pos < offset) { 2730 if (pos < offset) {
2713 frag->page_offset += offset - pos; 2731 frag->page_offset += offset - pos;
2714 frag->size -= offset - pos; 2732 skb_frag_size_sub(frag, offset - pos);
2715 } 2733 }
2716 2734
2717 skb_shinfo(nskb)->nr_frags++; 2735 skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
2720 i++; 2738 i++;
2721 pos += size; 2739 pos += size;
2722 } else { 2740 } else {
2723 frag->size -= pos + size - (offset + len); 2741 skb_frag_size_sub(frag, pos + size - (offset + len));
2724 goto skip_fraglist; 2742 goto skip_fraglist;
2725 } 2743 }
2726 2744
@@ -2800,7 +2818,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2800 } while (--i); 2818 } while (--i);
2801 2819
2802 frag->page_offset += offset; 2820 frag->page_offset += offset;
2803 frag->size -= offset; 2821 skb_frag_size_sub(frag, offset);
2804 2822
2805 skb->truesize -= skb->data_len; 2823 skb->truesize -= skb->data_len;
2806 skb->len -= skb->data_len; 2824 skb->len -= skb->data_len;
@@ -2852,7 +2870,7 @@ merge:
2852 unsigned int eat = offset - headlen; 2870 unsigned int eat = offset - headlen;
2853 2871
2854 skbinfo->frags[0].page_offset += eat; 2872 skbinfo->frags[0].page_offset += eat;
2855 skbinfo->frags[0].size -= eat; 2873 skb_frag_size_sub(&skbinfo->frags[0], eat);
2856 skb->data_len -= eat; 2874 skb->data_len -= eat;
2857 skb->len -= eat; 2875 skb->len -= eat;
2858 offset = headlen; 2876 offset = headlen;
@@ -2923,13 +2941,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2923 2941
2924 WARN_ON(start > offset + len); 2942 WARN_ON(start > offset + len);
2925 2943
2926 end = start + skb_shinfo(skb)->frags[i].size; 2944 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2927 if ((copy = end - offset) > 0) { 2945 if ((copy = end - offset) > 0) {
2928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2946 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2929 2947
2930 if (copy > len) 2948 if (copy > len)
2931 copy = len; 2949 copy = len;
2932 sg_set_page(&sg[elt], frag->page, copy, 2950 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
2933 frag->page_offset+offset-start); 2951 frag->page_offset+offset-start);
2934 elt++; 2952 elt++;
2935 if (!(len -= copy)) 2953 if (!(len -= copy))
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d00ea4d..4ed7b1d12f5e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -207,7 +207,7 @@ static struct lock_class_key af_callback_keys[AF_MAX];
207 * not depend upon such differences. 207 * not depend upon such differences.
208 */ 208 */
209#define _SK_MEM_PACKETS 256 209#define _SK_MEM_PACKETS 256
210#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256) 210#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
211#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 211#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
212#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 212#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
213 213
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
387 387
388 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 388 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
389 sk_tx_queue_clear(sk); 389 sk_tx_queue_clear(sk);
390 rcu_assign_pointer(sk->sk_dst_cache, NULL); 390 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
391 dst_release(dst); 391 dst_release(dst);
392 return NULL; 392 return NULL;
393 } 393 }
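
[rcu_assign_pointer() embeds a write barrier so a reader never observes the pointer before the object it points to is fully initialized. When the value being published is NULL there is nothing to order against, so the barrier-free RCU_INIT_POINTER() suffices; the same reasoning applies to the sk_filter and decnet conversions later in this patch. Its definition at the time, reconstructed from include/linux/rcupdate.h:

	#define RCU_INIT_POINTER(p, v) \
			p = (typeof(*v) __force __rcu *)(v)
]
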
@@ -738,10 +738,7 @@ set_rcvbuf:
738 /* We implement the SO_SNDLOWAT etc to 738 /* We implement the SO_SNDLOWAT etc to
739 not be settable (1003.1g 5.3) */ 739 not be settable (1003.1g 5.3) */
740 case SO_RXQ_OVFL: 740 case SO_RXQ_OVFL:
741 if (valbool) 741 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
742 sock_set_flag(sk, SOCK_RXQ_OVFL);
743 else
744 sock_reset_flag(sk, SOCK_RXQ_OVFL);
745 break; 742 break;
746 default: 743 default:
747 ret = -ENOPROTOOPT; 744 ret = -ENOPROTOOPT;
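
[sock_valbool_flag() collapses the open-coded if/else used here and for other boolean socket options. A reconstruction of the helper, defined near the top of net/core/sock.c:

	static inline void sock_valbool_flag(struct sock *sk, int bit,
					     int valbool)
	{
		if (valbool)
			sock_set_flag(sk, bit);
		else
			sock_reset_flag(sk, bit);
	}
]
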
@@ -1158,7 +1155,7 @@ static void __sk_free(struct sock *sk)
1158 atomic_read(&sk->sk_wmem_alloc) == 0); 1155 atomic_read(&sk->sk_wmem_alloc) == 0);
1159 if (filter) { 1156 if (filter) {
1160 sk_filter_uncharge(sk, filter); 1157 sk_filter_uncharge(sk, filter);
1161 rcu_assign_pointer(sk->sk_filter, NULL); 1158 RCU_INIT_POINTER(sk->sk_filter, NULL);
1162 } 1159 }
1163 1160
1164 sock_disable_timestamp(sk, SOCK_TIMESTAMP); 1161 sock_disable_timestamp(sk, SOCK_TIMESTAMP);
@@ -1260,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1260 /* It is still raw copy of parent, so invalidate 1257 /* It is still raw copy of parent, so invalidate
1261 * destructor and make plain sk_free() */ 1258 * destructor and make plain sk_free() */
1262 newsk->sk_destruct = NULL; 1259 newsk->sk_destruct = NULL;
1260 bh_unlock_sock(newsk);
1263 sk_free(newsk); 1261 sk_free(newsk);
1264 newsk = NULL; 1262 newsk = NULL;
1265 goto out; 1263 goto out;
@@ -1533,7 +1531,6 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1533 skb_shinfo(skb)->nr_frags = npages; 1531 skb_shinfo(skb)->nr_frags = npages;
1534 for (i = 0; i < npages; i++) { 1532 for (i = 0; i < npages; i++) {
1535 struct page *page; 1533 struct page *page;
1536 skb_frag_t *frag;
1537 1534
1538 page = alloc_pages(sk->sk_allocation, 0); 1535 page = alloc_pages(sk->sk_allocation, 0);
1539 if (!page) { 1536 if (!page) {
@@ -1543,12 +1540,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1543 goto failure; 1540 goto failure;
1544 } 1541 }
1545 1542
1546 frag = &skb_shinfo(skb)->frags[i]; 1543 __skb_fill_page_desc(skb, i,
1547 frag->page = page; 1544 page, 0,
1548 frag->page_offset = 0; 1545 (data_len >= PAGE_SIZE ?
1549 frag->size = (data_len >= PAGE_SIZE ? 1546 PAGE_SIZE :
1550 PAGE_SIZE : 1547 data_len));
1551 data_len);
1552 data_len -= PAGE_SIZE; 1548 data_len -= PAGE_SIZE;
1553 } 1549 }
1554 1550
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 98a52640e7cd..82fb28857b64 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -57,9 +57,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
57 case PTP_CLASS_V2_VLAN: 57 case PTP_CLASS_V2_VLAN:
58 phydev = skb->dev->phydev; 58 phydev = skb->dev->phydev;
59 if (likely(phydev->drv->txtstamp)) { 59 if (likely(phydev->drv->txtstamp)) {
60 if (!atomic_inc_not_zero(&sk->sk_refcnt))
61 return;
60 clone = skb_clone(skb, GFP_ATOMIC); 62 clone = skb_clone(skb, GFP_ATOMIC);
61 if (!clone) 63 if (!clone) {
64 sock_put(sk);
62 return; 65 return;
66 }
63 clone->sk = sk; 67 clone->sk = sk;
64 phydev->drv->txtstamp(phydev, clone, type); 68 phydev->drv->txtstamp(phydev, clone, type);
65 } 69 }
@@ -77,8 +81,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
77 struct sock_exterr_skb *serr; 81 struct sock_exterr_skb *serr;
78 int err; 82 int err;
79 83
80 if (!hwtstamps) 84 if (!hwtstamps) {
85 sock_put(sk);
86 kfree_skb(skb);
81 return; 87 return;
88 }
82 89
83 *skb_hwtstamps(skb) = *hwtstamps; 90 *skb_hwtstamps(skb) = *hwtstamps;
84 serr = SKB_EXT_ERR(skb); 91 serr = SKB_EXT_ERR(skb);
@@ -87,6 +94,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
87 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 94 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
88 skb->sk = NULL; 95 skb->sk = NULL;
89 err = sock_queue_err_skb(sk, skb); 96 err = sock_queue_err_skb(sk, skb);
97 sock_put(sk);
90 if (err) 98 if (err)
91 kfree_skb(skb); 99 kfree_skb(skb);
92} 100}
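
[The timestamping fixes all enforce one rule: whoever stashes a socket pointer in clone->sk must hold a reference, and every exit path must drop it exactly once. The pattern in isolation (illustrative, not the literal patched functions):

	static void tx_timestamp_clone(struct sk_buff *skb, struct sock *sk)
	{
		struct sk_buff *clone;

		if (!atomic_inc_not_zero(&sk->sk_refcnt))
			return;		/* socket already dying, leave it */

		clone = skb_clone(skb, GFP_ATOMIC);
		if (!clone) {
			sock_put(sk);	/* undo the hold on failure */
			return;
		}
		clone->sk = sk;		/* reference travels with the clone */
		/* ...hand the clone to the PHY driver; its completion
		 * path is now responsible for the final sock_put() */
	}
]
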
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 25d717ebc92e..2d7cf3d52b4c 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -71,14 +71,14 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
71 /* Copy paged appendix. Hmm... why does this look so complicated? */ 71 /* Copy paged appendix. Hmm... why does this look so complicated? */
72 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 72 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
73 int end; 73 int end;
74 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
74 75
75 WARN_ON(start > offset + len); 76 WARN_ON(start > offset + len);
76 77
77 end = start + skb_shinfo(skb)->frags[i].size; 78 end = start + skb_frag_size(frag);
78 copy = end - offset; 79 copy = end - offset;
79 if (copy > 0) { 80 if (copy > 0) {
80 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 81 struct page *page = skb_frag_page(frag);
81 struct page *page = frag->page;
82 82
83 if (copy > len) 83 if (copy > len)
84 copy = len; 84 copy = len;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3cb56af4e13c..9bfbc1d1b50c 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1255,7 +1255,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1255 1255
1256 spin_lock(&dcb_lock); 1256 spin_lock(&dcb_lock);
1257 list_for_each_entry(itr, &dcb_app_list, list) { 1257 list_for_each_entry(itr, &dcb_app_list, list) {
1258 if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) { 1258 if (itr->ifindex == netdev->ifindex) {
1259 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), 1259 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1260 &itr->app); 1260 &itr->app);
1261 if (err) { 1261 if (err) {
@@ -1412,7 +1412,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1412 goto dcb_unlock; 1412 goto dcb_unlock;
1413 1413
1414 list_for_each_entry(itr, &dcb_app_list, list) { 1414 list_for_each_entry(itr, &dcb_app_list, list) {
1415 if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) { 1415 if (itr->ifindex == netdev->ifindex) {
1416 struct nlattr *app_nest = nla_nest_start(skb, 1416 struct nlattr *app_nest = nla_nest_start(skb,
1417 DCB_ATTR_APP); 1417 DCB_ATTR_APP);
1418 if (!app_nest) 1418 if (!app_nest)
@@ -2050,7 +2050,7 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2050 list_for_each_entry(itr, &dcb_app_list, list) { 2050 list_for_each_entry(itr, &dcb_app_list, list) {
2051 if (itr->app.selector == app->selector && 2051 if (itr->app.selector == app->selector &&
2052 itr->app.protocol == app->protocol && 2052 itr->app.protocol == app->protocol &&
2053 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2053 itr->ifindex == dev->ifindex) {
2054 prio = itr->app.priority; 2054 prio = itr->app.priority;
2055 break; 2055 break;
2056 } 2056 }
@@ -2073,15 +2073,17 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2073 struct dcb_app_type *itr; 2073 struct dcb_app_type *itr;
2074 struct dcb_app_type event; 2074 struct dcb_app_type event;
2075 2075
2076 memcpy(&event.name, dev->name, sizeof(event.name)); 2076 event.ifindex = dev->ifindex;
2077 memcpy(&event.app, new, sizeof(event.app)); 2077 memcpy(&event.app, new, sizeof(event.app));
2078 if (dev->dcbnl_ops->getdcbx)
2079 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2078 2080
2079 spin_lock(&dcb_lock); 2081 spin_lock(&dcb_lock);
2080 /* Search for existing match and replace */ 2082 /* Search for existing match and replace */
2081 list_for_each_entry(itr, &dcb_app_list, list) { 2083 list_for_each_entry(itr, &dcb_app_list, list) {
2082 if (itr->app.selector == new->selector && 2084 if (itr->app.selector == new->selector &&
2083 itr->app.protocol == new->protocol && 2085 itr->app.protocol == new->protocol &&
2084 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2086 itr->ifindex == dev->ifindex) {
2085 if (new->priority) 2087 if (new->priority)
2086 itr->app.priority = new->priority; 2088 itr->app.priority = new->priority;
2087 else { 2089 else {
@@ -2101,7 +2103,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2101 } 2103 }
2102 2104
2103 memcpy(&entry->app, new, sizeof(*new)); 2105 memcpy(&entry->app, new, sizeof(*new));
2104 strncpy(entry->name, dev->name, IFNAMSIZ); 2106 entry->ifindex = dev->ifindex;
2105 list_add(&entry->list, &dcb_app_list); 2107 list_add(&entry->list, &dcb_app_list);
2106 } 2108 }
2107out: 2109out:
@@ -2127,7 +2129,7 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2127 list_for_each_entry(itr, &dcb_app_list, list) { 2129 list_for_each_entry(itr, &dcb_app_list, list) {
2128 if (itr->app.selector == app->selector && 2130 if (itr->app.selector == app->selector &&
2129 itr->app.protocol == app->protocol && 2131 itr->app.protocol == app->protocol &&
2130 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2132 itr->ifindex == dev->ifindex) {
2131 prio |= 1 << itr->app.priority; 2133 prio |= 1 << itr->app.priority;
2132 } 2134 }
2133 } 2135 }
@@ -2150,8 +2152,10 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2150 struct dcb_app_type event; 2152 struct dcb_app_type event;
2151 int err = 0; 2153 int err = 0;
2152 2154
2153 memcpy(&event.name, dev->name, sizeof(event.name)); 2155 event.ifindex = dev->ifindex;
2154 memcpy(&event.app, new, sizeof(event.app)); 2156 memcpy(&event.app, new, sizeof(event.app));
2157 if (dev->dcbnl_ops->getdcbx)
2158 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2155 2159
2156 spin_lock(&dcb_lock); 2160 spin_lock(&dcb_lock);
2157 /* Search for existing match and abort if found */ 2161 /* Search for existing match and abort if found */
@@ -2159,7 +2163,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2159 if (itr->app.selector == new->selector && 2163 if (itr->app.selector == new->selector &&
2160 itr->app.protocol == new->protocol && 2164 itr->app.protocol == new->protocol &&
2161 itr->app.priority == new->priority && 2165 itr->app.priority == new->priority &&
2162 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2166 itr->ifindex == dev->ifindex) {
2163 err = -EEXIST; 2167 err = -EEXIST;
2164 goto out; 2168 goto out;
2165 } 2169 }
@@ -2173,7 +2177,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2173 } 2177 }
2174 2178
2175 memcpy(&entry->app, new, sizeof(*new)); 2179 memcpy(&entry->app, new, sizeof(*new));
2176 strncpy(entry->name, dev->name, IFNAMSIZ); 2180 entry->ifindex = dev->ifindex;
2177 list_add(&entry->list, &dcb_app_list); 2181 list_add(&entry->list, &dcb_app_list);
2178out: 2182out:
2179 spin_unlock(&dcb_lock); 2183 spin_unlock(&dcb_lock);
@@ -2194,8 +2198,10 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2194 struct dcb_app_type event; 2198 struct dcb_app_type event;
2195 int err = -ENOENT; 2199 int err = -ENOENT;
2196 2200
2197 memcpy(&event.name, dev->name, sizeof(event.name)); 2201 event.ifindex = dev->ifindex;
2198 memcpy(&event.app, del, sizeof(event.app)); 2202 memcpy(&event.app, del, sizeof(event.app));
2203 if (dev->dcbnl_ops->getdcbx)
2204 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2199 2205
2200 spin_lock(&dcb_lock); 2206 spin_lock(&dcb_lock);
2201 /* Search for existing match and remove it. */ 2207 /* Search for existing match and remove it. */
@@ -2203,7 +2209,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2203 if (itr->app.selector == del->selector && 2209 if (itr->app.selector == del->selector &&
2204 itr->app.protocol == del->protocol && 2210 itr->app.protocol == del->protocol &&
2205 itr->app.priority == del->priority && 2211 itr->app.priority == del->priority &&
2206 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2212 itr->ifindex == dev->ifindex) {
2207 list_del(&itr->list); 2213 list_del(&itr->list);
2208 kfree(itr); 2214 kfree(itr);
2209 err = 0; 2215 err = 0;
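
[Keying dcb_app_list entries by ifindex instead of by name makes the table immune to device renames and avoids repeated strncmp() calls. The match rule these hunks converge on, factored out for clarity (an illustrative helper, not in the patch):

	static bool dcb_app_match(const struct dcb_app_type *itr,
				  const struct net_device *dev,
				  const struct dcb_app *app)
	{
		return itr->app.selector == app->selector &&
		       itr->app.protocol == app->protocol &&
		       itr->ifindex == dev->ifindex;
	}
]
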
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040fc818..67164bb6ae4d 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
85 85
86static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) 86static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
87{ 87{
88 struct dccp_sock *dp = dccp_sk(sk);
89 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); 88 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
90 89
91 /* 90 /*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
98 DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); 97 DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
99 val = max_ratio; 98 val = max_ratio;
100 } 99 }
101 if (val > DCCPF_ACK_RATIO_MAX) 100 dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
102 val = DCCPF_ACK_RATIO_MAX; 101 min_t(u32, val, DCCPF_ACK_RATIO_MAX));
102}
103 103
104 if (val == dp->dccps_l_ack_ratio) 104static void ccid2_check_l_ack_ratio(struct sock *sk)
105 return; 105{
106 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
106 107
107 ccid2_pr_debug("changing local ack ratio to %u\n", val); 108 /*
108 dp->dccps_l_ack_ratio = val; 109 * After a loss, idle period, application limited period, or RTO we
110 * need to check that the ack ratio is still less than the congestion
111 * window. Otherwise, we will send an entire congestion window of
112 * packets and got no response because we haven't sent ack ratio
113 * packets yet.
114 * If the ack ratio does need to be reduced, we reduce it to half of
115 * the congestion window (or 1 if that's zero) instead of to the
116 * congestion window. This prevents problems if one ack is lost.
117 */
118 if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
119 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
120}
121
122static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
123{
124 dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
125 clamp_val(val, DCCPF_SEQ_WMIN,
126 DCCPF_SEQ_WMAX));
109} 127}
110 128
111static void ccid2_hc_tx_rto_expire(unsigned long data) 129static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
187 } 205 }
188 hc->tx_cwnd_used = 0; 206 hc->tx_cwnd_used = 0;
189 hc->tx_cwnd_stamp = now; 207 hc->tx_cwnd_stamp = now;
208
209 ccid2_check_l_ack_ratio(sk);
190} 210}
191 211
192/* This borrows the code of tcp_cwnd_restart() */ 212/* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
205 225
206 hc->tx_cwnd_stamp = now; 226 hc->tx_cwnd_stamp = now;
207 hc->tx_cwnd_used = 0; 227 hc->tx_cwnd_used = 0;
228
229 ccid2_check_l_ack_ratio(sk);
208} 230}
209 231
210static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) 232static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
405 unsigned int *maxincr) 427 unsigned int *maxincr)
406{ 428{
407 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 429 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
408 430 struct dccp_sock *dp = dccp_sk(sk);
409 if (hc->tx_cwnd < hc->tx_ssthresh) { 431 int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
410 if (*maxincr > 0 && ++hc->tx_packets_acked == 2) { 432
433 if (hc->tx_cwnd < dp->dccps_l_seq_win &&
434 r_seq_used < dp->dccps_r_seq_win) {
435 if (hc->tx_cwnd < hc->tx_ssthresh) {
436 if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
437 hc->tx_cwnd += 1;
438 *maxincr -= 1;
439 hc->tx_packets_acked = 0;
440 }
441 } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
411 hc->tx_cwnd += 1; 442 hc->tx_cwnd += 1;
412 *maxincr -= 1;
413 hc->tx_packets_acked = 0; 443 hc->tx_packets_acked = 0;
414 } 444 }
415 } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
416 hc->tx_cwnd += 1;
417 hc->tx_packets_acked = 0;
418 } 445 }
446
447 /*
448 * Adjust the local sequence window and the ack ratio to allow about
449 * 5 times the number of packets in the network (RFC 4340 7.5.2)
450 */
451 if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
452 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
453 else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
454 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
455
456 if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
457 ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
458 else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
459 ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
460
419 /* 461 /*
420 * FIXME: RTT is sampled several times per acknowledgment (for each 462 * FIXME: RTT is sampled several times per acknowledgment (for each
421 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK). 463 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
441 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; 483 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
442 hc->tx_ssthresh = max(hc->tx_cwnd, 2U); 484 hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
443 485
444 /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ 486 ccid2_check_l_ack_ratio(sk);
445 if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
446 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
447} 487}
448 488
449static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, 489static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
494 if (hc->tx_rpdupack >= NUMDUPACK) { 534 if (hc->tx_rpdupack >= NUMDUPACK) {
495 hc->tx_rpdupack = -1; /* XXX lame */ 535 hc->tx_rpdupack = -1; /* XXX lame */
496 hc->tx_rpseq = 0; 536 hc->tx_rpseq = 0;
497 537#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
538 /*
539 * FIXME: Ack Congestion Control is broken; in
 540 * its current state, instabilities occurred with
 541 * Ack Ratios greater than 1, causing hang-ups
542 * and long RTO timeouts. This needs to be fixed
543 * before opening up dynamic changes. -- gerrit
544 */
498 ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); 545 ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
546#endif
499 } 547 }
500 } 548 }
501 } 549 }
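
[One idiom worth spelling out: `hc->tx_cwnd / 2 ? : 1U` in ccid2_check_l_ack_ratio() and ccid2_congestion_event() is the GNU omitted-middle conditional, so both lines below compute max(tx_cwnd / 2, 1):

	u32 half = hc->tx_cwnd / 2 ? : 1U;			/* GNU shorthand */
	u32 same = hc->tx_cwnd / 2 ? hc->tx_cwnd / 2 : 1U;	/* expansion */
]
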
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index f585d330e1e5..18c97543e522 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
43#define CCID2_SEQBUF_LEN 1024 43#define CCID2_SEQBUF_LEN 1024
44#define CCID2_SEQBUF_MAX 128 44#define CCID2_SEQBUF_MAX 128
45 45
46/*
47 * Multiple of congestion window to keep the sequence window at
48 * (RFC 4340 7.5.2)
49 */
50#define CCID2_WIN_CHANGE_FACTOR 5
51
46/** 52/**
47 * struct ccid2_hc_tx_sock - CCID2 TX half connection 53 * struct ccid2_hc_tx_sock - CCID2 TX half connection
48 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 54 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
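
[CCID2_WIN_CHANGE_FACTOR encodes the RFC 4340, 7.5.2 advice of keeping the Sequence Window around five times the number of packets a sender can have in flight per RTT. Condensed into a single illustrative helper (not in the patch), the value ccid2_new_ack() steers dccps_l_seq_win toward is roughly:

	static u64 ccid2_target_seq_win(u32 tx_cwnd)
	{
		/* about 5x the congestion window, bounded by the
		 * feature's legal range */
		return clamp_val((u64)tx_cwnd * CCID2_WIN_CHANGE_FACTOR,
				 DCCPF_SEQ_WMIN, DCCPF_SEQ_WMAX);
	}
]
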
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb07229017..583490aaf56f 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
474 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk); 474 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
475} 475}
476 476
477extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
477extern int dccp_feat_finalise_settings(struct dccp_sock *dp); 478extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
478extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq); 479extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
479extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*, 480extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def952722..23cea0ee3101 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
12 * ----------- 12 * -----------
13 * o Feature negotiation is coordinated with connection setup (as in TCP), wild 13 * o Feature negotiation is coordinated with connection setup (as in TCP), wild
14 * changes of parameters of an established connection are not supported. 14 * changes of parameters of an established connection are not supported.
15 * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
15 * o All currently known SP features have 1-byte quantities. If in the future 16 * o All currently known SP features have 1-byte quantities. If in the future
16 * extensions of RFCs 4340..42 define features with item lengths larger than 17 * extensions of RFCs 4340..42 define features with item lengths larger than
17 * one byte, a feature-specific extension of the code will be required. 18 * one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
343 return dccp_feat_table[idx].activation_hdlr(sk, val, rx); 344 return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
344} 345}
345 346
347/**
348 * dccp_feat_activate - Activate feature value on socket
349 * @sk: fully connected DCCP socket (after handshake is complete)
350 * @feat_num: feature to activate, one of %dccp_feature_numbers
351 * @local: whether local (1) or remote (0) @feat_num is meant
352 * @fval: the value (SP or NN) to activate, or NULL to use the default value
353 * For general use this function is preferable over __dccp_feat_activate().
354 */
355static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
356 dccp_feat_val const *fval)
357{
358 return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
359}
360
346/* Test for "Req'd" feature (RFC 4340, 6.4) */ 361/* Test for "Req'd" feature (RFC 4340, 6.4) */
347static inline int dccp_feat_must_be_understood(u8 feat_num) 362static inline int dccp_feat_must_be_understood(u8 feat_num)
348{ 363{
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
650 return -1; 665 return -1;
651 if (pos->needs_mandatory && dccp_insert_option_mandatory(skb)) 666 if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
652 return -1; 667 return -1;
653 /* 668
654 * Enter CHANGING after transmitting the Change option (6.6.2). 669 if (skb->sk->sk_state == DCCP_OPEN &&
655 */ 670 (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
656 if (pos->state == FEAT_INITIALISING) 671 /*
657 pos->state = FEAT_CHANGING; 672 * Confirms don't get retransmitted (6.6.3) once the
673 * connection is in state OPEN
674 */
675 dccp_feat_list_pop(pos);
676 } else {
677 /*
678 * Enter CHANGING after transmitting the Change
679 * option (6.6.2).
680 */
681 if (pos->state == FEAT_INITIALISING)
682 pos->state = FEAT_CHANGING;
683 }
658 } 684 }
659 return 0; 685 return 0;
660} 686}
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
730 0, list, len); 756 0, list, len);
731} 757}
732 758
759/**
760 * dccp_feat_nn_get - Query current/pending value of NN feature
761 * @sk: DCCP socket of an established connection
762 * @feat: NN feature number from %dccp_feature_numbers
 763 * For a known NN feature, returns the value currently being negotiated, or
 764 * the current (confirmed) value if no negotiation is going on.
765 */
766u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
767{
768 if (dccp_feat_type(feat) == FEAT_NN) {
769 struct dccp_sock *dp = dccp_sk(sk);
770 struct dccp_feat_entry *entry;
771
772 entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
773 if (entry != NULL)
774 return entry->val.nn;
775
776 switch (feat) {
777 case DCCPF_ACK_RATIO:
778 return dp->dccps_l_ack_ratio;
779 case DCCPF_SEQUENCE_WINDOW:
780 return dp->dccps_l_seq_win;
781 }
782 }
783 DCCP_BUG("attempt to look up unsupported feature %u", feat);
784 return 0;
785}
786EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
787
788/**
789 * dccp_feat_signal_nn_change - Update NN values for an established connection
790 * @sk: DCCP socket of an established connection
791 * @feat: NN feature number from %dccp_feature_numbers
792 * @nn_val: the new value to use
793 * This function is used to communicate NN updates out-of-band.
794 */
795int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
796{
797 struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
798 dccp_feat_val fval = { .nn = nn_val };
799 struct dccp_feat_entry *entry;
800
801 if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
802 return 0;
803
804 if (dccp_feat_type(feat) != FEAT_NN ||
805 !dccp_feat_is_valid_nn_val(feat, nn_val))
806 return -EINVAL;
807
808 if (nn_val == dccp_feat_nn_get(sk, feat))
809 return 0; /* already set or negotiation under way */
810
811 entry = dccp_feat_list_lookup(fn, feat, 1);
812 if (entry != NULL) {
813 dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
814 (unsigned long long)entry->val.nn,
815 (unsigned long long)nn_val);
816 dccp_feat_list_pop(entry);
817 }
818
819 inet_csk_schedule_ack(sk);
820 return dccp_feat_push_change(fn, feat, 1, 0, &fval);
821}
822EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
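
[How a CCID consumes this API is visible in the ccid2.c hunks earlier in this patch; boiled down to a minimal illustrative caller:

	static void example_bump_ack_ratio(struct sock *sk, u32 new_ratio)
	{
		/* a no-op unless sk is OPEN or PARTOPEN; out-of-range
		 * values come back as -EINVAL */
		if (dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
					       min_t(u32, new_ratio,
						     DCCPF_ACK_RATIO_MAX)))
			DCCP_WARN("failed to signal Ack Ratio change\n");
	}
]
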
733 823
734/* 824/*
735 * Tracking features whose value depend on the choice of CCID 825 * Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
1187} 1277}
1188 1278
1189/** 1279/**
1280 * dccp_feat_handle_nn_established - Fast-path reception of NN options
1281 * @sk: socket of an established DCCP connection
1282 * @mandatory: whether @opt was preceded by a Mandatory option
1283 * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
1284 * @feat: NN number, one of %dccp_feature_numbers
1285 * @val: NN value
1286 * @len: length of @val in bytes
1287 * This function combines the functionality of change_recv/confirm_recv, with
1288 * the following differences (reset codes are the same):
1289 * - cleanup after receiving the Confirm;
1290 * - values are directly activated after successful parsing;
1291 * - deliberately restricted to NN features.
1292 * The restriction to NN features is essential since SP features can have non-
1293 * predictable outcomes (depending on the remote configuration), and are inter-
1294 * dependent (CCIDs for instance cause further dependencies).
1295 */
1296static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
1297 u8 feat, u8 *val, u8 len)
1298{
1299 struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
1300 const bool local = (opt == DCCPO_CONFIRM_R);
1301 struct dccp_feat_entry *entry;
1302 u8 type = dccp_feat_type(feat);
1303 dccp_feat_val fval;
1304
1305 dccp_feat_print_opt(opt, feat, val, len, mandatory);
1306
1307 /* Ignore non-mandatory unknown and non-NN features */
1308 if (type == FEAT_UNKNOWN) {
1309 if (local && !mandatory)
1310 return 0;
1311 goto fast_path_unknown;
1312 } else if (type != FEAT_NN) {
1313 return 0;
1314 }
1315
1316 /*
1317 * We don't accept empty Confirms, since in fast-path feature
1318 * negotiation the values are enabled immediately after sending
1319 * the Change option.
1320 * Empty Changes on the other hand are invalid (RFC 4340, 6.1).
1321 */
1322 if (len == 0 || len > sizeof(fval.nn))
1323 goto fast_path_unknown;
1324
1325 if (opt == DCCPO_CHANGE_L) {
1326 fval.nn = dccp_decode_value_var(val, len);
1327 if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
1328 goto fast_path_unknown;
1329
1330 if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
1331 dccp_feat_activate(sk, feat, local, &fval))
1332 return DCCP_RESET_CODE_TOO_BUSY;
1333
1334 /* set the `Ack Pending' flag to piggyback a Confirm */
1335 inet_csk_schedule_ack(sk);
1336
1337 } else if (opt == DCCPO_CONFIRM_R) {
1338 entry = dccp_feat_list_lookup(fn, feat, local);
1339 if (entry == NULL || entry->state != FEAT_CHANGING)
1340 return 0;
1341
1342 fval.nn = dccp_decode_value_var(val, len);
1343 /*
1344 * Just ignore a value that doesn't match our current value.
1345 * If the option changes twice within two RTTs, then at least
1346 * one CONFIRM will be received for the old value after a
1347 * new CHANGE was sent.
1348 */
1349 if (fval.nn != entry->val.nn)
1350 return 0;
1351
1352 /* Only activate after receiving the Confirm option (6.6.1). */
1353 dccp_feat_activate(sk, feat, local, &fval);
1354
1355 /* It has been confirmed - so remove the entry */
1356 dccp_feat_list_pop(entry);
1357
1358 } else {
1359 DCCP_WARN("Received illegal option %u\n", opt);
1360 goto fast_path_failed;
1361 }
1362 return 0;
1363
1364fast_path_unknown:
1365 if (!mandatory)
1366 return dccp_push_empty_confirm(fn, feat, local);
1367
1368fast_path_failed:
1369 return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
1370 : DCCP_RESET_CODE_OPTION_ERROR;
1371}
1372
1373/**
1190 * dccp_feat_parse_options - Process Feature-Negotiation Options 1374 * dccp_feat_parse_options - Process Feature-Negotiation Options
1191 * @sk: for general use and used by the client during connection setup 1375 * @sk: for general use and used by the client during connection setup
1192 * @dreq: used by the server during connection setup 1376 * @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
1221 return dccp_feat_confirm_recv(fn, mandatory, opt, feat, 1405 return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
1222 val, len, server); 1406 val, len, server);
1223 } 1407 }
1408 break;
1409 /*
1410 * Support for exchanging NN options on an established connection.
1411 */
1412 case DCCP_OPEN:
1413 case DCCP_PARTOPEN:
1414 return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
1415 val, len);
1224 } 1416 }
1225 return 0; /* ignore FN options in all other states */ 1417 return 0; /* ignore FN options in all other states */
1226} 1418}
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5e634e..90b957d34d26 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
129 129
130extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len); 130extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
131extern u64 dccp_decode_value_var(const u8 *bf, const u8 len); 131extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
132extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
132 133
133extern int dccp_insert_option_mandatory(struct sk_buff *skb); 134extern int dccp_insert_option_mandatory(struct sk_buff *skb);
134extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, 135extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b74f76117dcf..17ee85ce148d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -271,7 +271,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
271 &ireq6->loc_addr, 271 &ireq6->loc_addr,
272 &ireq6->rmt_addr); 272 &ireq6->rmt_addr);
273 ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); 273 ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
274 err = ip6_xmit(sk, skb, &fl6, opt); 274 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
275 err = net_xmit_eval(err); 275 err = net_xmit_eval(err);
276 } 276 }
277 277
@@ -326,7 +326,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
326 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); 326 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
327 if (!IS_ERR(dst)) { 327 if (!IS_ERR(dst)) {
328 skb_dst_set(skb, dst); 328 skb_dst_set(skb, dst);
329 ip6_xmit(ctl_sk, skb, &fl6, NULL); 329 ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
330 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 330 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
331 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 331 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
332 return; 332 return;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d942d9..e742f90a6858 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
184 dp->dccps_rate_last = jiffies; 184 dp->dccps_rate_last = jiffies;
185 dp->dccps_role = DCCP_ROLE_UNDEFINED; 185 dp->dccps_role = DCCP_ROLE_UNDEFINED;
186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; 186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
188 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen; 187 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
189 188
190 dccp_init_xmit_timers(sk); 189 dccp_init_xmit_timers(sk);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ba4faceec405..2ab16e12520c 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 	}
 
 	ifa->ifa_next = dn_db->ifa_list;
-	rcu_assign_pointer(dn_db->ifa_list, ifa);
+	RCU_INIT_POINTER(dn_db->ifa_list, ifa);
 
 	dn_ifaddr_notify(RTM_NEWADDR, ifa);
 	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
 	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
 
-	rcu_assign_pointer(dev->dn_ptr, dn_db);
+	RCU_INIT_POINTER(dev->dn_ptr, dn_db);
 	dn_db->dev = dev;
 	init_timer(&dn_db->timer);
 
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
 	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
 	if (!dn_db->neigh_parms) {
-		rcu_assign_pointer(dev->dn_ptr, NULL);
+		RCU_INIT_POINTER(dev->dn_ptr, NULL);
 		kfree(dn_db);
 		return NULL;
 	}
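
This file, like several below (devinet, fib_trie, gre, igmp), converts rcu_assign_pointer() to RCU_INIT_POINTER(). rcu_assign_pointer() issues a write barrier so that a reader which sees the new pointer also sees the pointee fully initialized; RCU_INIT_POINTER() omits that barrier, which is safe when the stored value is NULL or when no reader can reach the pointer yet. A minimal kernel-style sketch of the two cases, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *global_foo;

static int publish_foo(int val)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->val = val;
	/* barrier needed: readers may dereference f immediately */
	rcu_assign_pointer(global_foo, f);
	return 0;
}

static void unpublish_foo(void)
{
	struct foo *old = rcu_dereference_protected(global_foo, 1);

	/* storing NULL publishes nothing, so no barrier is needed */
	RCU_INIT_POINTER(global_foo, NULL);
	synchronize_rcu();	/* wait out readers still using 'old' */
	kfree(old);
}
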
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c37038..56cf9b8e1c7c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
 	.ndo_start_xmit		= dsa_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list	= dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
 	.ndo_start_xmit		= edsa_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list	= dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
 	.ndo_start_xmit		= trailer_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list	= dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
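
These three hunks drop .ndo_set_multicast_list, whose role was absorbed by .ndo_set_rx_mode in this kernel series: one callback now covers both unicast and multicast filter updates. A sketch of the consolidated pattern, with hypothetical driver names:

static void mydrv_set_rx_mode(struct net_device *dev)
{
	/* reprogram hardware filters from dev->uc and dev->mc here */
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_set_rx_mode = mydrv_set_rx_mode, /* replaces ndo_set_multicast_list */
};
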
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
new file mode 100644
index 000000000000..19d6aefe97d4
--- /dev/null
+++ b/net/ieee802154/6lowpan.c
@@ -0,0 +1,891 @@
1/*
2 * Copyright 2011, Siemens AG
3 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4 */
5
6/*
7 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24/* Jon's code is based on 6lowpan implementation for Contiki which is:
25 * Copyright (c) 2008, Swedish Institute of Computer Science.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. Neither the name of the Institute nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#define DEBUG
54
55#include <linux/bitops.h>
56#include <linux/if_arp.h>
57#include <linux/module.h>
58#include <linux/moduleparam.h>
59#include <linux/netdevice.h>
60#include <net/af_ieee802154.h>
61#include <net/ieee802154.h>
62#include <net/ieee802154_netdev.h>
63#include <net/ipv6.h>
64
65#include "6lowpan.h"
66
67/* TTL uncompression values */
68static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
69
70static LIST_HEAD(lowpan_devices);
71
72/*
 73 * Uncompression of link-local:
 74 * 0 -> 16 bytes from packet
 75 * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
 76 * 2 -> 2 bytes from prefix - zeroes + 2 from packet
 77 * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
 78 *
 79 * NOTE: the uncompress function changes a full nibble (0xf) to 0x10
80 * NOTE: 0x00 => no-autoconfig => unspecified
81 */
82static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
83
84/*
85 * Uncompression of ctx-based:
86 * 0 -> 0 bits from packet [unspecified / reserved]
87 * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
88 * 2 -> 8 bytes from prefix - zeroes + 2 from packet
89 * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
90 */
91static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
92
93/*
 94 * Uncompression of ctx-based multicast:
 95 * 0 -> 0 bits from packet
 96 * 1 -> 2 bytes from prefix - bunch of zeroes and 5 from packet
 97 * 2 -> 2 bytes from prefix - zeroes + 3 from packet
 98 * 3 -> 2 bytes from prefix - infer 1 byte from lladdr
99 */
100static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
101
102/* Link local prefix */
103static const u8 lowpan_llprefix[] = {0xfe, 0x80};
104
105/* private device info */
106struct lowpan_dev_info {
107 struct net_device *real_dev; /* real WPAN device ptr */
108 struct mutex dev_list_mtx; /* mutex for list ops */
109};
110
111struct lowpan_dev_record {
112 struct net_device *ldev;
113 struct list_head list;
114};
115
116static inline struct
117lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
118{
119 return netdev_priv(dev);
120}
121
122static inline void lowpan_address_flip(u8 *src, u8 *dest)
123{
124 int i;
125 for (i = 0; i < IEEE802154_ADDR_LEN; i++)
126 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
127}
128
 129/* list of all 6lowpan devices, used for packet delivery */
130/* print data in line */
131static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
132 unsigned char *buf, int len)
133{
134#ifdef DEBUG
135 if (msg)
136 pr_debug("(%s) %s: ", caller, msg);
137 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
138 16, 1, buf, len, false);
139#endif /* DEBUG */
140}
141
142/*
143 * print data in a table format:
144 *
145 * addr: xx xx xx xx xx xx
146 * addr: xx xx xx xx xx xx
147 * ...
148 */
149static inline void lowpan_raw_dump_table(const char *caller, char *msg,
150 unsigned char *buf, int len)
151{
152#ifdef DEBUG
153 if (msg)
154 pr_debug("(%s) %s:\n", caller, msg);
155 print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
156 16, 1, buf, len, false);
157#endif /* DEBUG */
158}
159
160static u8
161lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
162 const unsigned char *lladdr)
163{
164 u8 val = 0;
165
166 if (is_addr_mac_addr_based(ipaddr, lladdr))
167 val = 3; /* 0-bits */
168 else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
169 /* compress IID to 16 bits xxxx::XXXX */
170 memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
171 *hc06_ptr += 2;
172 val = 2; /* 16-bits */
173 } else {
174 /* do not compress IID => xxxx::IID */
175 memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
176 *hc06_ptr += 8;
177 val = 1; /* 64-bits */
178 }
179
180 return rol8(val, shift);
181}
182
183static void
184lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
185{
186 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
 187 /* second bit-flip (Universal/Local) is done according to RFC 2464 */
188 ipaddr->s6_addr[8] ^= 0x02;
189}
190
191/*
192 * Uncompress addresses based on a prefix and a postfix with zeroes in
193 * between. If the postfix is zero in length it will use the link address
194 * to configure the IP address (autoconf style).
195 * pref_post_count takes a byte where the first nibble specify prefix count
196 * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
197 */
198static int
199lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
200 u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
201{
202 u8 prefcount = pref_post_count >> 4;
203 u8 postcount = pref_post_count & 0x0f;
204
205 /* full nibble 15 => 16 */
206 prefcount = (prefcount == 15 ? 16 : prefcount);
207 postcount = (postcount == 15 ? 16 : postcount);
208
209 if (lladdr)
210 lowpan_raw_dump_inline(__func__, "linklocal address",
211 lladdr, IEEE802154_ALEN);
212 if (prefcount > 0)
213 memcpy(ipaddr, prefix, prefcount);
214
215 if (prefcount + postcount < 16)
216 memset(&ipaddr->s6_addr[prefcount], 0,
217 16 - (prefcount + postcount));
218
219 if (postcount > 0) {
220 memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
221 skb_pull(skb, postcount);
222 } else if (prefcount > 0) {
223 if (lladdr == NULL)
224 return -EINVAL;
225
226 /* no IID based configuration if no prefix and no data */
227 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
228 }
229
230 pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
231 postcount);
232 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
233
234 return 0;
235}
236
237static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
238{
239 u8 ret;
240
241 ret = skb->data[0];
242 skb_pull(skb, 1);
243
244 return ret;
245}
246
247static int lowpan_header_create(struct sk_buff *skb,
248 struct net_device *dev,
249 unsigned short type, const void *_daddr,
250 const void *_saddr, unsigned len)
251{
252 u8 tmp, iphc0, iphc1, *hc06_ptr;
253 struct ipv6hdr *hdr;
254 const u8 *saddr = _saddr;
255 const u8 *daddr = _daddr;
256 u8 *head;
257 struct ieee802154_addr sa, da;
258
259 if (type != ETH_P_IPV6)
260 return 0;
261 /* TODO:
 262 * if this packet isn't an ipv6 one, where should it be routed?
263 */
264 head = kzalloc(100, GFP_KERNEL);
265 if (head == NULL)
266 return -ENOMEM;
267
268 hdr = ipv6_hdr(skb);
269 hc06_ptr = head + 2;
270
271 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
272 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
273 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
274 hdr->hop_limit);
275
276 lowpan_raw_dump_table(__func__, "raw skb network header dump",
277 skb_network_header(skb), sizeof(struct ipv6hdr));
278
279 if (!saddr)
280 saddr = dev->dev_addr;
281
282 lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
283
284 /*
285 * As we copy some bit-length fields, in the IPHC encoding bytes,
286 * we sometimes use |=
287 * If the field is 0, and the current bit value in memory is 1,
288 * this does not work. We therefore reset the IPHC encoding here
289 */
290 iphc0 = LOWPAN_DISPATCH_IPHC;
291 iphc1 = 0;
292
293 /* TODO: context lookup */
294
295 lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
296
297 /*
298 * Traffic class, flow label
 299 * If flow label is 0, compress it. If traffic class is 0, compress it.
 300 * We have to process both at the same time, as the offset of the traffic
 301 * class depends on the presence of the version and flow label fields.
302 */
303
 304 /* hc06 format of TC is ECN | DSCP, original one is DSCP | ECN */
305 tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
306 tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
307
308 if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
309 (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
310 /* flow label can be compressed */
311 iphc0 |= LOWPAN_IPHC_FL_C;
312 if ((hdr->priority == 0) &&
313 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
314 /* compress (elide) all */
315 iphc0 |= LOWPAN_IPHC_TC_C;
316 } else {
317 /* compress only the flow label */
318 *hc06_ptr = tmp;
319 hc06_ptr += 1;
320 }
321 } else {
322 /* Flow label cannot be compressed */
323 if ((hdr->priority == 0) &&
324 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
325 /* compress only traffic class */
326 iphc0 |= LOWPAN_IPHC_TC_C;
327 *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
328 memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
329 hc06_ptr += 3;
330 } else {
331 /* compress nothing */
332 memcpy(hc06_ptr, &hdr, 4);
333 /* replace the top byte with new ECN | DSCP format */
334 *hc06_ptr = tmp;
335 hc06_ptr += 4;
336 }
337 }
338
339 /* NOTE: payload length is always compressed */
340
 341 /* Next Header is compressed if it is UDP */
342 if (hdr->nexthdr == UIP_PROTO_UDP)
343 iphc0 |= LOWPAN_IPHC_NH_C;
344
345/* TODO: next header compression */
346
347 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
348 *hc06_ptr = hdr->nexthdr;
349 hc06_ptr += 1;
350 }
351
352 /*
353 * Hop limit
354 * if 1: compress, encoding is 01
355 * if 64: compress, encoding is 10
356 * if 255: compress, encoding is 11
357 * else do not compress
358 */
359 switch (hdr->hop_limit) {
360 case 1:
361 iphc0 |= LOWPAN_IPHC_TTL_1;
362 break;
363 case 64:
364 iphc0 |= LOWPAN_IPHC_TTL_64;
365 break;
366 case 255:
367 iphc0 |= LOWPAN_IPHC_TTL_255;
368 break;
369 default:
370 *hc06_ptr = hdr->hop_limit;
371 break;
372 }
373
374 /* source address compression */
375 if (is_addr_unspecified(&hdr->saddr)) {
376 pr_debug("(%s): source address is unspecified, setting SAC\n",
377 __func__);
378 iphc1 |= LOWPAN_IPHC_SAC;
379 /* TODO: context lookup */
380 } else if (is_addr_link_local(&hdr->saddr)) {
381 pr_debug("(%s): source address is link-local\n", __func__);
382 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
383 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
384 } else {
385 pr_debug("(%s): send the full source address\n", __func__);
386 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
387 hc06_ptr += 16;
388 }
389
390 /* destination address compression */
391 if (is_addr_mcast(&hdr->daddr)) {
392 pr_debug("(%s): destination address is multicast", __func__);
393 iphc1 |= LOWPAN_IPHC_M;
394 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
395 pr_debug("compressed to 1 octet\n");
396 iphc1 |= LOWPAN_IPHC_DAM_11;
397 /* use last byte */
398 *hc06_ptr = hdr->daddr.s6_addr[15];
399 hc06_ptr += 1;
400 } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
401 pr_debug("compressed to 4 octets\n");
402 iphc1 |= LOWPAN_IPHC_DAM_10;
403 /* second byte + the last three */
404 *hc06_ptr = hdr->daddr.s6_addr[1];
405 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
406 hc06_ptr += 4;
407 } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
408 pr_debug("compressed to 6 octets\n");
409 iphc1 |= LOWPAN_IPHC_DAM_01;
410 /* second byte + the last five */
411 *hc06_ptr = hdr->daddr.s6_addr[1];
412 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
413 hc06_ptr += 6;
414 } else {
415 pr_debug("using full address\n");
416 iphc1 |= LOWPAN_IPHC_DAM_00;
417 memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
418 hc06_ptr += 16;
419 }
420 } else {
421 pr_debug("(%s): destination address is unicast: ", __func__);
422 /* TODO: context lookup */
423 if (is_addr_link_local(&hdr->daddr)) {
424 pr_debug("destination address is link-local\n");
425 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
426 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
427 } else {
428 pr_debug("using full address\n");
429 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
430 hc06_ptr += 16;
431 }
432 }
433
434 /* TODO: UDP header compression */
435 /* TODO: Next Header compression */
436
437 head[0] = iphc0;
438 head[1] = iphc1;
439
440 skb_pull(skb, sizeof(struct ipv6hdr));
441 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
442
443 kfree(head);
444
445 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
446 skb->len);
447
448 /*
 449 * NOTE1: I'm still unsure whether it is right to create the compression
 450 * and WPAN header here rather than later in xmit, so wait for
 451 * an opinion from the net maintainers.
452 */
453 /*
454 * NOTE2: to be absolutely correct, we must derive PANid information
455 * from MAC subif of the 'dev' and 'real_dev' network devices, but
456 * this isn't implemented in mainline yet, so currently we assign 0xff
457 */
458 {
459 /* prepare wpan address data */
460 sa.addr_type = IEEE802154_ADDR_LONG;
461 sa.pan_id = 0xff;
462
463 da.addr_type = IEEE802154_ADDR_LONG;
464 da.pan_id = 0xff;
465
466 memcpy(&(da.hwaddr), daddr, 8);
467 memcpy(&(sa.hwaddr), saddr, 8);
468
469 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
470 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
471 type, (void *)&da, (void *)&sa, skb->len);
472 }
473}
474
475static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
476{
477 struct sk_buff *new;
478 struct lowpan_dev_record *entry;
479 int stat = NET_RX_SUCCESS;
480
481 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
482 GFP_ATOMIC);
483 kfree_skb(skb);
484
485 if (!new)
486 return -ENOMEM;
487
488 skb_push(new, sizeof(struct ipv6hdr));
489 skb_reset_network_header(new);
490 skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
491
492 new->protocol = htons(ETH_P_IPV6);
493 new->pkt_type = PACKET_HOST;
494
495 rcu_read_lock();
496 list_for_each_entry_rcu(entry, &lowpan_devices, list)
497 if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
498 skb = skb_copy(new, GFP_ATOMIC);
499 if (!skb) {
500 stat = -ENOMEM;
501 break;
502 }
503
504 skb->dev = entry->ldev;
505 stat = netif_rx(skb);
506 }
507 rcu_read_unlock();
508
509 kfree_skb(new);
510
511 return stat;
512}
513
514static int
515lowpan_process_data(struct sk_buff *skb)
516{
517 struct ipv6hdr hdr;
518 u8 tmp, iphc0, iphc1, num_context = 0;
519 u8 *_saddr, *_daddr;
520 int err;
521
522 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
523 skb->len);
524 /* at least two bytes will be used for the encoding */
525 if (skb->len < 2)
526 goto drop;
527 iphc0 = lowpan_fetch_skb_u8(skb);
528 iphc1 = lowpan_fetch_skb_u8(skb);
529
530 _saddr = mac_cb(skb)->sa.hwaddr;
531 _daddr = mac_cb(skb)->da.hwaddr;
532
533 pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
534
 535 /* another header byte follows if the CID flag is set */
536 if (iphc1 & LOWPAN_IPHC_CID) {
537 pr_debug("(%s): CID flag is set, increase header with one\n",
538 __func__);
539 if (!skb->len)
540 goto drop;
541 num_context = lowpan_fetch_skb_u8(skb);
542 }
543
544 hdr.version = 6;
545
546 /* Traffic Class and Flow Label */
547 switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
548 /*
 549 * Traffic Class and Flow Label carried in-line
550 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
551 */
552 case 0: /* 00b */
553 if (!skb->len)
554 goto drop;
555 tmp = lowpan_fetch_skb_u8(skb);
556 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
557 skb_pull(skb, 3);
558 hdr.priority = ((tmp >> 2) & 0x0f);
559 hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
560 (hdr.flow_lbl[0] & 0x0f);
561 break;
562 /*
563 * Traffic class carried in-line
564 * ECN + DSCP (1 byte), Flow Label is elided
565 */
566 case 1: /* 10b */
567 if (!skb->len)
568 goto drop;
569 tmp = lowpan_fetch_skb_u8(skb);
570 hdr.priority = ((tmp >> 2) & 0x0f);
571 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
572 hdr.flow_lbl[1] = 0;
573 hdr.flow_lbl[2] = 0;
574 break;
575 /*
576 * Flow Label carried in-line
577 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
578 */
579 case 2: /* 01b */
580 if (!skb->len)
581 goto drop;
582 tmp = lowpan_fetch_skb_u8(skb);
583 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
584 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
585 skb_pull(skb, 2);
586 break;
587 /* Traffic Class and Flow Label are elided */
588 case 3: /* 11b */
589 hdr.priority = 0;
590 hdr.flow_lbl[0] = 0;
591 hdr.flow_lbl[1] = 0;
592 hdr.flow_lbl[2] = 0;
593 break;
594 default:
595 break;
596 }
597
598 /* Next Header */
599 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
600 /* Next header is carried inline */
601 if (!skb->len)
602 goto drop;
603 hdr.nexthdr = lowpan_fetch_skb_u8(skb);
604 pr_debug("(%s): NH flag is set, next header is carried "
605 "inline: %02x\n", __func__, hdr.nexthdr);
606 }
607
608 /* Hop Limit */
609 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
610 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
611 else {
612 if (!skb->len)
613 goto drop;
614 hdr.hop_limit = lowpan_fetch_skb_u8(skb);
615 }
616
617 /* Extract SAM to the tmp variable */
618 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
619
620 /* Source address uncompression */
621 pr_debug("(%s): source address stateless compression\n", __func__);
622 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
623 lowpan_unc_llconf[tmp], skb->data);
624 if (err)
625 goto drop;
626
627 /* Extract DAM to the tmp variable */
628 tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
629
630 /* check for Multicast Compression */
631 if (iphc1 & LOWPAN_IPHC_M) {
632 if (iphc1 & LOWPAN_IPHC_DAC) {
633 pr_debug("(%s): destination address context-based "
634 "multicast compression\n", __func__);
635 /* TODO: implement this */
636 } else {
637 u8 prefix[] = {0xff, 0x02};
638
639 pr_debug("(%s): destination address non-context-based"
640 " multicast compression\n", __func__);
641 if (0 < tmp && tmp < 3) {
642 if (!skb->len)
643 goto drop;
644 else
645 prefix[1] = lowpan_fetch_skb_u8(skb);
646 }
647
648 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
649 lowpan_unc_mxconf[tmp], NULL);
650 if (err)
651 goto drop;
652 }
653 } else {
654 pr_debug("(%s): destination address stateless compression\n",
655 __func__);
656 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
657 lowpan_unc_llconf[tmp], skb->data);
658 if (err)
659 goto drop;
660 }
661
662 /* TODO: UDP header parse */
663
 664 /* Not a fragmented packet */
665 hdr.payload_len = htons(skb->len);
666
667 pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
668 skb_headroom(skb), skb->len);
669
670 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
671 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
672 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
673
674 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
675 sizeof(hdr));
676 return lowpan_skb_deliver(skb, &hdr);
677drop:
678 kfree_skb(skb);
679 return -EINVAL;
680}
681
682static int lowpan_set_address(struct net_device *dev, void *p)
683{
684 struct sockaddr *sa = p;
685
686 if (netif_running(dev))
687 return -EBUSY;
688
689 /* TODO: validate addr */
690 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
691
692 return 0;
693}
694
695static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
696{
697 int err = 0;
698
699 pr_debug("(%s): package xmit\n", __func__);
700
701 skb->dev = lowpan_dev_info(dev)->real_dev;
702 if (skb->dev == NULL) {
703 pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
704 dev_kfree_skb(skb);
705 } else
706 err = dev_queue_xmit(skb);
707
708 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
709}
710
711static void lowpan_dev_free(struct net_device *dev)
712{
713 dev_put(lowpan_dev_info(dev)->real_dev);
714 free_netdev(dev);
715}
716
717static struct header_ops lowpan_header_ops = {
718 .create = lowpan_header_create,
719};
720
721static const struct net_device_ops lowpan_netdev_ops = {
722 .ndo_start_xmit = lowpan_xmit,
723 .ndo_set_mac_address = lowpan_set_address,
724};
725
726static void lowpan_setup(struct net_device *dev)
727{
728 pr_debug("(%s)\n", __func__);
729
730 dev->addr_len = IEEE802154_ADDR_LEN;
731 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
732 dev->type = ARPHRD_IEEE802154;
733 dev->features = NETIF_F_NO_CSUM;
734 /* Frame Control + Sequence Number + Address fields + Security Header */
735 dev->hard_header_len = 2 + 1 + 20 + 14;
736 dev->needed_tailroom = 2; /* FCS */
737 dev->mtu = 1281;
738 dev->tx_queue_len = 0;
739 dev->flags = IFF_NOARP | IFF_BROADCAST;
740 dev->watchdog_timeo = 0;
741
742 dev->netdev_ops = &lowpan_netdev_ops;
743 dev->header_ops = &lowpan_header_ops;
744 dev->destructor = lowpan_dev_free;
745}
746
747static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
748{
749 pr_debug("(%s)\n", __func__);
750
751 if (tb[IFLA_ADDRESS]) {
752 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
753 return -EINVAL;
754 }
755 return 0;
756}
757
758static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
759 struct packet_type *pt, struct net_device *orig_dev)
760{
761 if (!netif_running(dev))
762 goto drop;
763
764 if (dev->type != ARPHRD_IEEE802154)
765 goto drop;
766
767 /* check that it's our buffer */
768 if ((skb->data[0] & 0xe0) == 0x60)
769 lowpan_process_data(skb);
770
771 return NET_RX_SUCCESS;
772
773drop:
774 kfree_skb(skb);
775 return NET_RX_DROP;
776}
777
778static int lowpan_newlink(struct net *src_net, struct net_device *dev,
779 struct nlattr *tb[], struct nlattr *data[])
780{
781 struct net_device *real_dev;
782 struct lowpan_dev_record *entry;
783
784 pr_debug("(%s)\n", __func__);
785
786 if (!tb[IFLA_LINK])
787 return -EINVAL;
788 /* find and hold real wpan device */
789 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
790 if (!real_dev)
791 return -ENODEV;
792
793 lowpan_dev_info(dev)->real_dev = real_dev;
794 mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
795
796 entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
797 if (!entry) {
798 dev_put(real_dev);
799 lowpan_dev_info(dev)->real_dev = NULL;
800 return -ENOMEM;
801 }
802
803 entry->ldev = dev;
804
805 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
806 INIT_LIST_HEAD(&entry->list);
807 list_add_tail(&entry->list, &lowpan_devices);
808 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
809
810 register_netdevice(dev);
811
812 return 0;
813}
814
815static void lowpan_dellink(struct net_device *dev, struct list_head *head)
816{
817 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
818 struct net_device *real_dev = lowpan_dev->real_dev;
819 struct lowpan_dev_record *entry;
820 struct lowpan_dev_record *tmp;
821
822 ASSERT_RTNL();
823
824 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
825 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
826 if (entry->ldev == dev) {
827 list_del(&entry->list);
828 kfree(entry);
829 }
830 }
831 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
832
833 mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
834
835 unregister_netdevice_queue(dev, head);
836
837 dev_put(real_dev);
838}
839
840static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
841 .kind = "lowpan",
842 .priv_size = sizeof(struct lowpan_dev_info),
843 .setup = lowpan_setup,
844 .newlink = lowpan_newlink,
845 .dellink = lowpan_dellink,
846 .validate = lowpan_validate,
847};
848
849static inline int __init lowpan_netlink_init(void)
850{
851 return rtnl_link_register(&lowpan_link_ops);
852}
853
854static inline void __init lowpan_netlink_fini(void)
855{
856 rtnl_link_unregister(&lowpan_link_ops);
857}
858
859static struct packet_type lowpan_packet_type = {
860 .type = __constant_htons(ETH_P_IEEE802154),
861 .func = lowpan_rcv,
862};
863
864static int __init lowpan_init_module(void)
865{
866 int err = 0;
867
868 pr_debug("(%s)\n", __func__);
869
870 err = lowpan_netlink_init();
871 if (err < 0)
872 goto out;
873
874 dev_add_pack(&lowpan_packet_type);
875out:
876 return err;
877}
878
879static void __exit lowpan_cleanup_module(void)
880{
881 pr_debug("(%s)\n", __func__);
882
883 lowpan_netlink_fini();
884
885 dev_remove_pack(&lowpan_packet_type);
886}
887
888module_init(lowpan_init_module);
889module_exit(lowpan_cleanup_module);
890MODULE_LICENSE("GPL");
891MODULE_ALIAS_RTNL_LINK("lowpan");
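
The least obvious step in lowpan_header_create() above is the one-liner that reorders the traffic-class byte: the IPv6 header stores DSCP|ECN, while the hc06 encoding wants ECN|DSCP. A standalone sketch of that shuffle with a worked value:

#include <stdint.h>
#include <stdio.h>

/* mirrors the tmp computation in lowpan_header_create() */
static uint8_t tc_to_hc06(uint8_t priority, uint8_t flow_lbl0)
{
	uint8_t tmp = (priority << 4) | (flow_lbl0 >> 4);	/* DSCP|ECN */

	return (uint8_t)(((tmp & 0x03) << 6) | (tmp >> 2));	/* ECN|DSCP */
}

int main(void)
{
	/* TC byte 0xb9 = DSCP 46 (EF), ECN 01 -> hc06 byte 0x6e */
	printf("0x%02x\n", tc_to_hc06(0x0b, 0x90));
	return 0;
}

On the management side, the rtnl_link_ops registration ("lowpan", newlink requiring IFLA_LINK) means a 6lowpan interface is created on top of a WPAN device with the usual netlink idiom, e.g. `ip link add link wpan0 name lowpan0 type lowpan` (assuming an iproute2 recent enough to pass IFLA_LINK for arbitrary link kinds).
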
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
new file mode 100644
index 000000000000..5d8cf80b930d
--- /dev/null
+++ b/net/ieee802154/6lowpan.h
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2011, Siemens AG
3 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4 */
5
6/*
7 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24/* Jon's code is based on 6lowpan implementation for Contiki which is:
25 * Copyright (c) 2008, Swedish Institute of Computer Science.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. Neither the name of the Institute nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#ifndef __6LOWPAN_H__
54#define __6LOWPAN_H__
55
 56/* need to know the address length to manipulate it */
57#define IEEE802154_ALEN 8
58
59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
61#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
62#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
63
64/*
65 * ipv6 address based on mac
 66 * second bit-flip (Universal/Local) is done according to RFC 2464
67 */
68#define is_addr_mac_addr_based(a, m) \
69 ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
70 (((a)->s6_addr[9]) == (m)[1]) && \
71 (((a)->s6_addr[10]) == (m)[2]) && \
72 (((a)->s6_addr[11]) == (m)[3]) && \
73 (((a)->s6_addr[12]) == (m)[4]) && \
74 (((a)->s6_addr[13]) == (m)[5]) && \
75 (((a)->s6_addr[14]) == (m)[6]) && \
76 (((a)->s6_addr[15]) == (m)[7]))
77
78/* ipv6 address is unspecified */
79#define is_addr_unspecified(a) \
80 ((((a)->s6_addr32[0]) == 0) && \
81 (((a)->s6_addr32[1]) == 0) && \
82 (((a)->s6_addr32[2]) == 0) && \
83 (((a)->s6_addr32[3]) == 0))
84
85/* compare ipv6 addresses prefixes */
86#define ipaddr_prefixcmp(addr1, addr2, length) \
87 (memcmp(addr1, addr2, length >> 3) == 0)
88
 89/* link-local, i.e. FE80::/10 */
90#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
91
92/*
93 * check whether we can compress the IID to 16 bits,
 94 * it is only possible for unicast addresses whose first 49 IID bits are zero.
95 */
96#define lowpan_is_iid_16_bit_compressable(a) \
97 ((((a)->s6_addr16[4]) == 0) && \
98 (((a)->s6_addr16[5]) == 0) && \
99 (((a)->s6_addr16[6]) == 0) && \
100 ((((a)->s6_addr[14]) & 0x80) == 0))
101
102/* multicast address */
103#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
104
105/* check whether the 112-bit gid of the multicast address is mappable to: */
106
107/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
108#define lowpan_is_mcast_addr_compressable(a) \
109 ((((a)->s6_addr16[1]) == 0) && \
110 (((a)->s6_addr16[2]) == 0) && \
111 (((a)->s6_addr16[3]) == 0) && \
112 (((a)->s6_addr16[4]) == 0) && \
113 (((a)->s6_addr16[5]) == 0) && \
114 (((a)->s6_addr16[6]) == 0) && \
115 (((a)->s6_addr[14]) == 0) && \
116 ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
117
118/* 48 bits, FFXX::00XX:XXXX:XXXX */
119#define lowpan_is_mcast_addr_compressable48(a) \
120 ((((a)->s6_addr16[1]) == 0) && \
121 (((a)->s6_addr16[2]) == 0) && \
122 (((a)->s6_addr16[3]) == 0) && \
123 (((a)->s6_addr16[4]) == 0) && \
124 (((a)->s6_addr[10]) == 0))
125
126/* 32 bits, FFXX::00XX:XXXX */
127#define lowpan_is_mcast_addr_compressable32(a) \
128 ((((a)->s6_addr16[1]) == 0) && \
129 (((a)->s6_addr16[2]) == 0) && \
130 (((a)->s6_addr16[3]) == 0) && \
131 (((a)->s6_addr16[4]) == 0) && \
132 (((a)->s6_addr16[5]) == 0) && \
133 (((a)->s6_addr[12]) == 0))
134
135/* 8 bits, FF02::00XX */
136#define lowpan_is_mcast_addr_compressable8(a) \
137 ((((a)->s6_addr[1]) == 2) && \
138 (((a)->s6_addr16[1]) == 0) && \
139 (((a)->s6_addr16[2]) == 0) && \
140 (((a)->s6_addr16[3]) == 0) && \
141 (((a)->s6_addr16[4]) == 0) && \
142 (((a)->s6_addr16[5]) == 0) && \
143 (((a)->s6_addr16[6]) == 0) && \
144 (((a)->s6_addr[14]) == 0))
145
146#define lowpan_is_addr_broadcast(a) \
147 ((((a)[0]) == 0xFF) && \
148 (((a)[1]) == 0xFF) && \
149 (((a)[2]) == 0xFF) && \
150 (((a)[3]) == 0xFF) && \
151 (((a)[4]) == 0xFF) && \
152 (((a)[5]) == 0xFF) && \
153 (((a)[6]) == 0xFF) && \
154 (((a)[7]) == 0xFF))
155
156#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
157#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
158#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
159#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
160#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
161
162/*
163 * Values of fields within the IPHC encoding first byte
164 * (C stands for compressed and I for inline)
165 */
166#define LOWPAN_IPHC_TF 0x18
167
168#define LOWPAN_IPHC_FL_C 0x10
169#define LOWPAN_IPHC_TC_C 0x08
170#define LOWPAN_IPHC_NH_C 0x04
171#define LOWPAN_IPHC_TTL_1 0x01
172#define LOWPAN_IPHC_TTL_64 0x02
173#define LOWPAN_IPHC_TTL_255 0x03
174#define LOWPAN_IPHC_TTL_I 0x00
175
176
177/* Values of fields within the IPHC encoding second byte */
178#define LOWPAN_IPHC_CID 0x80
179
180#define LOWPAN_IPHC_SAC 0x40
181#define LOWPAN_IPHC_SAM_00 0x00
182#define LOWPAN_IPHC_SAM_01 0x10
183#define LOWPAN_IPHC_SAM_10 0x20
184#define LOWPAN_IPHC_SAM 0x30
185
186#define LOWPAN_IPHC_SAM_BIT 4
187
188#define LOWPAN_IPHC_M 0x08
189#define LOWPAN_IPHC_DAC 0x04
190#define LOWPAN_IPHC_DAM_00 0x00
191#define LOWPAN_IPHC_DAM_01 0x01
192#define LOWPAN_IPHC_DAM_10 0x02
193#define LOWPAN_IPHC_DAM_11 0x03
194
195#define LOWPAN_IPHC_DAM_BIT 0
196/*
197 * LOWPAN_UDP encoding (works together with IPHC)
198 */
199#define LOWPAN_NHC_UDP_MASK 0xF8
200#define LOWPAN_NHC_UDP_ID 0xF0
201#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
202#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
203
204/* values for port compression, _with checksum_ ie bit 5 set to 0 */
205#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
206#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
207 dest = 0xF0 + 8 bit inline */
208#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
209 dest = 16 bit inline */
210#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
211
212#endif /* __6LOWPAN_H__ */
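
One wrinkle worth noting in the header above: is_addr_link_local() compares the raw first 16-bit word against 0x80FE, i.e. the bytes fe 80 as read on a little-endian host, so the macro is not endian-clean (and it tests a full /16 rather than the formal FE80::/10, which is fine in practice since real link-local addresses start fe80:0000). A portable spelling would be, for instance (hypothetical variant, not part of this patch):

#include <linux/in6.h>
#include <asm/byteorder.h>

/* endian-safe: s6_addr16 holds network-order halfwords */
#define is_addr_link_local(a) ((a)->s6_addr16[0] == htons(0xfe80))
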
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1c1de97d264a..7dee65052925 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -10,3 +10,9 @@ config IEEE802154
 
 	  Say Y here to compile LR-WPAN support into the kernel or say M to
 	  compile it as modules.
+
+config IEEE802154_6LOWPAN
+	tristate "6lowpan support over IEEE 802.15.4"
+	depends on IEEE802154 && IPV6
+	---help---
+	  IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 5761185f884e..d7716d64c6bb 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
+
 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
 af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd2b9478ddd1..1b5096a9875a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -893,7 +893,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 EXPORT_SYMBOL(inet_ioctl);
 
 #ifdef CONFIG_COMPAT
-int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	struct sock *sk = sock->sk;
 	int err = -ENOIOCTLCMD;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2c2a98e402e7..86f3b885b4f3 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -476,7 +476,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 	doi = doi_def->doi;
 	doi_type = doi_def->type;
 
-	if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
+	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
 		goto doi_add_return;
 	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
 		switch (doi_def->tags[iter]) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd06dd00..c6b5092f29a1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
 	ip_mc_up(in_dev);
 
 	/* we can receive as soon as ip_ptr is set -- do this last */
-	rcu_assign_pointer(dev->ip_ptr, in_dev);
+	RCU_INIT_POINTER(dev->ip_ptr, in_dev);
 out:
 	return in_dev;
 out_kfree:
@@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev)
 		inet_free_ifa(ifa);
 	}
 
-	rcu_assign_pointer(dev->ip_ptr, NULL);
+	RCU_INIT_POINTER(dev->ip_ptr, NULL);
 
 	devinet_sysctl_unregister(in_dev);
 	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 	switch (event) {
 	case NETDEV_REGISTER:
 		printk(KERN_DEBUG "inetdev_event: bug\n");
-		rcu_assign_pointer(dev->ip_ptr, NULL);
+		RCU_INIT_POINTER(dev->ip_ptr, NULL);
 		break;
 	case NETDEV_UP:
 		if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index de9e2978476f..89d6f71a6a99 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -204,7 +204,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
 	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
 }
 
-/* Same as rcu_assign_pointer
+/* Same as RCU_INIT_POINTER
  * but that macro() assumes that value is a pointer.
  */
 static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -528,7 +528,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
 	if (n)
 		node_set_parent(n, tn);
 
-	rcu_assign_pointer(tn->child[i], n);
+	RCU_INIT_POINTER(tn->child[i], n);
 }
 
 #define MAX_WORK 10
@@ -1014,7 +1014,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 
 		tp = node_parent((struct rt_trie_node *) tn);
 		if (!tp)
-			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+			RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
 
 		tnode_free_flush();
 		if (!tp)
@@ -1026,7 +1026,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 	if (IS_TNODE(tn))
 		tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
-	rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+	RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
 	tnode_free_flush();
 }
 
@@ -1163,7 +1163,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 			put_child(t, (struct tnode *)tp, cindex,
 				  (struct rt_trie_node *)tn);
 		} else {
-			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+			RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
 			tp = tn;
 		}
 	}
@@ -1621,7 +1621,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
 		put_child(t, (struct tnode *)tp, cindex, NULL);
 		trie_rebalance(t, tp);
 	} else
-		rcu_assign_pointer(t->trie, NULL);
+		RCU_INIT_POINTER(t->trie, NULL);
 
 	free_leaf(l);
 }
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index dbfc21de3479..8cb1ebb7cd74 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 	if (gre_proto[version])
 		goto err_out_unlock;
 
-	rcu_assign_pointer(gre_proto[version], proto);
+	RCU_INIT_POINTER(gre_proto[version], proto);
 	spin_unlock(&gre_proto_lock);
 	return 0;
 
@@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 	if (rcu_dereference_protected(gre_proto[version],
 			lockdep_is_held(&gre_proto_lock)) != proto)
 		goto err_out_unlock;
-	rcu_assign_pointer(gre_proto[version], NULL);
+	RCU_INIT_POINTER(gre_proto[version], NULL);
 	spin_unlock(&gre_proto_lock);
 	synchronize_rcu();
 	return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 23ef31baa1af..ab188ae12fd9 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1152,10 +1152,9 @@ static int __net_init icmp_sk_init(struct net *net)
 		net->ipv4.icmp_sk[i] = sk;
 
 		/* Enough space for 2 64K ICMP packets, including
-		 * sk_buff struct overhead.
+		 * sk_buff/skb_shared_info struct overhead.
 		 */
-		sk->sk_sndbuf =
-			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
+		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
 
 		/*
 		 * Speedup sock_wfree()
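
The replaced formula undercounted: a 64K ICMP packet really costs its payload plus the aligned sk_buff and skb_shared_info metadata, and the old expression ignored the latter entirely. SKB_TRUESIZE() captures that; its definition in include/linux/skbuff.h of this series is, to my reading, along these lines:

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
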
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d577199eabd5..c7472eff2d51 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
 
 	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
 	   We will get multicast token leakage, when IFF_MULTICAST
-	   is changed. This check should be done in dev->set_multicast_list
+	   is changed. This check should be done in ndo_set_rx_mode
 	   routine. Something sort of:
 	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
 	   --ANK
@@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
 	im->next_rcu = in_dev->mc_list;
 	in_dev->mc_count++;
-	rcu_assign_pointer(in_dev->mc_list, im);
+	RCU_INIT_POINTER(in_dev->mc_list, im);
 
 #ifdef CONFIG_IP_MULTICAST
 	igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 	iml->next_rcu = inet->mc_list;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
-	rcu_assign_pointer(inet->mc_list, iml);
+	RCU_INIT_POINTER(inet->mc_list, iml);
 	ip_mc_inc_group(in_dev, addr);
 	err = 0;
 done:
@@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 	}
 	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
 			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
-	rcu_assign_pointer(iml->sflist, NULL);
+	RCU_INIT_POINTER(iml->sflist, NULL);
 	/* decrease mem now to avoid the memleak warning */
 	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
 	kfree_rcu(psf, rcu);
@@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
 			kfree_rcu(psl, rcu);
 		}
-		rcu_assign_pointer(pmc->sflist, newpsl);
+		RCU_INIT_POINTER(pmc->sflist, newpsl);
 		psl = newpsl;
 	}
 	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
@@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	} else
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 				     0, NULL, 0);
-	rcu_assign_pointer(pmc->sflist, newpsl);
+	RCU_INIT_POINTER(pmc->sflist, newpsl);
 	pmc->sfmode = msf->imsf_fmode;
 	err = 0;
 done:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 389a2e6a17fd..f5e2bdaef949 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,9 @@ static int inet_csk_diag_fill(struct sock *sk,
 			       icsk->icsk_ca_ops->name);
 	}
 
+	if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
+		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = sk->sk_state;
 	r->idiag_timer = 0;
@@ -130,6 +133,8 @@ static int inet_csk_diag_fill(struct sock *sk,
 			       &np->rcv_saddr);
 		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
 			       &np->daddr);
+		if (ext & (1 << (INET_DIAG_TOS - 1)))
+			RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
 	}
 #endif
 
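
INET_DIAG_TOS is requested like any other inet_diag extension: userspace sets bit (attribute - 1) in idiag_ext, which is exactly the test both hunks add on the kernel side. A minimal userspace sketch (hypothetical helper):

#include <linux/inet_diag.h>
#include <sys/socket.h>

static void want_tos(struct inet_diag_req *req)
{
	/* mirrors the kernel check: ext & (1 << (INET_DIAG_TOS - 1)) */
	req->idiag_ext |= 1 << (INET_DIAG_TOS - 1);
}
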
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ef7ae6049a51..cc280a3f4f96 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -244,11 +244,11 @@ static void lro_add_frags(struct net_lro_desc *lro_desc,
244 skb->truesize += truesize; 244 skb->truesize += truesize;
245 245
246 skb_frags[0].page_offset += hlen; 246 skb_frags[0].page_offset += hlen;
247 skb_frags[0].size -= hlen; 247 skb_frag_size_sub(&skb_frags[0], hlen);
248 248
249 while (tcp_data_len > 0) { 249 while (tcp_data_len > 0) {
250 *(lro_desc->next_frag) = *skb_frags; 250 *(lro_desc->next_frag) = *skb_frags;
251 tcp_data_len -= skb_frags->size; 251 tcp_data_len -= skb_frag_size(skb_frags);
252 lro_desc->next_frag++; 252 lro_desc->next_frag++;
253 skb_frags++; 253 skb_frags++;
254 skb_shinfo(skb)->nr_frags++; 254 skb_shinfo(skb)->nr_frags++;
@@ -400,14 +400,14 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
400 skb_frags = skb_shinfo(skb)->frags; 400 skb_frags = skb_shinfo(skb)->frags;
401 while (data_len > 0) { 401 while (data_len > 0) {
402 *skb_frags = *frags; 402 *skb_frags = *frags;
403 data_len -= frags->size; 403 data_len -= skb_frag_size(frags);
404 skb_frags++; 404 skb_frags++;
405 frags++; 405 frags++;
406 skb_shinfo(skb)->nr_frags++; 406 skb_shinfo(skb)->nr_frags++;
407 } 407 }
408 408
409 skb_shinfo(skb)->frags[0].page_offset += hdr_len; 409 skb_shinfo(skb)->frags[0].page_offset += hdr_len;
410 skb_shinfo(skb)->frags[0].size -= hdr_len; 410 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
411 411
412 skb->ip_summed = ip_summed; 412 skb->ip_summed = ip_summed;
413 skb->csum = sum; 413 skb->csum = sum;
@@ -433,7 +433,7 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
433 if (!lro_mgr->get_frag_header || 433 if (!lro_mgr->get_frag_header ||
434 lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph, 434 lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
435 (void *)&tcph, &flags, priv)) { 435 (void *)&tcph, &flags, priv)) {
436 mac_hdr = page_address(frags->page) + frags->page_offset; 436 mac_hdr = skb_frag_address(frags);
437 goto out1; 437 goto out1;
438 } 438 }
439 439
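
This file, ip_fragment.c and ip_output.c below are mechanical conversions to the skb_frag accessor API introduced in this series: fragment size and page are no longer touched as raw struct fields, so the skb_frag_t layout can change without another tree-wide sweep. The pattern, in a kernel-style sketch:

#include <linux/skbuff.h>

static void frag_accessors_demo(struct sk_buff *skb, int i, int hlen)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	unsigned int len = skb_frag_size(frag);	/* was: frag->size */
	void *va = skb_frag_address(frag);	/* was: page_address(frag->page) + frag->page_offset */

	skb_frag_size_sub(frag, hlen);		/* was: frag->size -= hlen */
	(void)len;
	(void)va;
}
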
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 3c8dfa16614d..44d65d546e30 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -183,6 +183,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 	tw->tw_daddr	    = inet->inet_daddr;
 	tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
 	tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+	tw->tw_tos	    = inet->tos;
 	tw->tw_num	    = inet->inet_num;
 	tw->tw_state	    = TCP_TIME_WAIT;
 	tw->tw_substate	    = state;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 0e0ab98abc6f..fdaabf2f2b68 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,8 +599,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	head->next = clone;
 	skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 	skb_frag_list_init(head);
-	for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-		plen += skb_shinfo(head)->frags[i].size;
+	for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+		plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 	clone->len = clone->data_len = head->data_len - plen;
 	head->data_len -= clone->len;
 	head->len -= clone->len;
@@ -682,6 +682,42 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 }
 EXPORT_SYMBOL(ip_defrag);
 
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+	const struct iphdr *iph;
+	u32 len;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return skb;
+
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return skb;
+
+	iph = ip_hdr(skb);
+	if (iph->ihl < 5 || iph->version != 4)
+		return skb;
+	if (!pskb_may_pull(skb, iph->ihl*4))
+		return skb;
+	iph = ip_hdr(skb);
+	len = ntohs(iph->tot_len);
+	if (skb->len < len || len < (iph->ihl * 4))
+		return skb;
+
+	if (ip_is_fragment(ip_hdr(skb))) {
+		skb = skb_share_check(skb, GFP_ATOMIC);
+		if (skb) {
+			if (pskb_trim_rcsum(skb, len))
+				return skb;
+			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+			if (ip_defrag(skb, user))
+				return NULL;
+			skb->rxhash = 0;
+		}
+	}
+	return skb;
+}
+EXPORT_SYMBOL(ip_check_defrag);
+
 #ifdef CONFIG_SYSCTL
 static int zero;
 
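
The new ip_check_defrag() is a self-contained defrag helper for callers outside the normal IP receive path: non-IP and non-fragment traffic passes through untouched, a fragment is stolen into the reassembly queue (NULL return), and the rebuilt datagram comes back once reassembly completes. A caller sketch with hypothetical names (IP_DEFRAG_AF_PACKET is the defrag-user id the packet-socket fanout code uses, assuming this series includes it):

static int my_deliver(struct sk_buff *skb);	/* hypothetical consumer */

static int my_rcv(struct sk_buff *skb)
{
	skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
	if (!skb)
		return 0;	/* fragment queued; reassembly not complete yet */

	/* skb is either untouched or a fully reassembled IPv4 datagram */
	return my_deliver(skb);
}
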
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7bb94c48345..d55110e93120 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -835,8 +835,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (max_headroom > dev->needed_headroom)
-			dev->needed_headroom = max_headroom;
 		if (!new_skb) {
 			ip_rt_put(rt);
 			dev->stats.tx_dropped++;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8c6563361ab5..0bc95f3977d2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -989,13 +989,13 @@ alloc_new_skb:
 		if (page && (left = PAGE_SIZE - off) > 0) {
 			if (copy >= left)
 				copy = left;
-			if (page != frag->page) {
+			if (page != skb_frag_page(frag)) {
 				if (i == MAX_SKB_FRAGS) {
 					err = -EMSGSIZE;
 					goto error;
 				}
-				get_page(page);
 				skb_fill_page_desc(skb, i, page, off, 0);
+				skb_frag_ref(skb, i);
 				frag = &skb_shinfo(skb)->frags[i];
 			}
 		} else if (i < MAX_SKB_FRAGS) {
@@ -1015,12 +1015,13 @@ alloc_new_skb:
 				err = -EMSGSIZE;
 				goto error;
 			}
-			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+			if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
+				    offset, copy, skb->len, skb) < 0) {
 				err = -EFAULT;
 				goto error;
 			}
 			cork->off += copy;
-			frag->size += copy;
+			skb_frag_size_add(frag, copy);
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
@@ -1229,7 +1230,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 	if (len > size)
 		len = size;
 	if (skb_can_coalesce(skb, i, page, offset)) {
-		skb_shinfo(skb)->frags[i-1].size += len;
+		skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
 	} else if (i < MAX_SKB_FRAGS) {
 		get_page(page);
 		skb_fill_page_desc(skb, i, page, offset, len);
@@ -1465,7 +1466,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
  * structure to pass arguments.
  */
 void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
-		   struct ip_reply_arg *arg, unsigned int len)
+		   const struct ip_reply_arg *arg, unsigned int len)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_options_data replyopts;
@@ -1488,7 +1489,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
 	}

 	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
-			   RT_TOS(ip_hdr(skb)->tos),
+			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 			   ip_reply_arg_flowi_flags(arg),
 			   daddr, rt->rt_spec_dst,
@@ -1505,7 +1506,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
 	   with locally disabled BH and that sk cannot be already spinlocked.
 	 */
 	bh_lock_sock(sk);
-	inet->tos = ip_hdr(skb)->tos;
+	inet->tos = arg->tos;
 	sk->sk_priority = skb->priority;
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
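The ip_output.c hunks above belong to a tree-wide move away from poking skb_frag_t fields directly and toward accessor helpers. A minimal sketch of the before/after idiom, using only the accessors visible in this diff (frag_data_old() and frag_grow_new() are illustrative names):

/* Old style: direct field access ties every caller to the frag layout. */
static void *frag_data_old(const skb_frag_t *frag)
{
	return page_address(frag->page) + frag->page_offset;
}

/* New style: skb_frag_page()/skb_frag_address()/skb_frag_size() hide the
 * layout, and skb_frag_size_add() centralizes the size accounting. */
static void frag_grow_new(skb_frag_t *frag, int copy)
{
	skb_frag_size_add(frag, copy);
}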
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8905e92f896a..09ff51bf16a4 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -33,6 +33,7 @@
 #include <linux/netfilter.h>
 #include <linux/route.h>
 #include <linux/mroute.h>
+#include <net/inet_ecn.h>
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
@@ -578,8 +579,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		break;
 	case IP_TOS:	/* This sets both TOS and Precedence */
 		if (sk->sk_type == SOCK_STREAM) {
-			val &= ~3;
-			val |= inet->tos & 3;
+			val &= ~INET_ECN_MASK;
+			val |= inet->tos & INET_ECN_MASK;
 		}
 		if (inet->tos != val) {
 			inet->tos = val;
@@ -961,7 +962,7 @@ mc_msf_out:
 		break;

 	case IP_TRANSPARENT:
-		if (!capable(CAP_NET_ADMIN)) {
+		if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) {
 			err = -EPERM;
 			break;
 		}
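With the IP_TOS hunk above, a SOCK_STREAM socket can only change the DSCP bits of the TOS byte; the two low ECN bits stay under the kernel's control. A hedged userspace sketch of what a well-behaved DSCP setter now effectively does (set_dscp() is an illustrative helper, not part of this diff):

#include <netinet/in.h>
#include <sys/socket.h>

/* Set only the DSCP part of the TOS byte. On TCP sockets the kernel
 * masks the request with ~INET_ECN_MASK (~3), so shifting the DSCP
 * into the top six bits is all the application needs to do. */
static int set_dscp(int fd, unsigned char dscp)
{
	int tos = dscp << 2;	/* ECN bits left to the kernel */

	return setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}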
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b7ca6e..065effd8349a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
 		if (t == iter) {
-			rcu_assign_pointer(*tp, t->next);
+			RCU_INIT_POINTER(*tp, t->next);
 			break;
 		}
 	}
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
 {
 	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);

-	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
-	rcu_assign_pointer(*tp, t);
+	RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+	RCU_INIT_POINTER(*tp, t);
 }

 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);

 	if (dev == ipn->fb_tunnel_dev)
-		rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+		RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
 	else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
@@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
 		return -ENOMEM;

 	dev_hold(dev);
-	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+	RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
 	return 0;
 }

diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 58e879157976..6164e982e0ef 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1176,7 +1176,7 @@ static void mrtsock_destruct(struct sock *sk)
 	ipmr_for_each_table(mrt, net) {
 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
-			rcu_assign_pointer(mrt->mroute_sk, NULL);
+			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
 			mroute_clean_tables(mrt);
 		}
 	}
@@ -1203,7 +1203,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 		return -ENOENT;

 	if (optname != MRT_INIT) {
-		if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
+		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
 		    !capable(CAP_NET_ADMIN))
 			return -EACCES;
 	}
@@ -1224,13 +1224,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi

 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
 		if (ret == 0) {
-			rcu_assign_pointer(mrt->mroute_sk, sk);
+			RCU_INIT_POINTER(mrt->mroute_sk, sk);
 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
 		}
 		rtnl_unlock();
 		return ret;
 	case MRT_DONE:
-		if (sk != rcu_dereference_raw(mrt->mroute_sk))
+		if (sk != rcu_access_pointer(mrt->mroute_sk))
 			return -EACCES;
 		return ip_ra_control(sk, 0, NULL);
 	case MRT_ADD_VIF:
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 703f366fd235..7b22382ff0e9 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb,

 static void __exit nf_nat_amanda_fini(void)
 {
-	rcu_assign_pointer(nf_nat_amanda_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
 	synchronize_rcu();
 }

 static int __init nf_nat_amanda_init(void)
 {
 	BUG_ON(nf_nat_amanda_hook != NULL);
-	rcu_assign_pointer(nf_nat_amanda_hook, help);
+	RCU_INIT_POINTER(nf_nat_amanda_hook, help);
 	return 0;
 }

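This conversion, and the long run of nf_nat_* conversions that follow, all make the same point: rcu_assign_pointer() carries a write barrier so concurrent readers see fully initialized data, and that barrier is wasted work when storing NULL or when no readers can exist yet. A minimal sketch of the distinction, built around a hypothetical hook pointer (demo_hook is not a real kernel symbol):

#include <linux/rcupdate.h>

struct helper { int id; };
static struct helper __rcu *demo_hook;	/* hypothetical hook pointer */

static void demo_register(struct helper *h)
{
	/* Publishing initialized data to possibly running readers:
	 * the barrier in rcu_assign_pointer() is required here. */
	rcu_assign_pointer(demo_hook, h);
}

static void demo_unregister(void)
{
	/* A NULL store publishes no data, so the barrier buys nothing;
	 * RCU_INIT_POINTER() is the cheaper, documented idiom. */
	RCU_INIT_POINTER(demo_hook, NULL);
	synchronize_rcu();	/* wait out readers still using the old value */
}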
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3346de5d94d0..447bc5cfdc6c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
 		ret = -EBUSY;
 		goto out;
 	}
-	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
+	RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
  out:
 	spin_unlock_bh(&nf_nat_lock);
 	return ret;
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
 void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
 {
 	spin_lock_bh(&nf_nat_lock);
-	rcu_assign_pointer(nf_nat_protos[proto->protonum],
+	RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
 			   &nf_nat_unknown_protocol);
 	spin_unlock_bh(&nf_nat_lock);
 	synchronize_rcu();
@@ -736,10 +736,10 @@ static int __init nf_nat_init(void)
 	/* Sew in builtin protocols. */
 	spin_lock_bh(&nf_nat_lock);
 	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
-	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
-	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
-	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
+		RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
+	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
+	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
+	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
 	spin_unlock_bh(&nf_nat_lock);

@@ -748,12 +748,12 @@ static int __init nf_nat_init(void)
 	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

 	BUG_ON(nf_nat_seq_adjust_hook != NULL);
-	rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
+	RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
 	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
-	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
-			   nfnetlink_parse_nat_setup);
+	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
+			 nfnetlink_parse_nat_setup);
 	BUG_ON(nf_ct_nat_offset != NULL);
-	rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
+	RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
 	return 0;

 cleanup_extend:
@@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void)
 	unregister_pernet_subsys(&nf_nat_net_ops);
 	nf_ct_l3proto_put(l3proto);
 	nf_ct_extend_unregister(&nat_extend);
-	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
-	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
-	rcu_assign_pointer(nf_ct_nat_offset, NULL);
+	RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
+	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
+	RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
 	synchronize_net();
 }

diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index dc73abb3fe27..e462a957d080 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -113,14 +113,14 @@ out:

 static void __exit nf_nat_ftp_fini(void)
 {
-	rcu_assign_pointer(nf_nat_ftp_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
 	synchronize_rcu();
 }

 static int __init nf_nat_ftp_init(void)
 {
 	BUG_ON(nf_nat_ftp_hook != NULL);
-	rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
+	RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
 	return 0;
 }

diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 790f3160e012..b9a1136addbd 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -581,30 +581,30 @@ static int __init init(void)
 	BUG_ON(nat_callforwarding_hook != NULL);
 	BUG_ON(nat_q931_hook != NULL);

-	rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
-	rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
-	rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
-	rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
-	rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
-	rcu_assign_pointer(nat_t120_hook, nat_t120);
-	rcu_assign_pointer(nat_h245_hook, nat_h245);
-	rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
-	rcu_assign_pointer(nat_q931_hook, nat_q931);
+	RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
+	RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
+	RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
+	RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
+	RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
+	RCU_INIT_POINTER(nat_t120_hook, nat_t120);
+	RCU_INIT_POINTER(nat_h245_hook, nat_h245);
+	RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
+	RCU_INIT_POINTER(nat_q931_hook, nat_q931);
 	return 0;
 }

 /****************************************************************************/
 static void __exit fini(void)
 {
-	rcu_assign_pointer(set_h245_addr_hook, NULL);
-	rcu_assign_pointer(set_h225_addr_hook, NULL);
-	rcu_assign_pointer(set_sig_addr_hook, NULL);
-	rcu_assign_pointer(set_ras_addr_hook, NULL);
-	rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
-	rcu_assign_pointer(nat_t120_hook, NULL);
-	rcu_assign_pointer(nat_h245_hook, NULL);
-	rcu_assign_pointer(nat_callforwarding_hook, NULL);
-	rcu_assign_pointer(nat_q931_hook, NULL);
+	RCU_INIT_POINTER(set_h245_addr_hook, NULL);
+	RCU_INIT_POINTER(set_h225_addr_hook, NULL);
+	RCU_INIT_POINTER(set_sig_addr_hook, NULL);
+	RCU_INIT_POINTER(set_ras_addr_hook, NULL);
+	RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
+	RCU_INIT_POINTER(nat_t120_hook, NULL);
+	RCU_INIT_POINTER(nat_h245_hook, NULL);
+	RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
+	RCU_INIT_POINTER(nat_q931_hook, NULL);
 	synchronize_rcu();
 }

diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 535e1a802356..979ae165f4ef 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb,

 static void __exit nf_nat_irc_fini(void)
 {
-	rcu_assign_pointer(nf_nat_irc_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
 	synchronize_rcu();
 }

 static int __init nf_nat_irc_init(void)
 {
 	BUG_ON(nf_nat_irc_hook != NULL);
-	rcu_assign_pointer(nf_nat_irc_hook, help);
+	RCU_INIT_POINTER(nf_nat_irc_hook, help);
 	return 0;
 }

diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 4c060038d29f..3e8284ba46b8 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void)
 	nf_nat_need_gre();

 	BUG_ON(nf_nat_pptp_hook_outbound != NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);

 	BUG_ON(nf_nat_pptp_hook_inbound != NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);

 	BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);

 	BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
 	return 0;
 }

 static void __exit nf_nat_helper_pptp_fini(void)
 {
-	rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL);
-	rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
+	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
 	synchronize_rcu();
 }

diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf7816fdb..78844d9208f1 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -528,13 +528,13 @@ err1:

 static void __exit nf_nat_sip_fini(void)
 {
-	rcu_assign_pointer(nf_nat_sip_hook, NULL);
-	rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
-	rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
-	rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
-	rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
-	rcu_assign_pointer(nf_nat_sdp_session_hook, NULL);
-	rcu_assign_pointer(nf_nat_sdp_media_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
 	synchronize_rcu();
 }

@@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void)
 	BUG_ON(nf_nat_sdp_port_hook != NULL);
 	BUG_ON(nf_nat_sdp_session_hook != NULL);
 	BUG_ON(nf_nat_sdp_media_hook != NULL);
-	rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
-	rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
-	rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
-	rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
-	rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
-	rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session);
-	rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media);
+	RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
+	RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
+	RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
+	RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
+	RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
+	RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
+	RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
 	return 0;
 }

diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 076b7c8c4aa4..d1cb412c18e0 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1310,7 +1310,7 @@ static int __init nf_nat_snmp_basic_init(void)
 	int ret = 0;

 	BUG_ON(nf_nat_snmp_hook != NULL);
-	rcu_assign_pointer(nf_nat_snmp_hook, help);
+	RCU_INIT_POINTER(nf_nat_snmp_hook, help);

 	ret = nf_conntrack_helper_register(&snmp_trap_helper);
 	if (ret < 0) {
@@ -1322,7 +1322,7 @@ static int __init nf_nat_snmp_basic_init(void)

 static void __exit nf_nat_snmp_basic_fini(void)
 {
-	rcu_assign_pointer(nf_nat_snmp_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
 	nf_conntrack_helper_unregister(&snmp_trap_helper);
 }

diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index a6e606e84820..92900482edea 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void)

 #ifdef CONFIG_XFRM
 	BUG_ON(ip_nat_decode_session != NULL);
-	rcu_assign_pointer(ip_nat_decode_session, nat_decode_session);
+	RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
 #endif
 	ret = nf_nat_rule_init();
 	if (ret < 0) {
@@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void)
 	nf_nat_rule_cleanup();
  cleanup_decode_session:
 #ifdef CONFIG_XFRM
-	rcu_assign_pointer(ip_nat_decode_session, NULL);
+	RCU_INIT_POINTER(ip_nat_decode_session, NULL);
 	synchronize_net();
 #endif
 	return ret;
@@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void)
 	nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
 	nf_nat_rule_cleanup();
 #ifdef CONFIG_XFRM
-	rcu_assign_pointer(ip_nat_decode_session, NULL);
+	RCU_INIT_POINTER(ip_nat_decode_session, NULL);
 	synchronize_net();
 #endif
 	/* Conntrack caches are unregistered in nf_conntrack_cleanup */
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 7274a43c7a12..a2901bf829c0 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb,

 static void __exit nf_nat_tftp_fini(void)
 {
-	rcu_assign_pointer(nf_nat_tftp_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
 	synchronize_rcu();
 }

 static int __init nf_nat_tftp_init(void)
 {
 	BUG_ON(nf_nat_tftp_hook != NULL);
-	rcu_assign_pointer(nf_nat_tftp_hook, help);
+	RCU_INIT_POINTER(nf_nat_tftp_hook, help);
 	return 0;
 }

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 075212e41b83..155138d8ec8b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -120,7 +120,6 @@

 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
-static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
 static int ip_rt_redirect_number __read_mostly	= 9;
 static int ip_rt_redirect_load __read_mostly	= HZ / 50;
@@ -324,7 +323,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 	struct rtable *r = NULL;

 	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
+		if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
 			continue;
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -350,7 +349,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 		do {
 			if (--st->bucket < 0)
 				return NULL;
-		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
+		} while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 	}
@@ -761,7 +760,7 @@ static void rt_do_flush(struct net *net, int process_context)

 		if (process_context && need_resched())
 			cond_resched();
-		rth = rcu_dereference_raw(rt_hash_table[i].chain);
+		rth = rcu_access_pointer(rt_hash_table[i].chain);
 		if (!rth)
 			continue;

@@ -1309,7 +1308,12 @@ static void rt_del(unsigned hash, struct rtable *rt)
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    __be32 saddr, struct net_device *dev)
 {
+	int s, i;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
+	struct rtable *rt;
+	__be32 skeys[2] = { saddr, 0 };
+	int ikeys[2] = { dev->ifindex, 0 };
+	struct flowi4 fl4;
 	struct inet_peer *peer;
 	struct net *net;

@@ -1332,13 +1336,34 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		goto reject_redirect;
 	}

-	peer = inet_getpeer_v4(daddr, 1);
-	if (peer) {
-		peer->redirect_learned.a4 = new_gw;
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.daddr = daddr;
+	for (s = 0; s < 2; s++) {
+		for (i = 0; i < 2; i++) {
+			fl4.flowi4_oif = ikeys[i];
+			fl4.saddr = skeys[s];
+			rt = __ip_route_output_key(net, &fl4);
+			if (IS_ERR(rt))
+				continue;

-		inet_putpeer(peer);
+			if (rt->dst.error || rt->dst.dev != dev ||
+			    rt->rt_gateway != old_gw) {
+				ip_rt_put(rt);
+				continue;
+			}
+
+			if (!rt->peer)
+				rt_bind_peer(rt, rt->rt_dst, 1);
+
+			peer = rt->peer;
+			if (peer) {
+				peer->redirect_learned.a4 = new_gw;
+				atomic_inc(&__rt_peer_genid);
+			}

-		atomic_inc(&__rt_peer_genid);
+			ip_rt_put(rt);
+			return;
+		}
 	}
 	return;

@@ -1568,11 +1593,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
 			est_mtu = mtu;
 			peer->pmtu_learned = mtu;
 			peer->pmtu_expires = pmtu_expires;
+			atomic_inc(&__rt_peer_genid);
 		}

 		inet_putpeer(peer);
-
-		atomic_inc(&__rt_peer_genid);
 	}
 	return est_mtu ? : new_mtu;
 }
@@ -3121,13 +3145,6 @@ static ctl_table ipv4_route_table[] = {
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
-		.procname	= "gc_interval",
-		.data		= &ip_rt_gc_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	{
 		.procname	= "redirect_load",
 		.data		= &ip_rt_redirect_load,
 		.maxlen		= sizeof(int),
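The ip_rt_redirect() rework above stops learning a new gateway on the word of the ICMP message alone: it first asks the routing table whether daddr really would have been routed via old_gw on this device, which defeats spoofed redirects. A hedged sketch of that plausibility test in isolation (redirect_is_plausible() is an illustrative helper; the field and function names mirror the patch):

static bool redirect_is_plausible(struct net *net, struct net_device *dev,
				  __be32 daddr, __be32 old_gw)
{
	struct flowi4 fl4 = {
		.daddr	    = daddr,
		.flowi4_oif = dev->ifindex,
	};
	struct rtable *rt = __ip_route_output_key(net, &fl4);
	bool ok;

	if (IS_ERR(rt))
		return false;
	/* Only a redirect that matches our own idea of the route is credible. */
	ok = !rt->dst.error && rt->dst.dev == dev && rt->rt_gateway == old_gw;
	ip_rt_put(rt);
	return ok;
}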
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3bc5c8f7c71b..d7b89b12f6d8 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -265,7 +265,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 			     struct ip_options *opt)
 {
 	struct tcp_options_received tcp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct inet_request_sock *ireq;
 	struct tcp_request_sock *treq;
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb729..34f5db1e1c8b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -374,7 +374,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	unsigned int mask;
 	struct sock *sk = sock->sk;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);

 	sock_poll_wait(file, sk_sleep(sk), wait);
 	if (sk->sk_state == TCP_LISTEN)
@@ -524,11 +524,11 @@ EXPORT_SYMBOL(tcp_ioctl);

 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 	tp->pushed_seq = tp->write_seq;
 }

-static inline int forced_push(struct tcp_sock *tp)
+static inline int forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)

 	skb->csum    = 0;
 	tcb->seq     = tcb->end_seq = tp->write_seq;
-	tcb->flags   = TCPHDR_ACK;
+	tcb->tcp_flags = TCPHDR_ACK;
 	tcb->sacked  = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
@@ -813,7 +813,7 @@ new_segment:
 			goto wait_for_memory;

 		if (can_coalesce) {
-			skb_shinfo(skb)->frags[i - 1].size += copy;
+			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 		} else {
 			get_page(page);
 			skb_fill_page_desc(skb, i, page, offset, copy);
@@ -830,7 +830,7 @@ new_segment:
 		skb_shinfo(skb)->gso_segs = 0;

 		if (!copied)
-			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

 		copied += copy;
 		poffset += copy;
@@ -891,9 +891,9 @@ EXPORT_SYMBOL(tcp_sendpage);
 #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 #define TCP_OFF(sk)	(sk->sk_sndmsg_off)

-static inline int select_size(struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, int sg)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;

 	if (sg) {
@@ -1058,8 +1058,7 @@ new_segment:

 		/* Update the skb. */
 		if (merge) {
-			skb_shinfo(skb)->frags[i - 1].size +=
-							copy;
+			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 		} else {
 			skb_fill_page_desc(skb, i, page, off, copy);
 			if (TCP_PAGE(sk)) {
@@ -1074,7 +1073,7 @@ new_segment:
 		}

 		if (!copied)
-			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
@@ -1194,13 +1193,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int time_to_ack = 0;

-#if TCP_DEBUG
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
 	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-#endif

 	if (inet_csk_ack_scheduled(sk)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2409,7 +2406,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		   unsigned int optlen)
 {
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);

 	if (level != SOL_TCP)
 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
@@ -2431,9 +2428,9 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
 #endif

 /* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
+void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;

@@ -2455,8 +2452,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
 	}

-	if (tp->ecn_flags&TCP_ECN_OK)
+	if (tp->ecn_flags & TCP_ECN_OK)
 		info->tcpi_options |= TCPI_OPT_ECN;
+	if (tp->ecn_flags & TCP_ECN_SEEN)
+		info->tcpi_options |= TCPI_OPT_ECN_SEEN;

 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2857,26 +2856,25 @@ EXPORT_SYMBOL(tcp_gro_complete);

 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
 {
 	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
-		if (p) {
-			if (p->md5_desc.tfm)
-				crypto_free_hash(p->md5_desc.tfm);
-			kfree(p);
-		}
+		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+
+		if (p->md5_desc.tfm)
+			crypto_free_hash(p->md5_desc.tfm);
 	}
 	free_percpu(pool);
 }

 void tcp_free_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool * __percpu *pool = NULL;
+	struct tcp_md5sig_pool __percpu *pool = NULL;

 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2887,24 @@ void tcp_free_md5sig_pool(void)
 }
 EXPORT_SYMBOL(tcp_free_md5sig_pool);

-static struct tcp_md5sig_pool * __percpu *
+static struct tcp_md5sig_pool __percpu *
 __tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
-	struct tcp_md5sig_pool * __percpu *pool;
+	struct tcp_md5sig_pool __percpu *pool;

-	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	pool = alloc_percpu(struct tcp_md5sig_pool);
 	if (!pool)
 		return NULL;

 	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p;
 		struct crypto_hash *hash;

-		p = kzalloc(sizeof(*p), sk->sk_allocation);
-		if (!p)
-			goto out_free;
-		*per_cpu_ptr(pool, cpu) = p;
-
 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
 		if (!hash || IS_ERR(hash))
 			goto out_free;

-		p->md5_desc.tfm = hash;
+		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
 	}
 	return pool;
 out_free:
@@ -2920,9 +2912,9 @@ out_free:
 	return NULL;
 }

-struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
-	struct tcp_md5sig_pool * __percpu *pool;
+	struct tcp_md5sig_pool __percpu *pool;
 	int alloc = 0;

 retry:
@@ -2941,7 +2933,7 @@ retry:

 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool * __percpu *p;
+		struct tcp_md5sig_pool __percpu *p;

 		p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2966,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool * __percpu *p;
+	struct tcp_md5sig_pool __percpu *p;

 	local_bh_disable();

@@ -2985,7 +2977,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 	spin_unlock(&tcp_md5sig_pool_lock);

 	if (p)
-		return *this_cpu_ptr(p);
+		return this_cpu_ptr(p);

 	local_bh_enable();
 	return NULL;
@@ -3000,23 +2992,25 @@ void tcp_put_md5sig_pool(void)
 EXPORT_SYMBOL(tcp_put_md5sig_pool);

 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
-			struct tcphdr *th)
+			const struct tcphdr *th)
 {
 	struct scatterlist sg;
+	struct tcphdr hdr;
 	int err;

-	__sum16 old_checksum = th->check;
-	th->check = 0;
+	/* We are not allowed to change tcphdr, make a local copy */
+	memcpy(&hdr, th, sizeof(hdr));
+	hdr.check = 0;
+
 	/* options aren't included in the hash */
-	sg_init_one(&sg, th, sizeof(struct tcphdr));
-	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
-	th->check = old_checksum;
+	sg_init_one(&sg, &hdr, sizeof(hdr));
+	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
 	return err;
 }
 EXPORT_SYMBOL(tcp_md5_hash_header);

 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
-			  struct sk_buff *skb, unsigned header_len)
+			  const struct sk_buff *skb, unsigned int header_len)
 {
 	struct scatterlist sg;
 	const struct tcphdr *tp = tcp_hdr(skb);
@@ -3035,8 +3029,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,

 	for (i = 0; i < shi->nr_frags; ++i) {
 		const struct skb_frag_struct *f = &shi->frags[i];
-		sg_set_page(&sg, f->page, f->size, f->page_offset);
-		if (crypto_hash_update(desc, &sg, f->size))
+		struct page *page = skb_frag_page(f);
+		sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
 			return 1;
 	}

@@ -3048,7 +3043,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 }
 EXPORT_SYMBOL(tcp_md5_hash_skb_data);

-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
 	struct scatterlist sg;

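The MD5-pool hunks above flatten a per-CPU array of separately kzalloc'ed pointers into a single flat per-CPU struct allocation, which removes a whole failure path and one level of indirection. A minimal sketch of the two shapes, with a stand-in struct (demo_pool and the getters are illustrative names):

#include <linux/percpu.h>

struct demo_pool { void *tfm; };	/* stand-in for tcp_md5sig_pool */

/* Old shape: each per-CPU slot holds a pointer; N+1 allocations and a
 * double dereference on every access. */
static struct demo_pool *old_get(struct demo_pool * __percpu *pp, int cpu)
{
	return *per_cpu_ptr(pp, cpu);
}

/* New shape: the struct itself is per-CPU; one allocation, a single
 * dereference, and no per-CPU kfree() needed on teardown. */
static struct demo_pool *new_get(struct demo_pool __percpu *pp, int cpu)
{
	return per_cpu_ptr(pp, cpu);
}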
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d73aab3fbfc0..52b5c2d0ecd0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -206,7 +206,7 @@ static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
 	tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }

-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
 	if (tcp_hdr(skb)->cwr)
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
@@ -217,32 +217,41 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
 	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }

-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-	if (tp->ecn_flags & TCP_ECN_OK) {
-		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
-			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+	if (!(tp->ecn_flags & TCP_ECN_OK))
+		return;
+
+	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
+	case INET_ECN_NOT_ECT:
 		/* Funny extension: if ECT is not set on a segment,
-		 * it is surely retransmit. It is not in ECN RFC,
-		 * but Linux follows this rule. */
-		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
+		 * and we have already seen ECT on a previous segment,
+		 * it is probably a retransmit.
+		 */
+		if (tp->ecn_flags & TCP_ECN_SEEN)
 			tcp_enter_quickack_mode((struct sock *)tp);
+		break;
+	case INET_ECN_CE:
+		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+		/* fall through */
+	default:
+		tp->ecn_flags |= TCP_ECN_SEEN;
 	}
 }

-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }

-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }

-static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
+static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 		return 1;
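TCP_SKB_CB(skb)->ip_dsfield, which this series introduces, snapshots the IPv4 TOS / IPv6 traffic-class byte at receive time, so the ECN state machine above reads real codepoints instead of overloading control-block flag bits. A hedged sketch of the decoding, using only INET_ECN_* constants from <net/inet_ecn.h> (the helper names are illustrative):

#include <net/inet_ecn.h>

/* The two low bits of the DS field carry the ECN codepoint. */
static inline bool segment_is_ce(__u8 ip_dsfield)
{
	return (ip_dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

static inline bool segment_is_ect(__u8 ip_dsfield)
{
	return (ip_dsfield & INET_ECN_MASK) != INET_ECN_NOT_ECT;
}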
@@ -256,14 +265,11 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)

 static void tcp_fixup_sndbuf(struct sock *sk)
 {
-	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
-		     sizeof(struct sk_buff);
+	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

-	if (sk->sk_sndbuf < 3 * sndmem) {
-		sk->sk_sndbuf = 3 * sndmem;
-		if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
-			sk->sk_sndbuf = sysctl_tcp_wmem[2];
-	}
+	sndmem *= TCP_INIT_CWND;
+	if (sk->sk_sndbuf < sndmem)
+		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 }

 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -309,7 +315,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 	return 0;
 }

-static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);

@@ -339,17 +345,24 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)

 static void tcp_fixup_rcvbuf(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+	u32 mss = tcp_sk(sk)->advmss;
+	u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+	int rcvmem;

-	/* Try to select rcvbuf so that 4 mss-sized segments
-	 * will fit to window and corresponding skbs will fit to our rcvbuf.
-	 * (was 3; 4 is minimum to allow fast retransmit to work.)
+	/* Limit to 10 segments if mss <= 1460,
+	 * or 14600/mss segments, with a minimum of two segments.
 	 */
-	while (tcp_win_from_space(rcvmem) < tp->advmss)
+	if (mss > 1460)
+		icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+
+	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
+	while (tcp_win_from_space(rcvmem) < mss)
 		rcvmem += 128;
-	if (sk->sk_rcvbuf < 4 * rcvmem)
-		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
+
+	rcvmem *= icwnd;
+
+	if (sk->sk_rcvbuf < rcvmem)
+		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
 }

 /* 4. Try to fixup all. It is made immediately after connection enters
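The new tcp_fixup_rcvbuf() above sizes the receive buffer for an expected initial window instead of a fixed four segments. A hedged standalone model of the segment-count arithmetic (10 mirrors TCP_DEFAULT_INIT_RCVWND in this series; 1460 is the reference MSS):

#include <stdio.h>

static unsigned int icwnd_for_mss(unsigned int mss)
{
	unsigned int icwnd = 10;		/* TCP_DEFAULT_INIT_RCVWND */

	if (mss > 1460) {
		icwnd = (1460 * 10) / mss;	/* scale the 14600-byte budget */
		if (icwnd < 2)
			icwnd = 2;		/* never below two segments */
	}
	return icwnd;
}

int main(void)
{
	const unsigned int mss[] = { 536, 1460, 8960 };

	for (int i = 0; i < 3; i++)
		printf("mss %u -> %u segments\n", mss[i], icwnd_for_mss(mss[i]));
	return 0;	/* prints 10, 10 and 2 */
}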
@@ -416,7 +429,7 @@ static void tcp_clamp_window(struct sock *sk)
  */
 void tcp_initialize_rcv_mss(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

 	hint = min(hint, tp->rcv_wnd / 2);
@@ -531,8 +544,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		space /= tp->advmss;
 		if (!space)
 			space = 1;
-		rcvmem = (tp->advmss + MAX_TCP_HEADER +
-			  16 + sizeof(struct sk_buff));
+		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
 		while (tcp_win_from_space(rcvmem) < tp->advmss)
 			rcvmem += 128;
 		space *= rcvmem;
@@ -812,7 +824,7 @@ void tcp_update_metrics(struct sock *sk)
 	}
 }

-__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

@@ -1204,7 +1216,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 		tp->lost_retrans_low = new_low_seq;
 }

-static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			   struct tcp_sack_block_wire *sp, int num_sacks,
 			   u32 prior_snd_una)
 {
@@ -1298,7 +1310,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }

-static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			  struct tcp_sacktag_state *state,
 			  int dup_sack, int pcount)
 {
@@ -1438,7 +1450,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
 	}

-	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);

@@ -1453,13 +1465,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 /* I wish gso_size would have a bit more sane initialization than
  * something-or-zero which complicates things
  */
-static int tcp_skb_seglen(struct sk_buff *skb)
+static int tcp_skb_seglen(const struct sk_buff *skb)
 {
 	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
 }

 /* Shifting pages past head area doesn't work */
-static int skb_can_shift(struct sk_buff *skb)
+static int skb_can_shift(const struct sk_buff *skb)
 {
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
@@ -1708,19 +1720,19 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 	return skb;
 }

-static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
 {
 	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
 }

 static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			u32 prior_snd_una)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned char *ptr = (skb_transport_header(ack_skb) +
-			      TCP_SKB_CB(ack_skb)->sacked);
+	const unsigned char *ptr = (skb_transport_header(ack_skb) +
+				    TCP_SKB_CB(ack_skb)->sacked);
 	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
 	struct tcp_sack_block sp[TCP_NUM_SACKS];
 	struct tcp_sack_block *cache;
@@ -2284,7 +2296,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 	return 0;
 }

-static inline int tcp_fackets_out(struct tcp_sock *tp)
+static inline int tcp_fackets_out(const struct tcp_sock *tp)
 {
 	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
 }
@@ -2304,19 +2316,20 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
  * they differ. Since neither occurs due to loss, TCP should really
  * ignore them.
  */
-static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 {
 	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }

-static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_skb_timedout(const struct sock *sk,
+				   const struct sk_buff *skb)
 {
 	return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
 }

-static inline int tcp_head_timedout(struct sock *sk)
+static inline int tcp_head_timedout(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);

 	return tp->packets_out &&
 	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
@@ -2627,7 +2640,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
2627/* Nothing was retransmitted or returned timestamp is less 2640/* Nothing was retransmitted or returned timestamp is less
2628 * than timestamp of the first retransmission. 2641 * than timestamp of the first retransmission.
2629 */ 2642 */
2630static inline int tcp_packet_delayed(struct tcp_sock *tp) 2643static inline int tcp_packet_delayed(const struct tcp_sock *tp)
2631{ 2644{
2632 return !tp->retrans_stamp || 2645 return !tp->retrans_stamp ||
2633 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2646 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2688,7 +2701,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
2688 tp->snd_cwnd_stamp = tcp_time_stamp; 2701 tp->snd_cwnd_stamp = tcp_time_stamp;
2689} 2702}
2690 2703
2691static inline int tcp_may_undo(struct tcp_sock *tp) 2704static inline int tcp_may_undo(const struct tcp_sock *tp)
2692{ 2705{
2693 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2706 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2694} 2707}
@@ -2752,9 +2765,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
2752 * that successive retransmissions of a segment must not advance 2765 * that successive retransmissions of a segment must not advance
2753 * retrans_stamp under any conditions. 2766 * retrans_stamp under any conditions.
2754 */ 2767 */
2755static int tcp_any_retrans_done(struct sock *sk) 2768static int tcp_any_retrans_done(const struct sock *sk)
2756{ 2769{
2757 struct tcp_sock *tp = tcp_sk(sk); 2770 const struct tcp_sock *tp = tcp_sk(sk);
2758 struct sk_buff *skb; 2771 struct sk_buff *skb;
2759 2772
2760 if (tp->retrans_out) 2773 if (tp->retrans_out)
@@ -2828,9 +2841,13 @@ static int tcp_try_undo_loss(struct sock *sk)
2828static inline void tcp_complete_cwr(struct sock *sk) 2841static inline void tcp_complete_cwr(struct sock *sk)
2829{ 2842{
2830 struct tcp_sock *tp = tcp_sk(sk); 2843 struct tcp_sock *tp = tcp_sk(sk);
2831 /* Do not moderate cwnd if it's already undone in cwr or recovery */ 2844
2832 if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) { 2845 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2833 tp->snd_cwnd = tp->snd_ssthresh; 2846 if (tp->undo_marker) {
2847 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
2848 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2849 else /* PRR */
2850 tp->snd_cwnd = tp->snd_ssthresh;
2834 tp->snd_cwnd_stamp = tcp_time_stamp; 2851 tp->snd_cwnd_stamp = tcp_time_stamp;
2835 } 2852 }
2836 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2853 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
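
The CWR-versus-Recovery split above matters because PRR may finish recovery with cwnd below ssthresh. A minimal sketch of the two exit paths, with hypothetical values (not kernel code):

#include <stdio.h>

int main(void)
{
	int cwnd = 8, ssthresh = 10;

	/* CWR exit: never raise cwnd, only cap it */
	int cwr_exit = cwnd < ssthresh ? cwnd : ssthresh;	/* stays 8 */
	/* PRR/Recovery exit: converge exactly to ssthresh */
	int prr_exit = ssthresh;				/* snaps to 10 */

	printf("cwr=%d prr=%d\n", cwr_exit, prr_exit);
	return 0;
}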
@@ -2948,6 +2965,38 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
+/* This function implements the PRR algorithm, specifically the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the packets in flight is larger than ssthresh, PRR spreads the
+ *	cwnd reductions across a full RTT.
+ *   2) If packets in flight is lower than ssthresh (such as due to excess
+ *	losses and/or application stalls), do not perform any further cwnd
+ *	reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+					int fast_rexmit, int flag)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int sndcnt = 0;
+	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+			       tp->prior_cwnd - 1;
+		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+	} else {
+		sndcnt = min_t(int, delta,
+			       max_t(int, tp->prr_delivered - tp->prr_out,
+				     newly_acked_sacked) + 1);
+	}
+
+	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
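
The reduction-phase arithmetic in tcp_update_cwnd_in_recovery() is easy to sanity-check outside the kernel; a minimal standalone sketch with hypothetical state (cwnd halved from 20 to an ssthresh of 10, six packets delivered and two sent so far this recovery):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ssthresh = 10, prior_cwnd = 20;	/* hypothetical values */
	uint64_t prr_delivered = 6, prr_out = 2;

	/* ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out, as above */
	long long sndcnt = (long long)((ssthresh * prr_delivered +
					prior_cwnd - 1) / prior_cwnd) - prr_out;

	printf("sndcnt = %lld\n", sndcnt);	/* prints 1 */
	return 0;
}

This is the pacing PRR aims for: sends are metered so prr_out tracks ssthresh/prior_cwnd of the delivered packets across the recovery RTT.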
@@ -2959,7 +3008,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit);
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+				  int newly_acked_sacked, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -3109,13 +3159,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
 		tp->bytes_acked = 0;
 		tp->snd_cwnd_cnt = 0;
+		tp->prior_cwnd = tp->snd_cwnd;
+		tp->prr_delivered = 0;
+		tp->prr_out = 0;
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 		fast_rexmit = 1;
 	}
 
 	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_down(sk, flag);
+	tp->prr_delivered += newly_acked_sacked;
+	tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3192,7 +3246,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
  */
 static void tcp_rearm_rto(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -3296,7 +3350,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 				 * connection startup slow start one packet too
 				 * quickly.  This is severely frowned upon behavior.
 				 */
-				if (!(scb->flags & TCPHDR_SYN)) {
+				if (!(scb->tcp_flags & TCPHDR_SYN)) {
 					flag |= FLAG_DATA_ACKED;
 				} else {
 					flag |= FLAG_SYN_ACKED;
@@ -3444,7 +3498,7 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
 				 u32 ack_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -3620,7 +3674,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
-static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
+static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -3630,6 +3684,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	u32 prior_in_flight;
 	u32 prior_fackets;
 	int prior_packets;
+	int prior_sacked = tp->sacked_out;
+	int newly_acked_sacked = 0;
 	int frto_cwnd = 0;
 
 	/* If the ack is older than previous acks
@@ -3701,6 +3757,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+	newly_acked_sacked = (prior_packets - prior_sacked) -
+			     (tp->packets_out - tp->sacked_out);
+
 	if (tp->frto_counter)
 		frto_cwnd = tcp_process_frto(sk, flag);
 	/* Guarantee sacktag reordering detection against wrap-arounds */
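
newly_acked_sacked is simply the per-ACK drop in (packets_out - sacked_out): how many outstanding, not-yet-SACKed segments this ACK retired, which is what feeds prr_delivered. A tiny sketch of the bookkeeping (illustrative names, not kernel code):

/* Segments newly delivered by one ACK, as PRR counts them. */
static int newly_delivered(int prior_packets, int prior_sacked,
			   int packets_out, int sacked_out)
{
	return (prior_packets - prior_sacked) - (packets_out - sacked_out);
}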
@@ -3713,7 +3772,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight);
 		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-				      flag);
+				      newly_acked_sacked, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3752,14 +3811,14 @@ old_ack:
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
  */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       u8 **hvpp, int estab)
+void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
+		       const u8 **hvpp, int estab)
 {
-	unsigned char *ptr;
-	struct tcphdr *th = tcp_hdr(skb);
+	const unsigned char *ptr;
+	const struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff * 4) - sizeof(struct tcphdr);
 
-	ptr = (unsigned char *)(th + 1);
+	ptr = (const unsigned char *)(th + 1);
 	opt_rx->saw_tstamp = 0;
 
 	while (length > 0) {
@@ -3870,9 +3929,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 }
 EXPORT_SYMBOL(tcp_parse_options);
 
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
 {
-	__be32 *ptr = (__be32 *)(th + 1);
+	const __be32 *ptr = (const __be32 *)(th + 1);
 
 	if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
 			  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
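
The fast path works because a timestamp-only option block is conventionally laid out as NOP, NOP, TIMESTAMP, length 10, so the first 32-bit option word is a single constant. A sketch of that constant, using the standard option values (1, 8, 10):

#include <stdint.h>
#include <arpa/inet.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

/* The word tcp_parse_aligned_timestamp() compares against. */
uint32_t aligned_tsopt_word(void)
{
	return htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
}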
@@ -3889,8 +3948,9 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-				  struct tcp_sock *tp, u8 **hvpp)
+static int tcp_fast_parse_options(const struct sk_buff *skb,
+				  const struct tcphdr *th,
+				  struct tcp_sock *tp, const u8 **hvpp)
 {
 	/* In the spirit of fast parsing, compare doff directly to constant
 	 * values.  Because equality is used, short doff can be ignored here.
@@ -3911,10 +3971,10 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 /*
  * Parse MD5 Signature option
  */
-u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
 {
-	int length = (th->doff << 2) - sizeof (*th);
-	u8 *ptr = (u8*)(th + 1);
+	int length = (th->doff << 2) - sizeof(*th);
+	const u8 *ptr = (const u8 *)(th + 1);
 
 	/* If the TCP option is too short, we can short cut */
 	if (length < TCPOLEN_MD5SIG)
@@ -3991,8 +4051,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -4031,7 +4091,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
  * (borrowed from freebsd)
  */
 
-static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	return !before(end_seq, tp->rcv_wup) &&
 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4076,7 +4136,7 @@ static void tcp_reset(struct sock *sk)
  *
  * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
  */
-static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
+static void tcp_fin(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -4188,7 +4248,7 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 	tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
 }
 
-static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
+static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -4347,7 +4407,7 @@ static void tcp_ofo_queue(struct sock *sk)
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (tcp_hdr(skb)->fin)
-			tcp_fin(skb, sk, tcp_hdr(skb));
+			tcp_fin(sk);
 	}
 }
 
@@ -4375,7 +4435,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int eaten = -1;
 
@@ -4429,7 +4489,7 @@ queue_and_out:
 		if (skb->len)
 			tcp_event_data_recv(sk, skb);
 		if (th->fin)
-			tcp_fin(skb, sk, th);
+			tcp_fin(sk);
 
 		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
@@ -4859,9 +4919,9 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk)
+static int tcp_should_expand_sndbuf(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
@@ -4895,8 +4955,10 @@ static void tcp_new_space(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_should_expand_sndbuf(sk)) {
-		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
-			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+		int sndmem = SKB_TRUESIZE(max_t(u32,
+						tp->rx_opt.mss_clamp,
+						tp->mss_cache) +
+					  MAX_TCP_HEADER);
 		int demanded = max_t(unsigned int, tp->snd_cwnd,
 				     tp->reordering + 1);
 		sndmem *= 2 * demanded;
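
The old estimate charged each skb a flat MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); the new one routes through SKB_TRUESIZE() so sndbuf sizing matches the allocator's real accounting. For reference, the macro as defined in linux/skbuff.h of this era (quoted for context, not part of this diff) is:

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))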
@@ -4968,7 +5030,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
  * either form (or just set the sysctl tcp_stdurg).
  */
 
-static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
+static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 ptr = ntohs(th->urg_ptr);
@@ -5034,7 +5096,7 @@ static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
 }
 
 /* This is the 'fast' part of urgent handling. */
-static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
+static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5155,9 +5217,9 @@ out:
  * play significant role here.
  */
 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
-				 struct tcphdr *th, int syn_inerr)
+				 const struct tcphdr *th, int syn_inerr)
 {
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* RFC1323: H1. Apply PAWS check first. */
@@ -5238,7 +5300,7 @@ discard:
  *	tcp_data_queue when everything is OK.
  */
 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			struct tcphdr *th, unsigned len)
+			const struct tcphdr *th, unsigned int len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int res;
@@ -5449,9 +5511,9 @@ discard:
 EXPORT_SYMBOL(tcp_rcv_established);
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
-					 struct tcphdr *th, unsigned len)
+					 const struct tcphdr *th, unsigned int len)
 {
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
@@ -5726,7 +5788,7 @@ reset_and_undo:
  */
 
 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  struct tcphdr *th, unsigned len)
+			  const struct tcphdr *th, unsigned int len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7963e03f1068..0ea10eefa60f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
 						   __be32 addr);
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th);
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
 #else
 static inline
 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -104,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 					  ip_hdr(skb)->saddr,
@@ -552,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
 /* This routine computes an IPv4 TCP checksum. */
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 
 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 }
@@ -590,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -652,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 
 	net = dev_net(skb_dst(skb)->dev);
+	arg.tos = ip_hdr(skb)->tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 		      &arg, arg.iov[0].iov_len);
 
@@ -666,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts, int oif,
 			    struct tcp_md5sig_key *key,
-			    int reply_flags)
+			    int reply_flags, u8 tos)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -726,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	if (oif)
 		arg.bound_dev_if = oif;
-
+	arg.tos = tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 		      &arg, arg.iov[0].iov_len);
 
@@ -743,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
-			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			tw->tw_tos
 			);
 
 	inet_twsk_put(tw);
@@ -757,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 			req->ts_recent,
 			0,
 			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
-			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			ip_hdr(skb)->tos);
 }
 
 /*
@@ -1090,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 }
 
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th)
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
@@ -1122,12 +1125,12 @@ clear_hash_noput:
 }
 
 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			struct sock *sk, struct request_sock *req,
-			struct sk_buff *skb)
+			const struct sock *sk, const struct request_sock *req,
+			const struct sk_buff *skb)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 saddr, daddr;
 
 	if (sk) {
@@ -1172,7 +1175,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1182,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	 * o MD5 hash and we're not expecting one.
 	 * o MD5 hash and its wrong.
 	 */
-	__u8 *hash_location = NULL;
+	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	unsigned char newhash[16];
 
@@ -1248,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct request_sock *req;
 	struct inet_request_sock *ireq;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1588,7 +1591,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1605,7 +1608,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto discard;
 
 	if (nsk != sk) {
-		sock_rps_save_rxhash(nsk, skb->rxhash);
+		sock_rps_save_rxhash(nsk, skb);
 		if (tcp_child_process(sk, nsk, skb)) {
 			rsk = nsk;
 			goto reset;
@@ -1613,7 +1616,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			return 0;
 		}
 	} else
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
@@ -1645,7 +1648,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
 int tcp_v4_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
-	struct tcphdr *th;
+	const struct tcphdr *th;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -1680,7 +1683,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
 	TCP_SKB_CB(skb)->when	 = 0;
-	TCP_SKB_CB(skb)->flags	 = iph->tos;
+	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
 	TCP_SKB_CB(skb)->sacked	 = 0;
 
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
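
Storing the value as ip_dsfield (via ipv4_get_dsfield()) rather than overloading the cb ->flags byte makes the intent explicit: the field carries the whole DS octet, DSCP plus ECN bits. A sketch of how that octet decomposes (illustrative, mirroring include/net/dsfield.h semantics):

#include <stdio.h>

int main(void)
{
	unsigned char dsfield = (0x2e << 2) | 0x1;	/* hypothetical: EF + ECT(1) */

	printf("dscp=0x%02x ecn=%u\n", dsfield >> 2, dsfield & 0x3);
	return 0;
}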
@@ -1809,7 +1812,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
 
 void *tcp_v4_tw_get_peer(struct sock *sk)
 {
-	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct inet_timewait_sock *tw = inet_twsk(sk);
 
 	return inet_getpeer_v4(tw->tw_daddr, 1);
 }
@@ -2381,7 +2384,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
-static void get_openreq4(struct sock *sk, struct request_sock *req,
+static void get_openreq4(const struct sock *sk, const struct request_sock *req,
 			 struct seq_file *f, int i, int uid, int *len)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2411,9 +2414,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 {
 	int timer_active;
 	unsigned long timer_expires;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -2462,7 +2465,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		len);
 }
 
-static void get_timewait4_sock(struct inet_timewait_sock *tw,
+static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 			       struct seq_file *f, int i, int *len)
 {
 	__be32 dest, src;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d2fe4e06b472..66363b689ad6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -141,7 +141,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 			   const struct tcphdr *th)
 {
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	int paws_reject = 0;
 
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 
+	tw->tw_transparent	= inet_sk(sk)->transparent;
 	tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
 	tcptw->tw_rcv_nxt	= tp->rcv_nxt;
 	tcptw->tw_snd_nxt	= tp->snd_nxt;
@@ -344,6 +345,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tw6 = inet6_twsk((struct sock *)tw);
 		ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
 		ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+		tw->tw_tclass = np->tclass;
 		tw->tw_ipv6only = np->ipv6only;
 	}
 #endif
@@ -566,7 +568,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 			   struct request_sock **prev)
 {
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct sock *child;
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 882e0b0964d0..980b98f6288c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
 
 
 /* Account for new data that has been sent to the network. */
-static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int prior_packets = tp->packets_out;
@@ -89,9 +89,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk)
+static inline __u32 tcp_acceptable_seq(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
 		return tp->snd_nxt;
@@ -116,7 +116,7 @@ static inline __u32 tcp_acceptable_seq(struct sock *sk)
 static __u16 tcp_advertise_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 	int mss = tp->advmss;
 
 	if (dst) {
@@ -133,7 +133,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 
 /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
  * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 delta = tcp_time_stamp - tp->lsndtime;
@@ -154,7 +154,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 
 /* Congestion state accounting after a packet has been sent. */
 static void tcp_event_data_sent(struct tcp_sock *tp,
-				struct sk_buff *skb, struct sock *sk)
+				struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const u32 now = tcp_time_stamp;
@@ -295,11 +295,11 @@ static u16 tcp_select_window(struct sock *sk)
 }
 
 /* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
+	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
-		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
+		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 }
 
 /* Packet ECN state for a SYN.  */
@@ -309,13 +309,13 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 
 	tp->ecn_flags = 0;
 	if (sysctl_tcp_ecn == 1) {
-		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
+		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
 	}
 }
 
 static __inline__ void
-TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
 {
 	if (inet_rsk(req)->ecn_ok)
 		th->ece = 1;
@@ -356,7 +356,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum = 0;
 
-	TCP_SKB_CB(skb)->flags = flags;
+	TCP_SKB_CB(skb)->tcp_flags = flags;
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	skb_shinfo(skb)->gso_segs = 1;
@@ -565,7 +565,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
  */
 static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				struct tcp_out_options *opts,
-				struct tcp_md5sig_key **md5) {
+				struct tcp_md5sig_key **md5)
+{
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
 	unsigned remaining = MAX_TCP_OPTION_SPACE;
@@ -743,7 +744,8 @@ static unsigned tcp_synack_options(struct sock *sk,
  */
 static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 					struct tcp_out_options *opts,
-					struct tcp_md5sig_key **md5) {
+					struct tcp_md5sig_key **md5)
+{
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
@@ -826,7 +828,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	tcb = TCP_SKB_CB(skb);
 	memset(&opts, 0, sizeof(opts));
 
-	if (unlikely(tcb->flags & TCPHDR_SYN))
+	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 	else
 		tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -850,9 +852,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	th->seq			= htonl(tcb->seq);
 	th->ack_seq		= htonl(tp->rcv_nxt);
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
-					tcb->flags);
+					tcb->tcp_flags);
 
-	if (unlikely(tcb->flags & TCPHDR_SYN)) {
+	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
 		/* RFC1323: The window in SYN & SYN/ACK segments
 		 * is never scaled.
 		 */
@@ -875,7 +877,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
-	if (likely((tcb->flags & TCPHDR_SYN) == 0))
+	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
 		TCP_ECN_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -889,11 +891,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	icsk->icsk_af_ops->send_check(sk, skb);
 
-	if (likely(tcb->flags & TCPHDR_ACK))
+	if (likely(tcb->tcp_flags & TCPHDR_ACK))
 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
 	if (skb->len != tcp_header_size)
-		tcp_event_data_sent(tp, skb, sk);
+		tcp_event_data_sent(tp, sk);
 
 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
@@ -926,7 +928,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
+static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
@@ -947,7 +949,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 /* When a modification to fackets out becomes necessary, we need to check
  * skb is counted to fackets_out or not.
  */
-static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
+static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
 				   int decr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -962,7 +964,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
 /* Pcount in the middle of the write queue got changed, we need to do various
  * tweaks to fix counters
  */
-static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
+static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1032,9 +1034,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
 
 	/* PSH and FIN should only be set in the second packet. */
-	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
-	TCP_SKB_CB(buff)->flags = flags;
+	flags = TCP_SKB_CB(skb)->tcp_flags;
+	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+	TCP_SKB_CB(buff)->tcp_flags = flags;
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 
 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -1094,14 +1096,16 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	eat = len;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
-			eat -= skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size <= eat) {
+			skb_frag_unref(skb, i);
+			eat -= size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
 			if (eat) {
 				skb_shinfo(skb)->frags[k].page_offset += eat;
-				skb_shinfo(skb)->frags[k].size -= eat;
+				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
 				eat = 0;
 			}
 			k++;
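
The hunk above swaps direct pokes at frags[i].size and frags[i].page for the skb_frag_size()/skb_frag_size_sub()/skb_frag_unref() accessors, so the frag representation can change without touching every caller. A minimal sketch of the accessor pattern (hypothetical struct, not the kernel's skb_frag_t):

struct frag_sketch {
	unsigned int size;	/* stand-in for skb_frag_t state */
};

static unsigned int frag_size(const struct frag_sketch *f)
{
	return f->size;
}

static void frag_size_sub(struct frag_sketch *f, unsigned int delta)
{
	f->size -= delta;
}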
@@ -1144,10 +1148,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 }
 
 /* Calculate MSS. Not accounting for SACKs here.  */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	int mss_now;
 
 	/* Calculate base mss without TCP options:
@@ -1173,10 +1177,10 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
 }
 
 /* Inverse of above */
-int tcp_mss_to_mtu(struct sock *sk, int mss)
+int tcp_mss_to_mtu(const struct sock *sk, int mss)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	int mtu;
 
 	mtu = mss +
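
These two helpers are exact inverses over the same header-size terms; a worked example with plain IPv4 numbers (illustrative constants, ignoring options and MTU probing):

#include <stdio.h>

int main(void)
{
	int mtu = 1500;
	int iph = 20, tcph = 20;	/* fixed IPv4 + TCP headers */
	int mss = mtu - iph - tcph;	/* tcp_mtu_to_mss() -> 1460 */

	printf("mss=%d mtu=%d\n", mss, mss + iph + tcph);	/* back to 1500 */
	return 0;
}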
@@ -1250,8 +1254,8 @@ EXPORT_SYMBOL(tcp_sync_mss);
1250 */ 1254 */
1251unsigned int tcp_current_mss(struct sock *sk) 1255unsigned int tcp_current_mss(struct sock *sk)
1252{ 1256{
1253 struct tcp_sock *tp = tcp_sk(sk); 1257 const struct tcp_sock *tp = tcp_sk(sk);
1254 struct dst_entry *dst = __sk_dst_get(sk); 1258 const struct dst_entry *dst = __sk_dst_get(sk);
1255 u32 mss_now; 1259 u32 mss_now;
1256 unsigned header_len; 1260 unsigned header_len;
1257 struct tcp_out_options opts; 1261 struct tcp_out_options opts;
@@ -1311,10 +1315,10 @@ static void tcp_cwnd_validate(struct sock *sk)
1311 * modulo only when the receiver window alone is the limiting factor or 1315 * modulo only when the receiver window alone is the limiting factor or
1312 * when we would be allowed to send the split-due-to-Nagle skb fully. 1316 * when we would be allowed to send the split-due-to-Nagle skb fully.
1313 */ 1317 */
1314static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1318static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
1315 unsigned int mss_now, unsigned int cwnd) 1319 unsigned int mss_now, unsigned int cwnd)
1316{ 1320{
1317 struct tcp_sock *tp = tcp_sk(sk); 1321 const struct tcp_sock *tp = tcp_sk(sk);
1318 u32 needed, window, cwnd_len; 1322 u32 needed, window, cwnd_len;
1319 1323
1320 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1324 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -1334,13 +1338,14 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1334/* Can at least one segment of SKB be sent right now, according to the 1338/* Can at least one segment of SKB be sent right now, according to the
1335 * congestion window rules? If so, return how many segments are allowed. 1339 * congestion window rules? If so, return how many segments are allowed.
1336 */ 1340 */
1337static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1341static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1338 struct sk_buff *skb) 1342 const struct sk_buff *skb)
1339{ 1343{
1340 u32 in_flight, cwnd; 1344 u32 in_flight, cwnd;
1341 1345
1342 /* Don't be strict about the congestion window for the final FIN. */ 1346 /* Don't be strict about the congestion window for the final FIN. */
1343 if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1) 1347 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1348 tcp_skb_pcount(skb) == 1)
1344 return 1; 1349 return 1;
1345 1350
1346 in_flight = tcp_packets_in_flight(tp); 1351 in_flight = tcp_packets_in_flight(tp);
@@ -1355,7 +1360,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1355 * This must be invoked the first time we consider transmitting 1360 * This must be invoked the first time we consider transmitting
1356 * SKB onto the wire. 1361 * SKB onto the wire.
1357 */ 1362 */
-static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
+static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 			     unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
@@ -1393,7 +1398,7 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
 /* Return non-zero if the Nagle test allows this packet to be
  * sent now.
  */
-static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
 				 unsigned int cur_mss, int nonagle)
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
@@ -1409,7 +1414,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
-	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
+	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
 		return 1;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1419,7 +1424,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
 				   unsigned int cur_mss)
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1434,10 +1439,10 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
  * should be put on the wire right now. If so, it returns the number of
  * packets allowed by the congestion window.
  */
-static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int cur_mss, int nonagle)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;
 
 	tcp_init_tso_segs(sk, skb, cur_mss);
@@ -1455,7 +1460,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 /* Test if sending is allowed right now. */
 int tcp_may_send_now(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return skb &&
@@ -1497,9 +1502,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
 
 	/* PSH and FIN should only be set in the second packet. */
-	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
-	TCP_SKB_CB(buff)->flags = flags;
+	flags = TCP_SKB_CB(skb)->tcp_flags;
+	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+	TCP_SKB_CB(buff)->tcp_flags = flags;
 
 	/* This packet was never sent out yet, so no SACK bits. */
 	TCP_SKB_CB(buff)->sacked = 0;
@@ -1530,7 +1535,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	u32 send_win, cong_win, limit, in_flight;
 	int win_divisor;
 
-	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
 
 	if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1657,7 +1662,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
-	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
+	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
 	TCP_SKB_CB(nskb)->sacked = 0;
 	nskb->csum = 0;
 	nskb->ip_summed = skb->ip_summed;
@@ -1677,11 +1682,11 @@ static int tcp_mtu_probe(struct sock *sk)
 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
 			 * Throw it away. */
-			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
+			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {
-			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
+			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
 						   ~(TCPHDR_FIN|TCPHDR_PSH);
 			if (!skb_shinfo(skb)->nr_frags) {
 				skb_pull(skb, copy);
@@ -1796,11 +1801,13 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		tcp_event_new_data_sent(sk, skb);
 
 		tcp_minshall_update(tp, mss_now, skb);
-		sent_pkts++;
+		sent_pkts += tcp_skb_pcount(skb);
 
 		if (push_one)
 			break;
 	}
+	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+		tp->prr_out += sent_pkts;
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk);
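
The accounting added here feeds Proportional Rate Reduction: every segment transmitted while the connection sits in TCP_CA_Recovery is added to tp->prr_out, so the sender can pace what it puts on the wire against what has been delivered. Note that sent_pkts now advances by tcp_skb_pcount(skb), since one skb can stand for several TSO segments. A minimal standalone sketch of that bookkeeping (plain C with invented names, not the kernel's implementation):

/*
 * Sketch only: while in Recovery, every transmitted segment (new data
 * and retransmissions alike) is added to prr_out so PRR can compare
 * data sent against data delivered.
 */
#include <stdio.h>

enum ca_state { CA_OPEN, CA_RECOVERY };

struct conn {
	enum ca_state ca_state;
	unsigned int prr_out;	/* segments sent while in Recovery */
};

static void account_sent(struct conn *c, unsigned int pcount)
{
	if (c->ca_state == CA_RECOVERY)
		c->prr_out += pcount;	/* mirrors tp->prr_out += ... */
}

int main(void)
{
	struct conn c = { .ca_state = CA_RECOVERY, .prr_out = 0 };

	account_sent(&c, 3);	/* e.g. a 3-segment TSO burst */
	account_sent(&c, 1);	/* a single retransmission */
	printf("prr_out = %u\n", c.prr_out);	/* prints 4 */
	return 0;
}
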
@@ -1985,7 +1992,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
 
 	/* Merge over control information. This moves PSH/FIN etc. over */
-	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
+	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
 
 	/* All done, get rid of second SKB and account for it so
 	 * packet counting does not break.
@@ -2003,7 +2010,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
+static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_skb_pcount(skb) > 1)
 		return 0;
@@ -2033,7 +2040,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
-	if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
 		return;
 
 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2125,12 +2132,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * since it is cheap to do so and saves bytes on the network.
 	 */
 	if (skb->len > 0 &&
-	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
+	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			/* Reuse, even though it does some unnecessary work */
 			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
-					     TCP_SKB_CB(skb)->flags);
+					     TCP_SKB_CB(skb)->tcp_flags);
 			skb->ip_summed = CHECKSUM_NONE;
 		}
 	}
@@ -2179,7 +2186,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 static int tcp_can_forward_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Forward retransmissions are possible only during Recovery. */
 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
@@ -2294,6 +2301,9 @@ begin_fwd:
 			return;
 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+			tp->prr_out += tcp_skb_pcount(skb);
+
 		if (skb == tcp_write_queue_head(sk))
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  inet_csk(sk)->icsk_rto,
@@ -2317,7 +2327,7 @@ void tcp_send_fin(struct sock *sk)
 	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
-		TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
+		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
 	} else {
@@ -2379,11 +2389,11 @@ int tcp_send_synack(struct sock *sk)
 	struct sk_buff *skb;
 
 	skb = tcp_write_queue_head(sk);
-	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
+	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
 		return -EFAULT;
 	}
-	if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
+	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
 		if (skb_cloned(skb)) {
 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 			if (nskb == NULL)
@@ -2397,7 +2407,7 @@ int tcp_send_synack(struct sock *sk)
 			skb = nskb;
 		}
 
-		TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
+		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2542,7 +2552,7 @@ EXPORT_SYMBOL(tcp_make_synack);
 /* Do all connect socket setups that can be done AF independent. */
 static void tcp_connect_init(struct sock *sk)
 {
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u8 rcv_wscale;
 
@@ -2794,13 +2804,13 @@ int tcp_write_wakeup(struct sock *sk)
 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
-			TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
-		TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)
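
Throughout this file the skb control-block field is renamed from flags to tcp_flags. The invariant that tso_fragment() enforces when splitting a segment, that PSH and FIN may only ride on the second half, can be shown in isolation. An illustrative sketch (the TCPHDR_* values below match the on-wire TCP flag byte; the helper name is invented):

/*
 * Sketch of the PSH/FIN split rule: the first half of a split segment
 * loses FIN and PSH, the second half keeps the full flag set.
 */
#include <stdio.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10

static void split_flags(unsigned char all, unsigned char *first,
			unsigned char *second)
{
	*first  = all & ~(TCPHDR_FIN | TCPHDR_PSH);
	*second = all;
}

int main(void)
{
	unsigned char first, second;

	split_flags(TCPHDR_ACK | TCPHDR_PSH | TCPHDR_FIN, &first, &second);
	printf("first=0x%02x second=0x%02x\n", first, second);	/* 0x10 0x19 */
	return 0;
}
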
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ecd44b0c45f1..2e0f0af76c19 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -334,7 +334,6 @@ void tcp_retransmit_timer(struct sock *sk)
 		 * connection. If the socket is an orphan, time it out,
 		 * we cannot allow such beasts to hang infinitely.
 		 */
-#ifdef TCP_DEBUG
 		struct inet_sock *inet = inet_sk(sk);
 		if (sk->sk_family == AF_INET) {
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
@@ -349,7 +348,6 @@ void tcp_retransmit_timer(struct sock *sk)
 				       inet->inet_num, tp->snd_una, tp->snd_nxt);
 		}
 #endif
-#endif
 		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
 			tcp_write_err(sk);
 			goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b5a19340a95..ebaa96bd3464 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags)
 	sk->sk_state = TCP_CLOSE;
 	inet->inet_daddr = 0;
 	inet->inet_dport = 0;
-	sock_rps_save_rxhash(sk, 0);
+	sock_rps_reset_rxhash(sk);
 	sk->sk_bound_dev_if = 0;
 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
 		inet_reset_saddr(sk);
@@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	int rc;
 
 	if (inet_sk(sk)->inet_daddr)
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	rc = ip_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
@@ -1461,10 +1461,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_dereference_raw(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto drop;
 
 
 	if (sk_rcvqueues_full(sk, skb))
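
rcu_dereference_raw() is replaced here with rcu_access_pointer(), the right primitive when the pointer's value is only tested against NULL and never dereferenced: no read-side ordering is required for a pure existence check. A userspace analogue using C11 atomics (struct and symbol names are invented for illustration):

/*
 * Analogue of rcu_access_pointer() vs rcu_dereference(): an existence
 * test needs only a relaxed load, while a load whose result will be
 * dereferenced needs acquire ordering so the reader sees the pointee's
 * initialization.
 */
#include <stdatomic.h>
#include <stdio.h>

struct filter { int len; };

static _Atomic(struct filter *) sk_filter;

static int filter_attached(void)
{
	/* like rcu_access_pointer(): value tested, never dereferenced */
	return atomic_load_explicit(&sk_filter, memory_order_relaxed) != NULL;
}

static int filter_len(void)
{
	/* like rcu_dereference(): ordered load before dereferencing */
	struct filter *f = atomic_load_explicit(&sk_filter,
						memory_order_acquire);
	return f ? f->len : 0;
}

int main(void)
{
	static struct filter f = { .len = 16 };

	atomic_store_explicit(&sk_filter, &f, memory_order_release);
	printf("attached=%d len=%d\n", filter_attached(), filter_len());
	return 0;
}
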
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fc5368ad2b0d..a0b4c5da8d43 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	struct rtable *rt = (struct rtable *)xdst->route;
 	const struct flowi4 *fl4 = &fl->u.ip4;
 
-	rt->rt_key_dst = fl4->daddr;
-	rt->rt_key_src = fl4->saddr;
-	rt->rt_key_tos = fl4->flowi4_tos;
-	rt->rt_route_iif = fl4->flowi4_iif;
-	rt->rt_iif = fl4->flowi4_iif;
-	rt->rt_oif = fl4->flowi4_oif;
-	rt->rt_mark = fl4->flowi4_mark;
+	xdst->u.rt.rt_key_dst = fl4->daddr;
+	xdst->u.rt.rt_key_src = fl4->saddr;
+	xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
+	xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
+	xdst->u.rt.rt_iif = fl4->flowi4_iif;
+	xdst->u.rt.rt_oif = fl4->flowi4_oif;
+	xdst->u.rt.rt_mark = fl4->flowi4_mark;
 
 	xdst->u.dst.dev = dev;
 	dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12368c586068..d0611a5de45f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -428,7 +428,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 	ndev->tstamp = jiffies;
 	addrconf_sysctl_register(ndev);
 	/* protected by rtnl_lock */
-	rcu_assign_pointer(dev->ip6_ptr, ndev);
+	RCU_INIT_POINTER(dev->ip6_ptr, ndev);
 
 	/* Join all-node multicast group */
 	ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
@@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct in6_addr addr, *tmpaddr;
-	unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
+	unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
 	unsigned long regen_advance;
 	int tmp_plen;
 	int ret = 0;
 	int max_addresses;
 	u32 addr_flags;
+	unsigned long now = jiffies;
 
 	write_lock(&idev->lock);
 	if (ift) {
@@ -874,7 +875,7 @@ retry:
 		goto out;
 	}
 	memcpy(&addr.s6_addr[8], idev->rndid, 8);
-	age = (jiffies - ifp->tstamp) / HZ;
+	age = (now - ifp->tstamp) / HZ;
 	tmp_valid_lft = min_t(__u32,
 			      ifp->valid_lft,
 			      idev->cnf.temp_valid_lft + age);
@@ -884,7 +885,6 @@ retry:
 			       idev->cnf.max_desync_factor);
 	tmp_plen = ifp->prefix_len;
 	max_addresses = idev->cnf.max_addresses;
-	tmp_cstamp = ifp->cstamp;
 	tmp_tstamp = ifp->tstamp;
 	spin_unlock_bh(&ifp->lock);
 
@@ -929,7 +929,7 @@ retry:
 	ift->ifpub = ifp;
 	ift->valid_lft = tmp_valid_lft;
 	ift->prefered_lft = tmp_prefered_lft;
-	ift->cstamp = tmp_cstamp;
+	ift->cstamp = now;
 	ift->tstamp = tmp_tstamp;
 	spin_unlock_bh(&ift->lock);
 
@@ -1713,6 +1713,40 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 	ip6_route_add(&cfg);
 }
 
+
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+						  int plen,
+						  const struct net_device *dev,
+						  u32 flags, u32 noflags)
+{
+	struct fib6_node *fn;
+	struct rt6_info *rt = NULL;
+	struct fib6_table *table;
+
+	table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+	if (table == NULL)
+		return NULL;
+
+	write_lock_bh(&table->tb6_lock);
+	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
+	if (!fn)
+		goto out;
+	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+		if (rt->rt6i_dev->ifindex != dev->ifindex)
+			continue;
+		if ((rt->rt6i_flags & flags) != flags)
+			continue;
+		if ((noflags != 0) && ((rt->rt6i_flags & noflags) != 0))
+			continue;
+		dst_hold(&rt->dst);
+		break;
+	}
+out:
+	write_unlock_bh(&table->tb6_lock);
+	return rt;
+}
+
+
 /* Create "default" multicast route to the interface */
 
 static void addrconf_add_mroute(struct net_device *dev)
@@ -1842,10 +1876,13 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 	if (addrconf_finite_timeout(rt_expires))
 		rt_expires *= HZ;
 
-	rt = rt6_lookup(net, &pinfo->prefix, NULL,
-			dev->ifindex, 1);
+	rt = addrconf_get_prefix_route(&pinfo->prefix,
+				       pinfo->prefix_len,
+				       dev,
+				       RTF_ADDRCONF | RTF_PREFIX_RT,
+				       RTF_GATEWAY | RTF_DEFAULT);
 
-	if (rt && addrconf_is_prefix_route(rt)) {
+	if (rt) {
 		/* Autoconf prefix route */
 		if (valid_lft == 0) {
 			ip6_del_rt(rt);
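
addrconf_get_prefix_route() filters candidates by device and by two flag masks: every bit in flags must be set on the route, and when noflags is non-zero none of its bits may be set. A standalone sketch of that predicate (the RTF_* values follow linux/ipv6_route.h; route_matches is an invented helper):

/*
 * Sketch of the flag predicate applied to each candidate route.
 */
#include <stdio.h>

#define RTF_GATEWAY	0x0002
#define RTF_DEFAULT	0x00010000
#define RTF_ADDRCONF	0x00040000
#define RTF_PREFIX_RT	0x00080000

static int route_matches(unsigned int rt_flags, unsigned int flags,
			 unsigned int noflags)
{
	if ((rt_flags & flags) != flags)
		return 0;		/* required bits missing */
	if (noflags != 0 && (rt_flags & noflags) != 0)
		return 0;		/* forbidden bits present */
	return 1;
}

int main(void)
{
	unsigned int want = RTF_ADDRCONF | RTF_PREFIX_RT;
	unsigned int reject = RTF_GATEWAY | RTF_DEFAULT;

	printf("%d\n", route_matches(RTF_ADDRCONF | RTF_PREFIX_RT,
				     want, reject));		/* 1 */
	printf("%d\n", route_matches(RTF_ADDRCONF | RTF_PREFIX_RT |
				     RTF_GATEWAY, want, reject));	/* 0 */
	return 0;
}
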
@@ -1999,25 +2036,50 @@ ok:
 #ifdef CONFIG_IPV6_PRIVACY
 	read_lock_bh(&in6_dev->lock);
 	/* update all temporary addresses in the list */
-	list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
-		/*
-		 * When adjusting the lifetimes of an existing
-		 * temporary address, only lower the lifetimes.
-		 * Implementations must not increase the
-		 * lifetimes of an existing temporary address
-		 * when processing a Prefix Information Option.
-		 */
+	list_for_each_entry(ift, &in6_dev->tempaddr_list,
+			    tmp_list) {
+		int age, max_valid, max_prefered;
+
 		if (ifp != ift->ifpub)
 			continue;
 
+		/*
+		 * RFC 4941 section 3.3:
+		 * If a received option will extend the lifetime
+		 * of a public address, the lifetimes of
+		 * temporary addresses should be extended,
+		 * subject to the overall constraint that no
+		 * temporary addresses should ever remain
+		 * "valid" or "preferred" for a time longer than
+		 * (TEMP_VALID_LIFETIME) or
+		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
+		 * respectively.
+		 */
+		age = (now - ift->cstamp) / HZ;
+		max_valid = in6_dev->cnf.temp_valid_lft - age;
+		if (max_valid < 0)
+			max_valid = 0;
+
+		max_prefered = in6_dev->cnf.temp_prefered_lft -
+			       in6_dev->cnf.max_desync_factor -
+			       age;
+		if (max_prefered < 0)
+			max_prefered = 0;
+
+		if (valid_lft > max_valid)
+			valid_lft = max_valid;
+
+		if (prefered_lft > max_prefered)
+			prefered_lft = max_prefered;
+
 		spin_lock(&ift->lock);
 		flags = ift->flags;
-		if (ift->valid_lft > valid_lft &&
-		    ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ)
-			ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ;
-		if (ift->prefered_lft > prefered_lft &&
-		    ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ)
-			ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ;
+		ift->valid_lft = valid_lft;
+		ift->prefered_lft = prefered_lft;
+		ift->tstamp = now;
+		if (prefered_lft > 0)
+			ift->flags &= ~IFA_F_DEPRECATED;
+
 		spin_unlock(&ift->lock);
 		if (!(flags&IFA_F_TENTATIVE))
 			ipv6_ifa_notify(0, ift);
@@ -2025,9 +2087,11 @@ ok:
 
 	if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
 		/*
-		 * When a new public address is created as described in [ADDRCONF],
-		 * also create a new temporary address. Also create a temporary
-		 * address if it's enabled but no temporary address currently exists.
+		 * When a new public address is created as
+		 * described in [ADDRCONF], also create a new
+		 * temporary address. Also create a temporary
+		 * address if it's enabled but no temporary
+		 * address currently exists.
 		 */
 		read_unlock_bh(&in6_dev->lock);
 		ipv6_create_tempaddr(ifp, NULL);
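
A worked instance of the RFC 4941 clamping above, with made-up sysctl values (not taken from any real configuration):

/*
 * Standalone arithmetic check: a temporary address aged 3000s, with
 * temp_valid_lft=7200s, temp_prefered_lft=3600s and a 600s desync
 * factor, caps the advertised lifetimes at 4200s valid / 0s preferred.
 */
#include <stdio.h>

int main(void)
{
	int temp_valid_lft = 7200, temp_prefered_lft = 3600;
	int max_desync_factor = 600, age = 3000;
	int valid_lft = 86400, prefered_lft = 14400;	/* from the RA */

	int max_valid = temp_valid_lft - age;
	int max_prefered = temp_prefered_lft - max_desync_factor - age;

	if (max_valid < 0)
		max_valid = 0;
	if (max_prefered < 0)
		max_prefered = 0;
	if (valid_lft > max_valid)
		valid_lft = max_valid;
	if (prefered_lft > max_prefered)
		prefered_lft = max_prefered;

	printf("valid=%d prefered=%d\n", valid_lft, prefered_lft);	/* 4200 0 */
	return 0;
}
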
@@ -2706,7 +2770,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	idev->dead = 1;
 
 	/* protected by rtnl_lock */
-	rcu_assign_pointer(dev->ip6_ptr, NULL);
+	RCU_INIT_POINTER(dev->ip6_ptr, NULL);
 
 	/* Step 1.5: remove snmp6 entry */
 	snmp6_unregister_dev(idev);
@@ -2969,12 +3033,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 
 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
 
-	/* If added prefix is link local and forwarding is off,
-	   start sending router solicitations.
+	/* If added prefix is link local and we are prepared to process
+	   router advertisements, start sending router solicitations.
 	 */
 
-	if ((ifp->idev->cnf.forwarding == 0 ||
-	     ifp->idev->cnf.forwarding == 2) &&
+	if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
+	     ifp->idev->cnf.accept_ra == 2) &&
 	    ifp->idev->cnf.rtr_solicits > 0 &&
 	    (dev->flags&IFF_LOOPBACK) == 0 &&
 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b46e9f88ce37..e2480691c220 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -297,10 +297,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 	ipv6_addr_copy(&iph->daddr, &fl6->daddr);
 
 	mtu_info = IP6CBMTU(skb);
-	if (!mtu_info) {
-		kfree_skb(skb);
-		return;
-	}
 
 	mtu_info->ip6m_mtu = mtu;
 	mtu_info->ip6m_addr.sin6_family = AF_INET6;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 79a485e8a700..1318de4c3e8d 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -273,12 +273,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 	__u16 dstbuf;
 #endif
-	struct dst_entry *dst;
+	struct dst_entry *dst = skb_dst(skb);
 
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
 				 IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
@@ -289,9 +289,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 	dstbuf = opt->dst1;
 #endif
 
-	dst = dst_clone(skb_dst(skb));
 	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
-		dst_release(dst);
 		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
 		opt = IP6CB(skb);
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -304,7 +302,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 
 	IP6_INC_STATS_BH(dev_net(dst->dev),
 			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
-	dst_release(dst);
 	return -1;
 }
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 11900417b1cc..90868fb42757 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 		goto out_dst_release;
 	}
 
-	idev = in6_dev_get(skb->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(skb->dev);
 
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
 			      len + sizeof(struct icmp6hdr),
@@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
-		goto out_put;
+	} else {
+		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+						 len + sizeof(struct icmp6hdr));
 	}
-	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));
-
-out_put:
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 out_dst_release:
 	dst_release(dst);
 out:
 	icmpv6_xmit_unlock(sk);
 }
-
 EXPORT_SYMBOL(icmpv6_send);
 
 static void icmpv6_echo_reply(struct sk_buff *skb)
@@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (hlimit < 0)
 		hlimit = ip6_dst_hoplimit(dst);
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 
 	msg.skb = skb;
 	msg.offset = 0;
@@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
-		goto out_put;
+	} else {
+		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+						 skb->len + sizeof(struct icmp6hdr));
 	}
-	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
-
-out_put:
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
 	dst_release(dst);
 out:
 	icmpv6_xmit_unlock(sk);
@@ -840,8 +835,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff struct overhead.
 		 */
-		sk->sk_sndbuf =
-			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
+		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
 	}
 	return 0;
 
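
SKB_TRUESIZE() charges the payload plus the cache-line-aligned sk_buff and skb_shared_info overhead, where the open-coded arithmetic it replaces counted only sizeof(struct sk_buff). A userspace rendering of the idea (the struct sizes below are placeholders, not the real kernel sizes):

/*
 * Sketch: true memory cost of a buffer is payload plus aligned
 * metadata, so sndbuf sizing reflects real consumption.
 */
#include <stdio.h>

#define SMP_CACHE_BYTES	64
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define DATA_ALIGN(x)	ALIGN_UP((x), SMP_CACHE_BYTES)

/* placeholder sizes; the kernel uses sizeof(struct sk_buff) etc. */
#define SKB_SIZE	232
#define SHINFO_SIZE	320

#define SKB_TRUESIZE(x)	((x) + DATA_ALIGN(SKB_SIZE) + DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
	printf("sndbuf = %lu\n",
	       (unsigned long)(2 * SKB_TRUESIZE(64 * 1024)));
	return 0;
}
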
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a58e8cf6646..fee46d5a2f12 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	struct in6_addr *final_p, final;
+	int res;
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = sk->sk_protocol;
@@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 		__inet6_csk_dst_store(sk, dst, NULL, NULL);
 	}
 
-	skb_dst_set(skb, dst_clone(dst));
+	rcu_read_lock();
+	skb_dst_set_noref(skb, dst);
 
 	/* Restore final destination back after routing done */
 	ipv6_addr_copy(&fl6.daddr, &np->daddr);
 
-	return ip6_xmit(sk, skb, &fl6, np->opt);
+	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	rcu_read_unlock();
+	return res;
 }
-
 EXPORT_SYMBOL_GPL(inet6_csk_xmit);
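
skb_dst_set_noref() lets each transmitted skb borrow the socket's cached route inside an RCU read-side section instead of taking and dropping a reference per packet. A toy model of the owned-versus-borrowed distinction (plain C; the kernel calls appear only as comments):

/*
 * Sketch: refcounted attach pays an atomic pair per packet; the noref
 * attach is valid only for the duration of the critical section.
 */
#include <assert.h>
#include <stdio.h>

struct dst { int refcnt; };

static void dst_hold(struct dst *d)    { d->refcnt++; }
static void dst_release(struct dst *d) { d->refcnt--; }

/* refcounted style: one hold/release pair per transmitted packet */
static void xmit_owned(struct dst *d)
{
	dst_hold(d);
	/* ... build and send the packet ... */
	dst_release(d);
}

/* noref style: borrow under rcu_read_lock(), no refcount traffic */
static void xmit_borrowed(struct dst *d)
{
	/* rcu_read_lock(); */
	/* ... build and send the packet using d ... */
	/* rcu_read_unlock(); */
	(void)d;
}

int main(void)
{
	struct dst cached = { .refcnt = 1 };

	xmit_owned(&cached);
	xmit_borrowed(&cached);
	assert(cached.refcnt == 1);
	printf("refcnt=%d\n", cached.refcnt);
	return 0;
}
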
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 320d91d20ad7..93718f3db79b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -28,10 +28,6 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4c882cf4e8a1..84d0bd5cac93 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -180,7 +180,7 @@ int ip6_output(struct sk_buff *skb)
  */
 
 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt)
+	     struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -190,7 +190,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	u8 proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
 	int hlimit = -1;
-	int tclass = 0;
 	u32 mtu;
 
 	if (opt) {
@@ -228,10 +227,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	/*
 	 * Fill in the IPv6 header
 	 */
-	if (np) {
-		tclass = np->tclass;
+	if (np)
 		hlimit = np->hop_limit;
-	}
 	if (hlimit < 0)
 		hlimit = ip6_dst_hoplimit(dst);
 
@@ -1126,7 +1123,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 			hh_len + fragheaderlen + transhdrlen + 20,
 			(flags & MSG_DONTWAIT), &err);
 	if (skb == NULL)
-		return -ENOMEM;
+		return err;
 
 	/* reserve space for Hardware header */
 	skb_reserve(skb, hh_len);
@@ -1193,6 +1190,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	struct sk_buff *skb;
 	unsigned int maxfraglen, fragheaderlen;
 	int exthdrlen;
+	int dst_exthdrlen;
 	int hh_len;
 	int mtu;
 	int copy;
@@ -1248,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		np->cork.hop_limit = hlimit;
 		np->cork.tclass = tclass;
 		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
 		if (np->frag_size < mtu) {
 			if (np->frag_size)
 				mtu = np->frag_size;
@@ -1259,16 +1257,17 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		cork->length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
-			    rt->rt6i_nfheader_len;
+		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
+		dst_exthdrlen = rt->dst.header_len;
 	} else {
 		rt = (struct rt6_info *)cork->dst;
 		fl6 = &inet->cork.fl.u.ip6;
 		opt = np->cork.opt;
 		transhdrlen = 0;
 		exthdrlen = 0;
+		dst_exthdrlen = 0;
 		mtu = cork->fragsize;
 	}
 
@@ -1368,6 +1367,8 @@ alloc_new_skb:
 			else
 				alloclen = datalen + fragheaderlen;
 
+			alloclen += dst_exthdrlen;
+
 			/*
 			 * The last fragment gets additional space at tail.
 			 * Note: we overallocate on fragments with MSG_MODE
@@ -1419,9 +1420,9 @@ alloc_new_skb:
 			/*
 			 * Find where to start putting bytes
 			 */
-			data = skb_put(skb, fraglen);
-			skb_set_network_header(skb, exthdrlen);
-			data += fragheaderlen;
+			data = skb_put(skb, fraglen + dst_exthdrlen);
+			skb_set_network_header(skb, exthdrlen + dst_exthdrlen);
+			data += fragheaderlen + dst_exthdrlen;
 			skb->transport_header = (skb->network_header +
 						 fragheaderlen);
 			if (fraggap) {
@@ -1434,6 +1435,7 @@ alloc_new_skb:
 				pskb_trim_unique(skb_prev, maxfraglen);
 			}
 			copy = datalen - transhdrlen - fraggap;
+
 			if (copy < 0) {
 				err = -EINVAL;
 				kfree_skb(skb);
@@ -1448,6 +1450,7 @@ alloc_new_skb:
 			length -= datalen - fraggap;
 			transhdrlen = 0;
 			exthdrlen = 0;
+			dst_exthdrlen = 0;
 			csummode = CHECKSUM_NONE;
 
 			/*
@@ -1480,13 +1483,13 @@ alloc_new_skb:
 			if (page && (left = PAGE_SIZE - off) > 0) {
 				if (copy >= left)
 					copy = left;
-				if (page != frag->page) {
+				if (page != skb_frag_page(frag)) {
 					if (i == MAX_SKB_FRAGS) {
 						err = -EMSGSIZE;
 						goto error;
 					}
-					get_page(page);
 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+					skb_frag_ref(skb, i);
 					frag = &skb_shinfo(skb)->frags[i];
 				}
 			} else if(i < MAX_SKB_FRAGS) {
@@ -1506,12 +1509,14 @@ alloc_new_skb:
 				err = -EMSGSIZE;
 				goto error;
 			}
-			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+			if (getfrag(from,
+				    skb_frag_address(frag) + skb_frag_size(frag),
+				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
 				goto error;
 			}
 			sk->sk_sndmsg_off += copy;
-			frag->size += copy;
+			skb_frag_size_add(frag, copy);
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
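
The conversions to skb_frag_page(), skb_frag_ref(), skb_frag_address() and skb_frag_size_add() hide skb_frag_t's layout behind accessors, so the fragment representation can change without touching every caller. The pattern in miniature (a standalone mock, not the kernel's skb_frag_t):

/*
 * Sketch of the accessor pattern: callers never poke frag->page or
 * frag->size directly, so the struct can be reorganized later.
 */
#include <stdio.h>

struct page { int refcnt; };

typedef struct {
	struct page *page;
	unsigned int page_offset;
	unsigned int size;
} frag_t;

static struct page *frag_page(const frag_t *f)       { return f->page; }
static unsigned int frag_size(const frag_t *f)       { return f->size; }
static void frag_size_add(frag_t *f, unsigned int d) { f->size += d; }
static void frag_ref(frag_t *f)                      { f->page->refcnt++; }

int main(void)
{
	struct page pg = { .refcnt = 1 };
	frag_t f = { .page = &pg, .page_offset = 0, .size = 100 };

	frag_ref(&f);		/* replaces a raw get_page(page) */
	frag_size_add(&f, 28);	/* replaces frag->size += 28 */
	printf("page refcnt=%d size=%u\n",
	       frag_page(&f)->refcnt, frag_size(&f));	/* 2, 128 */
	return 0;
}
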
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0bc98886c383..bdc15c9003d7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
-	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
-	rcu_assign_pointer(*tp, t);
+	RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
+	RCU_INIT_POINTER(*tp, t);
 }
 
 /**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
 		if (t == iter) {
-			rcu_assign_pointer(*tp, t->next);
+			RCU_INIT_POINTER(*tp, t->next);
 			break;
 		}
 	}
@@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (dev == ip6n->fb_tnl_dev)
-		rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
 	else
 		ip6_tnl_unlink(ip6n, t);
 	ip6_tnl_dst_reset(t);
@@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &t->dev->stats;
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	struct ipv6_tel_txoption opt;
-	struct dst_entry *dst;
+	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
 	int mtu;
 	unsigned int max_headroom = sizeof(struct ipv6hdr);
@@ -897,19 +897,20 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	int err = -1;
 	int pkt_len;
 
-	if ((dst = ip6_tnl_dst_check(t)) != NULL)
-		dst_hold(dst);
-	else {
-		dst = ip6_route_output(net, NULL, fl6);
+	if (!fl6->flowi6_mark)
+		dst = ip6_tnl_dst_check(t);
+	if (!dst) {
+		ndst = ip6_route_output(net, NULL, fl6);
 
-		if (dst->error)
+		if (ndst->error)
 			goto tx_err_link_failure;
-		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
-		if (IS_ERR(dst)) {
-			err = PTR_ERR(dst);
-			dst = NULL;
+		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
+			ndst = NULL;
 			goto tx_err_link_failure;
 		}
+		dst = ndst;
 	}
 
 	tdev = dst->dev;
@@ -955,8 +956,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		skb = new_skb;
 	}
 	skb_dst_drop(skb);
-	skb_dst_set(skb, dst_clone(dst));
-
+	if (fl6->flowi6_mark) {
+		skb_dst_set(skb, dst);
+		ndst = NULL;
+	} else {
+		skb_dst_set_noref(skb, dst);
+	}
 	skb->transport_header = skb->network_header;
 
 	proto = fl6->flowi6_proto;
@@ -987,13 +992,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
 	}
-	ip6_tnl_dst_store(t, dst);
+	if (ndst)
+		ip6_tnl_dst_store(t, ndst);
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
-	dst_release(dst);
+	dst_release(ndst);
 	return err;
 }
 
@@ -1020,9 +1026,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dsfield = ipv4_get_dsfield(iph);
 
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
 		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
 				 & IPV6_TCLASS_MASK;
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
 
 	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
@@ -1069,10 +1077,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	fl6.flowi6_proto = IPPROTO_IPV6;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
 		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
 		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
 
 	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
@@ -1439,7 +1449,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
 
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
-	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+	RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
 	return 0;
 }
 
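
Several hunks in this merge (here and in addrconf.c, raw.c and sit.c) swap rcu_assign_pointer() for RCU_INIT_POINTER(). The difference: rcu_assign_pointer() orders initialization of the pointee before publication, while RCU_INIT_POINTER() is a plain store, which suffices when NULL is stored or when the update is already serialized against readers. A userspace sketch of the two cases (C11 atomics, invented names):

/*
 * publish(): pointee initialized, then released, the
 * rcu_assign_pointer() case. unpublish(): storing NULL needs no
 * ordering, the RCU_INIT_POINTER() case.
 */
#include <stdatomic.h>
#include <stdio.h>

struct tnl { int parms; };

static _Atomic(struct tnl *) bucket;

static void publish(struct tnl *t)
{
	t->parms = 42;	/* pointee initialized first ... */
	/* ... then made visible with release semantics */
	atomic_store_explicit(&bucket, t, memory_order_release);
}

static void unpublish(void)
{
	/* NULL publishes nothing a reader could dereference */
	atomic_store_explicit(&bucket, NULL, memory_order_relaxed);
}

int main(void)
{
	static struct tnl t;

	publish(&t);
	unpublish();
	printf("bucket=%p\n",
	       (void *)atomic_load_explicit(&bucket, memory_order_relaxed));
	return 0;
}
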
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2fbda5fc4cc4..c99e3ee9781f 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		break;
 
 	case IPV6_TRANSPARENT:
-		if (!capable(CAP_NET_ADMIN)) {
+		if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
 			retv = -EPERM;
 			break;
 		}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9da6e02eaaeb..44e5b7f2a6c1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -370,17 +370,14 @@ static int ndisc_constructor(struct neighbour *neigh)
 	struct neigh_parms *parms;
 	int is_multicast = ipv6_addr_is_multicast(addr);
 
-	rcu_read_lock();
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev == NULL) {
-		rcu_read_unlock();
 		return -EINVAL;
 	}
 
 	parms = in6_dev->nd_parms;
 	__neigh_parms_put(neigh->parms);
 	neigh->parms = neigh_parms_clone(parms);
-	rcu_read_unlock();
 
 	neigh->type = is_multicast ? RTN_MULTICAST : RTN_UNICAST;
 	if (!dev->header_ops) {
@@ -533,7 +530,8 @@ void ndisc_send_skb(struct sk_buff *skb,
 
 	skb_dst_set(skb, dst);
 
-	idev = in6_dev_get(dst->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(dst->dev);
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
@@ -543,8 +541,7 @@ void ndisc_send_skb(struct sk_buff *skb,
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
 	}
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 }
 
 EXPORT_SYMBOL(ndisc_send_skb);
@@ -1039,7 +1036,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 	if (skb->len < sizeof(*rs_msg))
 		return;
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 	if (!idev) {
 		if (net_ratelimit())
 			ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
@@ -1080,7 +1077,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 		neigh_release(neigh);
 	}
 out:
-	in6_dev_put(idev);
+	return;
 }
 
 static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1179,7 +1176,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	 *	set the RA_RECV flag in the interface
 	 */
 
-	in6_dev = in6_dev_get(skb->dev);
+	in6_dev = __in6_dev_get(skb->dev);
 	if (in6_dev == NULL) {
 		ND_PRINTK0(KERN_ERR
 			   "ICMPv6 RA: can't find inet6 device for %s.\n",
@@ -1188,7 +1185,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	}
 
 	if (!ndisc_parse_options(opt, optlen, &ndopts)) {
-		in6_dev_put(in6_dev);
 		ND_PRINTK2(KERN_WARNING
 			   "ICMP6 RA: invalid ND options\n");
 		return;
@@ -1225,6 +1221,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	if (!in6_dev->cnf.accept_ra_defrtr)
 		goto skip_defrtr;
 
+	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+		goto skip_defrtr;
+
 	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
@@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 			ND_PRINTK0(KERN_ERR
 				   "ICMPv6 RA: %s() failed to add default route.\n",
 				   __func__);
-			in6_dev_put(in6_dev);
 			return;
 		}
 
@@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 				   "ICMPv6 RA: %s() got default router without neighbour.\n",
 				   __func__);
 			dst_release(&rt->dst);
-			in6_dev_put(in6_dev);
 			return;
 		}
 		neigh->flags |= NTF_ROUTER;
@@ -1349,6 +1346,9 @@ skip_linkparms:
 		goto out;
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+		goto skip_routeinfo;
+
 	if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
 		struct nd_opt_hdr *p;
 		for (p = ndopts.nd_opts_ri;
@@ -1366,6 +1366,8 @@ skip_linkparms:
 				      &ipv6_hdr(skb)->saddr);
 		}
 	}
+
+skip_routeinfo:
 #endif
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1422,7 +1424,6 @@ out:
 		dst_release(&rt->dst);
 	else if (neigh)
 		neigh_release(neigh);
-	in6_dev_put(in6_dev);
 }
 
 static void ndisc_redirect_rcv(struct sk_buff *skb)
@@ -1481,13 +1482,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 		return;
 	}
 
-	in6_dev = in6_dev_get(skb->dev);
+	in6_dev = __in6_dev_get(skb->dev);
 	if (!in6_dev)
 		return;
-	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) {
-		in6_dev_put(in6_dev);
+	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
 		return;
-	}
 
 	/* RFC2461 8.1:
 	 *	The IP source address of the Redirect MUST be the same as the current
@@ -1497,7 +1496,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 	if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
 		ND_PRINTK2(KERN_WARNING
 			   "ICMPv6 Redirect: invalid ND options\n");
-		in6_dev_put(in6_dev);
 		return;
 	}
 	if (ndopts.nd_opts_tgt_lladdr) {
@@ -1506,7 +1504,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 		if (!lladdr) {
 			ND_PRINTK2(KERN_WARNING
 				   "ICMPv6 Redirect: invalid link-layer address length\n");
-			in6_dev_put(in6_dev);
 			return;
 		}
 	}
@@ -1518,7 +1515,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 			     on_link);
 		neigh_release(neigh);
 	}
-	in6_dev_put(in6_dev);
 }
 
 void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
@@ -1651,7 +1647,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 			     csum_partial(icmph, len, 0));
 
 	skb_dst_set(buff, dst);
-	idev = in6_dev_get(dst->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(dst->dev);
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
 		      dst_output);
@@ -1660,8 +1657,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
 	}
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 	return;
 
 release:
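
With __in6_dev_get() used in contexts covered by rcu_read_lock() (or in receive paths that already run inside an RCU read-side section), the device lookup returns a borrowed pointer and the many early-exit paths no longer need a matching in6_dev_put(). The structural effect, in an illustrative mock (plain C, kernel calls replaced by toy counterparts):

/*
 * Sketch: refcounted lookup forces a put on every exit path; an
 * RCU-protected borrow lets the function simply return.
 */
#include <stdio.h>

struct idev { int refcnt; int accept_redirects; };

static struct idev dev0 = { .refcnt = 1, .accept_redirects = 0 };

static struct idev *in6_dev_get_mock(void) { dev0.refcnt++; return &dev0; }
static void in6_dev_put_mock(struct idev *d) { d->refcnt--; }
static struct idev *in6_dev_borrow(void)   { return &dev0; } /* under RCU */

static void rcv_refcounted(void)
{
	struct idev *d = in6_dev_get_mock();

	if (!d->accept_redirects) {
		in6_dev_put_mock(d);	/* every exit needs a put */
		return;
	}
	/* ... */
	in6_dev_put_mock(d);
}

static void rcv_rcu(void)
{
	struct idev *d = in6_dev_borrow();	/* rcu_read_lock() held */

	if (!d->accept_redirects)
		return;			/* nothing to release */
	/* ... */
}

int main(void)
{
	rcv_refcounted();
	rcv_rcu();
	printf("refcnt=%d\n", dev0.refcnt);	/* still 1 */
	return 0;
}
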
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 085727263812..e8762c73b170 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -378,8 +378,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 		head->next = clone;
 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 		skb_frag_list_init(head);
-		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-			plen += skb_shinfo(head)->frags[i].size;
+		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 		clone->len = clone->data_len = head->data_len - plen;
 		head->data_len -= clone->len;
 		head->len -= clone->len;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 343852e5c703..6f7824e1cea4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -130,14 +130,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
 
 int rawv6_mh_filter_register(mh_filter_t filter)
 {
-	rcu_assign_pointer(mh_filter, filter);
+	RCU_INIT_POINTER(mh_filter, filter);
 	return 0;
 }
 EXPORT_SYMBOL(rawv6_mh_filter_register);
 
 int rawv6_mh_filter_unregister(mh_filter_t filter)
 {
-	rcu_assign_pointer(mh_filter, NULL);
+	RCU_INIT_POINTER(mh_filter, NULL);
 	synchronize_rcu();
 	return 0;
 }
@@ -372,9 +372,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 	read_unlock(&raw_v6_hashinfo.lock);
 }
 
-static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
+	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
 	    skb_checksum_complete(skb)) {
 		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
@@ -542,8 +542,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 		goto out;
 
 	offset = rp->offset;
-	total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
-						     skb->data);
+	total_len = inet_sk(sk)->cork.base.length;
 	if (offset >= total_len - 1) {
 		err = -EINVAL;
 		ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7b954e2539d0..cc22099ac8b6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -464,8 +464,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		head->next = clone;
 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 		skb_frag_list_init(head);
-		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-			plen += skb_shinfo(head)->frags[i].size;
+		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 		clone->len = clone->data_len = head->data_len - plen;
 		head->data_len -= clone->len;
 		head->len -= clone->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb545edef6ea..57b82dc1ae91 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1086,11 +1086,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->dst.output = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev = idev;
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 00b15ac7a702..a7a18602a046 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
 		if (t == iter) {
-			rcu_assign_pointer(*tp, t->next);
+			RCU_INIT_POINTER(*tp, t->next);
 			break;
 		}
 	}
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
 {
 	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
 
-	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
-	rcu_assign_pointer(*tp, t);
+	RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+	RCU_INIT_POINTER(*tp, t);
 }
 
 static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -391,7 +391,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
 	p->addr = a->addr;
 	p->flags = a->flags;
 	t->prl_count++;
-	rcu_assign_pointer(t->prl, p);
+	RCU_INIT_POINTER(t->prl, p);
 out:
 	return err;
 }
@@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
 	if (dev == sitn->fb_tunnel_dev) {
-		rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
+		RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
 	} else {
 		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
 		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
@@ -1176,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 	if (!dev->tstats)
 		return -ENOMEM;
 	dev_hold(dev);
-	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+	RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
 	return 0;
 }
 
1182 1182
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ac838965ff34..5a0d6648bbbc 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -115,7 +115,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
 		& COOKIEMASK;
 }
 
-__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 		jiffies / (HZ * 60), mssind);
 }
 
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -152,7 +152,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_options_received tcp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct inet_request_sock *ireq;
 	struct inet6_request_sock *ireq6;
 	struct tcp_request_sock *treq;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7b8fc5794352..10b2b3165a1a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -114,7 +114,7 @@ static __inline__ __sum16 tcp_v6_check(int len,
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 }
 
-static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 					    ipv6_hdr(skb)->saddr.s6_addr32,
@@ -513,7 +513,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl6, opt);
+		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
 
@@ -761,7 +761,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 			const struct in6_addr *daddr, struct in6_addr *saddr,
-			struct tcphdr *th)
+			const struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
@@ -793,13 +793,14 @@ clear_hash_noput:
 }
 
 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			       struct sock *sk, struct request_sock *req,
-			       struct sk_buff *skb)
+			       const struct sock *sk,
+			       const struct request_sock *req,
+			       const struct sk_buff *skb)
 {
 	const struct in6_addr *saddr, *daddr;
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 
 	if (sk) {
 		saddr = &inet6_sk(sk)->saddr;
@@ -842,12 +843,12 @@ clear_hash_noput:
 	return 1;
 }
 
-static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
-	__u8 *hash_location = NULL;
+	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	u8 newhash[16];
 
@@ -978,9 +979,10 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 }
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-				 u32 ts, struct tcp_md5sig_key *key, int rst)
+				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 {
-	struct tcphdr *th = tcp_hdr(skb), *t1;
+	const struct tcphdr *th = tcp_hdr(skb);
+	struct tcphdr *t1;
 	struct sk_buff *buff;
 	struct flowi6 fl6;
 	struct net *net = dev_net(skb_dst(skb)->dev);
@@ -1058,7 +1060,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ip6_xmit(ctl_sk, buff, &fl6, NULL);
+		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1070,7 +1072,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 
 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = 0, ack_seq = 0;
 	struct tcp_md5sig_key *key = NULL;
 
@@ -1091,13 +1093,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 			  (th->doff << 2);
 
-	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
 }
 
 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
-			    struct tcp_md5sig_key *key)
+			    struct tcp_md5sig_key *key, u8 tclass)
 {
-	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
+	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1107,7 +1109,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
+			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
+			tw->tw_tclass);
 
 	inet_twsk_put(tw);
 }
@@ -1116,7 +1119,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
-			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
+			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
 }
 
 
@@ -1160,7 +1163,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct request_sock *req;
 	struct inet6_request_sock *treq;
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1608,7 +1611,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1630,7 +1633,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		 * the new socket..
 		 */
 		if(nsk != sk) {
-			sock_rps_save_rxhash(nsk, skb->rxhash);
+			sock_rps_save_rxhash(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb))
 				goto reset;
 			if (opt_skb)
@@ -1638,7 +1641,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			return 0;
 		}
 	} else
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 		goto reset;
@@ -1688,7 +1691,7 @@ ipv6_pktoptions:
 
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
-	struct tcphdr *th;
+	const struct tcphdr *th;
 	const struct ipv6hdr *hdr;
 	struct sock *sk;
 	int ret;
@@ -1722,7 +1725,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 				    skb->len - th->doff*4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
 	TCP_SKB_CB(skb)->when = 0;
-	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
+	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1856,8 +1859,8 @@ static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
 
 static void *tcp_v6_tw_get_peer(struct sock *sk)
 {
-	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
-	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+	const struct inet_timewait_sock *tw = inet_twsk(sk);
 
 	if (tw->tw_family == AF_INET)
 		return tcp_v4_tw_get_peer(sk);
@@ -2012,7 +2015,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-			 struct sock *sk, struct request_sock *req, int i, int uid)
+			 const struct sock *sk, struct request_sock *req, int i, int uid)
 {
 	int ttd = req->expires - jiffies;
 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -2048,10 +2051,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	__u16 destp, srcp;
 	int timer_active;
 	unsigned long timer_expires;
-	struct inet_sock *inet = inet_sk(sp);
-	struct tcp_sock *tp = tcp_sk(sp);
+	const struct inet_sock *inet = inet_sk(sp);
+	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	struct ipv6_pinfo *np = inet6_sk(sp);
+	const struct ipv6_pinfo *np = inet6_sk(sp);
 
 	dest = &np->daddr;
 	src = &np->rcv_saddr;
@@ -2103,7 +2106,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 {
 	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
-	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
+	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
 	int ttd = tw->tw_ttd - jiffies;
 
 	if (ttd < 0)
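With ip6_xmit() now taking a traffic-class argument, timewait ACKs can echo tw->tw_tclass while RSTs and request-socket ACKs still pass 0. A hedged userspace sketch of where such a value originates, using only the standard IPV6_TCLASS socket option (the value is illustrative):

	int tclass = 0x28;	/* illustrative DSCP/tclass value */

	setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass, sizeof(tclass));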
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bb95e8e1c6f9..f4ca0a5b3457 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 	int is_udplite = IS_UDPLITE(sk);
 
 	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto drop;
@@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_dereference_raw(sk->sk_filter)) {
+	if (rcu_access_pointer(sk->sk_filter)) {
 		if (udp_lib_checksum_complete(skb))
 			goto drop;
 	}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 49a91c5f5623..faae41737fca 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -28,6 +28,43 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 
 EXPORT_SYMBOL(xfrm6_find_1stfragopt);
 
+static int xfrm6_local_dontfrag(struct sk_buff *skb)
+{
+	int proto;
+	struct sock *sk = skb->sk;
+
+	if (sk) {
+		proto = sk->sk_protocol;
+
+		if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
+			return inet6_sk(sk)->dontfrag;
+	}
+
+	return 0;
+}
+
+static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
+{
+	struct flowi6 fl6;
+	struct sock *sk = skb->sk;
+
+	fl6.flowi6_oif = sk->sk_bound_dev_if;
+	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+	ipv6_local_rxpmtu(sk, &fl6, mtu);
+}
+
+static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+{
+	struct flowi6 fl6;
+	struct sock *sk = skb->sk;
+
+	fl6.fl6_dport = inet_sk(sk)->inet_dport;
+	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+	ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
+}
+
 static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 {
 	int mtu, ret = 0;
@@ -39,7 +76,13 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 
 	if (!skb->local_df && skb->len > mtu) {
 		skb->dev = dst->dev;
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+
+		if (xfrm6_local_dontfrag(skb))
+			xfrm6_local_rxpmtu(skb, mtu);
+		else if (skb->sk)
+			xfrm6_local_error(skb, mtu);
+		else
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		ret = -EMSGSIZE;
 	}
 
@@ -93,9 +136,18 @@ static int __xfrm6_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct xfrm_state *x = dst->xfrm;
+	int mtu = ip6_skb_dst_mtu(skb);
+
+	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+		xfrm6_local_rxpmtu(skb, mtu);
+		return -EMSGSIZE;
+	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
+		xfrm6_local_error(skb, mtu);
+		return -EMSGSIZE;
+	}
 
 	if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
-	    ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+	    ((skb->len > mtu && !skb_is_gso(skb)) ||
 	     dst_allfrag(skb_dst(skb)))) {
 		return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
 	}
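The new xfrm6_local_rxpmtu()/xfrm6_local_error() branches give IPsec output the same local notification behaviour plain IPv6 output already has for sockets that forbid fragmentation. A sketch of the userspace side this serves, using the standard RFC 3542 socket options:

	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));
	/* oversized datagrams now draw EMSGSIZE, or an IPV6_PATHMTU
	 * ancillary message on recvmsg(), instead of being fragmented */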
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b3cc8b3989a9..253695d43fd9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -551,7 +551,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
 	 */
 	tty->closing = 1;
 	if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE)
-		tty_wait_until_sent(tty, self->closing_wait);
+		tty_wait_until_sent_from_close(tty, self->closing_wait);
 
 	ircomm_tty_shutdown(self);
 
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index e8d5f4405d68..d14152e866d9 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = {
 	.ndo_open               = irlan_eth_open,
 	.ndo_stop               = irlan_eth_close,
 	.ndo_start_xmit         = irlan_eth_xmit,
-	.ndo_set_multicast_list = irlan_eth_set_multicast_list,
+	.ndo_set_rx_mode        = irlan_eth_set_multicast_list,
 	.ndo_change_mtu         = eth_change_mtu,
 	.ndo_validate_addr      = eth_validate_addr,
 };
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39e..497fbe732def 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
 config IUCV
-	tristate "IUCV support (S390 - z/VM only)"
 	depends on S390
+	def_tristate y if S390
+	prompt "IUCV support (S390 - z/VM only)"
 	help
 	  Select this option if you want to use inter-user communication
 	  under VM or VIF. If you run on z/VM, say "Y" to enable a fast
 	  communication link between VM guests.
 
 config AFIUCV
-	tristate "AF_IUCV support (S390 - z/VM only)"
-	depends on IUCV
+	depends on S390
+	def_tristate m if QETH_L3 || IUCV
+	prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
 	help
-	  Select this option if you want to use inter-user communication under
-	  VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
-	  communication link between VM guests.
+	  Select this option if you want to use AF_IUCV socket applications
+	  based on z/VM inter-user communication vehicle or based on
+	  HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e434d03..274d150320c0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
 #include <asm/cpcmd.h>
 #include <linux/kmod.h>
 
-#include <net/iucv/iucv.h>
 #include <net/iucv/af_iucv.h>
 
-#define VERSION "1.1"
+#define VERSION "1.2"
 
 static char iucv_userid[80];
 
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
 	.obj_size	= sizeof(struct iucv_sock),
 };
 
+static struct iucv_interface *pr_iucv;
+
 /* special AF_IUCV IPRM messages */
 static const u8 iprm_shutdown[8] =
 	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+	struct packet_type *pt, struct net_device *orig_dev);
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+		   struct sk_buff *skb, u8 flags);
+static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
 	case IUCV_CLOSING:
 	case IUCV_CONNECTED:
 		if (iucv->path) {
-			err = iucv_path_sever(iucv->path, NULL);
+			err = pr_iucv->path_sever(iucv->path, NULL);
 			iucv_path_free(iucv->path);
 			iucv->path = NULL;
 		}
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
 static struct device_driver af_iucv_driver = {
 	.owner = THIS_MODULE,
 	.name = "afiucv",
-	.bus  = &iucv_bus,
+	.bus  = NULL,
 	.pm   = &afiucv_pm_ops,
 };
 
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
 
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
-	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+	if (iucv->transport == AF_IUCV_TRANS_IUCV)
+		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+	else
+		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
+			(atomic_read(&iucv->pendings) <= 0));
 }
 
 /**
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 	rcu_read_unlock();
 }
 
+/**
+ * afiucv_hs_send() - send a message through HiperSockets transport
+ */
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+		   struct sk_buff *skb, u8 flags)
+{
+	struct net *net = sock_net(sock);
+	struct iucv_sock *iucv = iucv_sk(sock);
+	struct af_iucv_trans_hdr *phs_hdr;
+	struct sk_buff *nskb;
+	int err, confirm_recv = 0;
+
+	memset(skb->head, 0, ETH_HLEN);
+	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
+					sizeof(struct af_iucv_trans_hdr));
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
+
+	phs_hdr->magic = ETH_P_AF_IUCV;
+	phs_hdr->version = 1;
+	phs_hdr->flags = flags;
+	if (flags == AF_IUCV_FLAG_SYN)
+		phs_hdr->window = iucv->msglimit;
+	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
+		confirm_recv = atomic_read(&iucv->msg_recv);
+		phs_hdr->window = confirm_recv;
+		if (confirm_recv)
+			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
+	}
+	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
+	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
+	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
+	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
+	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
+	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
+	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
+	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
+	if (imsg)
+		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+	rcu_read_lock();
+	skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
+	rcu_read_unlock();
+	if (!skb->dev)
+		return -ENODEV;
+	if (!(skb->dev->flags & IFF_UP))
+		return -ENETDOWN;
+	if (skb->len > skb->dev->mtu) {
+		if (sock->sk_type == SOCK_SEQPACKET)
+			return -EMSGSIZE;
+		else
+			skb_trim(skb, skb->dev->mtu);
+	}
+	skb->protocol = ETH_P_AF_IUCV;
+	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+	skb_queue_tail(&iucv->send_skb_q, nskb);
+	err = dev_queue_xmit(skb);
+	if (err) {
+		skb_unlink(nskb, &iucv->send_skb_q);
+		kfree_skb(nskb);
+	} else {
+		atomic_sub(confirm_recv, &iucv->msg_recv);
+		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+	}
+	return err;
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
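afiucv_hs_send() implements credit-based flow control: each data frame consumes one credit against the peer's advertised window, and WIN frames hand credits back. A compressed sketch of the send-side accounting using the names this patch introduces (wait_for_win() is a hypothetical stand-in for the real blocking logic):

	if (atomic_read(&iucv->msg_sent) >= iucv->msglimit_peer)
		wait_for_win(sk);		/* hypothetical helper */
	atomic_inc(&iucv->msg_sent);		/* consume one credit */
	err = afiucv_hs_send(&txmsg, sk, skb, 0);
	if (err)
		atomic_dec(&iucv->msg_sent);	/* roll the credit back */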
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
 	unsigned char user_data[16];
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned long timeo;
+	int err, blen;
+	struct sk_buff *skb;
 
 	iucv_sock_clear_timer(sk);
 	lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
 		break;
 
 	case IUCV_CONNECTED:
+		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+			/* send fin */
+			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+			skb = sock_alloc_send_skb(sk, blen, 1, &err);
+			if (skb) {
+				skb_reserve(skb,
+					sizeof(struct af_iucv_trans_hdr) +
+					ETH_HLEN);
+				err = afiucv_hs_send(NULL, sk, skb,
+						AF_IUCV_FLAG_FIN);
+			}
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
+		}
 	case IUCV_DISCONN:
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
 			low_nmcpy(user_data, iucv->src_name);
 			high_nmcpy(user_data, iucv->dst_name);
 			ASCEBC(user_data, sizeof(user_data));
-			iucv_path_sever(iucv->path, user_data);
+			pr_iucv->path_sever(iucv->path, user_data);
 			iucv_path_free(iucv->path);
 			iucv->path = NULL;
 		}
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 {
 	struct sock *sk;
+	struct iucv_sock *iucv;
 
 	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
 	if (!sk)
 		return NULL;
+	iucv = iucv_sk(sk);
 
 	sock_init_data(sock, sk);
-	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
-	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
-	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
-	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
-	spin_lock_init(&iucv_sk(sk)->message_q.lock);
-	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
-	iucv_sk(sk)->send_tag = 0;
-	iucv_sk(sk)->flags = 0;
-	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
-	iucv_sk(sk)->path = NULL;
-	memset(&iucv_sk(sk)->src_user_id , 0, 32);
+	INIT_LIST_HEAD(&iucv->accept_q);
+	spin_lock_init(&iucv->accept_q_lock);
+	skb_queue_head_init(&iucv->send_skb_q);
+	INIT_LIST_HEAD(&iucv->message_q.list);
+	spin_lock_init(&iucv->message_q.lock);
+	skb_queue_head_init(&iucv->backlog_skb_q);
+	iucv->send_tag = 0;
+	atomic_set(&iucv->pendings, 0);
+	iucv->flags = 0;
+	iucv->msglimit = 0;
+	atomic_set(&iucv->msg_sent, 0);
+	atomic_set(&iucv->msg_recv, 0);
+	iucv->path = NULL;
+	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
+	memset(&iucv->src_user_id , 0, 32);
+	if (pr_iucv)
+		iucv->transport = AF_IUCV_TRANS_IUCV;
+	else
+		iucv->transport = AF_IUCV_TRANS_HIPER;
 
 	sk->sk_destruct = iucv_sock_destruct;
 	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv;
-	int err;
+	int err = 0;
+	struct net_device *dev;
+	char uid[9];
 
 	/* Verify the input sockaddr */
 	if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 		err = -EADDRINUSE;
 		goto done_unlock;
 	}
-	if (iucv->path) {
-		err = 0;
+	if (iucv->path)
 		goto done_unlock;
-	}
 
 	/* Bind the socket */
-	memcpy(iucv->src_name, sa->siucv_name, 8);
 
-	/* Copy the user id */
-	memcpy(iucv->src_user_id, iucv_userid, 8);
-	sk->sk_state = IUCV_BOUND;
-	err = 0;
+	if (pr_iucv)
+		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
+			goto vm_bind; /* VM IUCV transport */
 
+	/* try hiper transport */
+	memcpy(uid, sa->siucv_user_id, sizeof(uid));
+	ASCEBC(uid, 8);
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		if (!memcmp(dev->perm_addr, uid, 8)) {
+			memcpy(iucv->src_name, sa->siucv_name, 8);
+			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+			sock->sk->sk_bound_dev_if = dev->ifindex;
+			sk->sk_state = IUCV_BOUND;
+			iucv->transport = AF_IUCV_TRANS_HIPER;
+			if (!iucv->msglimit)
+				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
+			rcu_read_unlock();
+			goto done_unlock;
+		}
+	}
+	rcu_read_unlock();
+vm_bind:
+	if (pr_iucv) {
+		/* use local userid for backward compat */
+		memcpy(iucv->src_name, sa->siucv_name, 8);
+		memcpy(iucv->src_user_id, iucv_userid, 8);
+		sk->sk_state = IUCV_BOUND;
+		iucv->transport = AF_IUCV_TRANS_IUCV;
+		if (!iucv->msglimit)
+			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+		goto done_unlock;
+	}
+	/* found no dev to bind */
+	err = -ENODEV;
 done_unlock:
 	/* Release the socket list lock */
 	write_unlock_bh(&iucv_sk_list.lock);
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
 
 	memcpy(&iucv->src_name, name, 8);
 
+	if (!iucv->msglimit)
+		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+
 	return err;
 }
 
-/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
-			     int alen, int flags)
+static int afiucv_hs_connect(struct socket *sock)
 {
-	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 	struct sock *sk = sock->sk;
-	struct iucv_sock *iucv;
-	unsigned char user_data[16];
-	int err;
-
-	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
-		return -EINVAL;
-
-	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
-		return -EBADFD;
-
-	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
-		return -EINVAL;
+	struct sk_buff *skb;
+	int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+	int err = 0;
 
-	if (sk->sk_state == IUCV_OPEN) {
-		err = iucv_sock_autobind(sk);
-		if (unlikely(err))
-			return err;
+	/* send syn */
+	skb = sock_alloc_send_skb(sk, blen, 1, &err);
+	if (!skb) {
+		err = -ENOMEM;
+		goto done;
 	}
+	skb->dev = NULL;
+	skb_reserve(skb, blen);
+	err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
+done:
+	return err;
+}
 
-	lock_sock(sk);
-
-	/* Set the destination information */
-	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
-	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
+static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
+{
+	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+	struct sock *sk = sock->sk;
+	struct iucv_sock *iucv = iucv_sk(sk);
+	unsigned char user_data[16];
+	int err;
 
 	high_nmcpy(user_data, sa->siucv_name);
-	low_nmcpy(user_data, iucv_sk(sk)->src_name);
+	low_nmcpy(user_data, iucv->src_name);
 	ASCEBC(user_data, sizeof(user_data));
 
-	iucv = iucv_sk(sk);
 	/* Create path. */
 	iucv->path = iucv_path_alloc(iucv->msglimit,
 				     IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 		err = -ENOMEM;
 		goto done;
 	}
-	err = iucv_path_connect(iucv->path, &af_iucv_handler,
-				sa->siucv_user_id, NULL, user_data, sk);
+	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
+				    sa->siucv_user_id, NULL, user_data,
+				    sk);
 	if (err) {
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 			err = -ECONNREFUSED;
 			break;
 		}
-		goto done;
 	}
+done:
+	return err;
+}
 
-	if (sk->sk_state != IUCV_CONNECTED) {
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+			     int alen, int flags)
+{
+	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+	struct sock *sk = sock->sk;
+	struct iucv_sock *iucv = iucv_sk(sk);
+	int err;
+
+	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+		return -EINVAL;
+
+	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+		return -EBADFD;
+
+	if (sk->sk_state == IUCV_OPEN &&
+	    iucv->transport == AF_IUCV_TRANS_HIPER)
+		return -EBADFD; /* explicit bind required */
+
+	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
+		return -EINVAL;
+
+	if (sk->sk_state == IUCV_OPEN) {
+		err = iucv_sock_autobind(sk);
+		if (unlikely(err))
+			return err;
+	}
+
+	lock_sock(sk);
+
+	/* Set the destination information */
+	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
+	memcpy(iucv->dst_name, sa->siucv_name, 8);
+
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		err = afiucv_hs_connect(sock);
+	else
+		err = afiucv_path_connect(sock, addr);
+	if (err)
+		goto done;
+
+	if (sk->sk_state != IUCV_CONNECTED)
 		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 							    IUCV_DISCONN),
 				     sock_sndtimeo(sk, flags & O_NONBLOCK));
-	}
 
-	if (sk->sk_state == IUCV_DISCONN) {
+	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
 		err = -ECONNREFUSED;
-	}
 
-	if (err) {
-		iucv_path_sever(iucv->path, NULL);
+	if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
+		pr_iucv->path_sever(iucv->path, NULL);
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
 	}
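Since the transport is now chosen at bind time, HiperSockets peers must bind() to a device address before connect(); only z/VM IUCV sockets may still autobind. An illustrative userspace fragment, assuming a userspace copy of struct sockaddr_iucv as defined in the kernel's af_iucv.h (the ids are placeholders):

	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	memcpy(sa.siucv_user_id, "LNXUSER1", 8);	/* blank-padded user id */
	memcpy(sa.siucv_name,    "APPSRV  ", 8);	/* application name */
	connect(fd, (struct sockaddr *)&sa, sizeof(sa));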
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
 {
 	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
 	struct sock *sk = sock->sk;
+	struct iucv_sock *iucv = iucv_sk(sk);
 
 	addr->sa_family = AF_IUCV;
 	*len = sizeof(struct sockaddr_iucv);
 
 	if (peer) {
-		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
-		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
+		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
+		memcpy(siucv->siucv_name, iucv->dst_name, 8);
 	} else {
-		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
-		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
+		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
+		memcpy(siucv->siucv_name, iucv->src_name, 8);
 	}
 	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
 	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
-	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
+	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
 
 	return 0;
 }
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
 
 	memcpy(prmdata, (void *) skb->data, skb->len);
 	prmdata[7] = 0xff - (u8) skb->len;
-	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
-				 (void *) prmdata, 8);
+	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
+				 (void *) prmdata, 8);
 }
 
877 1058
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		skb = sock_alloc_send_skb(sk,
+			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
+			noblock, &err);
+	else
+		skb = sock_alloc_send_skb(sk, len, noblock, &err);
 	if (!skb)
 		goto out;
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 		err = -EFAULT;
 		goto fail;
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
 	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+		atomic_inc(&iucv->msg_sent);
+		err = afiucv_hs_send(&txmsg, sk, skb, 0);
+		if (err) {
+			atomic_dec(&iucv->msg_sent);
+			goto fail;
+		}
+		goto release;
+	}
 	skb_queue_tail(&iucv->send_skb_q, skb);
 
 	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 			/* this error should never happen since the
 			 * IUCV_IPRMDATA path flag is set... sever path */
 			if (err == 0x15) {
-				iucv_path_sever(iucv->path, NULL);
+				pr_iucv->path_sever(iucv->path, NULL);
 				skb_unlink(skb, &iucv->send_skb_q);
 				err = -EPIPE;
 				goto fail;
 			}
 	} else
-		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-					(void *) skb->data, skb->len);
+		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
+					(void *) skb->data, skb->len);
 	if (err) {
 		if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto fail;
 	}
 
+release:
 	release_sock(sk);
 	return len;
 
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 			skb->len = 0;
 		}
 	} else {
-		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
-					  skb->data, len, NULL);
+		rc = pr_iucv->message_receive(path, msg,
+					      msg->flags & IUCV_IPRMDATA,
+					      skb->data, len, NULL);
 		if (rc) {
 			kfree_skb(skb);
 			return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 		kfree_skb(skb);
 		skb = NULL;
 		if (rc) {
-			iucv_path_sever(path, NULL);
+			pr_iucv->path_sever(path, NULL);
 			return;
 		}
 		skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned int copied, rlen;
-	struct sk_buff *skb, *rskb, *cskb;
+	struct sk_buff *skb, *rskb, *cskb, *sskb;
+	int blen;
 	int err = 0;
 
 	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	copied = min_t(unsigned int, rlen, len);
 
 	cskb = skb;
-	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		kfree_skb(skb);
+		atomic_inc(&iucv->msg_recv);
 
 		/* Queue backlog skbs */
 		spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (skb_queue_empty(&iucv->backlog_skb_q)) {
 			if (!list_empty(&iucv->message_q.list))
 				iucv_process_message_q(sk);
+			if (atomic_read(&iucv->msg_recv) >=
+							iucv->msglimit / 2) {
+				/* send WIN to peer */
+				blen = sizeof(struct af_iucv_trans_hdr) +
+					ETH_HLEN;
+				sskb = sock_alloc_send_skb(sk, blen, 1, &err);
+				if (sskb) {
+					skb_reserve(sskb,
+						sizeof(struct af_iucv_trans_hdr)
+						+ ETH_HLEN);
+					err = afiucv_hs_send(NULL, sk, sskb,
+							AF_IUCV_FLAG_WIN);
+				}
+				if (err) {
+					sk->sk_state = IUCV_DISCONN;
+					sk->sk_state_change(sk);
+				}
+			}
 		}
 		spin_unlock_bh(&iucv->message_q.lock);
 	}
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
 		txmsg.class = 0;
 		txmsg.tag = 0;
-		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
-					(void *) iprm_shutdown, 8);
+		err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
+					0, (void *) iprm_shutdown, 8);
 		if (err) {
 			switch (err) {
 			case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 	}
 
 	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
-		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
+		err = pr_iucv->path_quiesce(iucv->path, NULL);
 		if (err)
 			err = -ENOTCONN;
 
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
 
 	/* Unregister with IUCV base support */
 	if (iucv_sk(sk)->path) {
-		iucv_path_sever(iucv_sk(sk)->path, NULL);
+		pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
 		iucv_path_free(iucv_sk(sk)->path);
 		iucv_sk(sk)->path = NULL;
 	}
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	high_nmcpy(user_data, iucv->dst_name);
 	ASCEBC(user_data, sizeof(user_data));
 	if (sk->sk_state != IUCV_LISTEN) {
-		err = iucv_path_sever(path, user_data);
+		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
 		goto fail;
 	}
 
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(sk)) {
-		err = iucv_path_sever(path, user_data);
+		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
 		goto fail;
 	}
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* Create the new socket */
 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
 	if (!nsk) {
-		err = iucv_path_sever(path, user_data);
+		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
 		goto fail;
 	}
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* set message limit for path based on msglimit of accepting socket */
 	niucv->msglimit = iucv->msglimit;
 	path->msglim = iucv->msglimit;
-	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
+	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
 	if (err) {
-		err = iucv_path_sever(path, user_data);
+		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
 		iucv_sock_kill(nsk);
 		goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 	int len;
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
-		iucv_message_reject(path, msg);
+		pr_iucv->message_reject(path, msg);
 		return;
 	}
 
@@ -1600,7 +1819,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 		goto save_message;
 
 	len = atomic_read(&sk->sk_rmem_alloc);
-	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
+	len += SKB_TRUESIZE(iucv_msg_length(msg));
 	if (len > sk->sk_rcvbuf)
 		goto save_message;
 
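The receive-buffer check now charges the true cost of an skb, not just its payload. The macro it switches to is, roughly (see include/linux/skbuff.h):

	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))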
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1692 bh_unlock_sock(sk); 1911 bh_unlock_sock(sk);
1693} 1912}
1694 1913
1914/***************** HiperSockets transport callbacks ********************/
1915static void afiucv_swap_src_dest(struct sk_buff *skb)
1916{
1917 struct af_iucv_trans_hdr *trans_hdr =
1918 (struct af_iucv_trans_hdr *)skb->data;
1919 char tmpID[8];
1920 char tmpName[8];
1921
1922 ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1923 ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1924 ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1925 ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1926 memcpy(tmpID, trans_hdr->srcUserID, 8);
1927 memcpy(tmpName, trans_hdr->srcAppName, 8);
1928 memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1929 memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1930 memcpy(trans_hdr->destUserID, tmpID, 8);
1931 memcpy(trans_hdr->destAppName, tmpName, 8);
1932 skb_push(skb, ETH_HLEN);
1933 memset(skb->data, 0, ETH_HLEN);
1934}
1935
1936/**
1937 * afiucv_hs_callback_syn - react on received SYN
1938 **/
1939static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1940{
1941 struct sock *nsk;
1942 struct iucv_sock *iucv, *niucv;
1943 struct af_iucv_trans_hdr *trans_hdr;
1944 int err;
1945
1946 iucv = iucv_sk(sk);
1947 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1948 if (!iucv) {
1949 /* no sock - connection refused */
1950 afiucv_swap_src_dest(skb);
1951 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1952 err = dev_queue_xmit(skb);
1953 goto out;
1954 }
1955
1956 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1957 bh_lock_sock(sk);
1958 if ((sk->sk_state != IUCV_LISTEN) ||
1959 sk_acceptq_is_full(sk) ||
1960 !nsk) {
1961 /* error on server socket - connection refused */
1962 if (nsk)
1963 sk_free(nsk);
1964 afiucv_swap_src_dest(skb);
1965 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1966 err = dev_queue_xmit(skb);
1967 bh_unlock_sock(sk);
1968 goto out;
1969 }
1970
1971 niucv = iucv_sk(nsk);
1972 iucv_sock_init(nsk, sk);
1973 niucv->transport = AF_IUCV_TRANS_HIPER;
1974 niucv->msglimit = iucv->msglimit;
1975 if (!trans_hdr->window)
1976 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1977 else
1978 niucv->msglimit_peer = trans_hdr->window;
1979 memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1980 memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1981 memcpy(niucv->src_name, iucv->src_name, 8);
1982 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1983 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1984 afiucv_swap_src_dest(skb);
1985 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1986 trans_hdr->window = niucv->msglimit;
1987 /* if the receiver acks the xmit, the connection is established */
1988 err = dev_queue_xmit(skb);
1989 if (!err) {
1990 iucv_accept_enqueue(sk, nsk);
1991 nsk->sk_state = IUCV_CONNECTED;
1992 sk->sk_data_ready(sk, 1);
1993 } else
1994 iucv_sock_kill(nsk);
1995 bh_unlock_sock(sk);
1996
1997out:
1998 return NET_RX_SUCCESS;
1999}
2000
2001/**
2002 * afiucv_hs_callback_synack() - react to a received SYN-ACK
2003 **/
2004static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2005{
2006 struct iucv_sock *iucv = iucv_sk(sk);
2007 struct af_iucv_trans_hdr *trans_hdr =
2008 (struct af_iucv_trans_hdr *)skb->data;
2009
2010 if (!iucv)
2011 goto out;
2012 if (sk->sk_state != IUCV_BOUND)
2013 goto out;
2014 bh_lock_sock(sk);
2015 iucv->msglimit_peer = trans_hdr->window;
2016 sk->sk_state = IUCV_CONNECTED;
2017 sk->sk_state_change(sk);
2018 bh_unlock_sock(sk);
2019out:
2020 kfree_skb(skb);
2021 return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_synfin() - react to a received SYN|FIN
2026 **/
2027static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2028{
2029 struct iucv_sock *iucv = iucv_sk(sk);
2030
2031 if (!iucv)
2032 goto out;
2033 if (sk->sk_state != IUCV_BOUND)
2034 goto out;
2035 bh_lock_sock(sk);
2036 sk->sk_state = IUCV_DISCONN;
2037 sk->sk_state_change(sk);
2038 bh_unlock_sock(sk);
2039out:
2040 kfree_skb(skb);
2041 return NET_RX_SUCCESS;
2042}
2043
2044/**
2045 * afiucv_hs_callback_fin() - react to a received FIN
2046 **/
2047static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2048{
2049 struct iucv_sock *iucv = iucv_sk(sk);
2050
2051 /* other end of connection closed */
2052 if (iucv) {
2053 bh_lock_sock(sk);
2054 if (!list_empty(&iucv->accept_q))
2055 sk->sk_state = IUCV_SEVERED;
2056 else
2057 sk->sk_state = IUCV_DISCONN;
2058 sk->sk_state_change(sk);
2059 bh_unlock_sock(sk);
2060 }
2061 kfree_skb(skb);
2062 return NET_RX_SUCCESS;
2063}
2064
2065/**
2066 * afiucv_hs_callback_win() - react to a received WIN
2067 **/
2068static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2069{
2070 struct iucv_sock *iucv = iucv_sk(sk);
2071 struct af_iucv_trans_hdr *trans_hdr =
2072 (struct af_iucv_trans_hdr *)skb->data;
2073
2074 if (!iucv)
2075 return NET_RX_SUCCESS;
2076
2077 if (sk->sk_state != IUCV_CONNECTED)
2078 return NET_RX_SUCCESS;
2079
2080 atomic_sub(trans_hdr->window, &iucv->msg_sent);
2081 iucv_sock_wake_msglim(sk);
2082 return NET_RX_SUCCESS;
2083}
2084
2085/**
2086 * afiucv_hs_callback_rx() - react to received data
2087 **/
2088static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2089{
2090 struct iucv_sock *iucv = iucv_sk(sk);
2091
2092 if (!iucv) {
2093 kfree_skb(skb);
2094 return NET_RX_SUCCESS;
2095 }
2096
2097 if (sk->sk_state != IUCV_CONNECTED) {
2098 kfree_skb(skb);
2099 return NET_RX_SUCCESS;
2100 }
2101
2102 /* drop empty frames, strip the transport header, then queue the payload */
2103 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2104 kfree_skb(skb);
2105 return NET_RX_SUCCESS;
2106 }
2107 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2108 skb_reset_transport_header(skb);
2109 skb_reset_network_header(skb);
2110 spin_lock(&iucv->message_q.lock);
2111 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2112 if (sock_queue_rcv_skb(sk, skb)) {
2113 /* handle rcv queue full */
2114 skb_queue_tail(&iucv->backlog_skb_q, skb);
2115 }
2116 } else
2117 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2118 spin_unlock(&iucv->message_q.lock);
2119 return NET_RX_SUCCESS;
2120}
2121
2122/**
2123 * afiucv_hs_rcv() - base function for data arriving through the
2124 * HiperSockets transport
2125 * called from the netif RX softirq
2126 **/
2127static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2128 struct packet_type *pt, struct net_device *orig_dev)
2129{
2130 struct hlist_node *node;
2131 struct sock *sk;
2132 struct iucv_sock *iucv;
2133 struct af_iucv_trans_hdr *trans_hdr;
2134 char nullstring[8];
2135 int err = 0;
2136
2137 skb_pull(skb, ETH_HLEN);
2138 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2139 EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2140 EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2141 EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2142 EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2143 memset(nullstring, 0, sizeof(nullstring));
2144 iucv = NULL;
2145 sk = NULL;
2146 read_lock(&iucv_sk_list.lock);
2147 sk_for_each(sk, node, &iucv_sk_list.head) {
2148 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2149 if ((!memcmp(&iucv_sk(sk)->src_name,
2150 trans_hdr->destAppName, 8)) &&
2151 (!memcmp(&iucv_sk(sk)->src_user_id,
2152 trans_hdr->destUserID, 8)) &&
2153 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2154 (!memcmp(&iucv_sk(sk)->dst_user_id,
2155 nullstring, 8))) {
2156 iucv = iucv_sk(sk);
2157 break;
2158 }
2159 } else {
2160 if ((!memcmp(&iucv_sk(sk)->src_name,
2161 trans_hdr->destAppName, 8)) &&
2162 (!memcmp(&iucv_sk(sk)->src_user_id,
2163 trans_hdr->destUserID, 8)) &&
2164 (!memcmp(&iucv_sk(sk)->dst_name,
2165 trans_hdr->srcAppName, 8)) &&
2166 (!memcmp(&iucv_sk(sk)->dst_user_id,
2167 trans_hdr->srcUserID, 8))) {
2168 iucv = iucv_sk(sk);
2169 break;
2170 }
2171 }
2172 }
2173 read_unlock(&iucv_sk_list.lock);
2174 if (!iucv)
2175 sk = NULL;
2176
2177 /* no matching sock found:
2178  * how should we respond without a sock?
2179  * 1) send without a sock and skip send-rc checking?
2180  * 2) introduce a default sock to handle these cases?
2181  *
2182  * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2183  * data -> send FIN
2184  * SYN|ACK, SYN|FIN, FIN -> no action? */
2185
2186 switch (trans_hdr->flags) {
2187 case AF_IUCV_FLAG_SYN:
2188 /* connect request */
2189 err = afiucv_hs_callback_syn(sk, skb);
2190 break;
2191 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2192 /* connect request confirmed */
2193 err = afiucv_hs_callback_synack(sk, skb);
2194 break;
2195 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2196 /* connect request refused */
2197 err = afiucv_hs_callback_synfin(sk, skb);
2198 break;
2199 case (AF_IUCV_FLAG_FIN):
2200 /* close request */
2201 err = afiucv_hs_callback_fin(sk, skb);
2202 break;
2203 case (AF_IUCV_FLAG_WIN):
2204 err = afiucv_hs_callback_win(sk, skb);
2205 if (skb->len > sizeof(struct af_iucv_trans_hdr))
2206 err = afiucv_hs_callback_rx(sk, skb);
2207 else
2208 kfree_skb(skb); /* skbs are freed with kfree_skb(), not kfree() */
2209 break;
2210 case 0:
2211 /* plain data frame */
2212 err = afiucv_hs_callback_rx(sk, skb);
2213 break;
2214 default:
2215 ;
2216 }
2217
2218 return err;
2219}
2220
2221/**
2222 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2223 * transport
2224 **/
2225static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2226 enum iucv_tx_notify n)
2227{
2228 struct sock *isk = skb->sk;
2229 struct sock *sk = NULL;
2230 struct iucv_sock *iucv = NULL;
2231 struct sk_buff_head *list;
2232 struct sk_buff *list_skb;
2233 struct sk_buff *this = NULL;
2234 unsigned long flags;
2235 struct hlist_node *node;
2236
2237 read_lock(&iucv_sk_list.lock);
2238 sk_for_each(sk, node, &iucv_sk_list.head)
2239 if (sk == isk) {
2240 iucv = iucv_sk(sk);
2241 break;
2242 }
2243 read_unlock(&iucv_sk_list.lock);
2244
2245 if (!iucv)
2246 return;
2247
2248 bh_lock_sock(sk);
2249 list = &iucv->send_skb_q;
2250 list_skb = list->next;
2251 if (skb_queue_empty(list))
2252 goto out_unlock;
2253
2254 spin_lock_irqsave(&list->lock, flags);
2255 while (list_skb != (struct sk_buff *)list) {
2256 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2257 this = list_skb;
2258 switch (n) {
2259 case TX_NOTIFY_OK:
2260 __skb_unlink(this, list);
2261 iucv_sock_wake_msglim(sk);
2262 kfree_skb(this);
2263 break;
2264 case TX_NOTIFY_PENDING:
2265 atomic_inc(&iucv->pendings);
2266 break;
2267 case TX_NOTIFY_DELAYED_OK:
2268 __skb_unlink(this, list);
2269 atomic_dec(&iucv->pendings);
2270 if (atomic_read(&iucv->pendings) <= 0)
2271 iucv_sock_wake_msglim(sk);
2272 kfree_skb(this);
2273 break;
2274 case TX_NOTIFY_UNREACHABLE:
2275 case TX_NOTIFY_DELAYED_UNREACHABLE:
2276 case TX_NOTIFY_TPQFULL: /* not yet used */
2277 case TX_NOTIFY_GENERALERROR:
2278 case TX_NOTIFY_DELAYED_GENERALERROR:
2279 __skb_unlink(this, list);
2280 kfree_skb(this);
2281 if (!list_empty(&iucv->accept_q))
2282 sk->sk_state = IUCV_SEVERED;
2283 else
2284 sk->sk_state = IUCV_DISCONN;
2285 sk->sk_state_change(sk);
2286 break;
2287 }
2288 break;
2289 }
2290 list_skb = list_skb->next;
2291 }
2292 spin_unlock_irqrestore(&list->lock, flags);
2293
2294out_unlock:
2295 bh_unlock_sock(sk);
2296}
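afiucv_hs_callback_txnotify() above pairs the notified skb with the queued original by comparing skb_shinfo() pointers. A minimal sketch of why that works, assuming the driver hands back a clone of the transmitted skb: skb_clone() shares the data buffer, and skb_shinfo() lives at the end of that buffer, so clone and original resolve to the same pointer.

/* sketch: a clone and its original share one skb_shared_info */
struct sk_buff *orig = alloc_skb(128, GFP_ATOMIC);
struct sk_buff *clone = orig ? skb_clone(orig, GFP_ATOMIC) : NULL;

if (clone && skb_shinfo(clone) == skb_shinfo(orig)) {
	/* same data buffer, hence the same shared-info block at
	 * skb_end_pointer(); this is the identity txnotify relies on */
}
kfree_skb(clone);
kfree_skb(orig);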
1695static const struct proto_ops iucv_sock_ops = { 2297static const struct proto_ops iucv_sock_ops = {
1696 .family = PF_IUCV, 2298 .family = PF_IUCV,
1697 .owner = THIS_MODULE, 2299 .owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
1718 .create = iucv_sock_create, 2320 .create = iucv_sock_create,
1719}; 2321};
1720 2322
1721static int __init afiucv_init(void) 2323static struct packet_type iucv_packet_type = {
2324 .type = cpu_to_be16(ETH_P_AF_IUCV),
2325 .func = afiucv_hs_rcv,
2326};
2327
2328static int afiucv_iucv_init(void)
1722{ 2329{
1723 int err; 2330 int err;
1724 2331
1725 if (!MACHINE_IS_VM) { 2332 err = pr_iucv->iucv_register(&af_iucv_handler, 0);
1726 pr_err("The af_iucv module cannot be loaded"
1727 " without z/VM\n");
1728 err = -EPROTONOSUPPORT;
1729 goto out;
1730 }
1731 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1732 if (unlikely(err)) {
1733 WARN_ON(err);
1734 err = -EPROTONOSUPPORT;
1735 goto out;
1736 }
1737
1738 err = iucv_register(&af_iucv_handler, 0);
1739 if (err) 2333 if (err)
1740 goto out; 2334 goto out;
1741 err = proto_register(&iucv_proto, 0);
1742 if (err)
1743 goto out_iucv;
1744 err = sock_register(&iucv_sock_family_ops);
1745 if (err)
1746 goto out_proto;
1747 /* establish dummy device */ 2335 /* establish dummy device */
2336 af_iucv_driver.bus = pr_iucv->bus;
1748 err = driver_register(&af_iucv_driver); 2337 err = driver_register(&af_iucv_driver);
1749 if (err) 2338 if (err)
1750 goto out_sock; 2339 goto out_iucv;
1751 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 2340 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 if (!af_iucv_dev) { 2341 if (!af_iucv_dev) {
1753 err = -ENOMEM; 2342 err = -ENOMEM;
1754 goto out_driver; 2343 goto out_driver;
1755 } 2344 }
1756 dev_set_name(af_iucv_dev, "af_iucv"); 2345 dev_set_name(af_iucv_dev, "af_iucv");
1757 af_iucv_dev->bus = &iucv_bus; 2346 af_iucv_dev->bus = pr_iucv->bus;
1758 af_iucv_dev->parent = iucv_root; 2347 af_iucv_dev->parent = pr_iucv->root;
1759 af_iucv_dev->release = (void (*)(struct device *))kfree; 2348 af_iucv_dev->release = (void (*)(struct device *))kfree;
1760 af_iucv_dev->driver = &af_iucv_driver; 2349 af_iucv_dev->driver = &af_iucv_driver;
1761 err = device_register(af_iucv_dev); 2350 err = device_register(af_iucv_dev);
1762 if (err) 2351 if (err)
1763 goto out_driver; 2352 goto out_driver;
1764
1765 return 0; 2353 return 0;
1766 2354
1767out_driver: 2355out_driver:
1768 driver_unregister(&af_iucv_driver); 2356 driver_unregister(&af_iucv_driver);
2357out_iucv:
2358 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2359out:
2360 return err;
2361}
2362
2363static int __init afiucv_init(void)
2364{
2365 int err;
2366
2367 if (MACHINE_IS_VM) {
2368 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2369 if (unlikely(err)) {
2370 WARN_ON(err);
2371 err = -EPROTONOSUPPORT;
2372 goto out;
2373 }
2374
2375 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2376 if (!pr_iucv) {
2377 printk(KERN_WARNING "iucv_if lookup failed\n");
2378 memset(&iucv_userid, 0, sizeof(iucv_userid));
2379 }
2380 } else {
2381 memset(&iucv_userid, 0, sizeof(iucv_userid));
2382 pr_iucv = NULL;
2383 }
2384
2385 err = proto_register(&iucv_proto, 0);
2386 if (err)
2387 goto out;
2388 err = sock_register(&iucv_sock_family_ops);
2389 if (err)
2390 goto out_proto;
2391
2392 if (pr_iucv) {
2393 err = afiucv_iucv_init();
2394 if (err)
2395 goto out_sock;
2396 }
2397 dev_add_pack(&iucv_packet_type);
2398 return 0;
2399
1769out_sock: 2400out_sock:
1770 sock_unregister(PF_IUCV); 2401 sock_unregister(PF_IUCV);
1771out_proto: 2402out_proto:
1772 proto_unregister(&iucv_proto); 2403 proto_unregister(&iucv_proto);
1773out_iucv:
1774 iucv_unregister(&af_iucv_handler, 0);
1775out: 2404out:
2405 if (pr_iucv)
2406 symbol_put(iucv_if);
1776 return err; 2407 return err;
1777} 2408}
1778 2409
1779static void __exit afiucv_exit(void) 2410static void __exit afiucv_exit(void)
1780{ 2411{
1781 device_unregister(af_iucv_dev); 2412 if (pr_iucv) {
1782 driver_unregister(&af_iucv_driver); 2413 device_unregister(af_iucv_dev);
2414 driver_unregister(&af_iucv_driver);
2415 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2416 symbol_put(iucv_if);
2417 }
2418 dev_remove_pack(&iucv_packet_type);
1783 sock_unregister(PF_IUCV); 2419 sock_unregister(PF_IUCV);
1784 proto_unregister(&iucv_proto); 2420 proto_unregister(&iucv_proto);
1785 iucv_unregister(&af_iucv_handler, 0);
1786} 2421}
1787 2422
1788module_init(afiucv_init); 2423module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1793MODULE_VERSION(VERSION); 2428MODULE_VERSION(VERSION);
1794MODULE_LICENSE("GPL"); 2429MODULE_LICENSE("GPL");
1795MODULE_ALIAS_NETPROTO(PF_IUCV); 2430MODULE_ALIAS_NETPROTO(PF_IUCV);
2431
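For orientation, a hedged user-space sketch of the socket family this file implements; the sockaddr layout is an assumption modeled on include/net/iucv/af_iucv.h and must be checked against the installed headers:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32			/* assumed address-family number */
#endif

struct sockaddr_iucv {			/* assumed layout, see af_iucv.h */
	sa_family_t	siucv_family;
	unsigned short	siucv_port;	/* reserved, 0 */
	unsigned int	siucv_addr;	/* reserved, 0 */
	char		siucv_nodeid[8];  /* reserved, blanks */
	char		siucv_user_id[8]; /* own VM user id / HS host */
	char		siucv_name[8];	/* application name, blank padded */
};

static int iucv_listen_sock(const char *app)
{
	struct sockaddr_iucv sa;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.siucv_family = AF_IUCV;
	memset(sa.siucv_nodeid, ' ', 8);
	memset(sa.siucv_user_id, ' ', 8);
	memset(sa.siucv_name, ' ', 8);
	memcpy(sa.siucv_name, app, strnlen(app, 8));
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
	    listen(fd, 5) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

With the HiperSockets transport added above, the same socket API can now run over the ETH_P_AF_IUCV packet type instead of requiring a z/VM IUCV path.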
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a3808aa40..403be43b793d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
1974 return rc; 1974 return rc;
1975} 1975}
1976 1976
1977struct iucv_interface iucv_if = {
1978 .message_receive = iucv_message_receive,
1979 .__message_receive = __iucv_message_receive,
1980 .message_reply = iucv_message_reply,
1981 .message_reject = iucv_message_reject,
1982 .message_send = iucv_message_send,
1983 .__message_send = __iucv_message_send,
1984 .message_send2way = iucv_message_send2way,
1985 .message_purge = iucv_message_purge,
1986 .path_accept = iucv_path_accept,
1987 .path_connect = iucv_path_connect,
1988 .path_quiesce = iucv_path_quiesce,
1989 .path_resume = iucv_path_resume,
1990 .path_sever = iucv_path_sever,
1991 .iucv_register = iucv_register,
1992 .iucv_unregister = iucv_unregister,
1993 .bus = NULL,
1994 .root = NULL,
1995};
1996EXPORT_SYMBOL(iucv_if);
1997
1977/** 1998/**
1978 * iucv_init 1999 * iucv_init
1979 * 2000 *
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
2038 rc = bus_register(&iucv_bus); 2059 rc = bus_register(&iucv_bus);
2039 if (rc) 2060 if (rc)
2040 goto out_reboot; 2061 goto out_reboot;
2062 iucv_if.root = iucv_root;
2063 iucv_if.bus = &iucv_bus;
2041 return 0; 2064 return 0;
2042 2065
2043out_reboot: 2066out_reboot:
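Exporting one iucv_if struct lets af_iucv resolve the entire IUCV API through a single symbol at module load instead of link-time references, so the socket module can still load where the z/VM-only iucv module cannot. A sketch of the consumer side, mirroring afiucv_init() above (symbol_get() pins iucv.ko until the matching symbol_put()):

/* sketch: late-binding to an optional module via one exported struct */
static struct iucv_interface *pr;

static int bind_iucv_transport(void)
{
	pr = try_then_request_module(symbol_get(iucv_if), "iucv");
	if (!pr)
		return -ENODEV;	/* fall back to the HiperSockets path */
	/* ... use pr->iucv_register(), pr->path_connect(), ... */
	return 0;
}

Note that .bus and .root start out NULL and are only filled in once iucv_init() has registered the bus, so consumers must not touch them before the iucv module has fully initialized.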
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ad4ac2601a56..34b2ddeacb67 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1046 uhlen + hdr_len; 1046 uhlen + hdr_len;
1047 old_headroom = skb_headroom(skb); 1047 old_headroom = skb_headroom(skb);
1048 if (skb_cow_head(skb, headroom)) 1048 if (skb_cow_head(skb, headroom)) {
1049 dev_kfree_skb(skb);
1049 goto abort; 1050 goto abort;
1051 }
1050 1052
1051 new_headroom = skb_headroom(skb); 1053 new_headroom = skb_headroom(skb);
1052 skb_orphan(skb); 1054 skb_orphan(skb);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f42cd0915966..8a90d756c904 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -395,6 +395,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
395 struct pppol2tp_session *ps; 395 struct pppol2tp_session *ps;
396 int old_headroom; 396 int old_headroom;
397 int new_headroom; 397 int new_headroom;
398 int uhlen, headroom;
398 399
399 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 400 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
400 goto abort; 401 goto abort;
@@ -413,7 +414,13 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
413 goto abort_put_sess; 414 goto abort_put_sess;
414 415
415 old_headroom = skb_headroom(skb); 416 old_headroom = skb_headroom(skb);
416 if (skb_cow_head(skb, sizeof(ppph))) 417 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
418 headroom = NET_SKB_PAD +
419 sizeof(struct iphdr) + /* IP header */
420 uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
421 session->hdr_len + /* L2TP header */
422 sizeof(ppph); /* PPP header */
423 if (skb_cow_head(skb, headroom))
417 goto abort_put_sess_tun; 424 goto abort_put_sess_tun;
418 425
419 new_headroom = skb_headroom(skb); 426 new_headroom = skb_headroom(skb);
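Rough arithmetic behind the new headroom request, under assumed typical values (NET_SKB_PAD is max(32, L1_CACHE_BYTES), i.e. 64 with 64-byte cache lines):

/* assumed worst case, L2TPv2 over UDP/IPv4:
 *   NET_SKB_PAD        64
 *   struct iphdr       20
 *   struct udphdr       8
 *   session->hdr_len  ~14  (flags/len, tunnel+session id, Ns/Nr, offset)
 *   sizeof(ppph)        2  (PPP address + control bytes)
 *   total            ~108  bytes reserved by a single skb_cow_head()
 */

Reserving for the whole header stack once avoids a second reallocation when l2tp_xmit_skb() later pushes the IP/UDP/L2TP headers.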
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 956b7e47dc52..8d0324bac01c 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -139,7 +139,8 @@ out:
139 return lapb; 139 return lapb;
140} 140}
141 141
142int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks) 142int lapb_register(struct net_device *dev,
143 const struct lapb_register_struct *callbacks)
143{ 144{
144 struct lapb_cb *lapb; 145 struct lapb_cb *lapb;
145 int rc = LAPB_BADTOKEN; 146 int rc = LAPB_BADTOKEN;
@@ -158,7 +159,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
158 goto out; 159 goto out;
159 160
160 lapb->dev = dev; 161 lapb->dev = dev;
161 lapb->callbacks = *callbacks; 162 lapb->callbacks = callbacks;
162 163
163 __lapb_insert_cb(lapb); 164 __lapb_insert_cb(lapb);
164 165
@@ -380,32 +381,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
380 381
381void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) 382void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
382{ 383{
383 if (lapb->callbacks.connect_confirmation) 384 if (lapb->callbacks->connect_confirmation)
384 lapb->callbacks.connect_confirmation(lapb->dev, reason); 385 lapb->callbacks->connect_confirmation(lapb->dev, reason);
385} 386}
386 387
387void lapb_connect_indication(struct lapb_cb *lapb, int reason) 388void lapb_connect_indication(struct lapb_cb *lapb, int reason)
388{ 389{
389 if (lapb->callbacks.connect_indication) 390 if (lapb->callbacks->connect_indication)
390 lapb->callbacks.connect_indication(lapb->dev, reason); 391 lapb->callbacks->connect_indication(lapb->dev, reason);
391} 392}
392 393
393void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) 394void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
394{ 395{
395 if (lapb->callbacks.disconnect_confirmation) 396 if (lapb->callbacks->disconnect_confirmation)
396 lapb->callbacks.disconnect_confirmation(lapb->dev, reason); 397 lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
397} 398}
398 399
399void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) 400void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
400{ 401{
401 if (lapb->callbacks.disconnect_indication) 402 if (lapb->callbacks->disconnect_indication)
402 lapb->callbacks.disconnect_indication(lapb->dev, reason); 403 lapb->callbacks->disconnect_indication(lapb->dev, reason);
403} 404}
404 405
405int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) 406int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
406{ 407{
407 if (lapb->callbacks.data_indication) 408 if (lapb->callbacks->data_indication)
408 return lapb->callbacks.data_indication(lapb->dev, skb); 409 return lapb->callbacks->data_indication(lapb->dev, skb);
409 410
410 kfree_skb(skb); 411 kfree_skb(skb);
411 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ 412 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
@@ -415,8 +416,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
415{ 416{
416 int used = 0; 417 int used = 0;
417 418
418 if (lapb->callbacks.data_transmit) { 419 if (lapb->callbacks->data_transmit) {
419 lapb->callbacks.data_transmit(lapb->dev, skb); 420 lapb->callbacks->data_transmit(lapb->dev, skb);
420 used = 1; 421 used = 1;
421 } 422 }
422 423
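With the callbacks now stored by const pointer, every lapb_cb shares one read-only ops table instead of carrying its own copy. A hypothetical driver registration under the new signature (sketch; the ops names are taken from the invocations above, the handler bodies are illustrative):

static void my_connected(struct net_device *dev, int reason) { }
static void my_disconnected(struct net_device *dev, int reason) { }

static int my_data_ind(struct net_device *dev, struct sk_buff *skb)
{
	return netif_rx(skb);		/* hand the frame up the stack */
}

static void my_data_xmit(struct net_device *dev, struct sk_buff *skb)
{
	dev_queue_xmit(skb);		/* push the LAPB frame out */
}

static const struct lapb_register_struct my_lapb_ops = {
	.connect_confirmation    = my_connected,
	.connect_indication      = my_connected,
	.disconnect_confirmation = my_disconnected,
	.disconnect_indication   = my_disconnected,
	.data_indication         = my_data_ind,
	.data_transmit           = my_data_xmit,
};

/* err = lapb_register(dev, &my_lapb_ops); */

The table must of course outlive the registration, which static const storage guarantees.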
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f5fdfcbf552a..7d3b438755f0 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -199,6 +199,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
199 199
200 Do not select this option. 200 Do not select this option.
201 201
202config MAC80211_VERBOSE_MPATH_DEBUG
203 bool "Verbose mesh path debugging"
204 depends on MAC80211_DEBUG_MENU
205 depends on MAC80211_MESH
206 ---help---
207 Selecting this option causes mac80211 to print out very
208 verbose mesh path selection debugging messages (when mac80211
209 is taking part in a mesh network).
210 It should not be selected on production systems as those
211 messages are remotely triggerable.
212
213 Do not select this option.
214
202config MAC80211_VERBOSE_MHWMP_DEBUG 215config MAC80211_VERBOSE_MHWMP_DEBUG
203 bool "Verbose mesh HWMP routing debugging" 216 bool "Verbose mesh HWMP routing debugging"
204 depends on MAC80211_DEBUG_MENU 217 depends on MAC80211_DEBUG_MENU
@@ -212,6 +225,18 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
212 225
213 Do not select this option. 226 Do not select this option.
214 227
228config MAC80211_VERBOSE_TDLS_DEBUG
229 bool "Verbose TDLS debugging"
230 depends on MAC80211_DEBUG_MENU
231 ---help---
232 Selecting this option causes mac80211 to print out very
233 verbose TDLS debugging messages (when mac80211
234 is a TDLS STA).
235 It should not be selected on production systems as those
236 messages are remotely triggerable.
237
238 Do not select this option.
239
215config MAC80211_DEBUG_COUNTERS 240config MAC80211_DEBUG_COUNTERS
216 bool "Extra statistics for TX/RX debugging" 241 bool "Extra statistics for TX/RX debugging"
217 depends on MAC80211_DEBUG_MENU 242 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index fd1aaf2a4a6c..97f33588b65f 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -69,7 +69,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
69 if (!tid_rx) 69 if (!tid_rx)
70 return; 70 return;
71 71
72 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); 72 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
73 73
74#ifdef CONFIG_MAC80211_HT_DEBUG 74#ifdef CONFIG_MAC80211_HT_DEBUG
75 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 75 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -167,12 +167,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
167 u16 capab; 167 u16 capab;
168 168
169 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 169 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
170 170 if (!skb)
171 if (!skb) {
172 printk(KERN_DEBUG "%s: failed to allocate buffer "
173 "for addba resp frame\n", sdata->name);
174 return; 171 return;
175 }
176 172
177 skb_reserve(skb, local->hw.extra_tx_headroom); 173 skb_reserve(skb, local->hw.extra_tx_headroom);
178 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 174 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -227,7 +223,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
227 223
228 status = WLAN_STATUS_REQUEST_DECLINED; 224 status = WLAN_STATUS_REQUEST_DECLINED;
229 225
230 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { 226 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
231#ifdef CONFIG_MAC80211_HT_DEBUG 227#ifdef CONFIG_MAC80211_HT_DEBUG
232 printk(KERN_DEBUG "Suspend in progress. " 228 printk(KERN_DEBUG "Suspend in progress. "
233 "Denying ADDBA request\n"); 229 "Denying ADDBA request\n");
@@ -279,14 +275,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
279 275
280 /* prepare A-MPDU MLME for Rx aggregation */ 276 /* prepare A-MPDU MLME for Rx aggregation */
281 tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); 277 tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
282 if (!tid_agg_rx) { 278 if (!tid_agg_rx)
283#ifdef CONFIG_MAC80211_HT_DEBUG
284 if (net_ratelimit())
285 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
286 tid);
287#endif
288 goto end; 279 goto end;
289 }
290 280
291 spin_lock_init(&tid_agg_rx->reorder_lock); 281 spin_lock_init(&tid_agg_rx->reorder_lock);
292 282
@@ -306,11 +296,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
306 tid_agg_rx->reorder_time = 296 tid_agg_rx->reorder_time =
307 kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); 297 kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
308 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { 298 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
309#ifdef CONFIG_MAC80211_HT_DEBUG
310 if (net_ratelimit())
311 printk(KERN_ERR "can not allocate reordering buffer "
312 "to tid %d\n", tid);
313#endif
314 kfree(tid_agg_rx->reorder_buf); 299 kfree(tid_agg_rx->reorder_buf);
315 kfree(tid_agg_rx->reorder_time); 300 kfree(tid_agg_rx->reorder_time);
316 kfree(tid_agg_rx); 301 kfree(tid_agg_rx);
@@ -340,7 +325,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
340 status = WLAN_STATUS_SUCCESS; 325 status = WLAN_STATUS_SUCCESS;
341 326
342 /* activate it for RX */ 327 /* activate it for RX */
343 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); 328 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
344 329
345 if (timeout) 330 if (timeout)
346 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); 331 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8be8eff70da..2ac033989e01 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -68,11 +68,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
68 68
69 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 69 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
70 70
71 if (!skb) { 71 if (!skb)
72 printk(KERN_ERR "%s: failed to allocate buffer "
73 "for addba request frame\n", sdata->name);
74 return; 72 return;
75 } 73
76 skb_reserve(skb, local->hw.extra_tx_headroom); 74 skb_reserve(skb, local->hw.extra_tx_headroom);
77 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 75 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
78 memset(mgmt, 0, 24); 76 memset(mgmt, 0, 24);
@@ -106,19 +104,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
106 ieee80211_tx_skb(sdata, skb); 104 ieee80211_tx_skb(sdata, skb);
107} 105}
108 106
109void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) 107void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
110{ 108{
109 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
111 struct ieee80211_local *local = sdata->local; 110 struct ieee80211_local *local = sdata->local;
112 struct sk_buff *skb; 111 struct sk_buff *skb;
113 struct ieee80211_bar *bar; 112 struct ieee80211_bar *bar;
114 u16 bar_control = 0; 113 u16 bar_control = 0;
115 114
116 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 115 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
117 if (!skb) { 116 if (!skb)
118 printk(KERN_ERR "%s: failed to allocate buffer for "
119 "bar frame\n", sdata->name);
120 return; 117 return;
121 } 118
122 skb_reserve(skb, local->hw.extra_tx_headroom); 119 skb_reserve(skb, local->hw.extra_tx_headroom);
123 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); 120 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
124 memset(bar, 0, sizeof(*bar)); 121 memset(bar, 0, sizeof(*bar));
@@ -128,13 +125,14 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
128 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN); 125 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
129 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 126 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
130 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 127 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
131 bar_control |= (u16)(tid << 12); 128 bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
132 bar->control = cpu_to_le16(bar_control); 129 bar->control = cpu_to_le16(bar_control);
133 bar->start_seq_num = cpu_to_le16(ssn); 130 bar->start_seq_num = cpu_to_le16(ssn);
134 131
135 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 132 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
136 ieee80211_tx_skb(sdata, skb); 133 ieee80211_tx_skb(sdata, skb);
137} 134}
135EXPORT_SYMBOL(ieee80211_send_bar);
138 136
139void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, 137void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
140 struct tid_ampdu_tx *tid_tx) 138 struct tid_ampdu_tx *tid_tx)
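The magic 12 becomes a named shift. For reference, the constants assumed from include/linux/ieee80211.h (the TID_INFO subfield occupies bits 12-15 of the BAR control field):

#define IEEE80211_BAR_CTRL_TID_INFO_MASK	0xf000
#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT	12

Exporting ieee80211_send_bar() in the same hunk also lets drivers that set up aggregation in hardware (IEEE80211_HW_TX_AMPDU_SETUP_IN_HW) emit BARs themselves.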
@@ -364,7 +362,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
364 return -EINVAL; 362 return -EINVAL;
365 363
366 if ((tid >= STA_TID_NUM) || 364 if ((tid >= STA_TID_NUM) ||
367 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) 365 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
366 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
368 return -EINVAL; 367 return -EINVAL;
369 368
370#ifdef CONFIG_MAC80211_HT_DEBUG 369#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -383,7 +382,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
383 sdata->vif.type != NL80211_IFTYPE_AP) 382 sdata->vif.type != NL80211_IFTYPE_AP)
384 return -EINVAL; 383 return -EINVAL;
385 384
386 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { 385 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
387#ifdef CONFIG_MAC80211_HT_DEBUG 386#ifdef CONFIG_MAC80211_HT_DEBUG
388 printk(KERN_DEBUG "BA sessions blocked. " 387 printk(KERN_DEBUG "BA sessions blocked. "
389 "Denying BA session request\n"); 388 "Denying BA session request\n");
@@ -413,11 +412,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
413 /* prepare A-MPDU MLME for Tx aggregation */ 412 /* prepare A-MPDU MLME for Tx aggregation */
414 tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); 413 tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
415 if (!tid_tx) { 414 if (!tid_tx) {
416#ifdef CONFIG_MAC80211_HT_DEBUG
417 if (net_ratelimit())
418 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
419 tid);
420#endif
421 ret = -ENOMEM; 415 ret = -ENOMEM;
422 goto err_unlock_sta; 416 goto err_unlock_sta;
423 } 417 }
@@ -574,14 +568,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
574 struct ieee80211_ra_tid *ra_tid; 568 struct ieee80211_ra_tid *ra_tid;
575 struct sk_buff *skb = dev_alloc_skb(0); 569 struct sk_buff *skb = dev_alloc_skb(0);
576 570
577 if (unlikely(!skb)) { 571 if (unlikely(!skb))
578#ifdef CONFIG_MAC80211_HT_DEBUG
579 if (net_ratelimit())
580 printk(KERN_WARNING "%s: Not enough memory, "
581 "dropping start BA session", sdata->name);
582#endif
583 return; 572 return;
584 } 573
585 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 574 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
586 memcpy(&ra_tid->ra, ra, ETH_ALEN); 575 memcpy(&ra_tid->ra, ra, ETH_ALEN);
587 ra_tid->tid = tid; 576 ra_tid->tid = tid;
@@ -727,14 +716,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
727 struct ieee80211_ra_tid *ra_tid; 716 struct ieee80211_ra_tid *ra_tid;
728 struct sk_buff *skb = dev_alloc_skb(0); 717 struct sk_buff *skb = dev_alloc_skb(0);
729 718
730 if (unlikely(!skb)) { 719 if (unlikely(!skb))
731#ifdef CONFIG_MAC80211_HT_DEBUG
732 if (net_ratelimit())
733 printk(KERN_WARNING "%s: Not enough memory, "
734 "dropping stop BA session", sdata->name);
735#endif
736 return; 720 return;
737 } 721
738 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 722 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
739 memcpy(&ra_tid->ra, ra, ETH_ALEN); 723 memcpy(&ra_tid->ra, ra, ETH_ALEN);
740 ra_tid->tid = tid; 724 ra_tid->tid = tid;
@@ -777,18 +761,14 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
777#ifdef CONFIG_MAC80211_HT_DEBUG 761#ifdef CONFIG_MAC80211_HT_DEBUG
778 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); 762 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
779#endif 763#endif
780 764 /*
765 * IEEE 802.11-2007 7.3.1.14:
766 * In an ADDBA Response frame, when the Status Code field
767 * is set to 0, the Buffer Size subfield is set to a value
768 * of at least 1.
769 */
781 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 770 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
782 == WLAN_STATUS_SUCCESS) { 771 == WLAN_STATUS_SUCCESS && buf_size) {
783 /*
784 * IEEE 802.11-2007 7.3.1.14:
785 * In an ADDBA Response frame, when the Status Code field
786 * is set to 0, the Buffer Size subfield is set to a value
787 * of at least 1.
788 */
789 if (!buf_size)
790 goto out;
791
792 if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, 772 if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
793 &tid_tx->state)) { 773 &tid_tx->state)) {
794 /* ignore duplicate response */ 774 /* ignore duplicate response */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 3d1b091d9b2e..ebd7fb101fbf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -12,6 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <net/net_namespace.h> 13#include <net/net_namespace.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/if_ether.h>
15#include <net/cfg80211.h> 16#include <net/cfg80211.h>
16#include "ieee80211_i.h" 17#include "ieee80211_i.h"
17#include "driver-ops.h" 18#include "driver-ops.h"
@@ -62,7 +63,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
62 63
63 if (type == NL80211_IFTYPE_AP_VLAN && 64 if (type == NL80211_IFTYPE_AP_VLAN &&
64 params && params->use_4addr == 0) 65 params && params->use_4addr == 0)
65 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 66 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
66 else if (type == NL80211_IFTYPE_STATION && 67 else if (type == NL80211_IFTYPE_STATION &&
67 params && params->use_4addr >= 0) 68 params && params->use_4addr >= 0)
68 sdata->u.mgd.use_4addr = params->use_4addr; 69 sdata->u.mgd.use_4addr = params->use_4addr;
@@ -343,7 +344,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
343 STATION_INFO_RX_BITRATE | 344 STATION_INFO_RX_BITRATE |
344 STATION_INFO_RX_DROP_MISC | 345 STATION_INFO_RX_DROP_MISC |
345 STATION_INFO_BSS_PARAM | 346 STATION_INFO_BSS_PARAM |
346 STATION_INFO_CONNECTED_TIME; 347 STATION_INFO_CONNECTED_TIME |
348 STATION_INFO_STA_FLAGS;
347 349
348 do_posix_clock_monotonic_gettime(&uptime); 350 do_posix_clock_monotonic_gettime(&uptime);
349 sinfo->connected_time = uptime.tv_sec - sta->last_connected; 351 sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -403,6 +405,23 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
403 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 405 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
404 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; 406 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
405 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 407 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
408
409 sinfo->sta_flags.set = 0;
410 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
411 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
412 BIT(NL80211_STA_FLAG_WME) |
413 BIT(NL80211_STA_FLAG_MFP) |
414 BIT(NL80211_STA_FLAG_AUTHENTICATED);
415 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
416 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
417 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
418 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
419 if (test_sta_flag(sta, WLAN_STA_WME))
420 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
421 if (test_sta_flag(sta, WLAN_STA_MFP))
422 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
423 if (test_sta_flag(sta, WLAN_STA_AUTH))
424 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
406} 425}
407 426
408 427
@@ -455,6 +474,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
455 return ret; 474 return ret;
456} 475}
457 476
477static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
478 struct beacon_parameters *params)
479{
480 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
481
482 bss_conf->ssid_len = params->ssid_len;
483
484 if (params->ssid_len)
485 memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
486
487 bss_conf->hidden_ssid =
488 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
489}
490
458/* 491/*
459 * This handles both adding a beacon and setting new beacon info 492 * This handles both adding a beacon and setting new beacon info
460 */ 493 */
@@ -542,14 +575,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
542 575
543 sdata->vif.bss_conf.dtim_period = new->dtim_period; 576 sdata->vif.bss_conf.dtim_period = new->dtim_period;
544 577
545 rcu_assign_pointer(sdata->u.ap.beacon, new); 578 RCU_INIT_POINTER(sdata->u.ap.beacon, new);
546 579
547 synchronize_rcu(); 580 synchronize_rcu();
548 581
549 kfree(old); 582 kfree(old);
550 583
584 ieee80211_config_ap_ssid(sdata, params);
585
551 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 586 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
552 BSS_CHANGED_BEACON); 587 BSS_CHANGED_BEACON |
588 BSS_CHANGED_SSID);
553 return 0; 589 return 0;
554} 590}
555 591
@@ -594,7 +630,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
594 if (!old) 630 if (!old)
595 return -ENOENT; 631 return -ENOENT;
596 632
597 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 633 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
598 synchronize_rcu(); 634 synchronize_rcu();
599 kfree(old); 635 kfree(old);
600 636
@@ -650,7 +686,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
650 struct sta_info *sta, 686 struct sta_info *sta,
651 struct station_parameters *params) 687 struct station_parameters *params)
652{ 688{
653 unsigned long flags;
654 u32 rates; 689 u32 rates;
655 int i, j; 690 int i, j;
656 struct ieee80211_supported_band *sband; 691 struct ieee80211_supported_band *sband;
@@ -659,43 +694,58 @@ static void sta_apply_parameters(struct ieee80211_local *local,
659 694
660 sband = local->hw.wiphy->bands[local->oper_channel->band]; 695 sband = local->hw.wiphy->bands[local->oper_channel->band];
661 696
662 spin_lock_irqsave(&sta->flaglock, flags);
663 mask = params->sta_flags_mask; 697 mask = params->sta_flags_mask;
664 set = params->sta_flags_set; 698 set = params->sta_flags_set;
665 699
666 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { 700 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
667 sta->flags &= ~WLAN_STA_AUTHORIZED;
668 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 701 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
669 sta->flags |= WLAN_STA_AUTHORIZED; 702 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
703 else
704 clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
670 } 705 }
671 706
672 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { 707 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
673 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
674 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) 708 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
675 sta->flags |= WLAN_STA_SHORT_PREAMBLE; 709 set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
710 else
711 clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
676 } 712 }
677 713
678 if (mask & BIT(NL80211_STA_FLAG_WME)) { 714 if (mask & BIT(NL80211_STA_FLAG_WME)) {
679 sta->flags &= ~WLAN_STA_WME;
680 sta->sta.wme = false;
681 if (set & BIT(NL80211_STA_FLAG_WME)) { 715 if (set & BIT(NL80211_STA_FLAG_WME)) {
682 sta->flags |= WLAN_STA_WME; 716 set_sta_flag(sta, WLAN_STA_WME);
683 sta->sta.wme = true; 717 sta->sta.wme = true;
718 } else {
719 clear_sta_flag(sta, WLAN_STA_WME);
720 sta->sta.wme = false;
684 } 721 }
685 } 722 }
686 723
687 if (mask & BIT(NL80211_STA_FLAG_MFP)) { 724 if (mask & BIT(NL80211_STA_FLAG_MFP)) {
688 sta->flags &= ~WLAN_STA_MFP;
689 if (set & BIT(NL80211_STA_FLAG_MFP)) 725 if (set & BIT(NL80211_STA_FLAG_MFP))
690 sta->flags |= WLAN_STA_MFP; 726 set_sta_flag(sta, WLAN_STA_MFP);
727 else
728 clear_sta_flag(sta, WLAN_STA_MFP);
691 } 729 }
692 730
693 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { 731 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
694 sta->flags &= ~WLAN_STA_AUTH;
695 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) 732 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
696 sta->flags |= WLAN_STA_AUTH; 733 set_sta_flag(sta, WLAN_STA_AUTH);
734 else
735 clear_sta_flag(sta, WLAN_STA_AUTH);
736 }
737
738 if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
739 if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
740 set_sta_flag(sta, WLAN_STA_TDLS_PEER);
741 else
742 clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
743 }
744
745 if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
746 sta->sta.uapsd_queues = params->uapsd_queues;
747 sta->sta.max_sp = params->max_sp;
697 } 748 }
698 spin_unlock_irqrestore(&sta->flaglock, flags);
699 749
700 /* 750 /*
701 * cfg80211 validates this (1-2007) and allows setting the AID 751 * cfg80211 validates this (1-2007) and allows setting the AID
@@ -786,10 +836,17 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
786 if (!sta) 836 if (!sta)
787 return -ENOMEM; 837 return -ENOMEM;
788 838
789 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; 839 set_sta_flag(sta, WLAN_STA_AUTH);
840 set_sta_flag(sta, WLAN_STA_ASSOC);
790 841
791 sta_apply_parameters(local, sta, params); 842 sta_apply_parameters(local, sta, params);
792 843
844 /* Only TDLS-supporting stations can add TDLS peers */
845 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
846 !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
847 sdata->vif.type == NL80211_IFTYPE_STATION))
848 return -ENOTSUPP;
849
793 rate_control_rate_init(sta); 850 rate_control_rate_init(sta);
794 851
795 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 852 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
@@ -842,6 +899,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
842 return -ENOENT; 899 return -ENOENT;
843 } 900 }
844 901
902 /* The TDLS bit cannot be toggled after the STA was added */
903 if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
904 !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
905 !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
906 rcu_read_unlock();
907 return -EINVAL;
908 }
909
845 if (params->vlan && params->vlan != sta->sdata->dev) { 910 if (params->vlan && params->vlan != sta->sdata->dev) {
846 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 911 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
847 912
@@ -857,7 +922,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
857 return -EBUSY; 922 return -EBUSY;
858 } 923 }
859 924
860 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 925 RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
861 } 926 }
862 927
863 sta->sdata = vlansdata; 928 sta->sdata = vlansdata;
@@ -918,7 +983,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
918 if (dst) 983 if (dst)
919 return mesh_path_del(dst, sdata); 984 return mesh_path_del(dst, sdata);
920 985
921 mesh_path_flush(sdata); 986 mesh_path_flush_by_iface(sdata);
922 return 0; 987 return 0;
923} 988}
924 989
@@ -1137,6 +1202,22 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1137 conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; 1202 conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
1138 ieee80211_mesh_root_setup(ifmsh); 1203 ieee80211_mesh_root_setup(ifmsh);
1139 } 1204 }
1205 if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) {
1206 /* our current gate announcement implementation rides on root
1207 * announcements, so require this ifmsh to also be a root node
1208 */
1209 if (nconf->dot11MeshGateAnnouncementProtocol &&
1210 !conf->dot11MeshHWMPRootMode) {
1211 conf->dot11MeshHWMPRootMode = 1;
1212 ieee80211_mesh_root_setup(ifmsh);
1213 }
1214 conf->dot11MeshGateAnnouncementProtocol =
1215 nconf->dot11MeshGateAnnouncementProtocol;
1216 }
1217 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
1218 conf->dot11MeshHWMPRannInterval =
1219 nconf->dot11MeshHWMPRannInterval;
1220 }
1140 return 0; 1221 return 0;
1141} 1222}
1142 1223
@@ -1235,9 +1316,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1235} 1316}
1236 1317
1237static int ieee80211_set_txq_params(struct wiphy *wiphy, 1318static int ieee80211_set_txq_params(struct wiphy *wiphy,
1319 struct net_device *dev,
1238 struct ieee80211_txq_params *params) 1320 struct ieee80211_txq_params *params)
1239{ 1321{
1240 struct ieee80211_local *local = wiphy_priv(wiphy); 1322 struct ieee80211_local *local = wiphy_priv(wiphy);
1323 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1241 struct ieee80211_tx_queue_params p; 1324 struct ieee80211_tx_queue_params p;
1242 1325
1243 if (!local->ops->conf_tx) 1326 if (!local->ops->conf_tx)
@@ -1258,8 +1341,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1258 if (params->queue >= local->hw.queues) 1341 if (params->queue >= local->hw.queues)
1259 return -EINVAL; 1342 return -EINVAL;
1260 1343
1261 local->tx_conf[params->queue] = p; 1344 sdata->tx_conf[params->queue] = p;
1262 if (drv_conf_tx(local, params->queue, &p)) { 1345 if (drv_conf_tx(local, sdata, params->queue, &p)) {
1263 wiphy_debug(local->hw.wiphy, 1346 wiphy_debug(local->hw.wiphy,
1264 "failed to set TX queue parameters for queue %d\n", 1347 "failed to set TX queue parameters for queue %d\n",
1265 params->queue); 1348 params->queue);
@@ -1821,7 +1904,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
1821 * so in that case userspace will have to deal with it. 1904 * so in that case userspace will have to deal with it.
1822 */ 1905 */
1823 1906
1824 if (wk->offchan_tx.wait && wk->offchan_tx.frame) 1907 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
1825 cfg80211_mgmt_tx_status(wk->sdata->dev, 1908 cfg80211_mgmt_tx_status(wk->sdata->dev,
1826 (unsigned long) wk->offchan_tx.frame, 1909 (unsigned long) wk->offchan_tx.frame,
1827 wk->ie, wk->ie_len, false, GFP_KERNEL); 1910 wk->ie, wk->ie_len, false, GFP_KERNEL);
@@ -1833,7 +1916,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1833 struct ieee80211_channel *chan, bool offchan, 1916 struct ieee80211_channel *chan, bool offchan,
1834 enum nl80211_channel_type channel_type, 1917 enum nl80211_channel_type channel_type,
1835 bool channel_type_valid, unsigned int wait, 1918 bool channel_type_valid, unsigned int wait,
1836 const u8 *buf, size_t len, u64 *cookie) 1919 const u8 *buf, size_t len, bool no_cck,
1920 u64 *cookie)
1837{ 1921{
1838 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1922 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1839 struct ieee80211_local *local = sdata->local; 1923 struct ieee80211_local *local = sdata->local;
@@ -1860,6 +1944,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1860 flags |= IEEE80211_TX_CTL_TX_OFFCHAN; 1944 flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
1861 } 1945 }
1862 1946
1947 if (no_cck)
1948 flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
1949
1863 if (is_offchan && !offchan) 1950 if (is_offchan && !offchan)
1864 return -EBUSY; 1951 return -EBUSY;
1865 1952
@@ -1898,33 +1985,6 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1898 1985
1899 *cookie = (unsigned long) skb; 1986 *cookie = (unsigned long) skb;
1900 1987
1901 if (is_offchan && local->ops->offchannel_tx) {
1902 int ret;
1903
1904 IEEE80211_SKB_CB(skb)->band = chan->band;
1905
1906 mutex_lock(&local->mtx);
1907
1908 if (local->hw_offchan_tx_cookie) {
1909 mutex_unlock(&local->mtx);
1910 return -EBUSY;
1911 }
1912
1913 /* TODO: bitrate control, TX processing? */
1914 ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
1915
1916 if (ret == 0)
1917 local->hw_offchan_tx_cookie = *cookie;
1918 mutex_unlock(&local->mtx);
1919
1920 /*
1921 * Allow driver to return 1 to indicate it wants to have the
1922 * frame transmitted with a remain_on_channel + regular TX.
1923 */
1924 if (ret != 1)
1925 return ret;
1926 }
1927
1928 if (is_offchan && local->ops->remain_on_channel) { 1988 if (is_offchan && local->ops->remain_on_channel) {
1929 unsigned int duration; 1989 unsigned int duration;
1930 int ret; 1990 int ret;
@@ -2011,18 +2071,6 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
2011 2071
2012 mutex_lock(&local->mtx); 2072 mutex_lock(&local->mtx);
2013 2073
2014 if (local->ops->offchannel_tx_cancel_wait &&
2015 local->hw_offchan_tx_cookie == cookie) {
2016 ret = drv_offchannel_tx_cancel_wait(local);
2017
2018 if (!ret)
2019 local->hw_offchan_tx_cookie = 0;
2020
2021 mutex_unlock(&local->mtx);
2022
2023 return ret;
2024 }
2025
2026 if (local->ops->cancel_remain_on_channel) { 2074 if (local->ops->cancel_remain_on_channel) {
2027 cookie ^= 2; 2075 cookie ^= 2;
2028 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); 2076 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -2123,6 +2171,323 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
2123 return 0; 2171 return 0;
2124} 2172}
2125 2173
2174static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
2175{
2176 u8 *pos = (void *)skb_put(skb, 7);
2177
2178 *pos++ = WLAN_EID_EXT_CAPABILITY;
2179 *pos++ = 5; /* len */
2180 *pos++ = 0x0;
2181 *pos++ = 0x0;
2182 *pos++ = 0x0;
2183 *pos++ = 0x0;
2184 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
2185}
2186
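The helper above emits an Extended Capabilities element with only the TDLS bit set. Layout of the 7 emitted octets, assuming WLAN_EXT_CAPA5_TDLS_ENABLED is BIT(5) (802.11z puts "TDLS Supported" at extended capability bit 37, i.e. bit 5 of octet 5):

/* [0] WLAN_EID_EXT_CAPABILITY
 * [1] 5                              element length
 * [2..5] 0x00 0x00 0x00 0x00         capability octets 1-4
 * [6] WLAN_EXT_CAPA5_TDLS_ENABLED    capability octet 5
 */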
2187static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
2188{
2189 struct ieee80211_local *local = sdata->local;
2190 u16 capab;
2191
2192 capab = 0;
2193 if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
2194 return capab;
2195
2196 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
2197 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
2198 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
2199 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
2200
2201 return capab;
2202}
2203
2204static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
2205 u8 *peer, u8 *bssid)
2206{
2207 struct ieee80211_tdls_lnkie *lnkid;
2208
2209 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
2210
2211 lnkid->ie_type = WLAN_EID_LINK_ID;
2212 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
2213
2214 memcpy(lnkid->bssid, bssid, ETH_ALEN);
2215 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
2216 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
2217}
2218
2219static int
2220ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2221 u8 *peer, u8 action_code, u8 dialog_token,
2222 u16 status_code, struct sk_buff *skb)
2223{
2224 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2225 struct ieee80211_tdls_data *tf;
2226
2227 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
2228
2229 memcpy(tf->da, peer, ETH_ALEN);
2230 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
2231 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
2232 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
2233
2234 switch (action_code) {
2235 case WLAN_TDLS_SETUP_REQUEST:
2236 tf->category = WLAN_CATEGORY_TDLS;
2237 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
2238
2239 skb_put(skb, sizeof(tf->u.setup_req));
2240 tf->u.setup_req.dialog_token = dialog_token;
2241 tf->u.setup_req.capability =
2242 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2243
2244 ieee80211_add_srates_ie(&sdata->vif, skb);
2245 ieee80211_add_ext_srates_ie(&sdata->vif, skb);
2246 ieee80211_tdls_add_ext_capab(skb);
2247 break;
2248 case WLAN_TDLS_SETUP_RESPONSE:
2249 tf->category = WLAN_CATEGORY_TDLS;
2250 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
2251
2252 skb_put(skb, sizeof(tf->u.setup_resp));
2253 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
2254 tf->u.setup_resp.dialog_token = dialog_token;
2255 tf->u.setup_resp.capability =
2256 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2257
2258 ieee80211_add_srates_ie(&sdata->vif, skb);
2259 ieee80211_add_ext_srates_ie(&sdata->vif, skb);
2260 ieee80211_tdls_add_ext_capab(skb);
2261 break;
2262 case WLAN_TDLS_SETUP_CONFIRM:
2263 tf->category = WLAN_CATEGORY_TDLS;
2264 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
2265
2266 skb_put(skb, sizeof(tf->u.setup_cfm));
2267 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
2268 tf->u.setup_cfm.dialog_token = dialog_token;
2269 break;
2270 case WLAN_TDLS_TEARDOWN:
2271 tf->category = WLAN_CATEGORY_TDLS;
2272 tf->action_code = WLAN_TDLS_TEARDOWN;
2273
+		skb_put(skb, sizeof(tf->u.teardown));
+		tf->u.teardown.reason_code = cpu_to_le16(status_code);
+		break;
+	case WLAN_TDLS_DISCOVERY_REQUEST:
+		tf->category = WLAN_CATEGORY_TDLS;
+		tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+
+		skb_put(skb, sizeof(tf->u.discover_req));
+		tf->u.discover_req.dialog_token = dialog_token;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
+			   u8 *peer, u8 action_code, u8 dialog_token,
+			   u16 status_code, struct sk_buff *skb)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_mgmt *mgmt;
+
+	mgmt = (void *)skb_put(skb, 24);
+	memset(mgmt, 0, 24);
+	memcpy(mgmt->da, peer, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+	memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+
+	switch (action_code) {
+	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+		skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
+		mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+		mgmt->u.action.u.tdls_discover_resp.action_code =
+			WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+		mgmt->u.action.u.tdls_discover_resp.dialog_token =
+			dialog_token;
+		mgmt->u.action.u.tdls_discover_resp.capability =
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+		ieee80211_add_srates_ie(&sdata->vif, skb);
+		ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+		ieee80211_tdls_add_ext_capab(skb);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+			       u8 *peer, u8 action_code, u8 dialog_token,
+			       u16 status_code, const u8 *extra_ies,
+			       size_t extra_ies_len)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb = NULL;
+	bool send_direct;
+	int ret;
+
+	if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+		return -ENOTSUPP;
+
+	/* make sure we are in managed mode, and associated */
+	if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+	printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
+#endif
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    max(sizeof(struct ieee80211_mgmt),
+				sizeof(struct ieee80211_tdls_data)) +
+			    50 + /* supported rates */
+			    7 + /* ext capab */
+			    extra_ies_len +
+			    sizeof(struct ieee80211_tdls_lnkie));
+	if (!skb)
+		return -ENOMEM;
+
+	info = IEEE80211_SKB_CB(skb);
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	switch (action_code) {
+	case WLAN_TDLS_SETUP_REQUEST:
+	case WLAN_TDLS_SETUP_RESPONSE:
+	case WLAN_TDLS_SETUP_CONFIRM:
+	case WLAN_TDLS_TEARDOWN:
+	case WLAN_TDLS_DISCOVERY_REQUEST:
+		ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+						     action_code, dialog_token,
+						     status_code, skb);
+		send_direct = false;
+		break;
+	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+		ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+						 dialog_token, status_code,
+						 skb);
+		send_direct = true;
+		break;
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+
+	if (ret < 0)
+		goto fail;
+
+	if (extra_ies_len)
+		memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+	/* the TDLS link IE is always added last */
+	switch (action_code) {
+	case WLAN_TDLS_SETUP_REQUEST:
+	case WLAN_TDLS_SETUP_CONFIRM:
+	case WLAN_TDLS_TEARDOWN:
+	case WLAN_TDLS_DISCOVERY_REQUEST:
+		/* we are the initiator */
+		ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
+					   sdata->u.mgd.bssid);
+		break;
+	case WLAN_TDLS_SETUP_RESPONSE:
+	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+		/* we are the responder */
+		ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
+					   sdata->u.mgd.bssid);
+		break;
+	default:
+		ret = -ENOTSUPP;
+		goto fail;
+	}
+
+	if (send_direct) {
+		ieee80211_tx_skb(sdata, skb);
+		return 0;
+	}
+
+	/*
+	 * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
+	 * we should default to AC_VI.
+	 */
+	switch (action_code) {
+	case WLAN_TDLS_SETUP_REQUEST:
+	case WLAN_TDLS_SETUP_RESPONSE:
+		skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+		skb->priority = 2;
+		break;
+	default:
+		skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+		skb->priority = 5;
+		break;
+	}
+
+	/* disable bottom halves when entering the Tx path */
+	local_bh_disable();
+	ret = ieee80211_subif_start_xmit(skb, dev);
+	local_bh_enable();
+
+	return ret;
+
+fail:
+	dev_kfree_skb(skb);
+	return ret;
+}
+
+static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+			       u8 *peer, enum nl80211_tdls_operation oper)
+{
+	struct sta_info *sta;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+		return -ENOTSUPP;
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+	printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
+#endif
+
+	switch (oper) {
+	case NL80211_TDLS_ENABLE_LINK:
+		rcu_read_lock();
+		sta = sta_info_get(sdata, peer);
+		if (!sta) {
+			rcu_read_unlock();
+			return -ENOLINK;
+		}
+
+		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+		rcu_read_unlock();
+		break;
+	case NL80211_TDLS_DISABLE_LINK:
+		return sta_info_destroy_addr(sdata, peer);
+	case NL80211_TDLS_TEARDOWN:
+	case NL80211_TDLS_SETUP:
+	case NL80211_TDLS_DISCOVERY_REQ:
+		/* We don't support in-driver setup/teardown/discovery */
+		return -ENOTSUPP;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
 struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -2186,4 +2551,6 @@ struct cfg80211_ops mac80211_config_ops = {
 	.set_ringparam = ieee80211_set_ringparam,
 	.get_ringparam = ieee80211_get_ringparam,
 	.set_rekey_data = ieee80211_set_rekey_data,
+	.tdls_oper = ieee80211_tdls_oper,
+	.tdls_mgmt = ieee80211_tdls_mgmt,
 };
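The two handlers above are only reachable when the wiphy advertises TDLS support; both bail out with -ENOTSUPP otherwise. A minimal sketch of how a driver would opt in (the mydrv name is hypothetical, not part of this patch):

static int mydrv_setup(struct ieee80211_hw *hw)
{
	/* checked at the top of ieee80211_tdls_mgmt()/ieee80211_tdls_oper() */
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	/* ieee80211_register_hw() additionally sets
	 * WIPHY_FLAG_TDLS_EXTERNAL_SETUP (see the main.c hunk below),
	 * so link setup is driven from userspace via nl80211 */
	return ieee80211_register_hw(hw);
}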
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f7cc32..883996b2f99f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -78,57 +78,6 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
 DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
 	local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
 
-static ssize_t tsf_read(struct file *file, char __user *user_buf,
-			size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	u64 tsf;
-
-	tsf = drv_get_tsf(local);
-
-	return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
-				      (unsigned long long) tsf);
-}
-
-static ssize_t tsf_write(struct file *file,
-			 const char __user *user_buf,
-			 size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	unsigned long long tsf;
-	char buf[100];
-	size_t len;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-	buf[len] = '\0';
-
-	if (strncmp(buf, "reset", 5) == 0) {
-		if (local->ops->reset_tsf) {
-			drv_reset_tsf(local);
-			wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
-		}
-	} else {
-		tsf = simple_strtoul(buf, NULL, 0);
-		if (local->ops->set_tsf) {
-			drv_set_tsf(local, tsf);
-			wiphy_info(local->hw.wiphy,
-				   "debugfs set TSF to %#018llx\n", tsf);
-
-		}
-	}
-
-	return count;
-}
-
-static const struct file_operations tsf_ops = {
-	.read = tsf_read,
-	.write = tsf_write,
-	.open = mac80211_open_file_generic,
-	.llseek = default_llseek,
-};
-
 static ssize_t reset_write(struct file *file, const char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -195,20 +144,12 @@ static ssize_t uapsd_queues_write(struct file *file,
 				  size_t count, loff_t *ppos)
 {
 	struct ieee80211_local *local = file->private_data;
-	unsigned long val;
-	char buf[10];
-	size_t len;
+	u8 val;
 	int ret;
 
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-	buf[len] = '\0';
-
-	ret = strict_strtoul(buf, 0, &val);
-
+	ret = kstrtou8_from_user(user_buf, count, 0, &val);
 	if (ret)
-		return -EINVAL;
+		return ret;
 
 	if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
 		return -ERANGE;
@@ -305,6 +246,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 	char *buf = kzalloc(mxln, GFP_KERNEL);
 	int sf = 0; /* how many written so far */
 
+	if (!buf)
+		return 0;
+
 	sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
 		sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
@@ -355,6 +299,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
 	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
 		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
+		sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
 
 	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 	kfree(buf);
@@ -450,7 +396,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
 	DEBUGFS_ADD(frequency);
 	DEBUGFS_ADD(total_ps_buffered);
 	DEBUGFS_ADD(wep_iv);
-	DEBUGFS_ADD(tsf);
 	DEBUGFS_ADD(queues);
 	DEBUGFS_ADD_MODE(reset, 0200);
 	DEBUGFS_ADD(noack);
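The uapsd_queues_write() hunk above replaces an open-coded copy_from_user()/strict_strtoul() sequence with kstrtou8_from_user(), which bounds-checks, copies and parses in one call and rejects values that overflow a u8. A standalone sketch of the pattern (the handler name is illustrative only):

#include <linux/kernel.h>
#include <linux/fs.h>

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	/* base 0: accepts decimal, 0x... hex and 0... octal input */
	ret = kstrtou8_from_user(user_buf, count, 0, &val);
	if (ret)
		return ret;

	/* ... apply val ... */
	return count;
}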
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9ea7c0d0103f..9352819a986b 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -21,6 +21,7 @@
 #include "rate.h"
 #include "debugfs.h"
 #include "debugfs_netdev.h"
+#include "driver-ops.h"
 
 static ssize_t ieee80211_if_read(
 	struct ieee80211_sub_if_data *sdata,
@@ -331,6 +332,46 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
 }
 __IEEE80211_IF_FILE(num_buffered_multicast, NULL);
 
+/* IBSS attributes */
+static ssize_t ieee80211_if_fmt_tsf(
+	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+	struct ieee80211_local *local = sdata->local;
+	u64 tsf;
+
+	tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
+
+	return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
+}
+
+static ssize_t ieee80211_if_parse_tsf(
+	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+	struct ieee80211_local *local = sdata->local;
+	unsigned long long tsf;
+	int ret;
+
+	if (strncmp(buf, "reset", 5) == 0) {
+		if (local->ops->reset_tsf) {
+			drv_reset_tsf(local, sdata);
+			wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
+		}
+	} else {
+		ret = kstrtoull(buf, 10, &tsf);
+		if (ret < 0)
+			return -EINVAL;
+		if (local->ops->set_tsf) {
+			drv_set_tsf(local, sdata, tsf);
+			wiphy_info(local->hw.wiphy,
+				   "debugfs set TSF to %#018llx\n", tsf);
+		}
+	}
+
+	return buflen;
+}
+__IEEE80211_IF_FILE_W(tsf);
+
+
 /* WDS attributes */
 IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
 
@@ -340,6 +381,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
 IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
 IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
 IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
+IEEE80211_IF_FILE(dropped_frames_congestion,
+		  u.mesh.mshstats.dropped_frames_congestion, DEC);
 IEEE80211_IF_FILE(dropped_frames_no_route,
 		  u.mesh.mshstats.dropped_frames_no_route, DEC);
 IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
@@ -372,6 +415,10 @@ IEEE80211_IF_FILE(min_discovery_timeout,
 		  u.mesh.mshcfg.min_discovery_timeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
 		  u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
+		  u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
+		  u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
 #endif
 
 
@@ -415,6 +462,11 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 	DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 }
 
+static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
+{
+	DEBUGFS_ADD_MODE(tsf, 0600);
+}
+
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(drop_unencrypted);
@@ -459,6 +511,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
 	MESHSTATS_ADD(fwded_frames);
 	MESHSTATS_ADD(dropped_frames_ttl);
 	MESHSTATS_ADD(dropped_frames_no_route);
+	MESHSTATS_ADD(dropped_frames_congestion);
 	MESHSTATS_ADD(estab_plinks);
 #undef MESHSTATS_ADD
 }
@@ -485,7 +538,9 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
 	MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
 	MESHPARAMS_ADD(path_refresh_time);
 	MESHPARAMS_ADD(min_discovery_timeout);
-
+	MESHPARAMS_ADD(dot11MeshHWMPRootMode);
+	MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+	MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
 #undef MESHPARAMS_ADD
 }
 #endif
@@ -506,7 +561,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
 		add_sta_files(sdata);
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		/* XXX */
+		add_ibss_files(sdata);
 		break;
 	case NL80211_IFTYPE_AP:
 		add_ap_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a01d2137fddc..c5f341798c16 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -56,19 +56,22 @@ STA_FILE(last_signal, last_signal, D);
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	char buf[100];
+	char buf[121];
 	struct sta_info *sta = file->private_data;
-	u32 staflags = get_sta_flags(sta);
-	int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
-			    staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
-			    staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
-			    staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
-			    staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
-			    staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
-			    staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
-			    staflags & WLAN_STA_WME ? "WME\n" : "",
-			    staflags & WLAN_STA_WDS ? "WDS\n" : "",
-			    staflags & WLAN_STA_MFP ? "MFP\n" : "");
+
+#define TEST(flg) \
+	test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
+
+	int res = scnprintf(buf, sizeof(buf),
+			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
+			    TEST(PS_DRIVER), TEST(AUTHORIZED),
+			    TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+			    TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
+			    TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
+			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
+			    TEST(TDLS_PEER_AUTH));
+#undef TEST
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
 STA_OPS(flags);
@@ -78,8 +81,14 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
 					  size_t count, loff_t *ppos)
 {
 	struct sta_info *sta = file->private_data;
-	return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
-				      skb_queue_len(&sta->ps_tx_buf));
+	char buf[17*IEEE80211_NUM_ACS], *p = buf;
+	int ac;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
+			       skb_queue_len(&sta->ps_tx_buf[ac]) +
+			       skb_queue_len(&sta->tx_filtered[ac]));
+	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
 }
 STA_OPS(num_ps_buf_frames);
 
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 1425380983f7..5f165d7eb2db 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -413,50 +413,56 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
 	trace_drv_return_void(local);
 }
 
-static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
+static inline int drv_conf_tx(struct ieee80211_local *local,
+			      struct ieee80211_sub_if_data *sdata, u16 queue,
 			      const struct ieee80211_tx_queue_params *params)
 {
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
 
-	trace_drv_conf_tx(local, queue, params);
+	trace_drv_conf_tx(local, sdata, queue, params);
 	if (local->ops->conf_tx)
-		ret = local->ops->conf_tx(&local->hw, queue, params);
+		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+					  queue, params);
 	trace_drv_return_int(local, ret);
 	return ret;
 }
 
-static inline u64 drv_get_tsf(struct ieee80211_local *local)
+static inline u64 drv_get_tsf(struct ieee80211_local *local,
+			      struct ieee80211_sub_if_data *sdata)
 {
 	u64 ret = -1ULL;
 
 	might_sleep();
 
-	trace_drv_get_tsf(local);
+	trace_drv_get_tsf(local, sdata);
 	if (local->ops->get_tsf)
-		ret = local->ops->get_tsf(&local->hw);
+		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
 	trace_drv_return_u64(local, ret);
 	return ret;
 }
 
-static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
+static inline void drv_set_tsf(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata,
+			       u64 tsf)
 {
 	might_sleep();
 
-	trace_drv_set_tsf(local, tsf);
+	trace_drv_set_tsf(local, sdata, tsf);
 	if (local->ops->set_tsf)
-		local->ops->set_tsf(&local->hw, tsf);
+		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
 	trace_drv_return_void(local);
 }
 
-static inline void drv_reset_tsf(struct ieee80211_local *local)
+static inline void drv_reset_tsf(struct ieee80211_local *local,
+				 struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
 
-	trace_drv_reset_tsf(local);
+	trace_drv_reset_tsf(local, sdata);
 	if (local->ops->reset_tsf)
-		local->ops->reset_tsf(&local->hw);
+		local->ops->reset_tsf(&local->hw, &sdata->vif);
 	trace_drv_return_void(local);
 }
 
@@ -590,37 +596,6 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
 	return ret;
 }
 
-static inline int drv_offchannel_tx(struct ieee80211_local *local,
-				    struct sk_buff *skb,
-				    struct ieee80211_channel *chan,
-				    enum nl80211_channel_type channel_type,
-				    unsigned int wait)
-{
-	int ret;
-
-	might_sleep();
-
-	trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
-	ret = local->ops->offchannel_tx(&local->hw, skb, chan,
-					channel_type, wait);
-	trace_drv_return_int(local, ret);
-
-	return ret;
-}
-
-static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
-{
-	int ret;
-
-	might_sleep();
-
-	trace_drv_offchannel_tx_cancel_wait(local);
-	ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
-	trace_drv_return_int(local, ret);
-
-	return ret;
-}
-
 static inline int drv_set_ringparam(struct ieee80211_local *local,
 				    u32 tx, u32 rx)
 {
@@ -696,4 +671,34 @@ static inline void drv_rssi_callback(struct ieee80211_local *local,
 	local->ops->rssi_callback(&local->hw, event);
 	trace_drv_return_void(local);
 }
+
+static inline void
+drv_release_buffered_frames(struct ieee80211_local *local,
+			    struct sta_info *sta, u16 tids, int num_frames,
+			    enum ieee80211_frame_release_type reason,
+			    bool more_data)
+{
+	trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames,
+					  reason, more_data);
+	if (local->ops->release_buffered_frames)
+		local->ops->release_buffered_frames(&local->hw, &sta->sta, tids,
+						    num_frames, reason,
+						    more_data);
+	trace_drv_return_void(local);
+}
+
+static inline void
+drv_allow_buffered_frames(struct ieee80211_local *local,
+			  struct sta_info *sta, u16 tids, int num_frames,
+			  enum ieee80211_frame_release_type reason,
+			  bool more_data)
+{
+	trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames,
+					reason, more_data);
+	if (local->ops->allow_buffered_frames)
+		local->ops->allow_buffered_frames(&local->hw, &sta->sta,
+						  tids, num_frames, reason,
+						  more_data);
+	trace_drv_return_void(local);
+}
 #endif /* __MAC80211_DRIVER_OPS */
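With the sdata parameter added above, the TSF wrappers now pass a vif down to the driver, so hardware that keeps per-interface timers can pick the right one. A hedged sketch of a driver callback matching the new get_tsf signature (the driver structure and its accessor are invented for illustration):

struct mydrv_priv {
	u64 (*read_tsf)(struct ieee80211_vif *vif);	/* hypothetical */
};

static u64 mydrv_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mydrv_priv *priv = hw->priv;

	/* a per-vif read; drv_get_tsf() returns -1ULL when unimplemented */
	return priv->read_tsf(vif);
}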
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index f47b00dc7afd..2af4fca55337 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -697,64 +697,76 @@ TRACE_EVENT(drv_sta_remove,
 );
 
 TRACE_EVENT(drv_conf_tx,
-	TP_PROTO(struct ieee80211_local *local, u16 queue,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u16 queue,
 		 const struct ieee80211_tx_queue_params *params),
 
-	TP_ARGS(local, queue, params),
+	TP_ARGS(local, sdata, queue, params),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
+		VIF_ENTRY
 		__field(u16, queue)
 		__field(u16, txop)
 		__field(u16, cw_min)
 		__field(u16, cw_max)
 		__field(u8, aifs)
+		__field(bool, uapsd)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
+		VIF_ASSIGN;
 		__entry->queue = queue;
 		__entry->txop = params->txop;
 		__entry->cw_max = params->cw_max;
 		__entry->cw_min = params->cw_min;
 		__entry->aifs = params->aifs;
+		__entry->uapsd = params->uapsd;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " queue:%d",
-		LOCAL_PR_ARG, __entry->queue
+		LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
 	)
 );
 
-DEFINE_EVENT(local_only_evt, drv_get_tsf,
-	TP_PROTO(struct ieee80211_local *local),
-	TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_get_tsf,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
 );
 
 TRACE_EVENT(drv_set_tsf,
-	TP_PROTO(struct ieee80211_local *local, u64 tsf),
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u64 tsf),
 
-	TP_ARGS(local, tsf),
+	TP_ARGS(local, sdata, tsf),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
+		VIF_ENTRY
 		__field(u64, tsf)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
+		VIF_ASSIGN;
 		__entry->tsf = tsf;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " tsf:%llu",
-		LOCAL_PR_ARG, (unsigned long long)__entry->tsf
+		LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu",
+		LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf
 	)
 );
 
-DEFINE_EVENT(local_only_evt, drv_reset_tsf,
-	TP_PROTO(struct ieee80211_local *local),
-	TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
 );
 
 DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
@@ -1117,6 +1129,61 @@ TRACE_EVENT(drv_rssi_callback,
 	)
 );
 
+DECLARE_EVENT_CLASS(release_evt,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sta *sta,
+		 u16 tids, int num_frames,
+		 enum ieee80211_frame_release_type reason,
+		 bool more_data),
+
+	TP_ARGS(local, sta, tids, num_frames, reason, more_data),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		STA_ENTRY
+		__field(u16, tids)
+		__field(int, num_frames)
+		__field(int, reason)
+		__field(bool, more_data)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		STA_ASSIGN;
+		__entry->tids = tids;
+		__entry->num_frames = num_frames;
+		__entry->reason = reason;
+		__entry->more_data = more_data;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT STA_PR_FMT
+		" TIDs:0x%.4x frames:%d reason:%d more:%d",
+		LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames,
+		__entry->reason, __entry->more_data
+	)
+);
+
+DEFINE_EVENT(release_evt, drv_release_buffered_frames,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sta *sta,
+		 u16 tids, int num_frames,
+		 enum ieee80211_frame_release_type reason,
+		 bool more_data),
+
+	TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
+DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sta *sta,
+		 u16 tids, int num_frames,
+		 enum ieee80211_frame_release_type reason,
+		 bool more_data),
+
+	TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1431,6 +1498,28 @@ TRACE_EVENT(api_enable_rssi_reports,
 	)
 );
 
+TRACE_EVENT(api_eosp,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sta *sta),
+
+	TP_ARGS(local, sta),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		STA_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		STA_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT STA_PR_FMT,
+		LOCAL_PR_ARG, STA_PR_ARG
+	)
+);
+
 /*
  * Tracing for internal functions
  * (which may also be called in response to driver calls)
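The release_evt class added above follows the usual DECLARE_EVENT_CLASS/DEFINE_EVENT split: the class defines the record layout and format string once, and each DEFINE_EVENT stamps out a named tracepoint that shares them, keeping drv_release_buffered_frames and drv_allow_buffered_frames in sync. A minimal illustration of the idiom, with invented event names and omitting the surrounding trace-header boilerplate:

DECLARE_EVENT_CLASS(demo_evt,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value:%d", __entry->value)
);

DEFINE_EVENT(demo_evt, demo_event_a,
	TP_PROTO(int value),
	TP_ARGS(value)
);

DEFINE_EVENT(demo_evt, demo_event_b,
	TP_PROTO(int value),
	TP_ARGS(value)
);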
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7cfc286946c0..f80a35c0d000 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -130,7 +130,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
 	 * down by the code that set the flag, so this
 	 * need not run.
 	 */
-	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA))
 		return;
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
@@ -186,12 +186,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 	u16 params;
 
 	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer "
-		       "for delba frame\n", sdata->name);
+	if (!skb)
 		return;
-	}
 
 	skb_reserve(skb, local->hw.extra_tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56c24cabf26d..ede9a8b341ac 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -81,10 +81,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	lockdep_assert_held(&ifibss->mtx);
 
 	/* Reset own TSF to allow time synchronization work. */
-	drv_reset_tsf(local);
+	drv_reset_tsf(local, sdata);
 
 	skb = ifibss->skb;
-	rcu_assign_pointer(ifibss->presp, NULL);
+	RCU_INIT_POINTER(ifibss->presp, NULL);
 	synchronize_rcu();
 	skb->data = skb->head;
 	skb->len = 0;
@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 		*pos++ = 0; /* U-APSD no in use */
 	}
 
-	rcu_assign_pointer(ifibss->presp, skb);
+	RCU_INIT_POINTER(ifibss->presp, skb);
 
 	sdata->vif.bss_conf.beacon_int = beacon_int;
 	sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -314,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (sta && elems->wmm_info)
-		set_sta_flags(sta, WLAN_STA_WME);
+		set_sta_flag(sta, WLAN_STA_WME);
 
 	rcu_read_unlock();
 }
@@ -382,7 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 			 * second best option: get current TSF
 			 * (will return -1 if not supported)
 			 */
-			rx_timestamp = drv_get_tsf(local);
+			rx_timestamp = drv_get_tsf(local, sdata);
 		}
 
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -417,7 +417,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
  * must be callable in atomic context.
  */
 struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-					u8 *bssid,u8 *addr, u32 supp_rates,
+					u8 *bssid, u8 *addr, u32 supp_rates,
 					gfp_t gfp)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -452,7 +452,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 		return NULL;
 
 	sta->last_rx = jiffies;
-	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+	set_sta_flag(sta, WLAN_STA_AUTHORIZED);
 
 	/* make sure mandatory rates are always added */
 	sta->sta.supp_rates[band] = supp_rates |
@@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 	kfree(sdata->u.ibss.ie);
 	skb = rcu_dereference_protected(sdata->u.ibss.presp,
 					lockdep_is_held(&sdata->u.ibss.mtx));
-	rcu_assign_pointer(sdata->u.ibss.presp, NULL);
+	RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
 	sdata->vif.bss_conf.ibss_joined = false;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
 					 BSS_CHANGED_IBSS);
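The rcu_assign_pointer() to RCU_INIT_POINTER() conversions above are safe because the stored value is NULL: rcu_assign_pointer() includes a write barrier so readers never see a half-initialized object, while RCU_INIT_POINTER() is a plain store with nothing to order. A small self-contained sketch of the rule (types and names are illustrative only):

#include <linux/rcupdate.h>

struct item {
	int payload;
};

static struct item __rcu *global_item;

static void publish(struct item *it)
{
	it->payload = 42;
	rcu_assign_pointer(global_item, it);	/* barrier: init before publish */
}

static void retract(void)
{
	RCU_INIT_POINTER(global_item, NULL);	/* NULL: no ordering needed */
}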
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 400c09bea639..4c3d1f591bec 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -136,7 +136,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
 #define TX_DROP		((__force ieee80211_tx_result) 1u)
 #define TX_QUEUED	((__force ieee80211_tx_result) 2u)
 
-#define IEEE80211_TX_FRAGMENTED		BIT(0)
 #define IEEE80211_TX_UNICAST		BIT(1)
 #define IEEE80211_TX_PS_BUFFERED	BIT(2)
 
@@ -149,7 +148,6 @@ struct ieee80211_tx_data {
 
 	struct ieee80211_channel *channel;
 
-	u16 ethertype;
 	unsigned int flags;
 };
 
@@ -261,6 +259,7 @@ struct mesh_stats {
 	__u32 fwded_frames;		/* Mesh total forwarded frames */
 	__u32 dropped_frames_ttl;	/* Not transmitted since mesh_ttl == 0*/
 	__u32 dropped_frames_no_route;	/* Not transmitted, no route found */
+	__u32 dropped_frames_congestion;/* Not forwarded due to congestion */
 	atomic_t estab_plinks;
 };
 
@@ -345,6 +344,7 @@ struct ieee80211_work {
 		struct {
 			struct sk_buff *frame;
 			u32 wait;
+			bool status;
 		} offchan_tx;
 	};
 
@@ -514,6 +514,7 @@ struct ieee80211_if_mesh {
 	struct mesh_config mshcfg;
 	u32 mesh_seqnum;
 	bool accepting_plinks;
+	int num_gates;
 	const u8 *ie;
 	u8 ie_len;
 	enum {
@@ -607,6 +608,8 @@ struct ieee80211_sub_if_data {
 	__be16 control_port_protocol;
 	bool control_port_no_encrypt;
 
+	struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+
 	struct work_struct work;
 	struct sk_buff_head skb_queue;
 
@@ -660,6 +663,11 @@ enum sdata_queue_type {
 enum {
 	IEEE80211_RX_MSG	= 1,
 	IEEE80211_TX_STATUS_MSG	= 2,
+	IEEE80211_EOSP_MSG	= 3,
+};
+
+struct skb_eosp_msg_data {
+	u8 sta[ETH_ALEN], iface[ETH_ALEN];
 };
 
 enum queue_stop_reason {
@@ -669,6 +677,7 @@ enum queue_stop_reason {
 	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
 	IEEE80211_QUEUE_STOP_REASON_SUSPEND,
 	IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+	IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
 };
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -748,7 +757,6 @@ struct ieee80211_local {
 	struct workqueue_struct *workqueue;
 
 	unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
-	struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
 	/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
 	spinlock_t queue_stop_reason_lock;
 
@@ -1002,7 +1010,6 @@ struct ieee80211_local {
 	unsigned int hw_roc_duration;
 	u32 hw_roc_cookie;
 	bool hw_roc_for_tx;
-	unsigned long hw_offchan_tx_cookie;
 
 	/* dummy netdev for use w/ NAPI */
 	struct net_device napi_dev;
@@ -1022,69 +1029,6 @@ struct ieee80211_ra_tid {
 	u16 tid;
 };
 
-/* Parsed Information Elements */
-struct ieee802_11_elems {
-	u8 *ie_start;
-	size_t total_len;
-
-	/* pointers to IEs */
-	u8 *ssid;
-	u8 *supp_rates;
-	u8 *fh_params;
-	u8 *ds_params;
-	u8 *cf_params;
-	struct ieee80211_tim_ie *tim;
-	u8 *ibss_params;
-	u8 *challenge;
-	u8 *wpa;
-	u8 *rsn;
-	u8 *erp_info;
-	u8 *ext_supp_rates;
-	u8 *wmm_info;
-	u8 *wmm_param;
-	struct ieee80211_ht_cap *ht_cap_elem;
-	struct ieee80211_ht_info *ht_info_elem;
-	struct ieee80211_meshconf_ie *mesh_config;
-	u8 *mesh_id;
-	u8 *peer_link;
-	u8 *preq;
-	u8 *prep;
-	u8 *perr;
-	struct ieee80211_rann_ie *rann;
-	u8 *ch_switch_elem;
-	u8 *country_elem;
-	u8 *pwr_constr_elem;
-	u8 *quiet_elem;	/* first quite element */
-	u8 *timeout_int;
-
-	/* length of them, respectively */
-	u8 ssid_len;
-	u8 supp_rates_len;
-	u8 fh_params_len;
-	u8 ds_params_len;
-	u8 cf_params_len;
-	u8 tim_len;
-	u8 ibss_params_len;
-	u8 challenge_len;
-	u8 wpa_len;
-	u8 rsn_len;
-	u8 erp_info_len;
-	u8 ext_supp_rates_len;
-	u8 wmm_info_len;
-	u8 wmm_param_len;
-	u8 mesh_id_len;
-	u8 peer_link_len;
-	u8 preq_len;
-	u8 prep_len;
-	u8 perr_len;
-	u8 ch_switch_elem_len;
-	u8 country_elem_len;
-	u8 pwr_constr_elem_len;
-	u8 quiet_elem_len;
-	u8 num_of_quiet_elem;	/* can be more the one */
-	u8 timeout_int_len;
-};
-
 static inline struct ieee80211_local *hw_to_local(
 	struct ieee80211_hw *hw)
 {
@@ -1233,23 +1177,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 				       struct net_device *dev);
 
-/*
- * radiotap header for status frames
- */
-struct ieee80211_tx_status_rtap_hdr {
-	struct ieee80211_radiotap_header hdr;
-	u8 rate;
-	u8 padding_for_rate;
-	__le16 tx_flags;
-	u8 data_retries;
-} __packed;
-
-
 /* HT */
 void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
 				       struct ieee80211_ht_cap *ht_cap_ie,
 				       struct ieee80211_sta_ht_cap *ht_cap);
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 			  const u8 *da, u16 tid,
 			  u16 initiator, u16 reason_code);
@@ -1333,6 +1264,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
 			     struct ieee80211_hdr *hdr, const u8 *tsc,
 			     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 void ieee802_11_parse_elems(u8 *start, size_t len,
 			    struct ieee802_11_elems *elems);
@@ -1364,11 +1296,11 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
 				    enum queue_stop_reason reason);
 void ieee80211_add_pending_skb(struct ieee80211_local *local,
 			       struct sk_buff *skb);
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
 			       struct sk_buff_head *skbs);
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
 				  struct sk_buff_head *skbs,
 				  void (*fn)(void *data), void *data);
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 			 u16 transaction, u16 auth_alg,
@@ -1386,7 +1318,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
 			      const u8 *ssid, size_t ssid_len,
 			      const u8 *ie, size_t ie_len,
-			      u32 ratemask, bool directed);
+			      u32 ratemask, bool directed, bool no_cck);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
 				  const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 556e7e6ddf0a..30d73552e9ab 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -299,8 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 			goto err_del_interface;
 		}
 
-		/* no locking required since STA is not live yet */
-		sta->flags |= WLAN_STA_AUTHORIZED;
+		/* no atomic bitop required since STA is not live yet */
+		set_sta_flag(sta, WLAN_STA_AUTHORIZED);
 
 		res = sta_info_insert(sta);
 		if (res) {
@@ -456,21 +456,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 					BSS_CHANGED_BEACON_ENABLED);
 
 		/* remove beacon */
-		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+		RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
 		synchronize_rcu();
 		kfree(old_beacon);
 
-		/* free all potentially still buffered bcast frames */
-		while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
-			local->total_ps_buffered--;
-			dev_kfree_skb(skb);
-		}
-
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
 					 u.vlan.list)
 			dev_close(vlan->dev);
 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
+
+		/* free all potentially still buffered bcast frames */
+		local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
+		skb_queue_purge(&sdata->u.ap.ps_bc_buf);
 	}
 
 	if (going_down)
@@ -645,7 +643,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 	.ndo_stop		= ieee80211_stop,
 	.ndo_uninit		= ieee80211_teardown_sdata,
 	.ndo_start_xmit		= ieee80211_subif_start_xmit,
-	.ndo_set_multicast_list = ieee80211_set_multicast_list,
+	.ndo_set_rx_mode	= ieee80211_set_multicast_list,
 	.ndo_change_mtu 	= ieee80211_change_mtu,
 	.ndo_set_mac_address 	= ieee80211_change_mac,
 	.ndo_select_queue	= ieee80211_netdev_select_queue,
@@ -689,7 +687,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
 	.ndo_stop		= ieee80211_stop,
 	.ndo_uninit		= ieee80211_teardown_sdata,
 	.ndo_start_xmit		= ieee80211_monitor_start_xmit,
-	.ndo_set_multicast_list = ieee80211_set_multicast_list,
+	.ndo_set_rx_mode	= ieee80211_set_multicast_list,
 	.ndo_change_mtu 	= ieee80211_change_mtu,
 	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_select_queue	= ieee80211_monitor_select_queue,
@@ -1214,6 +1212,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		mesh_path_flush_by_iface(sdata);
+
 	synchronize_rcu();
 	unregister_netdevice(sdata->dev);
 }
@@ -1233,6 +1234,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
 
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			mesh_path_flush_by_iface(sdata);
+
 		unregister_netdevice_queue(sdata->dev, &unreg_list);
 	}
 	mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5150c6d11b57..756b157c2edd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -464,7 +464,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
 		 * some hardware cannot handle TKIP with QoS, so
 		 * we indicate whether QoS could be in use.
 		 */
-		if (test_sta_flags(sta, WLAN_STA_WME))
+		if (test_sta_flag(sta, WLAN_STA_WME))
 			key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
 	} else {
 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -478,7 +478,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
 			/* same here, the AP could be using QoS */
 			ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
 			if (ap) {
-				if (test_sta_flags(ap, WLAN_STA_WME))
+				if (test_sta_flag(ap, WLAN_STA_WME))
 					key->conf.flags |=
 						IEEE80211_KEY_FLAG_WMM_STA;
 			}
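The test_sta_flags()/set_sta_flags() to test_sta_flag()/set_sta_flag() renames in ht.c, ibss.c, iface.c and key.c above come from converting station flags from a lock-protected u32 word to per-flag atomic bitops. A miniature of the new scheme (demo types only, not mac80211's actual definitions):

#include <linux/bitops.h>

enum demo_sta_flags { DEMO_STA_AUTH, DEMO_STA_WME, NUM_DEMO_STA_FLAGS };

struct demo_sta {
	unsigned long _flags[BITS_TO_LONGS(NUM_DEMO_STA_FLAGS)];
};

static inline void demo_set_flag(struct demo_sta *sta, enum demo_sta_flags f)
{
	set_bit(f, sta->_flags);		/* atomic RMW, no lock needed */
}

static inline bool demo_test_flag(struct demo_sta *sta, enum demo_sta_flags f)
{
	return test_bit(f, sta->_flags);	/* atomic read */
}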
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index acb44230b251..d999bf3b84e1 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -19,7 +19,7 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/bitmap.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
@@ -325,6 +325,8 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
 static void ieee80211_tasklet_handler(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *) data;
+	struct sta_info *sta, *tmp;
+	struct skb_eosp_msg_data *eosp_data;
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -340,6 +342,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
 			skb->pkt_type = 0;
 			ieee80211_tx_status(local_to_hw(local), skb);
 			break;
+		case IEEE80211_EOSP_MSG:
+			eosp_data = (void *)skb->cb;
+			for_each_sta_info(local, eosp_data->sta, sta, tmp) {
+				/* skip wrong virtual interface */
+				if (memcmp(eosp_data->iface,
+					   sta->sdata->vif.addr, ETH_ALEN))
+					continue;
+				clear_sta_flag(sta, WLAN_STA_SP);
+				break;
+			}
+			dev_kfree_skb(skb);
+			break;
 		default:
 			WARN(1, "mac80211: Packet is of unknown type %d\n",
 			     skb->pkt_type);
@@ -608,6 +622,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	local->hw.max_rates = 1;
 	local->hw.max_report_rates = 0;
 	local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+	local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
 	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
 	local->user_power_level = -1;
@@ -862,6 +877,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (local->ops->sched_scan_start)
 		local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 
+	/* mac80211 based drivers don't support internal TDLS setup */
+	if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
+		local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
 	result = wiphy_register(local->hw.wiphy);
 	if (result < 0)
 		goto fail_wiphy_register;
@@ -885,12 +904,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	 * and we need some headroom for passing the frame to monitor
 	 * interfaces, but never both at the same time.
 	 */
-#ifndef __CHECKER__
-	BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
-		     sizeof(struct ieee80211_tx_status_rtap_hdr));
-#endif
 	local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
-				   sizeof(struct ieee80211_tx_status_rtap_hdr));
+				   IEEE80211_TX_STATUS_HEADROOM);
 
 	debugfs_hw_add(local);
 
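The IEEE80211_EOSP_MSG case added to the tasklet above completes an out-of-band end-of-service-period notification: the driver reports EOSP from IRQ context, the event is queued as a dummy skb whose cb area holds a struct skb_eosp_msg_data, and the tasklet clears WLAN_STA_SP for the matching station. Assuming the ieee80211_sta_eosp_irqsafe() helper this series introduces elsewhere, a driver-side sketch (the driver function name is hypothetical):

static void mydrv_handle_tx_done_irq(struct ieee80211_hw *hw,
				     struct ieee80211_sta *sta, bool eosp)
{
	if (eosp)
		/* queues an IEEE80211_EOSP_MSG for the tasklet above */
		ieee80211_sta_eosp_irqsafe(sta);
}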
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980c8e60..a7078fdba8ca 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
17#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
18#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
19
20#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 16#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
21#define MESHCONF_CAPAB_FORWARDING 0x08 17#define MESHCONF_CAPAB_FORWARDING 0x08
22 18
@@ -27,6 +23,17 @@
27int mesh_allocated; 23int mesh_allocated;
28static struct kmem_cache *rm_cache; 24static struct kmem_cache *rm_cache;
29 25
26#ifdef CONFIG_MAC80211_MESH
27bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
28{
29 return (mgmt->u.action.u.mesh_action.action_code ==
30 WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
31}
32#else
33bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
34{ return false; }
35#endif
36
30void ieee80211s_init(void) 37void ieee80211s_init(void)
31{ 38{
32 mesh_pathtbl_init(); 39 mesh_pathtbl_init();
@@ -193,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
193 } 200 }
194 201
195 p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); 202 p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
196 if (!p) { 203 if (!p)
197 printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
198 return 0; 204 return 0;
199 } 205
200 p->seqnum = seqnum; 206 p->seqnum = seqnum;
201 p->exp_time = jiffies + RMC_TIMEOUT; 207 p->exp_time = jiffies + RMC_TIMEOUT;
202 memcpy(p->sa, sa, ETH_ALEN); 208 memcpy(p->sa, sa, ETH_ALEN);
@@ -204,89 +210,136 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
204 return 0; 210 return 0;
205} 211}
206 212
207void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) 213int
214mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
208{ 215{
209 struct ieee80211_local *local = sdata->local; 216 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
210 struct ieee80211_supported_band *sband; 217 u8 *pos, neighbors;
211 u8 *pos; 218 u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
212 int len, i, rate;
213 u8 neighbors;
214
215 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
216 len = sband->n_bitrates;
217 if (len > 8)
218 len = 8;
219 pos = skb_put(skb, len + 2);
220 *pos++ = WLAN_EID_SUPP_RATES;
221 *pos++ = len;
222 for (i = 0; i < len; i++) {
223 rate = sband->bitrates[i].bitrate;
224 *pos++ = (u8) (rate / 5);
225 }
226
227 if (sband->n_bitrates > len) {
228 pos = skb_put(skb, sband->n_bitrates - len + 2);
229 *pos++ = WLAN_EID_EXT_SUPP_RATES;
230 *pos++ = sband->n_bitrates - len;
231 for (i = len; i < sband->n_bitrates; i++) {
232 rate = sband->bitrates[i].bitrate;
233 *pos++ = (u8) (rate / 5);
234 }
235 }
236
237 if (sband->band == IEEE80211_BAND_2GHZ) {
238 pos = skb_put(skb, 2 + 1);
239 *pos++ = WLAN_EID_DS_PARAMS;
240 *pos++ = 1;
241 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
242 }
243 219
244 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); 220 if (skb_tailroom(skb) < 2 + meshconf_len)
245 *pos++ = WLAN_EID_MESH_ID; 221 return -ENOMEM;
246 *pos++ = sdata->u.mesh.mesh_id_len;
247 if (sdata->u.mesh.mesh_id_len)
248 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
249 222
250 pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie)); 223 pos = skb_put(skb, 2 + meshconf_len);
251 *pos++ = WLAN_EID_MESH_CONFIG; 224 *pos++ = WLAN_EID_MESH_CONFIG;
252 *pos++ = sizeof(struct ieee80211_meshconf_ie); 225 *pos++ = meshconf_len;
253 226
254 /* Active path selection protocol ID */ 227 /* Active path selection protocol ID */
255 *pos++ = sdata->u.mesh.mesh_pp_id; 228 *pos++ = ifmsh->mesh_pp_id;
256
257 /* Active path selection metric ID */ 229 /* Active path selection metric ID */
258 *pos++ = sdata->u.mesh.mesh_pm_id; 230 *pos++ = ifmsh->mesh_pm_id;
259
260 /* Congestion control mode identifier */ 231 /* Congestion control mode identifier */
261 *pos++ = sdata->u.mesh.mesh_cc_id; 232 *pos++ = ifmsh->mesh_cc_id;
262
263 /* Synchronization protocol identifier */ 233 /* Synchronization protocol identifier */
264 *pos++ = sdata->u.mesh.mesh_sp_id; 234 *pos++ = ifmsh->mesh_sp_id;
265
266 /* Authentication Protocol identifier */ 235 /* Authentication Protocol identifier */
267 *pos++ = sdata->u.mesh.mesh_auth_id; 236 *pos++ = ifmsh->mesh_auth_id;
268
269 /* Mesh Formation Info - number of neighbors */ 237 /* Mesh Formation Info - number of neighbors */
270 neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks); 238 neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
271 /* Number of neighbor mesh STAs or 15 whichever is smaller */ 239 /* Number of neighbor mesh STAs or 15 whichever is smaller */
272 neighbors = (neighbors > 15) ? 15 : neighbors; 240 neighbors = (neighbors > 15) ? 15 : neighbors;
273 *pos++ = neighbors << 1; 241 *pos++ = neighbors << 1;
274
275 /* Mesh capability */ 242 /* Mesh capability */
276 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); 243 ifmsh->accepting_plinks = mesh_plink_availables(sdata);
277 *pos = MESHCONF_CAPAB_FORWARDING; 244 *pos = MESHCONF_CAPAB_FORWARDING;
278 *pos++ |= sdata->u.mesh.accepting_plinks ? 245 *pos++ |= ifmsh->accepting_plinks ?
279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 246 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
280 *pos++ = 0x00; 247 *pos++ = 0x00;
281 248
282 if (sdata->u.mesh.ie) { 249 return 0;
283 int len = sdata->u.mesh.ie_len; 250}
284 const u8 *data = sdata->u.mesh.ie; 251
285 if (skb_tailroom(skb) > len) 252int
286 memcpy(skb_put(skb, len), data, len); 253mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
254{
255 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
256 u8 *pos;
257
258 if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
259 return -ENOMEM;
260
261 pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
262 *pos++ = WLAN_EID_MESH_ID;
263 *pos++ = ifmsh->mesh_id_len;
264 if (ifmsh->mesh_id_len)
265 memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
266
267 return 0;
268}
269
270int
271mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
272{
273 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
274 u8 offset, len;
275 const u8 *data;
276
277 if (!ifmsh->ie || !ifmsh->ie_len)
278 return 0;
279
280 /* fast-forward to vendor IEs */
281 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
282
283 if (offset) {
284 len = ifmsh->ie_len - offset;
285 data = ifmsh->ie + offset;
286 if (skb_tailroom(skb) < len)
287 return -ENOMEM;
288 memcpy(skb_put(skb, len), data, len);
287 } 289 }
290
291 return 0;
288} 292}
289 293
294int
295mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
296{
297 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
298 u8 len = 0;
299 const u8 *data;
300
301 if (!ifmsh->ie || !ifmsh->ie_len)
302 return 0;
303
304 /* find RSN IE */
305 data = ifmsh->ie;
306 while (data < ifmsh->ie + ifmsh->ie_len) {
307 if (*data == WLAN_EID_RSN) {
308 len = data[1] + 2;
309 break;
310 }
311 data++;
312 }
313
314 if (len) {
315 if (skb_tailroom(skb) < len)
316 return -ENOMEM;
317 memcpy(skb_put(skb, len), data, len);
318 }
319
320 return 0;
321}
322
323int mesh_add_ds_params_ie(struct sk_buff *skb,
324 struct ieee80211_sub_if_data *sdata)
325{
326 struct ieee80211_local *local = sdata->local;
327 struct ieee80211_supported_band *sband;
328 u8 *pos;
329
330 if (skb_tailroom(skb) < 3)
331 return -ENOMEM;
332
333 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
334 if (sband->band == IEEE80211_BAND_2GHZ) {
335 pos = skb_put(skb, 2 + 1);
336 *pos++ = WLAN_EID_DS_PARAMS;
337 *pos++ = 1;
338 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
339 }
340
341 return 0;
342}
290 343
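Each of the new mesh_add_*_ie() helpers above follows the same shape: check that the skb has tailroom for the whole element (2-byte header plus payload), fail with -ENOMEM otherwise, then append ID, length, and payload. A minimal userspace sketch of that pattern, with a plain byte buffer standing in for the skb; the element ID value is assumed here, not taken from this diff:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct buf { unsigned char data[64]; size_t len; };

/* Append one information element: 1-byte ID, 1-byte length, payload. */
static int buf_add_ie(struct buf *b, unsigned char eid,
                      const unsigned char *payload, unsigned char plen)
{
    if (sizeof(b->data) - b->len < (size_t)(2 + plen))
        return -ENOMEM;        /* mirrors the tailroom check */
    b->data[b->len++] = eid;
    b->data[b->len++] = plen;
    memcpy(&b->data[b->len], payload, plen);
    b->len += plen;
    return 0;
}

int main(void)
{
    struct buf b = { .len = 0 };
    const unsigned char mesh_id[] = "example-mesh";    /* invented */

    if (buf_add_ie(&b, 114 /* assumed WLAN_EID_MESH_ID */, mesh_id,
                   sizeof(mesh_id) - 1))
        return 1;
    printf("frame is now %zu bytes\n", b.len);
    return 0;
}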
291static void ieee80211_mesh_path_timer(unsigned long data) 344static void ieee80211_mesh_path_timer(unsigned long data)
292{ 345{
@@ -352,8 +405,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
352 memcpy(hdr->addr3, meshsa, ETH_ALEN); 405 memcpy(hdr->addr3, meshsa, ETH_ALEN);
353 return 24; 406 return 24;
354 } else { 407 } else {
355 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | 408 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
356 IEEE80211_FCTL_TODS);
357 /* RA TA DA SA */ 409 /* RA TA DA SA */
358 memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ 410 memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
359 memcpy(hdr->addr2, meshsa, ETH_ALEN); 411 memcpy(hdr->addr2, meshsa, ETH_ALEN);
@@ -425,7 +477,8 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
425 477
426 mesh_path_tx_root_frame(sdata); 478 mesh_path_tx_root_frame(sdata);
427 mod_timer(&ifmsh->mesh_path_root_timer, 479 mod_timer(&ifmsh->mesh_path_root_timer,
428 round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL)); 480 round_jiffies(TU_TO_EXP_TIME(
481 ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
429} 482}
430 483
431#ifdef CONFIG_PM 484#ifdef CONFIG_PM
@@ -433,7 +486,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
433{ 486{
434 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 487 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
435 488
436 /* use atomic bitops in case both timers fire at the same time */ 489 /* use atomic bitops in case all timers fire at the same time */
437 490
438 if (del_timer_sync(&ifmsh->housekeeping_timer)) 491 if (del_timer_sync(&ifmsh->housekeeping_timer))
439 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); 492 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -557,11 +610,18 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
557 struct ieee80211_rx_status *rx_status) 610 struct ieee80211_rx_status *rx_status)
558{ 611{
559 switch (mgmt->u.action.category) { 612 switch (mgmt->u.action.category) {
560 case WLAN_CATEGORY_MESH_ACTION: 613 case WLAN_CATEGORY_SELF_PROTECTED:
561 mesh_rx_plink_frame(sdata, mgmt, len, rx_status); 614 switch (mgmt->u.action.u.self_prot.action_code) {
615 case WLAN_SP_MESH_PEERING_OPEN:
616 case WLAN_SP_MESH_PEERING_CLOSE:
617 case WLAN_SP_MESH_PEERING_CONFIRM:
618 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
619 break;
620 }
562 break; 621 break;
563 case WLAN_CATEGORY_MESH_PATH_SEL: 622 case WLAN_CATEGORY_MESH_ACTION:
564 mesh_rx_path_sel_frame(sdata, mgmt, len); 623 if (mesh_action_is_path_sel(mgmt))
624 mesh_rx_path_sel_frame(sdata, mgmt, len);
565 break; 625 break;
566 } 626 }
567} 627}
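The hunk above narrows what each handler may see: peering frames are accepted only in the self-protected category with one of the three peering action codes, and path-selection frames only when mesh_action_is_path_sel() agrees. The two-level dispatch, modeled in standalone C (all enum names and values are invented; the real ones live in ieee80211.h):

#include <stdio.h>

enum category { CAT_SELF_PROTECTED, CAT_MESH_ACTION };    /* invented */
enum sp_action { SP_PEERING_OPEN, SP_PEERING_CONFIRM,
                 SP_PEERING_CLOSE, SP_OTHER };

static void rx_action(enum category cat, enum sp_action code,
                      int is_path_sel)
{
    switch (cat) {
    case CAT_SELF_PROTECTED:
        switch (code) {
        case SP_PEERING_OPEN:
        case SP_PEERING_CONFIRM:
        case SP_PEERING_CLOSE:
            puts("-> peering handler");
            break;
        default:
            puts("-> ignored self-protected action");
        }
        break;
    case CAT_MESH_ACTION:
        if (is_path_sel)
            puts("-> HWMP path selection handler");
        break;
    }
}

int main(void)
{
    rx_action(CAT_SELF_PROTECTED, SP_PEERING_OPEN, 0);
    rx_action(CAT_MESH_ACTION, SP_OTHER, 1);
    return 0;
}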
@@ -633,6 +693,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
633 ifmsh->accepting_plinks = true; 693 ifmsh->accepting_plinks = true;
634 ifmsh->preq_id = 0; 694 ifmsh->preq_id = 0;
635 ifmsh->sn = 0; 695 ifmsh->sn = 0;
696 ifmsh->num_gates = 0;
636 atomic_set(&ifmsh->mpaths, 0); 697 atomic_set(&ifmsh->mpaths, 0);
637 mesh_rmc_init(sdata); 698 mesh_rmc_init(sdata);
638 ifmsh->last_preq = jiffies; 699 ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 249e733362e7..8c00e2d1d636 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,10 @@ enum mesh_deferred_task_flags {
80 * retry 80 * retry
81 * @discovery_retries: number of discovery retries 81 * @discovery_retries: number of discovery retries
82 * @flags: mesh path flags, as specified on &enum mesh_path_flags 82 * @flags: mesh path flags, as specified on &enum mesh_path_flags
83 * @state_lock: mesh path state lock 83 * @state_lock: mesh path state lock used to protect changes to the
84 * mpath itself. There is no need to take this lock when adding an
85 * mpath to or removing it from a hash bucket of a path table.
86 * @is_gate: the destination station of this path is a mesh gate
84 * 87 *
85 * 88 *
86 * The combination of dst and sdata is unique in the mesh path table. Since the 89 * The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +107,7 @@ struct mesh_path {
104 u8 discovery_retries; 107 u8 discovery_retries;
105 enum mesh_path_flags flags; 108 enum mesh_path_flags flags;
106 spinlock_t state_lock; 109 spinlock_t state_lock;
110 bool is_gate;
107}; 111};
108 112
109/** 113/**
@@ -120,6 +124,9 @@ struct mesh_path {
120 * buckets 124 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 125 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
122 * reached, the table will grow 126 * reached, the table will grow
127 * @known_gates: list of mesh gates known to this station, and their
128 * mpaths. A gate's mpath may or may not be resolved and active.
129 *
123 * rcu_head: RCU head to free the table 130 * rcu_head: RCU head to free the table
124 */ 131 */
125struct mesh_table { 132struct mesh_table {
@@ -133,6 +140,8 @@ struct mesh_table {
133 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 140 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
134 int size_order; 141 int size_order;
135 int mean_chain_len; 142 int mean_chain_len;
143 struct hlist_head *known_gates;
144 spinlock_t gates_lock;
136 145
137 struct rcu_head rcu_head; 146 struct rcu_head rcu_head;
138}; 147};
@@ -166,6 +175,8 @@ struct mesh_rmc {
166 u32 idx_mask; 175 u32 idx_mask;
167}; 176};
168 177
178#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
179#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
169 180
170#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */ 181#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
171 182
@@ -177,14 +188,6 @@ struct mesh_rmc {
177/* Maximum number of paths per interface */ 188/* Maximum number of paths per interface */
178#define MESH_MAX_MPATHS 1024 189#define MESH_MAX_MPATHS 1024
179 190
180/* Pending ANA approval */
181#define MESH_PATH_SEL_ACTION 0
182
183/* PERR reason codes */
184#define PEER_RCODE_UNSPECIFIED 11
185#define PERR_RCODE_NO_ROUTE 12
186#define PERR_RCODE_DEST_UNREACH 13
187
188/* Public interfaces */ 191/* Public interfaces */
189/* Various */ 192/* Various */
190int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 193int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -199,6 +202,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie,
199void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); 202void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
200void mesh_mgmt_ies_add(struct sk_buff *skb, 203void mesh_mgmt_ies_add(struct sk_buff *skb,
201 struct ieee80211_sub_if_data *sdata); 204 struct ieee80211_sub_if_data *sdata);
205int mesh_add_meshconf_ie(struct sk_buff *skb,
206 struct ieee80211_sub_if_data *sdata);
207int mesh_add_meshid_ie(struct sk_buff *skb,
208 struct ieee80211_sub_if_data *sdata);
209int mesh_add_rsn_ie(struct sk_buff *skb,
210 struct ieee80211_sub_if_data *sdata);
211int mesh_add_vendor_ies(struct sk_buff *skb,
212 struct ieee80211_sub_if_data *sdata);
213int mesh_add_ds_params_ie(struct sk_buff *skb,
214 struct ieee80211_sub_if_data *sdata);
202void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 215void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
203int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 216int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
204void ieee80211s_init(void); 217void ieee80211s_init(void);
@@ -223,10 +236,13 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx,
223 struct ieee80211_sub_if_data *sdata); 236 struct ieee80211_sub_if_data *sdata);
224void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 237void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
225void mesh_path_expire(struct ieee80211_sub_if_data *sdata); 238void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
226void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
227void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 239void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
228 struct ieee80211_mgmt *mgmt, size_t len); 240 struct ieee80211_mgmt *mgmt, size_t len);
229int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 241int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
242
243int mesh_path_add_gate(struct mesh_path *mpath);
244int mesh_path_send_to_gates(struct mesh_path *mpath);
245int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
230/* Mesh plinks */ 246/* Mesh plinks */
231void mesh_neighbour_update(u8 *hw_addr, u32 rates, 247void mesh_neighbour_update(u8 *hw_addr, u32 rates,
232 struct ieee80211_sub_if_data *sdata, 248 struct ieee80211_sub_if_data *sdata,
@@ -256,12 +272,14 @@ void mesh_pathtbl_unregister(void);
256int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); 272int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
257void mesh_path_timer(unsigned long data); 273void mesh_path_timer(unsigned long data);
258void mesh_path_flush_by_nexthop(struct sta_info *sta); 274void mesh_path_flush_by_nexthop(struct sta_info *sta);
275void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
259void mesh_path_discard_frame(struct sk_buff *skb, 276void mesh_path_discard_frame(struct sk_buff *skb,
260 struct ieee80211_sub_if_data *sdata); 277 struct ieee80211_sub_if_data *sdata);
261void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 278void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
262void mesh_path_restart(struct ieee80211_sub_if_data *sdata); 279void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
263void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); 280void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
264 281
282bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
265extern int mesh_paths_generation; 283extern int mesh_paths_generation;
266 284
267#ifdef CONFIG_MAC80211_MESH 285#ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3460108810d5..174040a42887 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,10 +8,12 @@
8 */ 8 */
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include "wme.h"
11#include "mesh.h" 12#include "mesh.h"
12 13
13#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG 14#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
14#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args) 15#define mhwmp_dbg(fmt, args...) \
16 printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
15#else 17#else
16#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0) 18#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
17#endif 19#endif
@@ -68,12 +70,12 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
68#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) 70#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
69#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) 71#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
70#define PREP_IE_TTL(x) PREQ_IE_TTL(x) 72#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
71#define PREP_IE_ORIG_ADDR(x) (x + 3) 73#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
72#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0) 74#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
73#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)) 75#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
74#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)) 76#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
75#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) 77#define PREP_IE_TARGET_ADDR(x) (x + 3)
76#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x)) 78#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
77 79
78#define PERR_IE_TTL(x) (*(x)) 80#define PERR_IE_TTL(x) (*(x))
79#define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) 81#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
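The old PREP macros read the originator fields from the front of the element and the target fields from the tail, swapped relative to the element layout; the corrected ones put the target fields first, with the optional external address (Address Extension) sitting between the target sequence number and the lifetime. A tiny sketch of the offsets the new macros imply (offsets only; the AE bit position itself is not shown in this hunk):

#include <stdio.h>

/* PREP element body, as the corrected macros read it:
 *   0 flags, 1 hop count, 2 TTL,
 *   3 target address (6 bytes), 9 target seq no. (4 bytes),
 *  13 optional external address (6 bytes, only with the AE flag),
 * then lifetime, metric, originator address, originator seq no. */
static void prep_offsets(int ae)
{
    int shift = ae ? 6 : 0;

    printf("target addr @3, target sn @9\n");
    printf("lifetime @%d, metric @%d, orig addr @%d, orig sn @%d\n",
           13 + shift, 17 + shift, 21 + shift, 27 + shift);
}

int main(void)
{
    prep_offsets(0);    /* no Address Extension */
    prep_offsets(1);    /* with Address Extension */
    return 0;
}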
@@ -132,24 +134,25 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
132 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 134 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
133 /* BSSID == SA */ 135 /* BSSID == SA */
134 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 136 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
135 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; 137 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
136 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 138 mgmt->u.action.u.mesh_action.action_code =
139 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
137 140
138 switch (action) { 141 switch (action) {
139 case MPATH_PREQ: 142 case MPATH_PREQ:
140 mhwmp_dbg("sending PREQ to %pM\n", target); 143 mhwmp_dbg("sending PREQ to %pM", target);
141 ie_len = 37; 144 ie_len = 37;
142 pos = skb_put(skb, 2 + ie_len); 145 pos = skb_put(skb, 2 + ie_len);
143 *pos++ = WLAN_EID_PREQ; 146 *pos++ = WLAN_EID_PREQ;
144 break; 147 break;
145 case MPATH_PREP: 148 case MPATH_PREP:
146 mhwmp_dbg("sending PREP to %pM\n", target); 149 mhwmp_dbg("sending PREP to %pM", target);
147 ie_len = 31; 150 ie_len = 31;
148 pos = skb_put(skb, 2 + ie_len); 151 pos = skb_put(skb, 2 + ie_len);
149 *pos++ = WLAN_EID_PREP; 152 *pos++ = WLAN_EID_PREP;
150 break; 153 break;
151 case MPATH_RANN: 154 case MPATH_RANN:
152 mhwmp_dbg("sending RANN from %pM\n", orig_addr); 155 mhwmp_dbg("sending RANN from %pM", orig_addr);
153 ie_len = sizeof(struct ieee80211_rann_ie); 156 ie_len = sizeof(struct ieee80211_rann_ie);
154 pos = skb_put(skb, 2 + ie_len); 157 pos = skb_put(skb, 2 + ie_len);
155 *pos++ = WLAN_EID_RANN; 158 *pos++ = WLAN_EID_RANN;
@@ -163,35 +166,63 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
163 *pos++ = flags; 166 *pos++ = flags;
164 *pos++ = hop_count; 167 *pos++ = hop_count;
165 *pos++ = ttl; 168 *pos++ = ttl;
166 if (action == MPATH_PREQ) { 169 if (action == MPATH_PREP) {
167 memcpy(pos, &preq_id, 4); 170 memcpy(pos, target, ETH_ALEN);
171 pos += ETH_ALEN;
172 memcpy(pos, &target_sn, 4);
168 pos += 4; 173 pos += 4;
169 } 174 } else {
170 memcpy(pos, orig_addr, ETH_ALEN); 175 if (action == MPATH_PREQ) {
171 pos += ETH_ALEN; 176 memcpy(pos, &preq_id, 4);
172 memcpy(pos, &orig_sn, 4); 177 pos += 4;
173 pos += 4; 178 }
174 if (action != MPATH_RANN) { 179 memcpy(pos, orig_addr, ETH_ALEN);
175 memcpy(pos, &lifetime, 4); 180 pos += ETH_ALEN;
181 memcpy(pos, &orig_sn, 4);
176 pos += 4; 182 pos += 4;
177 } 183 }
184 memcpy(pos, &lifetime, 4); /* interval for RANN */
185 pos += 4;
178 memcpy(pos, &metric, 4); 186 memcpy(pos, &metric, 4);
179 pos += 4; 187 pos += 4;
180 if (action == MPATH_PREQ) { 188 if (action == MPATH_PREQ) {
181 /* destination count */ 189 *pos++ = 1; /* destination count */
182 *pos++ = 1;
183 *pos++ = target_flags; 190 *pos++ = target_flags;
184 }
185 if (action != MPATH_RANN) {
186 memcpy(pos, target, ETH_ALEN); 191 memcpy(pos, target, ETH_ALEN);
187 pos += ETH_ALEN; 192 pos += ETH_ALEN;
188 memcpy(pos, &target_sn, 4); 193 memcpy(pos, &target_sn, 4);
194 pos += 4;
195 } else if (action == MPATH_PREP) {
196 memcpy(pos, orig_addr, ETH_ALEN);
197 pos += ETH_ALEN;
198 memcpy(pos, &orig_sn, 4);
199 pos += 4;
189 } 200 }
190 201
191 ieee80211_tx_skb(sdata, skb); 202 ieee80211_tx_skb(sdata, skb);
192 return 0; 203 return 0;
193} 204}
194 205
206
207/* Headroom is not adjusted. Caller should ensure that skb has sufficient
208 * headroom in case the frame is encrypted. */
209static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
210 struct sk_buff *skb)
211{
212 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
213
214 skb_set_mac_header(skb, 0);
215 skb_set_network_header(skb, 0);
216 skb_set_transport_header(skb, 0);
217
218 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
219 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
220 skb->priority = 7;
221
222 info->control.vif = &sdata->vif;
223 ieee80211_set_qos_hdr(sdata, skb);
224}
225
195/** 226/**
196 * mesh_path_error_tx - Sends a PERR mesh management frame 227 *
197 * 228 *
@@ -199,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
199 * @target_sn: SN of the broken destination 230 * @target_sn: SN of the broken destination
200 * @target_rcode: reason code for this PERR 231 * @target_rcode: reason code for this PERR
201 * @ra: node this frame is addressed to 232 * @ra: node this frame is addressed to
233 *
234 * Note: This function may be called with driver locks taken that the driver
235 * also acquires in the TX path. To avoid a deadlock we don't transmit the
236 * frame directly but add it to the pending queue instead.
202 */ 237 */
203int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, 238int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
204 __le16 target_rcode, const u8 *ra, 239 __le16 target_rcode, const u8 *ra,
@@ -212,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
212 247
213 if (!skb) 248 if (!skb)
214 return -1; 249 return -1;
215 skb_reserve(skb, local->hw.extra_tx_headroom); 250 skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
216 /* 25 is the size of the common mgmt part (24) plus the size of the 251 /* 25 is the size of the common mgmt part (24) plus the size of the
217 * common action part (1) 252 * common action part (1)
218 */ 253 */
@@ -224,9 +259,11 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
224 259
225 memcpy(mgmt->da, ra, ETH_ALEN); 260 memcpy(mgmt->da, ra, ETH_ALEN);
226 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 261 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
227 /* BSSID is left zeroed, wildcard value */ 262 /* BSSID == SA */
228 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; 263 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
229 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 264 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
265 mgmt->u.action.u.mesh_action.action_code =
266 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
230 ie_len = 15; 267 ie_len = 15;
231 pos = skb_put(skb, 2 + ie_len); 268 pos = skb_put(skb, 2 + ie_len);
232 *pos++ = WLAN_EID_PERR; 269 *pos++ = WLAN_EID_PERR;
@@ -251,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
251 pos += 4; 288 pos += 4;
252 memcpy(pos, &target_rcode, 2); 289 memcpy(pos, &target_rcode, 2);
253 290
254 ieee80211_tx_skb(sdata, skb); 291 /* see note in function header */
292 prepare_frame_for_deferred_tx(sdata, skb);
293 ieee80211_add_pending_skb(local, skb);
255 return 0; 294 return 0;
256} 295}
257 296
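The new note above spells out why the PERR can no longer be transmitted inline: mesh_path_error_tx() may run under driver locks that the TX path also takes, so the frame is parked on the pending queue and sent later from a lock-free context. The defer-then-flush pattern in miniature (single-threaded userspace model, names invented):

#include <stdio.h>
#include <stdlib.h>

struct frame { int id; struct frame *next; };
static struct frame *pending;    /* stand-in for the pending skb queue */

/* Called where transmitting directly could deadlock: just park the
 * frame instead of handing it to the TX path. */
static void add_pending(struct frame *f)
{
    f->next = pending;
    pending = f;
}

/* Called later from a context that holds no driver locks. */
static void flush_pending(void)
{
    while (pending) {
        struct frame *f = pending;

        pending = f->next;
        printf("tx frame %d\n", f->id);
        free(f);
    }
}

int main(void)
{
    struct frame *f = malloc(sizeof(*f));

    if (!f)
        return 1;
    f->id = 1;
    add_pending(f);     /* e.g. inside mesh_path_error_tx() */
    flush_pending();    /* e.g. from the pending-work handler */
    return 0;
}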
@@ -449,7 +488,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
449 488
450 if (fresh_info) { 489 if (fresh_info) {
451 mesh_path_assign_nexthop(mpath, sta); 490 mesh_path_assign_nexthop(mpath, sta);
452 mpath->flags &= ~MESH_PATH_SN_VALID;
453 mpath->metric = last_hop_metric; 491 mpath->metric = last_hop_metric;
454 mpath->exp_time = time_after(mpath->exp_time, exp_time) 492 mpath->exp_time = time_after(mpath->exp_time, exp_time)
455 ? mpath->exp_time : exp_time; 493 ? mpath->exp_time : exp_time;
@@ -484,10 +522,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
484 orig_sn = PREQ_IE_ORIG_SN(preq_elem); 522 orig_sn = PREQ_IE_ORIG_SN(preq_elem);
485 target_flags = PREQ_IE_TARGET_F(preq_elem); 523 target_flags = PREQ_IE_TARGET_F(preq_elem);
486 524
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 525 mhwmp_dbg("received PREQ from %pM", orig_addr);
488 526
489 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) { 527 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 528 mhwmp_dbg("PREQ is for us");
491 forward = false; 529 forward = false;
492 reply = true; 530 reply = true;
493 metric = 0; 531 metric = 0;
@@ -523,7 +561,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
523 lifetime = PREQ_IE_LIFETIME(preq_elem); 561 lifetime = PREQ_IE_LIFETIME(preq_elem);
524 ttl = ifmsh->mshcfg.element_ttl; 562 ttl = ifmsh->mshcfg.element_ttl;
525 if (ttl != 0) { 563 if (ttl != 0) {
526 mhwmp_dbg("replying to the PREQ\n"); 564 mhwmp_dbg("replying to the PREQ");
527 mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr, 565 mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
528 cpu_to_le32(target_sn), 0, orig_addr, 566 cpu_to_le32(target_sn), 0, orig_addr,
529 cpu_to_le32(orig_sn), mgmt->sa, 0, ttl, 567 cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
@@ -543,7 +581,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
543 ifmsh->mshstats.dropped_frames_ttl++; 581 ifmsh->mshstats.dropped_frames_ttl++;
544 return; 582 return;
545 } 583 }
546 mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr); 584 mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
547 --ttl; 585 --ttl;
548 flags = PREQ_IE_FLAGS(preq_elem); 586 flags = PREQ_IE_FLAGS(preq_elem);
549 preq_id = PREQ_IE_PREQ_ID(preq_elem); 587 preq_id = PREQ_IE_PREQ_ID(preq_elem);
@@ -578,7 +616,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
578 u8 next_hop[ETH_ALEN]; 616 u8 next_hop[ETH_ALEN];
579 u32 target_sn, orig_sn, lifetime; 617 u32 target_sn, orig_sn, lifetime;
580 618
581 mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem)); 619 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
582 620
583 /* Note that we diverge from the draft nomenclature and use 621 *
584 * "destination" for what the draft refers to as the originator. So in this 622 *
@@ -684,6 +722,8 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
684 u8 ttl, flags, hopcount; 722 u8 ttl, flags, hopcount;
685 u8 *orig_addr; 723 u8 *orig_addr;
686 u32 orig_sn, metric; 724 u32 orig_sn, metric;
725 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
726 bool root_is_gate;
687 727
688 ttl = rann->rann_ttl; 728 ttl = rann->rann_ttl;
689 if (ttl <= 1) { 729 if (ttl <= 1) {
@@ -692,12 +732,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
692 } 732 }
693 ttl--; 733 ttl--;
694 flags = rann->rann_flags; 734 flags = rann->rann_flags;
735 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
695 orig_addr = rann->rann_addr; 736 orig_addr = rann->rann_addr;
696 orig_sn = rann->rann_seq; 737 orig_sn = rann->rann_seq;
697 hopcount = rann->rann_hopcount; 738 hopcount = rann->rann_hopcount;
698 hopcount++; 739 hopcount++;
699 metric = rann->rann_metric; 740 metric = rann->rann_metric;
700 mhwmp_dbg("received RANN from %pM\n", orig_addr); 741
742 /* Ignore our own RANNs */
743 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
744 return;
745
746 mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
747 root_is_gate);
701 748
702 rcu_read_lock(); 749 rcu_read_lock();
703 mpath = mesh_path_lookup(orig_addr, sdata); 750 mpath = mesh_path_lookup(orig_addr, sdata);
@@ -709,18 +756,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
709 sdata->u.mesh.mshstats.dropped_frames_no_route++; 756 sdata->u.mesh.mshstats.dropped_frames_no_route++;
710 return; 757 return;
711 } 758 }
712 mesh_queue_preq(mpath,
713 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
714 } 759 }
760
761 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
762 time_after(jiffies, mpath->exp_time - 1*HZ)) &&
763 !(mpath->flags & MESH_PATH_FIXED)) {
764 mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
765 orig_addr);
766 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
767 }
768
715 if (mpath->sn < orig_sn) { 769 if (mpath->sn < orig_sn) {
716 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 770 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
717 cpu_to_le32(orig_sn), 771 cpu_to_le32(orig_sn),
718 0, NULL, 0, broadcast_addr, 772 0, NULL, 0, broadcast_addr,
719 hopcount, ttl, 0, 773 hopcount, ttl, cpu_to_le32(interval),
720 cpu_to_le32(metric + mpath->metric), 774 cpu_to_le32(metric + mpath->metric),
721 0, sdata); 775 0, sdata);
722 mpath->sn = orig_sn; 776 mpath->sn = orig_sn;
723 } 777 }
778 if (root_is_gate)
779 mesh_path_add_gate(mpath);
780
724 rcu_read_unlock(); 781 rcu_read_unlock();
725} 782}
726 783
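The refresh condition above leans on time_after(jiffies, mpath->exp_time - 1*HZ), which stays correct even when the jiffies counter wraps because the comparison is done through a signed subtraction. The idiom, runnable on its own:

#include <stdio.h>

/* Wraparound-safe "a is later than b": the same trick the kernel's
 * time_after() macro uses for jiffies. */
static int time_after_ul(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    unsigned long now = (unsigned long)-5;    /* just before wraparound */
    unsigned long exp = now + 10;             /* wraps to a small value */

    printf("%d\n", time_after_ul(now, exp));        /* 0: not yet */
    printf("%d\n", time_after_ul(now + 20, exp));   /* 1: expired */
    return 0;
}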
@@ -732,11 +789,20 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
732 struct ieee802_11_elems elems; 789 struct ieee802_11_elems elems;
733 size_t baselen; 790 size_t baselen;
734 u32 last_hop_metric; 791 u32 last_hop_metric;
792 struct sta_info *sta;
735 793
736 /* need action_code */ 794 /* need action_code */
737 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 795 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
738 return; 796 return;
739 797
798 rcu_read_lock();
799 sta = sta_info_get(sdata, mgmt->sa);
800 if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
801 rcu_read_unlock();
802 return;
803 }
804 rcu_read_unlock();
805
740 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; 806 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
741 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, 807 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
742 len - baselen, &elems); 808 len - baselen, &elems);
@@ -788,16 +854,16 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
788 854
789 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); 855 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
790 if (!preq_node) { 856 if (!preq_node) {
791 mhwmp_dbg("could not allocate PREQ node\n"); 857 mhwmp_dbg("could not allocate PREQ node");
792 return; 858 return;
793 } 859 }
794 860
795 spin_lock(&ifmsh->mesh_preq_queue_lock); 861 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
796 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { 862 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
797 spin_unlock(&ifmsh->mesh_preq_queue_lock); 863 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
798 kfree(preq_node); 864 kfree(preq_node);
799 if (printk_ratelimit()) 865 if (printk_ratelimit())
800 mhwmp_dbg("PREQ node queue full\n"); 866 mhwmp_dbg("PREQ node queue full");
801 return; 867 return;
802 } 868 }
803 869
@@ -806,7 +872,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
806 872
807 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); 873 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
808 ++ifmsh->preq_queue_len; 874 ++ifmsh->preq_queue_len;
809 spin_unlock(&ifmsh->mesh_preq_queue_lock); 875 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
810 876
811 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) 877 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
812 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 878 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
@@ -982,35 +1048,46 @@ void mesh_path_timer(unsigned long data)
982{ 1048{
983 struct mesh_path *mpath = (void *) data; 1049 struct mesh_path *mpath = (void *) data;
984 struct ieee80211_sub_if_data *sdata = mpath->sdata; 1050 struct ieee80211_sub_if_data *sdata = mpath->sdata;
1051 int ret;
985 1052
986 if (sdata->local->quiescing) 1053 if (sdata->local->quiescing)
987 return; 1054 return;
988 1055
989 spin_lock_bh(&mpath->state_lock); 1056 spin_lock_bh(&mpath->state_lock);
990 if (mpath->flags & MESH_PATH_RESOLVED || 1057 if (mpath->flags & MESH_PATH_RESOLVED ||
991 (!(mpath->flags & MESH_PATH_RESOLVING))) 1058 (!(mpath->flags & MESH_PATH_RESOLVING))) {
992 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 1059 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
993 else if (mpath->discovery_retries < max_preq_retries(sdata)) { 1060 spin_unlock_bh(&mpath->state_lock);
1061 } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
994 ++mpath->discovery_retries; 1062 ++mpath->discovery_retries;
995 mpath->discovery_timeout *= 2; 1063 mpath->discovery_timeout *= 2;
1064 spin_unlock_bh(&mpath->state_lock);
996 mesh_queue_preq(mpath, 0); 1065 mesh_queue_preq(mpath, 0);
997 } else { 1066 } else {
998 mpath->flags = 0; 1067 mpath->flags = 0;
999 mpath->exp_time = jiffies; 1068 mpath->exp_time = jiffies;
1000 mesh_path_flush_pending(mpath); 1069 spin_unlock_bh(&mpath->state_lock);
1070 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1071 ret = mesh_path_send_to_gates(mpath);
1072 if (ret)
1073 mhwmp_dbg("no gate was reachable");
1074 } else
1075 mesh_path_flush_pending(mpath);
1001 } 1076 }
1002
1003 spin_unlock_bh(&mpath->state_lock);
1004} 1077}
1005 1078
1006void 1079void
1007mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) 1080mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1008{ 1081{
1009 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1082 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1083 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1084 u8 flags;
1010 1085
1011 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr, 1086 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1087 ? RANN_FLAG_IS_GATE : 0;
1088 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1012 cpu_to_le32(++ifmsh->sn), 1089 cpu_to_le32(++ifmsh->sn),
1013 0, NULL, 0, broadcast_addr, 1090 0, NULL, 0, broadcast_addr,
1014 0, sdata->u.mesh.mshcfg.element_ttl, 1091 0, sdata->u.mesh.mshcfg.element_ttl,
1015 0, 0, 0, sdata); 1092 cpu_to_le32(interval), 0, 0, sdata);
1016} 1093}
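In the hunk above, mesh_path_timer() doubles discovery_timeout on every failed discovery until max_preq_retries() is exhausted, and only then falls back to the known gates (or flushes the queue). The backoff schedule, sketched with invented constants:

#include <stdio.h>

#define MAX_PREQ_RETRIES 4    /* assumption; the real limit is per-config */

int main(void)
{
    int timeout = 100;    /* initial discovery timeout, arbitrary units */
    int retries;

    for (retries = 0; retries < MAX_PREQ_RETRIES; retries++) {
        printf("retry %d: wait %d, then re-send PREQ\n",
               retries + 1, timeout);
        timeout *= 2;    /* discovery_timeout *= 2 above */
    }
    puts("give up: try the known gates, else flush pending frames");
    return 0;
}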
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee6518254..7f54c5042235 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <net/mac80211.h> 16#include <net/mac80211.h>
17#include "wme.h"
17#include "ieee80211_i.h" 18#include "ieee80211_i.h"
18#include "mesh.h" 19#include "mesh.h"
19 20
21#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
22#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
23#else
24#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
25#endif
26
20/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ 27/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
21#define INIT_PATHS_SIZE_ORDER 2 28#define INIT_PATHS_SIZE_ORDER 2
22 29
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
42int mesh_paths_generation; 49int mesh_paths_generation;
43 50
44/* This lock will have the grow table function as writer and add / delete nodes 51/* This lock will have the grow table function as writer and add / delete nodes
45 * as readers. When reading the table (i.e. doing lookups) we are well protected 52 * as readers. RCU provides sufficient protection only when reading the table
46 * by RCU 53 * (i.e. doing lookups). Adding or removing nodes requires we take
54 * the read lock or we risk operating on an old table. The write lock is only
55 * needed when modifying the number of buckets in a table.
47 */ 56 */
48static DEFINE_RWLOCK(pathtbl_resize_lock); 57static DEFINE_RWLOCK(pathtbl_resize_lock);
49 58
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
60 lockdep_is_held(&pathtbl_resize_lock)); 69 lockdep_is_held(&pathtbl_resize_lock));
61} 70}
62 71
72static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
73
63/* 74/*
64 * CAREFUL -- "tbl" must not be an expression, 75 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since 76 * in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
103 sizeof(newtbl->hash_rnd)); 114 sizeof(newtbl->hash_rnd));
104 for (i = 0; i <= newtbl->hash_mask; i++) 115 for (i = 0; i <= newtbl->hash_mask; i++)
105 spin_lock_init(&newtbl->hashwlock[i]); 116 spin_lock_init(&newtbl->hashwlock[i]);
117 spin_lock_init(&newtbl->gates_lock);
106 118
107 return newtbl; 119 return newtbl;
108} 120}
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
118{ 130{
119 struct hlist_head *mesh_hash; 131 struct hlist_head *mesh_hash;
120 struct hlist_node *p, *q; 132 struct hlist_node *p, *q;
133 struct mpath_node *gate;
121 int i; 134 int i;
122 135
123 mesh_hash = tbl->hash_buckets; 136 mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
129 } 142 }
130 spin_unlock_bh(&tbl->hashwlock[i]); 143 spin_unlock_bh(&tbl->hashwlock[i]);
131 } 144 }
145 if (free_leafs) {
146 spin_lock_bh(&tbl->gates_lock);
147 hlist_for_each_entry_safe(gate, p, q,
148 tbl->known_gates, list) {
149 hlist_del(&gate->list);
150 kfree(gate);
151 }
152 kfree(tbl->known_gates);
153 spin_unlock_bh(&tbl->gates_lock);
154 }
155
132 __mesh_table_free(tbl); 156 __mesh_table_free(tbl);
133} 157}
134 158
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
146 newtbl->free_node = oldtbl->free_node; 170 newtbl->free_node = oldtbl->free_node;
147 newtbl->mean_chain_len = oldtbl->mean_chain_len; 171 newtbl->mean_chain_len = oldtbl->mean_chain_len;
148 newtbl->copy_node = oldtbl->copy_node; 172 newtbl->copy_node = oldtbl->copy_node;
173 newtbl->known_gates = oldtbl->known_gates;
149 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); 174 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
150 175
151 oldhash = oldtbl->hash_buckets; 176 oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
188 struct ieee80211_hdr *hdr; 213 struct ieee80211_hdr *hdr;
189 struct sk_buff_head tmpq; 214 struct sk_buff_head tmpq;
190 unsigned long flags; 215 unsigned long flags;
216 struct ieee80211_sub_if_data *sdata = mpath->sdata;
191 217
192 rcu_assign_pointer(mpath->next_hop, sta); 218 rcu_assign_pointer(mpath->next_hop, sta);
193 219
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
198 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { 224 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
199 hdr = (struct ieee80211_hdr *) skb->data; 225 hdr = (struct ieee80211_hdr *) skb->data;
200 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 226 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
227 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
228 ieee80211_set_qos_hdr(sdata, skb);
201 __skb_queue_tail(&tmpq, skb); 229 __skb_queue_tail(&tmpq, skb);
202 } 230 }
203 231
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
205 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 233 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
206} 234}
207 235
236static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
237 struct mesh_path *gate_mpath)
238{
239 struct ieee80211_hdr *hdr;
240 struct ieee80211s_hdr *mshdr;
241 int mesh_hdrlen, hdrlen;
242 char *next_hop;
243
244 hdr = (struct ieee80211_hdr *) skb->data;
245 hdrlen = ieee80211_hdrlen(hdr->frame_control);
246 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
247
248 if (!(mshdr->flags & MESH_FLAGS_AE)) {
249 /* size of the fixed part of the mesh header */
250 mesh_hdrlen = 6;
251
252 /* make room for the two extended addresses */
253 skb_push(skb, 2 * ETH_ALEN);
254 memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
255
256 hdr = (struct ieee80211_hdr *) skb->data;
257
258 /* we preserve the previous mesh header and only add
259 * the new addreses */
260 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
261 mshdr->flags = MESH_FLAGS_AE_A5_A6;
262 memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
263 memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
264 }
265
266 /* update next hop */
267 hdr = (struct ieee80211_hdr *) skb->data;
268 rcu_read_lock();
269 next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
270 memcpy(hdr->addr1, next_hop, ETH_ALEN);
271 rcu_read_unlock();
272 memcpy(hdr->addr3, dst_addr, ETH_ALEN);
273}
208 274
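prepare_for_gate() grows the frame at the front: skb_push() claims 2 * ETH_ALEN bytes of headroom, the memmove() slides the 802.11 and mesh headers into them, and the 12-byte gap that opens up after the headers becomes the two extended addresses. The same buffer surgery in plain C (all sizes here are illustrative, not the real header lengths):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
    unsigned char frame[64];
    size_t headroom = 2 * ETH_ALEN;    /* reserved space before the data */
    unsigned char *data = frame + headroom;
    size_t hdrlen = 24, meshhdrlen = 6, len = hdrlen + meshhdrlen + 8;

    memset(data, 0xaa, len);    /* pretend this is a mesh frame */

    data -= 2 * ETH_ALEN;       /* skb_push(skb, 2 * ETH_ALEN) */
    memmove(data, data + 2 * ETH_ALEN, hdrlen + meshhdrlen);

    /* the gap after the moved headers now holds eaddr1 and eaddr2 */
    memset(data + hdrlen + meshhdrlen, 0xbb, 2 * ETH_ALEN);

    printf("frame grew from %zu to %zu bytes\n", len, len + 2 * ETH_ALEN);
    return 0;
}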
209/** 275/**
210 * mesh_path_lookup - look up a path in the mesh path table
211 * @dst: hardware address (ETH_ALEN length) of destination
212 * @sdata: local subif
213 * 276 *
214 * Returns: pointer to the mesh path structure, or NULL if not found 277 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
215 * 278 *
216 * Locking: must be called within a read rcu section. 279 * This function is used to transfer or copy frames from an unresolved mpath to
280 * a gate mpath. The function also adds the Address Extension field and
281 * updates the next hop.
282 *
283 * If a frame already has an Address Extension field, only the next hop and
284 * destination addresses are updated.
285 *
286 * The gate mpath must be an active mpath with a valid mpath->next_hop.
287 *
288 * @mpath: An active mpath the frames will be sent to (i.e. the gate)
289 * @from_mpath: The failed mpath
290 * @copy: When true, copy all the frames to the new mpath queue. When false,
291 * move them.
217 */ 292 */
218struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 293static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
294 struct mesh_path *from_mpath,
295 bool copy)
219{ 296{
220 struct mesh_path *mpath; 297 struct sk_buff *skb, *cp_skb = NULL;
221 struct hlist_node *n; 298 struct sk_buff_head gateq, failq;
222 struct hlist_head *bucket; 299 unsigned long flags;
223 struct mesh_table *tbl; 300 int num_skbs;
224 struct mpath_node *node;
225 301
226 tbl = rcu_dereference(mesh_paths); 302 BUG_ON(gate_mpath == from_mpath);
303 BUG_ON(!gate_mpath->next_hop);
227 304
228 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; 305 __skb_queue_head_init(&gateq);
229 hlist_for_each_entry_rcu(node, n, bucket, list) { 306 __skb_queue_head_init(&failq);
230 mpath = node->mpath; 307
231 if (mpath->sdata == sdata && 308 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
232 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 309 skb_queue_splice_init(&from_mpath->frame_queue, &failq);
233 if (MPATH_EXPIRED(mpath)) { 310 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
234 spin_lock_bh(&mpath->state_lock); 311
235 if (MPATH_EXPIRED(mpath)) 312 num_skbs = skb_queue_len(&failq);
236 mpath->flags &= ~MESH_PATH_ACTIVE; 313
237 spin_unlock_bh(&mpath->state_lock); 314 while (num_skbs--) {
238 } 315 skb = __skb_dequeue(&failq);
239 return mpath; 316 if (copy) {
317 cp_skb = skb_copy(skb, GFP_ATOMIC);
318 if (cp_skb)
319 __skb_queue_tail(&failq, cp_skb);
240 } 320 }
321
322 prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
323 __skb_queue_tail(&gateq, skb);
241 } 324 }
242 return NULL; 325
326 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
327 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
328 mpath_dbg("Mpath queue for gate %pM has %d frames\n",
329 gate_mpath->dst,
330 skb_queue_len(&gate_mpath->frame_queue));
331 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
332
333 if (!copy)
334 return;
335
336 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
337 skb_queue_splice(&failq, &from_mpath->frame_queue);
338 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
243} 339}
244 340
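mesh_path_move_to_queue() splices the failed queue out first and only then walks it, so the copies it optionally queues back can never be re-processed. A standalone model of the move-or-copy splice on singly linked lists:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static void push_tail(struct node **q, struct node *n)
{
    n->next = NULL;
    while (*q)
        q = &(*q)->next;
    *q = n;
}

/* Move every node from *from to *to; when copy is true, leave a
 * duplicate of each node on *from (mirrors the driver's copy flag). */
static void move_to_queue(struct node **to, struct node **from, bool copy)
{
    struct node *failq = *from;    /* splice the failed queue out */
    struct node *n;

    *from = NULL;
    while ((n = failq) != NULL) {
        failq = n->next;
        if (copy) {
            struct node *dup = malloc(sizeof(*dup));

            if (dup) {
                dup->id = n->id;
                push_tail(from, dup);
            }
        }
        push_tail(to, n);    /* re-addressed, handed to the gate */
    }
}

int main(void)
{
    struct node *gate = NULL, *failed = NULL, *n;
    int i;

    for (i = 1; i <= 3; i++) {
        n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->id = i;
        push_tail(&failed, n);
    }
    move_to_queue(&gate, &failed, true);
    for (n = gate; n; n = n->next)
        printf("gate queue: frame %d\n", n->id);
    return 0;
}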
245struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 341
342static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
343 struct ieee80211_sub_if_data *sdata)
246{ 344{
247 struct mesh_path *mpath; 345 struct mesh_path *mpath;
248 struct hlist_node *n; 346 struct hlist_node *n;
249 struct hlist_head *bucket; 347 struct hlist_head *bucket;
250 struct mesh_table *tbl;
251 struct mpath_node *node; 348 struct mpath_node *node;
252 349
253 tbl = rcu_dereference(mpp_paths);
254
255 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; 350 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
256 hlist_for_each_entry_rcu(node, n, bucket, list) { 351 hlist_for_each_entry_rcu(node, n, bucket, list) {
257 mpath = node->mpath; 352 mpath = node->mpath;
258 if (mpath->sdata == sdata && 353 if (mpath->sdata == sdata &&
259 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 354 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
260 if (MPATH_EXPIRED(mpath)) { 355 if (MPATH_EXPIRED(mpath)) {
261 spin_lock_bh(&mpath->state_lock); 356 spin_lock_bh(&mpath->state_lock);
262 if (MPATH_EXPIRED(mpath)) 357 mpath->flags &= ~MESH_PATH_ACTIVE;
263 mpath->flags &= ~MESH_PATH_ACTIVE;
264 spin_unlock_bh(&mpath->state_lock); 358 spin_unlock_bh(&mpath->state_lock);
265 } 359 }
266 return mpath; 360 return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
269 return NULL; 363 return NULL;
270} 364}
271 365
366/**
367 * mesh_path_lookup - look up a path in the mesh path table
368 * @dst: hardware address (ETH_ALEN length) of destination
369 * @sdata: local subif
370 *
371 * Returns: pointer to the mesh path structure, or NULL if not found
372 *
373 * Locking: must be called within a read rcu section.
374 */
375struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
376{
377 return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
378}
379
380struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
381{
382 return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
383}
384
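The two lookups above used to be near-identical copies; they now share path_lookup(), which simply takes the table as a parameter. The shape of that refactor in a few lines (a list walk stands in for the real hash-bucket walk):

#include <stdio.h>
#include <string.h>

struct path { const char *dst; struct path *next; };

/* One lookup shared by both tables, just as mesh_path_lookup() and
 * mpp_path_lookup() now both call path_lookup(). */
static struct path *lookup(struct path *tbl, const char *dst)
{
    for (; tbl; tbl = tbl->next)
        if (strcmp(tbl->dst, dst) == 0)
            return tbl;
    return NULL;
}

int main(void)
{
    struct path b = { "mpp-dst", NULL }, a = { "mesh-dst", &b };
    struct path *mesh_tbl = &a, *mpp_tbl = &b;

    printf("mesh: %s\n", lookup(mesh_tbl, "mesh-dst") ? "hit" : "miss");
    printf("mpp:  %s\n", lookup(mpp_tbl, "mesh-dst") ? "hit" : "miss");
    return 0;
}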
272 385
273/** 386/**
274 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 387 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
293 if (j++ == idx) { 406 if (j++ == idx) {
294 if (MPATH_EXPIRED(node->mpath)) { 407 if (MPATH_EXPIRED(node->mpath)) {
295 spin_lock_bh(&node->mpath->state_lock); 408 spin_lock_bh(&node->mpath->state_lock);
296 if (MPATH_EXPIRED(node->mpath)) 409 node->mpath->flags &= ~MESH_PATH_ACTIVE;
297 node->mpath->flags &= ~MESH_PATH_ACTIVE;
298 spin_unlock_bh(&node->mpath->state_lock); 410 spin_unlock_bh(&node->mpath->state_lock);
299 } 411 }
300 return node->mpath; 412 return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
304 return NULL; 416 return NULL;
305} 417}
306 418
419static void mesh_gate_node_reclaim(struct rcu_head *rp)
420{
421 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
422 kfree(node);
423}
424
425/**
426 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
427 * @tbl: table which contains the known_gates list
428 * @mpath: mpath to known mesh gate
429 *
430 * Returns: 0 on success
431 *
432 */
433static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
434{
435 struct mpath_node *gate, *new_gate;
436 struct hlist_node *n;
437 int err;
438
439 rcu_read_lock();
440 tbl = rcu_dereference(tbl);
441
442 hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
443 if (gate->mpath == mpath) {
444 err = -EEXIST;
445 goto err_rcu;
446 }
447
448 new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
449 if (!new_gate) {
450 err = -ENOMEM;
451 goto err_rcu;
452 }
453
454 mpath->is_gate = true;
455 mpath->sdata->u.mesh.num_gates++;
456 new_gate->mpath = mpath;
457 spin_lock_bh(&tbl->gates_lock);
458 hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
459 spin_unlock_bh(&tbl->gates_lock);
460 rcu_read_unlock();
461 mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
462 mpath->sdata->name, mpath->dst,
463 mpath->sdata->u.mesh.num_gates);
464 return 0;
465err_rcu:
466 rcu_read_unlock();
467 return err;
468}
469
470/**
471 * mesh_gate_del - remove a mesh gate from the list of known gates
472 * @tbl: table which holds our list of known gates
473 * @mpath: gate mpath
474 *
475 * Returns: 0 on success
476 *
477 * Locking: must be called inside rcu_read_lock() section
478 */
479static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
480{
481 struct mpath_node *gate;
482 struct hlist_node *p, *q;
483
484 tbl = rcu_dereference(tbl);
485
486 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
487 if (gate->mpath == mpath) {
488 spin_lock_bh(&tbl->gates_lock);
489 hlist_del_rcu(&gate->list);
490 call_rcu(&gate->rcu, mesh_gate_node_reclaim);
491 spin_unlock_bh(&tbl->gates_lock);
492 mpath->sdata->u.mesh.num_gates--;
493 mpath->is_gate = false;
494 mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
495 "%d known gates\n", mpath->sdata->name,
496 mpath->dst, mpath->sdata->u.mesh.num_gates);
497 break;
498 }
499
500 return 0;
501}
502
503/**
504 *
505 * mesh_path_add_gate - mark the given mpath as a path to a mesh gate
506 * @mpath: gate path to add to table
507 */
508int mesh_path_add_gate(struct mesh_path *mpath)
509{
510 return mesh_gate_add(mesh_paths, mpath);
511}
512
513/**
514 * mesh_gate_num - number of gates known to this interface
515 * @sdata: subif data
516 */
517int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
518{
519 return sdata->u.mesh.num_gates;
520}
521
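mesh_gate_add() and mesh_gate_del() keep the known_gates list consistent by rejecting duplicates and taking gates_lock around writers, while readers walk the list under RCU and removal defers the free with call_rcu(). A much-simplified model that uses one mutex for everything (the real RCU lifetime rules are stronger than this sketch can show):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct gate { char dst[18]; struct gate *next; };

static struct gate *known_gates;
static pthread_mutex_t gates_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_gates;

static int gate_add(const char *dst)
{
    struct gate *g;

    pthread_mutex_lock(&gates_lock);
    for (g = known_gates; g; g = g->next)
        if (strcmp(g->dst, dst) == 0) {
            pthread_mutex_unlock(&gates_lock);
            return -1;    /* -EEXIST: already recorded */
        }
    g = calloc(1, sizeof(*g));
    if (!g) {
        pthread_mutex_unlock(&gates_lock);
        return -2;        /* -ENOMEM */
    }
    snprintf(g->dst, sizeof(g->dst), "%s", dst);
    g->next = known_gates;
    known_gates = g;
    num_gates++;
    pthread_mutex_unlock(&gates_lock);
    printf("recorded gate %s, %d known\n", dst, num_gates);
    return 0;
}

int main(void)
{
    gate_add("02:00:00:00:00:01");
    gate_add("02:00:00:00:00:01");    /* duplicate is rejected */
    return 0;
}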
307/** 522/**
308 * mesh_path_add - allocate and add a new path to the mesh path table 523 * mesh_path_add - allocate and add a new path to the mesh path table
309 * @addr: destination address of the path (ETH_ALEN length) 524 * @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
481 new_mpath->flags = 0; 696 new_mpath->flags = 0;
482 skb_queue_head_init(&new_mpath->frame_queue); 697 skb_queue_head_init(&new_mpath->frame_queue);
483 new_node->mpath = new_mpath; 698 new_node->mpath = new_mpath;
699 init_timer(&new_mpath->timer);
484 new_mpath->exp_time = jiffies; 700 new_mpath->exp_time = jiffies;
485 spin_lock_init(&new_mpath->state_lock); 701 spin_lock_init(&new_mpath->state_lock);
486 702
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
539 struct hlist_node *p; 755 struct hlist_node *p;
540 struct ieee80211_sub_if_data *sdata = sta->sdata; 756 struct ieee80211_sub_if_data *sdata = sta->sdata;
541 int i; 757 int i;
758 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
542 759
543 rcu_read_lock(); 760 rcu_read_lock();
544 tbl = rcu_dereference(mesh_paths); 761 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) { 762 for_each_mesh_entry(tbl, p, node, i) {
546 mpath = node->mpath; 763 mpath = node->mpath;
547 spin_lock_bh(&mpath->state_lock);
548 if (rcu_dereference(mpath->next_hop) == sta && 764 if (rcu_dereference(mpath->next_hop) == sta &&
549 mpath->flags & MESH_PATH_ACTIVE && 765 mpath->flags & MESH_PATH_ACTIVE &&
550 !(mpath->flags & MESH_PATH_FIXED)) { 766 !(mpath->flags & MESH_PATH_FIXED)) {
767 spin_lock_bh(&mpath->state_lock);
551 mpath->flags &= ~MESH_PATH_ACTIVE; 768 mpath->flags &= ~MESH_PATH_ACTIVE;
552 ++mpath->sn; 769 ++mpath->sn;
553 spin_unlock_bh(&mpath->state_lock); 770 spin_unlock_bh(&mpath->state_lock);
554 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, 771 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
555 mpath->dst, cpu_to_le32(mpath->sn), 772 mpath->dst, cpu_to_le32(mpath->sn),
556 cpu_to_le16(PERR_RCODE_DEST_UNREACH), 773 reason, bcast, sdata);
557 bcast, sdata); 774 }
558 } else
559 spin_unlock_bh(&mpath->state_lock);
560 } 775 }
561 rcu_read_unlock(); 776 rcu_read_unlock();
562} 777}
563 778
779static void mesh_path_node_reclaim(struct rcu_head *rp)
780{
781 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
782 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
783
784 del_timer_sync(&node->mpath->timer);
785 atomic_dec(&sdata->u.mesh.mpaths);
786 kfree(node->mpath);
787 kfree(node);
788}
789
790/* needs to be called with the corresponding hashwlock taken */
791static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
792{
793 struct mesh_path *mpath;
794 mpath = node->mpath;
795 spin_lock(&mpath->state_lock);
796 mpath->flags |= MESH_PATH_RESOLVING;
797 if (mpath->is_gate)
798 mesh_gate_del(tbl, mpath);
799 hlist_del_rcu(&node->list);
800 call_rcu(&node->rcu, mesh_path_node_reclaim);
801 spin_unlock(&mpath->state_lock);
802 atomic_dec(&tbl->entries);
803}
804
564/** 805/**
565 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 806 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
566 * 807 *
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
581 int i; 822 int i;
582 823
583 rcu_read_lock(); 824 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths); 825 read_lock_bh(&pathtbl_resize_lock);
826 tbl = resize_dereference_mesh_paths();
585 for_each_mesh_entry(tbl, p, node, i) { 827 for_each_mesh_entry(tbl, p, node, i) {
586 mpath = node->mpath; 828 mpath = node->mpath;
587 if (rcu_dereference(mpath->next_hop) == sta) 829 if (rcu_dereference(mpath->next_hop) == sta) {
588 mesh_path_del(mpath->dst, mpath->sdata); 830 spin_lock_bh(&tbl->hashwlock[i]);
831 __mesh_path_del(tbl, node);
832 spin_unlock_bh(&tbl->hashwlock[i]);
833 }
589 } 834 }
835 read_unlock_bh(&pathtbl_resize_lock);
590 rcu_read_unlock(); 836 rcu_read_unlock();
591} 837}
592 838
593void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 839static void table_flush_by_iface(struct mesh_table *tbl,
840 struct ieee80211_sub_if_data *sdata)
594{ 841{
595 struct mesh_table *tbl;
596 struct mesh_path *mpath; 842 struct mesh_path *mpath;
597 struct mpath_node *node; 843 struct mpath_node *node;
598 struct hlist_node *p; 844 struct hlist_node *p;
599 int i; 845 int i;
600 846
601 rcu_read_lock(); 847 WARN_ON(!rcu_read_lock_held());
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) { 848 for_each_mesh_entry(tbl, p, node, i) {
604 mpath = node->mpath; 849 mpath = node->mpath;
605 if (mpath->sdata == sdata) 850 if (mpath->sdata != sdata)
606 mesh_path_del(mpath->dst, mpath->sdata); 851 continue;
852 spin_lock_bh(&tbl->hashwlock[i]);
853 __mesh_path_del(tbl, node);
854 spin_unlock_bh(&tbl->hashwlock[i]);
607 } 855 }
608 rcu_read_unlock();
609} 856}
610 857
611static void mesh_path_node_reclaim(struct rcu_head *rp) 858/**
859 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
860 *
861 * This function deletes both mesh paths and mesh portal paths.
862 *
863 * @sdata: interface data to match
864 *
865 */
866void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
612{ 867{
613 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 868 struct mesh_table *tbl;
614 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
615 869
616 del_timer_sync(&node->mpath->timer); 870 rcu_read_lock();
617 atomic_dec(&sdata->u.mesh.mpaths); 871 read_lock_bh(&pathtbl_resize_lock);
618 kfree(node->mpath); 872 tbl = resize_dereference_mesh_paths();
619 kfree(node); 873 table_flush_by_iface(tbl, sdata);
874 tbl = resize_dereference_mpp_paths();
875 table_flush_by_iface(tbl, sdata);
876 read_unlock_bh(&pathtbl_resize_lock);
877 rcu_read_unlock();
620} 878}
621 879
622/** 880/**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
647 mpath = node->mpath; 905 mpath = node->mpath;
648 if (mpath->sdata == sdata && 906 if (mpath->sdata == sdata &&
649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 907 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
650 spin_lock(&mpath->state_lock); 908 __mesh_path_del(tbl, node);
651 mpath->flags |= MESH_PATH_RESOLVING;
652 hlist_del_rcu(&node->list);
653 call_rcu(&node->rcu, mesh_path_node_reclaim);
654 atomic_dec(&tbl->entries);
655 spin_unlock(&mpath->state_lock);
656 goto enddel; 909 goto enddel;
657 } 910 }
658 } 911 }
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
681} 934}
682 935
683/** 936/**
937 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
938 *
939 * @mpath: mesh path whose queue will be emptied
940 *
941 * If there is only one gate, the frames are transferred from the failed mpath
942 * queue to that gate's queue. If there is more than one gate, the frames
943 * are copied from each gate to the next. After frames are copied, the
944 * mpath queues are emptied onto the transmission queue.
945 */
946int mesh_path_send_to_gates(struct mesh_path *mpath)
947{
948 struct ieee80211_sub_if_data *sdata = mpath->sdata;
949 struct hlist_node *n;
950 struct mesh_table *tbl;
951 struct mesh_path *from_mpath = mpath;
952 struct mpath_node *gate = NULL;
953 bool copy = false;
954 struct hlist_head *known_gates;
955
956 rcu_read_lock();
957 tbl = rcu_dereference(mesh_paths);
958 known_gates = tbl->known_gates;
959 rcu_read_unlock();
960
961 if (!known_gates)
962 return -EHOSTUNREACH;
963
964 hlist_for_each_entry_rcu(gate, n, known_gates, list) {
965 if (gate->mpath->sdata != sdata)
966 continue;
967
968 if (gate->mpath->flags & MESH_PATH_ACTIVE) {
969 mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
970 mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
971 from_mpath = gate->mpath;
972 copy = true;
973 } else {
974 mpath_dbg("Not forwarding %p\n", gate->mpath);
975 mpath_dbg("flags %x\n", gate->mpath->flags);
976 }
977 }
978
979 hlist_for_each_entry_rcu(gate, n, known_gates, list)
980 if (gate->mpath->sdata == sdata) {
981 mpath_dbg("Sending to %pM\n", gate->mpath->dst);
982 mesh_path_tx_pending(gate->mpath);
983 }
984
985 return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
986}
987
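mesh_path_send_to_gates() makes two passes: the first moves the dead path's frames to the first active gate and copies them to every further active gate, and the second kicks each gate's pending queue; the caller learns via -EHOSTUNREACH whether any gate was usable. The fan-out, reduced to counters:

#include <stdbool.h>
#include <stdio.h>

struct gate { const char *name; bool active; int queued; };

int main(void)
{
    struct gate gates[] = {
        { "gate0", true,  0 },
        { "gate1", false, 0 },
        { "gate2", true,  0 },
    };
    int pending = 3;    /* frames stuck on the failed path */
    int i, reached = 0;

    /* pass 1: move the frames to the first active gate, copy them
     * to every later active gate */
    for (i = 0; i < 3; i++) {
        if (!gates[i].active) {
            printf("not forwarding via %s\n", gates[i].name);
            continue;
        }
        gates[i].queued += pending;
        reached++;
    }

    /* pass 2: flush every gate's queue to the TX path */
    for (i = 0; i < 3; i++)
        if (gates[i].queued)
            printf("tx %d frame(s) via %s\n",
                   gates[i].queued, gates[i].name);

    return reached ? 0 : 1;    /* 1 plays the role of -EHOSTUNREACH */
}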
988/**
684 * mesh_path_discard_frame - discard a frame whose path could not be resolved 989 * mesh_path_discard_frame - discard a frame whose path could not be resolved
685 * 990 *
686 * @skb: frame to discard 991 * @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
699 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1004 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
700 struct mesh_path *mpath; 1005 struct mesh_path *mpath;
701 u32 sn = 0; 1006 u32 sn = 0;
1007 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
702 1008
703 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { 1009 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
704 u8 *ra, *da; 1010 u8 *ra, *da;
705 1011
706 da = hdr->addr3; 1012 da = hdr->addr3;
707 ra = hdr->addr1; 1013 ra = hdr->addr1;
1014 rcu_read_lock();
708 mpath = mesh_path_lookup(da, sdata); 1015 mpath = mesh_path_lookup(da, sdata);
709 if (mpath) 1016 if (mpath) {
1017 spin_lock_bh(&mpath->state_lock);
710 sn = ++mpath->sn; 1018 sn = ++mpath->sn;
1019 spin_unlock_bh(&mpath->state_lock);
1020 }
1021 rcu_read_unlock();
711 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, 1022 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
712 cpu_to_le32(sn), 1023 cpu_to_le32(sn), reason, ra, sdata);
713 cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
714 } 1024 }
715 1025
716 kfree_skb(skb); 1026 kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
728{ 1038{
729 struct sk_buff *skb; 1039 struct sk_buff *skb;
730 1040
731 while ((skb = skb_dequeue(&mpath->frame_queue)) && 1041 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
732 (mpath->flags & MESH_PATH_ACTIVE))
733 mesh_path_discard_frame(skb, mpath->sdata); 1042 mesh_path_discard_frame(skb, mpath->sdata);
734} 1043}
735 1044
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
790int mesh_pathtbl_init(void) 1099int mesh_pathtbl_init(void)
791{ 1100{
792 struct mesh_table *tbl_path, *tbl_mpp; 1101 struct mesh_table *tbl_path, *tbl_mpp;
1102 int ret;
793 1103
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1104 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path) 1105 if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
797 tbl_path->free_node = &mesh_path_node_free; 1107 tbl_path->free_node = &mesh_path_node_free;
798 tbl_path->copy_node = &mesh_path_node_copy; 1108 tbl_path->copy_node = &mesh_path_node_copy;
799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN; 1109 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
1110 tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
1111 if (!tbl_path->known_gates) {
1112 ret = -ENOMEM;
1113 goto free_path;
1114 }
1115 INIT_HLIST_HEAD(tbl_path->known_gates);
1116
800 1117
801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1118 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
802 if (!tbl_mpp) { 1119 if (!tbl_mpp) {
803 mesh_table_free(tbl_path, true); 1120 ret = -ENOMEM;
804 return -ENOMEM; 1121 goto free_path;
805 } 1122 }
806 tbl_mpp->free_node = &mesh_path_node_free; 1123 tbl_mpp->free_node = &mesh_path_node_free;
807 tbl_mpp->copy_node = &mesh_path_node_copy; 1124 tbl_mpp->copy_node = &mesh_path_node_copy;
808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; 1125 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
1126 tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
1127 if (!tbl_mpp->known_gates) {
1128 ret = -ENOMEM;
1129 goto free_mpp;
1130 }
1131 INIT_HLIST_HEAD(tbl_mpp->known_gates);
809 1132
810 /* Need no locking since this is during init */ 1133 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path); 1134 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp); 1135 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
813 1136
814 return 0; 1137 return 0;
1138
1139free_mpp:
1140 mesh_table_free(tbl_mpp, true);
1141free_path:
1142 mesh_table_free(tbl_path, true);
1143 return ret;
815} 1144}
816 1145
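The reworked init above converts the old inline cleanup into the usual goto-unwind pattern: each allocation failure jumps to a label that frees everything allocated so far, in reverse order, with the labels falling through into one another. A generic, self-contained sketch of the pattern (illustrative resources, not the mesh_table API):

#include <errno.h>
#include <stdlib.h>

struct table { void *gates; };

static int pathtbl_init_sketch(struct table **pa, struct table **pb)
{
	struct table *a, *b = NULL;
	int ret;

	a = calloc(1, sizeof(*a));
	if (!a)
		return -ENOMEM;
	a->gates = calloc(1, 16);
	if (!a->gates) {
		ret = -ENOMEM;
		goto free_a;		/* unwind in reverse allocation order */
	}
	b = calloc(1, sizeof(*b));
	if (!b) {
		ret = -ENOMEM;
		goto free_a;		/* a->gates is freed along with a */
	}
	b->gates = calloc(1, 16);
	if (!b->gates) {
		ret = -ENOMEM;
		goto free_b;
	}
	*pa = a;
	*pb = b;
	return 0;

free_b:
	free(b->gates);
	free(b);
free_a:
	free(a->gates);			/* free(NULL) is a no-op */
	free(a);
	return ret;
}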
817void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 1146void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
828 if (node->mpath->sdata != sdata) 1157 if (node->mpath->sdata != sdata)
829 continue; 1158 continue;
830 mpath = node->mpath; 1159 mpath = node->mpath;
831 spin_lock_bh(&mpath->state_lock);
832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 1160 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
833 (!(mpath->flags & MESH_PATH_FIXED)) && 1161 (!(mpath->flags & MESH_PATH_FIXED)) &&
834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { 1162 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
835 spin_unlock_bh(&mpath->state_lock);
836 mesh_path_del(mpath->dst, mpath->sdata); 1163 mesh_path_del(mpath->dst, mpath->sdata);
837 } else
838 spin_unlock_bh(&mpath->state_lock);
839 } 1164 }
840 rcu_read_unlock(); 1165 rcu_read_unlock();
841} 1166}
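The expiry check above relies on time_after(), which compares jiffies values with wrap-around-safe signed arithmetic rather than a plain greater-than. A standalone sketch of the same idea on 32-bit counters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if a is after b, even when the counter has wrapped */
static bool time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t exp = 0xfffffff0u;	/* expiry set just before the wrap */

	printf("%d\n", time_after32(exp + 0x20, exp));	/* 1: wrapped, still "after" */
	printf("%d\n", time_after32(exp - 1, exp));	/* 0: before the expiry      */
	return 0;
}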
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
843void mesh_pathtbl_unregister(void) 1168void mesh_pathtbl_unregister(void)
844{ 1169{
845 /* no need for locking during exit path */ 1170 /* no need for locking during exit path */
846 mesh_table_free(rcu_dereference_raw(mesh_paths), true); 1171 mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true); 1172 mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
848} 1173}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f4adc0917888..7e57f5d07f66 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,35 +19,18 @@
19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0) 19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif 20#endif
21 21
22#define PLINK_GET_LLID(p) (p + 4) 22#define PLINK_GET_LLID(p) (p + 2)
23#define PLINK_GET_PLID(p) (p + 6) 23#define PLINK_GET_PLID(p) (p + 4)
24 24
25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ 25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
26 jiffies + HZ * t / 1000)) 26 jiffies + HZ * t / 1000))
27 27
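The two offsets change because the element body shrank: the old format began with a 4-byte peering protocol field, the new one with a 2-byte protocol identifier, so the llid now starts at offset 2 and the plid at offset 4. A sketch of pulling both IDs out of a body with the new layout (byte-order conversion elided; not the kernel parser):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* New Mesh Peering Management body:
 *   [0..1] peering protocol   [2..3] llid   [4..5] plid (when present)
 */
static void parse_peering_ids(const uint8_t *body, size_t body_len,
			      uint16_t *llid, uint16_t *plid)
{
	memcpy(llid, body + 2, 2);		/* PLINK_GET_LLID(p): p + 2 */
	if (body_len >= 6)
		memcpy(plid, body + 4, 2);	/* PLINK_GET_PLID(p): p + 4 */
	/* both stay little-endian as on the wire; convert if needed */
}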
28/* Peer link cancel reasons, all subject to ANA approval */
29#define MESH_LINK_CANCELLED 2
30#define MESH_MAX_NEIGHBORS 3
31#define MESH_CAPABILITY_POLICY_VIOLATION 4
32#define MESH_CLOSE_RCVD 5
33#define MESH_MAX_RETRIES 6
34#define MESH_CONFIRM_TIMEOUT 7
35#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
37#define MESH_SECURITY_FAILED_VERIFICATION 10
38
39#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) 28#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
40#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) 29#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
41#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) 30#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
42#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) 31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 33
45enum plink_frame_type {
46 PLINK_OPEN = 1,
47 PLINK_CONFIRM,
48 PLINK_CLOSE
49};
50
51enum plink_event { 34enum plink_event {
52 PLINK_UNDEFINED, 35 PLINK_UNDEFINED,
53 OPN_ACPT, 36 OPN_ACPT,
@@ -60,6 +43,10 @@ enum plink_event {
60 CLS_IGNR 43 CLS_IGNR
61}; 44};
62 45
46static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
47 enum ieee80211_self_protected_actioncode action,
48 u8 *da, __le16 llid, __le16 plid, __le16 reason);
49
63static inline 50static inline
64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 51void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 52{
@@ -105,7 +92,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 92 if (!sta)
106 return NULL; 93 return NULL;
107 94
108 sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH; 95 set_sta_flag(sta, WLAN_STA_AUTH);
96 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
97 set_sta_flag(sta, WLAN_STA_WME);
109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 98 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 rate_control_rate_init(sta); 99 rate_control_rate_init(sta);
111 100
@@ -150,6 +139,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
150 139
151 spin_lock_bh(&sta->lock); 140 spin_lock_bh(&sta->lock);
152 deactivated = __mesh_plink_deactivate(sta); 141 deactivated = __mesh_plink_deactivate(sta);
142 sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
143 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
144 sta->sta.addr, sta->llid, sta->plid,
145 sta->reason);
153 spin_unlock_bh(&sta->lock); 146 spin_unlock_bh(&sta->lock);
154 147
155 if (deactivated) 148 if (deactivated)
@@ -157,16 +150,16 @@ void mesh_plink_deactivate(struct sta_info *sta)
157} 150}
158 151
159static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 152static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
160 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, 153 enum ieee80211_self_protected_actioncode action,
161 __le16 reason) { 154 u8 *da, __le16 llid, __le16 plid, __le16 reason) {
162 struct ieee80211_local *local = sdata->local; 155 struct ieee80211_local *local = sdata->local;
163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + 156 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
164 sdata->u.mesh.ie_len); 157 sdata->u.mesh.ie_len);
165 struct ieee80211_mgmt *mgmt; 158 struct ieee80211_mgmt *mgmt;
166 bool include_plid = false; 159 bool include_plid = false;
167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; 160 int ie_len = 4;
161 u16 peering_proto = 0;
168 u8 *pos; 162 u8 *pos;
169 int ie_len;
170 163
171 if (!skb) 164 if (!skb)
172 return -1; 165 return -1;
@@ -175,63 +168,75 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
175 * common action part (1) 168 * common action part (1)
176 */ 169 */
177 mgmt = (struct ieee80211_mgmt *) 170 mgmt = (struct ieee80211_mgmt *)
178 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); 171 skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
179 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); 172 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
180 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 173 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
181 IEEE80211_STYPE_ACTION); 174 IEEE80211_STYPE_ACTION);
182 memcpy(mgmt->da, da, ETH_ALEN); 175 memcpy(mgmt->da, da, ETH_ALEN);
183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 176 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
184 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 177 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
185 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; 178 mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
186 mgmt->u.action.u.plink_action.action_code = action; 179 mgmt->u.action.u.self_prot.action_code = action;
187 180
188 if (action == PLINK_CLOSE) 181 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
189 mgmt->u.action.u.plink_action.aux = reason; 182 /* capability info */
190 else { 183 pos = skb_put(skb, 2);
191 mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0); 184 memset(pos, 0, 2);
192 if (action == PLINK_CONFIRM) { 185 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
193 pos = skb_put(skb, 4); 186 /* AID */
194 /* two-byte status code followed by two-byte AID */ 187 pos = skb_put(skb, 2);
195 memset(pos, 0, 2);
196 memcpy(pos + 2, &plid, 2); 188 memcpy(pos + 2, &plid, 2);
197 } 189 }
198 mesh_mgmt_ies_add(skb, sdata); 190 if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
191 ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
192 mesh_add_rsn_ie(skb, sdata) ||
193 mesh_add_meshid_ie(skb, sdata) ||
194 mesh_add_meshconf_ie(skb, sdata))
195 return -1;
196 } else { /* WLAN_SP_MESH_PEERING_CLOSE */
197 if (mesh_add_meshid_ie(skb, sdata))
198 return -1;
199 } 199 }
200 200
201 /* Add Peer Link Management element */ 201 /* Add Mesh Peering Management element */
202 switch (action) { 202 switch (action) {
203 case PLINK_OPEN: 203 case WLAN_SP_MESH_PEERING_OPEN:
204 ie_len = 6;
205 break; 204 break;
206 case PLINK_CONFIRM: 205 case WLAN_SP_MESH_PEERING_CONFIRM:
207 ie_len = 8; 206 ie_len += 2;
208 include_plid = true; 207 include_plid = true;
209 break; 208 break;
210 case PLINK_CLOSE: 209 case WLAN_SP_MESH_PEERING_CLOSE:
211 default: 210 if (plid) {
212 if (!plid) 211 ie_len += 2;
213 ie_len = 8;
214 else {
215 ie_len = 10;
216 include_plid = true; 212 include_plid = true;
217 } 213 }
214 ie_len += 2; /* reason code */
218 break; 215 break;
216 default:
217 return -EINVAL;
219 } 218 }
220 219
220 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
221 return -ENOMEM;
222
221 pos = skb_put(skb, 2 + ie_len); 223 pos = skb_put(skb, 2 + ie_len);
222 *pos++ = WLAN_EID_PEER_LINK; 224 *pos++ = WLAN_EID_PEER_MGMT;
223 *pos++ = ie_len; 225 *pos++ = ie_len;
224 memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto)); 226 memcpy(pos, &peering_proto, 2);
225 pos += 4; 227 pos += 2;
226 memcpy(pos, &llid, 2); 228 memcpy(pos, &llid, 2);
229 pos += 2;
227 if (include_plid) { 230 if (include_plid) {
228 pos += 2;
229 memcpy(pos, &plid, 2); 231 memcpy(pos, &plid, 2);
230 }
231 if (action == PLINK_CLOSE) {
232 pos += 2; 232 pos += 2;
233 }
234 if (action == WLAN_SP_MESH_PEERING_CLOSE) {
233 memcpy(pos, &reason, 2); 235 memcpy(pos, &reason, 2);
236 pos += 2;
234 } 237 }
238 if (mesh_add_vendor_ies(skb, sdata))
239 return -1;
235 240
236 ieee80211_tx_skb(sdata, skb); 241 ieee80211_tx_skb(sdata, skb);
237 return 0; 242 return 0;
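The rewritten tail of mesh_plink_frame_tx() appends a classic TLV: element ID, one-byte length, then protocol, llid, optional plid and, for CLOSE, a reason code. A compact sketch of the same layout into a flat buffer (the EID value below is an assumption; only the name WLAN_EID_PEER_MGMT appears in the diff):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EID_PEER_MGMT 117	/* assumed value of WLAN_EID_PEER_MGMT */

/* returns total bytes written (header + body) */
static size_t build_peering_ie(uint8_t *buf, uint16_t proto, uint16_t llid,
			       const uint16_t *plid, const uint16_t *reason)
{
	uint8_t *pos = buf + 2;			/* leave room for EID + length */

	memcpy(pos, &proto, 2); pos += 2;	/* peering protocol            */
	memcpy(pos, &llid, 2);  pos += 2;	/* local link ID               */
	if (plid) {				/* CONFIRM, or CLOSE with plid */
		memcpy(pos, plid, 2);
		pos += 2;
	}
	if (reason) {				/* CLOSE only                  */
		memcpy(pos, reason, 2);
		pos += 2;
	}
	buf[0] = EID_PEER_MGMT;
	buf[1] = (uint8_t)(pos - buf - 2);	/* body length: 4, 6 or 8      */
	return (size_t)(pos - buf);
}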
@@ -322,21 +327,21 @@ static void mesh_plink_timer(unsigned long data)
322 ++sta->plink_retries; 327 ++sta->plink_retries;
323 mod_plink_timer(sta, sta->plink_timeout); 328 mod_plink_timer(sta, sta->plink_timeout);
324 spin_unlock_bh(&sta->lock); 329 spin_unlock_bh(&sta->lock);
325 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 330 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
326 0, 0); 331 sta->sta.addr, llid, 0, 0);
327 break; 332 break;
328 } 333 }
329 reason = cpu_to_le16(MESH_MAX_RETRIES); 334 reason = cpu_to_le16(WLAN_REASON_MESH_MAX_RETRIES);
330 /* fall through on else */ 335 /* fall through on else */
331 case NL80211_PLINK_CNF_RCVD: 336 case NL80211_PLINK_CNF_RCVD:
332 /* confirm timer */ 337 /* confirm timer */
333 if (!reason) 338 if (!reason)
334 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 339 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
335 sta->plink_state = NL80211_PLINK_HOLDING; 340 sta->plink_state = NL80211_PLINK_HOLDING;
336 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 341 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
337 spin_unlock_bh(&sta->lock); 342 spin_unlock_bh(&sta->lock);
338 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, 343 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
339 reason); 344 sta->sta.addr, llid, plid, reason);
340 break; 345 break;
341 case NL80211_PLINK_HOLDING: 346 case NL80211_PLINK_HOLDING:
342 /* holding timer */ 347 /* holding timer */
@@ -380,7 +385,7 @@ int mesh_plink_open(struct sta_info *sta)
380 __le16 llid; 385 __le16 llid;
381 struct ieee80211_sub_if_data *sdata = sta->sdata; 386 struct ieee80211_sub_if_data *sdata = sta->sdata;
382 387
383 if (!test_sta_flags(sta, WLAN_STA_AUTH)) 388 if (!test_sta_flag(sta, WLAN_STA_AUTH))
384 return -EPERM; 389 return -EPERM;
385 390
386 spin_lock_bh(&sta->lock); 391 spin_lock_bh(&sta->lock);
@@ -396,7 +401,7 @@ int mesh_plink_open(struct sta_info *sta)
396 mpl_dbg("Mesh plink: starting establishment with %pM\n", 401 mpl_dbg("Mesh plink: starting establishment with %pM\n",
397 sta->sta.addr); 402 sta->sta.addr);
398 403
399 return mesh_plink_frame_tx(sdata, PLINK_OPEN, 404 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
400 sta->sta.addr, llid, 0, 0); 405 sta->sta.addr, llid, 0, 0);
401} 406}
402 407
@@ -422,7 +427,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
422 struct ieee802_11_elems elems; 427 struct ieee802_11_elems elems;
423 struct sta_info *sta; 428 struct sta_info *sta;
424 enum plink_event event; 429 enum plink_event event;
425 enum plink_frame_type ftype; 430 enum ieee80211_self_protected_actioncode ftype;
426 size_t baselen; 431 size_t baselen;
427 bool deactivated, matches_local = true; 432 bool deactivated, matches_local = true;
428 u8 ie_len; 433 u8 ie_len;
@@ -449,14 +454,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
449 return; 454 return;
450 } 455 }
451 456
452 baseaddr = mgmt->u.action.u.plink_action.variable; 457 baseaddr = mgmt->u.action.u.self_prot.variable;
453 baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; 458 baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
454 if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { 459 if (mgmt->u.action.u.self_prot.action_code ==
460 WLAN_SP_MESH_PEERING_CONFIRM) {
455 baseaddr += 4; 461 baseaddr += 4;
456 baselen += 4; 462 baselen += 4;
457 } 463 }
458 ieee802_11_parse_elems(baseaddr, len - baselen, &elems); 464 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
459 if (!elems.peer_link) { 465 if (!elems.peering) {
460 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 466 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
461 return; 467 return;
462 } 468 }
@@ -466,37 +472,40 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
466 return; 472 return;
467 } 473 }
468 474
469 ftype = mgmt->u.action.u.plink_action.action_code; 475 ftype = mgmt->u.action.u.self_prot.action_code;
470 ie_len = elems.peer_link_len; 476 ie_len = elems.peering_len;
471 if ((ftype == PLINK_OPEN && ie_len != 6) || 477 if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
472 (ftype == PLINK_CONFIRM && ie_len != 8) || 478 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
473 (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) { 479 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
480 && ie_len != 8)) {
474 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", 481 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
475 ftype, ie_len); 482 ftype, ie_len);
476 return; 483 return;
477 } 484 }
478 485
479 if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) { 486 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
487 (!elems.mesh_id || !elems.mesh_config)) {
480 mpl_dbg("Mesh plink: missing necessary ie\n"); 488 mpl_dbg("Mesh plink: missing necessary ie\n");
481 return; 489 return;
482 } 490 }
483 /* Note the lines below are correct; the llid in the frame is the plid 491 /* Note the lines below are correct; the llid in the frame is the plid
484 * from the point of view of this host. 492 * from the point of view of this host.
485 */ 493 */
486 memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); 494 memcpy(&plid, PLINK_GET_LLID(elems.peering), 2);
487 if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10)) 495 if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
488 memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); 496 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
497 memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
489 498
490 rcu_read_lock(); 499 rcu_read_lock();
491 500
492 sta = sta_info_get(sdata, mgmt->sa); 501 sta = sta_info_get(sdata, mgmt->sa);
493 if (!sta && ftype != PLINK_OPEN) { 502 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
494 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 503 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
495 rcu_read_unlock(); 504 rcu_read_unlock();
496 return; 505 return;
497 } 506 }
498 507
499 if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) { 508 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
500 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 509 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
501 rcu_read_unlock(); 510 rcu_read_unlock();
502 return; 511 return;
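The length check in the hunk above encodes one allowed body size per subtype: 4 bytes for OPEN (protocol + llid), 6 for CONFIRM (plus plid), and 6 or 8 for CLOSE (plus reason, with plid optional). The same rule restated as a table-style predicate (sketch; enum values are illustrative):

#include <stdbool.h>
#include <stdint.h>

enum sp_action {			/* illustrative values */
	SP_MESH_PEERING_OPEN = 1,
	SP_MESH_PEERING_CONFIRM,
	SP_MESH_PEERING_CLOSE,
};

static bool peering_ie_len_ok(enum sp_action ftype, uint8_t ie_len)
{
	switch (ftype) {
	case SP_MESH_PEERING_OPEN:	return ie_len == 4;
	case SP_MESH_PEERING_CONFIRM:	return ie_len == 6;
	case SP_MESH_PEERING_CLOSE:	return ie_len == 6 || ie_len == 8;
	default:			return false;
	}
}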
@@ -509,30 +518,30 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
509 518
510 /* Now we will figure out the appropriate event... */ 519 /* Now we will figure out the appropriate event... */
511 event = PLINK_UNDEFINED; 520 event = PLINK_UNDEFINED;
512 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { 521 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
522 (!mesh_matches_local(&elems, sdata))) {
513 matches_local = false; 523 matches_local = false;
514 switch (ftype) { 524 switch (ftype) {
515 case PLINK_OPEN: 525 case WLAN_SP_MESH_PEERING_OPEN:
516 event = OPN_RJCT; 526 event = OPN_RJCT;
517 break; 527 break;
518 case PLINK_CONFIRM: 528 case WLAN_SP_MESH_PEERING_CONFIRM:
519 event = CNF_RJCT; 529 event = CNF_RJCT;
520 break; 530 break;
521 case PLINK_CLOSE: 531 default:
522 /* avoid warning */
523 break; 532 break;
524 } 533 }
525 } 534 }
526 535
527 if (!sta && !matches_local) { 536 if (!sta && !matches_local) {
528 rcu_read_unlock(); 537 rcu_read_unlock();
529 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 538 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
530 llid = 0; 539 llid = 0;
531 mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid, 540 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
532 plid, reason); 541 mgmt->sa, llid, plid, reason);
533 return; 542 return;
534 } else if (!sta) { 543 } else if (!sta) {
535 /* ftype == PLINK_OPEN */ 544 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
536 u32 rates; 545 u32 rates;
537 546
538 rcu_read_unlock(); 547 rcu_read_unlock();
@@ -557,21 +566,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
557 } else if (matches_local) { 566 } else if (matches_local) {
558 spin_lock_bh(&sta->lock); 567 spin_lock_bh(&sta->lock);
559 switch (ftype) { 568 switch (ftype) {
560 case PLINK_OPEN: 569 case WLAN_SP_MESH_PEERING_OPEN:
561 if (!mesh_plink_free_count(sdata) || 570 if (!mesh_plink_free_count(sdata) ||
562 (sta->plid && sta->plid != plid)) 571 (sta->plid && sta->plid != plid))
563 event = OPN_IGNR; 572 event = OPN_IGNR;
564 else 573 else
565 event = OPN_ACPT; 574 event = OPN_ACPT;
566 break; 575 break;
567 case PLINK_CONFIRM: 576 case WLAN_SP_MESH_PEERING_CONFIRM:
568 if (!mesh_plink_free_count(sdata) || 577 if (!mesh_plink_free_count(sdata) ||
569 (sta->llid != llid || sta->plid != plid)) 578 (sta->llid != llid || sta->plid != plid))
570 event = CNF_IGNR; 579 event = CNF_IGNR;
571 else 580 else
572 event = CNF_ACPT; 581 event = CNF_ACPT;
573 break; 582 break;
574 case PLINK_CLOSE: 583 case WLAN_SP_MESH_PEERING_CLOSE:
575 if (sta->plink_state == NL80211_PLINK_ESTAB) 584 if (sta->plink_state == NL80211_PLINK_ESTAB)
576 /* Do not check for llid or plid. This does not 585 /* Do not check for llid or plid. This does not
577 * follow the standard but since multiple plinks 586 * follow the standard but since multiple plinks
@@ -620,10 +629,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
620 sta->llid = llid; 629 sta->llid = llid;
621 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 630 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
622 spin_unlock_bh(&sta->lock); 631 spin_unlock_bh(&sta->lock);
623 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 632 mesh_plink_frame_tx(sdata,
624 0, 0); 633 WLAN_SP_MESH_PEERING_OPEN,
625 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, 634 sta->sta.addr, llid, 0, 0);
626 llid, plid, 0); 635 mesh_plink_frame_tx(sdata,
636 WLAN_SP_MESH_PEERING_CONFIRM,
637 sta->sta.addr, llid, plid, 0);
627 break; 638 break;
628 default: 639 default:
629 spin_unlock_bh(&sta->lock); 640 spin_unlock_bh(&sta->lock);
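Earlier in mesh_rx_plink_frame(), each frame is reduced to a state-machine event before any state changes: OPEN is accepted only when peer-link slots are free and any recorded plid matches, and CONFIRM additionally requires a matching llid. A condensed sketch of that classification (CLOSE handling omitted; not the kernel types):

#include <stdbool.h>
#include <stdint.h>

enum plink_event { OPN_IGNR, OPN_ACPT, CNF_IGNR, CNF_ACPT };

struct peer { uint16_t llid, plid; };

static enum plink_event classify_open(const struct peer *p, bool free_slots,
				      uint16_t plid)
{
	if (!free_slots || (p->plid && p->plid != plid))
		return OPN_IGNR;
	return OPN_ACPT;
}

static enum plink_event classify_confirm(const struct peer *p, bool free_slots,
					 uint16_t llid, uint16_t plid)
{
	if (!free_slots || p->llid != llid || p->plid != plid)
		return CNF_IGNR;
	return CNF_ACPT;
}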
@@ -635,10 +646,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
635 switch (event) { 646 switch (event) {
636 case OPN_RJCT: 647 case OPN_RJCT:
637 case CNF_RJCT: 648 case CNF_RJCT:
638 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 649 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
639 case CLS_ACPT: 650 case CLS_ACPT:
640 if (!reason) 651 if (!reason)
641 reason = cpu_to_le16(MESH_CLOSE_RCVD); 652 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
642 sta->reason = reason; 653 sta->reason = reason;
643 sta->plink_state = NL80211_PLINK_HOLDING; 654 sta->plink_state = NL80211_PLINK_HOLDING;
644 if (!mod_plink_timer(sta, 655 if (!mod_plink_timer(sta,
@@ -647,8 +658,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
647 658
648 llid = sta->llid; 659 llid = sta->llid;
649 spin_unlock_bh(&sta->lock); 660 spin_unlock_bh(&sta->lock);
650 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 661 mesh_plink_frame_tx(sdata,
651 plid, reason); 662 WLAN_SP_MESH_PEERING_CLOSE,
663 sta->sta.addr, llid, plid, reason);
652 break; 664 break;
653 case OPN_ACPT: 665 case OPN_ACPT:
654 /* retry timer is left untouched */ 666 /* retry timer is left untouched */
@@ -656,8 +668,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
656 sta->plid = plid; 668 sta->plid = plid;
657 llid = sta->llid; 669 llid = sta->llid;
658 spin_unlock_bh(&sta->lock); 670 spin_unlock_bh(&sta->lock);
659 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 671 mesh_plink_frame_tx(sdata,
660 plid, 0); 672 WLAN_SP_MESH_PEERING_CONFIRM,
673 sta->sta.addr, llid, plid, 0);
661 break; 674 break;
662 case CNF_ACPT: 675 case CNF_ACPT:
663 sta->plink_state = NL80211_PLINK_CNF_RCVD; 676 sta->plink_state = NL80211_PLINK_CNF_RCVD;
@@ -677,10 +690,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
677 switch (event) { 690 switch (event) {
678 case OPN_RJCT: 691 case OPN_RJCT:
679 case CNF_RJCT: 692 case CNF_RJCT:
680 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 693 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
681 case CLS_ACPT: 694 case CLS_ACPT:
682 if (!reason) 695 if (!reason)
683 reason = cpu_to_le16(MESH_CLOSE_RCVD); 696 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
684 sta->reason = reason; 697 sta->reason = reason;
685 sta->plink_state = NL80211_PLINK_HOLDING; 698 sta->plink_state = NL80211_PLINK_HOLDING;
686 if (!mod_plink_timer(sta, 699 if (!mod_plink_timer(sta,
@@ -689,14 +702,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
689 702
690 llid = sta->llid; 703 llid = sta->llid;
691 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
692 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 705 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
693 plid, reason); 706 sta->sta.addr, llid, plid, reason);
694 break; 707 break;
695 case OPN_ACPT: 708 case OPN_ACPT:
696 llid = sta->llid; 709 llid = sta->llid;
697 spin_unlock_bh(&sta->lock); 710 spin_unlock_bh(&sta->lock);
698 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 711 mesh_plink_frame_tx(sdata,
699 plid, 0); 712 WLAN_SP_MESH_PEERING_CONFIRM,
713 sta->sta.addr, llid, plid, 0);
700 break; 714 break;
701 case CNF_ACPT: 715 case CNF_ACPT:
702 del_timer(&sta->plink_timer); 716 del_timer(&sta->plink_timer);
@@ -717,10 +731,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
717 switch (event) { 731 switch (event) {
718 case OPN_RJCT: 732 case OPN_RJCT:
719 case CNF_RJCT: 733 case CNF_RJCT:
720 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 734 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
721 case CLS_ACPT: 735 case CLS_ACPT:
722 if (!reason) 736 if (!reason)
723 reason = cpu_to_le16(MESH_CLOSE_RCVD); 737 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
724 sta->reason = reason; 738 sta->reason = reason;
725 sta->plink_state = NL80211_PLINK_HOLDING; 739 sta->plink_state = NL80211_PLINK_HOLDING;
726 if (!mod_plink_timer(sta, 740 if (!mod_plink_timer(sta,
@@ -729,8 +743,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
729 743
730 llid = sta->llid; 744 llid = sta->llid;
731 spin_unlock_bh(&sta->lock); 745 spin_unlock_bh(&sta->lock);
732 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 746 mesh_plink_frame_tx(sdata,
733 plid, reason); 747 WLAN_SP_MESH_PEERING_CLOSE,
748 sta->sta.addr, llid, plid, reason);
734 break; 749 break;
735 case OPN_ACPT: 750 case OPN_ACPT:
736 del_timer(&sta->plink_timer); 751 del_timer(&sta->plink_timer);
@@ -740,8 +755,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
740 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 755 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
741 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 756 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
742 sta->sta.addr); 757 sta->sta.addr);
743 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 758 mesh_plink_frame_tx(sdata,
744 plid, 0); 759 WLAN_SP_MESH_PEERING_CONFIRM,
760 sta->sta.addr, llid, plid, 0);
745 break; 761 break;
746 default: 762 default:
747 spin_unlock_bh(&sta->lock); 763 spin_unlock_bh(&sta->lock);
@@ -752,7 +768,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
752 case NL80211_PLINK_ESTAB: 768 case NL80211_PLINK_ESTAB:
753 switch (event) { 769 switch (event) {
754 case CLS_ACPT: 770 case CLS_ACPT:
755 reason = cpu_to_le16(MESH_CLOSE_RCVD); 771 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
756 sta->reason = reason; 772 sta->reason = reason;
757 deactivated = __mesh_plink_deactivate(sta); 773 deactivated = __mesh_plink_deactivate(sta);
758 sta->plink_state = NL80211_PLINK_HOLDING; 774 sta->plink_state = NL80211_PLINK_HOLDING;
@@ -761,14 +777,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
761 spin_unlock_bh(&sta->lock); 777 spin_unlock_bh(&sta->lock);
762 if (deactivated) 778 if (deactivated)
763 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 779 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
764 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 780 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
765 plid, reason); 781 sta->sta.addr, llid, plid, reason);
766 break; 782 break;
767 case OPN_ACPT: 783 case OPN_ACPT:
768 llid = sta->llid; 784 llid = sta->llid;
769 spin_unlock_bh(&sta->lock); 785 spin_unlock_bh(&sta->lock);
770 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 786 mesh_plink_frame_tx(sdata,
771 plid, 0); 787 WLAN_SP_MESH_PEERING_CONFIRM,
788 sta->sta.addr, llid, plid, 0);
772 break; 789 break;
773 default: 790 default:
774 spin_unlock_bh(&sta->lock); 791 spin_unlock_bh(&sta->lock);
@@ -790,8 +807,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
790 llid = sta->llid; 807 llid = sta->llid;
791 reason = sta->reason; 808 reason = sta->reason;
792 spin_unlock_bh(&sta->lock); 809 spin_unlock_bh(&sta->lock);
793 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, 810 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
794 llid, plid, reason); 811 sta->sta.addr, llid, plid, reason);
795 break; 812 break;
796 default: 813 default:
797 spin_unlock_bh(&sta->lock); 814 spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d6470c7fd6ce..ba2da11a997b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -17,7 +17,7 @@
17#include <linux/if_arp.h> 17#include <linux/if_arp.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h> 20#include <linux/pm_qos.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <net/mac80211.h> 23#include <net/mac80211.h>
@@ -160,7 +160,8 @@ static int ecw2cw(int ecw)
160 */ 160 */
161static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, 161static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
162 struct ieee80211_ht_info *hti, 162 struct ieee80211_ht_info *hti,
163 const u8 *bssid, u16 ap_ht_cap_flags) 163 const u8 *bssid, u16 ap_ht_cap_flags,
164 bool beacon_htcap_ie)
164{ 165{
165 struct ieee80211_local *local = sdata->local; 166 struct ieee80211_local *local = sdata->local;
166 struct ieee80211_supported_band *sband; 167 struct ieee80211_supported_band *sband;
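ecw2cw() in the context above expands the exponent form carried in WMM parameters into a real contention window, CW = 2^ECW - 1, so ECW 4 yields CW 15 and ECW 10 yields CW 1023. A one-line sketch with a quick check:

#include <assert.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;		/* CW = 2^ECW - 1 */
}

int main(void)
{
	assert(ecw2cw(4) == 15);
	assert(ecw2cw(10) == 1023);
	return 0;
}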
@@ -232,6 +233,21 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 233 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
233 } 234 }
234 235
236 if (beacon_htcap_ie && (prev_chantype != channel_type)) {
237 /*
238 * Whenever the AP announces the HT mode change that can be
239 * 40MHz intolerant or etc., it would be safer to stop tx
240 * queues before doing hw config to avoid buffer overflow.
241 */
242 ieee80211_stop_queues_by_reason(&sdata->local->hw,
243 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
244
245 /* flush out all packets */
246 synchronize_net();
247
248 drv_flush(local, false);
249 }
250
235 /* channel_type change automatically detected */ 251 /* channel_type change automatically detected */
236 ieee80211_hw_config(local, 0); 252 ieee80211_hw_config(local, 0);
237 253
@@ -243,6 +259,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
243 IEEE80211_RC_HT_CHANGED, 259 IEEE80211_RC_HT_CHANGED,
244 channel_type); 260 channel_type);
245 rcu_read_unlock(); 261 rcu_read_unlock();
262
263 if (beacon_htcap_ie)
264 ieee80211_wake_queues_by_reason(&sdata->local->hw,
265 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
246 } 266 }
247 267
248 ht_opmode = le16_to_cpu(hti->operation_mode); 268 ht_opmode = le16_to_cpu(hti->operation_mode);
@@ -271,11 +291,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
271 struct ieee80211_mgmt *mgmt; 291 struct ieee80211_mgmt *mgmt;
272 292
273 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 293 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
274 if (!skb) { 294 if (!skb)
275 printk(KERN_DEBUG "%s: failed to allocate buffer for "
276 "deauth/disassoc frame\n", sdata->name);
277 return; 295 return;
278 } 296
279 skb_reserve(skb, local->hw.extra_tx_headroom); 297 skb_reserve(skb, local->hw.extra_tx_headroom);
280 298
281 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 299 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -330,6 +348,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
330{ 348{
331 struct sk_buff *skb; 349 struct sk_buff *skb;
332 struct ieee80211_hdr_3addr *nullfunc; 350 struct ieee80211_hdr_3addr *nullfunc;
351 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
333 352
334 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); 353 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
335 if (!skb) 354 if (!skb)
@@ -340,6 +359,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
340 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 359 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
341 360
342 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 361 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
362 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
363 IEEE80211_STA_CONNECTION_POLL))
364 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
365
343 ieee80211_tx_skb(sdata, skb); 366 ieee80211_tx_skb(sdata, skb);
344} 367}
345 368
@@ -354,11 +377,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
354 return; 377 return;
355 378
356 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); 379 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
357 if (!skb) { 380 if (!skb)
358 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
359 "nullfunc frame\n", sdata->name);
360 return; 381 return;
361 } 382
362 skb_reserve(skb, local->hw.extra_tx_headroom); 383 skb_reserve(skb, local->hw.extra_tx_headroom);
363 384
364 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); 385 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
@@ -394,6 +415,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
394 /* call "hw_config" only if doing sw channel switch */ 415 /* call "hw_config" only if doing sw channel switch */
395 ieee80211_hw_config(sdata->local, 416 ieee80211_hw_config(sdata->local,
396 IEEE80211_CONF_CHANGE_CHANNEL); 417 IEEE80211_CONF_CHANGE_CHANNEL);
418 } else {
419 /* update the device channel directly */
420 sdata->local->hw.conf.channel = sdata->local->oper_channel;
397 } 421 }
398 422
399 /* XXX: shouldn't really modify cfg80211-owned data! */ 423 /* XXX: shouldn't really modify cfg80211-owned data! */
@@ -608,7 +632,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
608{ 632{
609 struct ieee80211_if_managed *mgd = &sdata->u.mgd; 633 struct ieee80211_if_managed *mgd = &sdata->u.mgd;
610 struct sta_info *sta = NULL; 634 struct sta_info *sta = NULL;
611 u32 sta_flags = 0; 635 bool authorized = false;
612 636
613 if (!mgd->powersave) 637 if (!mgd->powersave)
614 return false; 638 return false;
@@ -626,13 +650,10 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
626 rcu_read_lock(); 650 rcu_read_lock();
627 sta = sta_info_get(sdata, mgd->bssid); 651 sta = sta_info_get(sdata, mgd->bssid);
628 if (sta) 652 if (sta)
629 sta_flags = get_sta_flags(sta); 653 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
630 rcu_read_unlock(); 654 rcu_read_unlock();
631 655
632 if (!(sta_flags & WLAN_STA_AUTHORIZED)) 656 return authorized;
633 return false;
634
635 return true;
636} 657}
637 658
638/* need to hold RTNL or interface lock */ 659/* need to hold RTNL or interface lock */
@@ -917,8 +938,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
917 params.aifs, params.cw_min, params.cw_max, 938 params.aifs, params.cw_min, params.cw_max,
918 params.txop, params.uapsd); 939 params.txop, params.uapsd);
919#endif 940#endif
920 local->tx_conf[queue] = params; 941 sdata->tx_conf[queue] = params;
921 if (drv_conf_tx(local, queue, &params)) 942 if (drv_conf_tx(local, sdata, queue, &params))
922 wiphy_debug(local->hw.wiphy, 943 wiphy_debug(local->hw.wiphy,
923 "failed to set TX queue parameters for queue %d\n", 944 "failed to set TX queue parameters for queue %d\n",
924 queue); 945 queue);
@@ -1076,7 +1097,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1076 mutex_lock(&local->sta_mtx); 1097 mutex_lock(&local->sta_mtx);
1077 sta = sta_info_get(sdata, bssid); 1098 sta = sta_info_get(sdata, bssid);
1078 if (sta) { 1099 if (sta) {
1079 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 1100 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1080 ieee80211_sta_tear_down_BA_sessions(sta, tx); 1101 ieee80211_sta_tear_down_BA_sessions(sta, tx);
1081 } 1102 }
1082 mutex_unlock(&local->sta_mtx); 1103 mutex_unlock(&local->sta_mtx);
@@ -1118,8 +1139,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1118 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 1139 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
1119 ieee80211_bss_info_change_notify(sdata, changed); 1140 ieee80211_bss_info_change_notify(sdata, changed);
1120 1141
1142 /* remove AP and TDLS peers */
1121 if (remove_sta) 1143 if (remove_sta)
1122 sta_info_destroy_addr(sdata, bssid); 1144 sta_info_flush(local, sdata);
1123 1145
1124 del_timer_sync(&sdata->u.mgd.conn_mon_timer); 1146 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1125 del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 1147 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1220,7 +1242,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1220 } else { 1242 } else {
1221 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1243 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1222 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, 1244 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
1223 (u32) -1, true); 1245 (u32) -1, true, false);
1224 } 1246 }
1225 1247
1226 ifmgd->probe_send_count++; 1248 ifmgd->probe_send_count++;
@@ -1482,17 +1504,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1482 1504
1483 ifmgd->aid = aid; 1505 ifmgd->aid = aid;
1484 1506
1485 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL); 1507 mutex_lock(&sdata->local->sta_mtx);
1486 if (!sta) { 1508 /*
1487 printk(KERN_DEBUG "%s: failed to alloc STA entry for" 1509 * station info was already allocated and inserted before
1488 " the AP\n", sdata->name); 1510 * the association and should be available to us
1511 */
1512 sta = sta_info_get_rx(sdata, cbss->bssid);
1513 if (WARN_ON(!sta)) {
1514 mutex_unlock(&sdata->local->sta_mtx);
1489 return false; 1515 return false;
1490 } 1516 }
1491 1517
1492 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | 1518 set_sta_flag(sta, WLAN_STA_AUTH);
1493 WLAN_STA_ASSOC_AP); 1519 set_sta_flag(sta, WLAN_STA_ASSOC);
1520 set_sta_flag(sta, WLAN_STA_ASSOC_AP);
1494 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 1521 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1495 set_sta_flags(sta, WLAN_STA_AUTHORIZED); 1522 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
1496 1523
1497 rates = 0; 1524 rates = 0;
1498 basic_rates = 0; 1525 basic_rates = 0;
@@ -1551,12 +1578,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1551 rate_control_rate_init(sta); 1578 rate_control_rate_init(sta);
1552 1579
1553 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) 1580 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
1554 set_sta_flags(sta, WLAN_STA_MFP); 1581 set_sta_flag(sta, WLAN_STA_MFP);
1555 1582
1556 if (elems.wmm_param) 1583 if (elems.wmm_param)
1557 set_sta_flags(sta, WLAN_STA_WME); 1584 set_sta_flag(sta, WLAN_STA_WME);
1558 1585
1559 err = sta_info_insert(sta); 1586 /* sta_info_reinsert will also unlock the mutex lock */
1587 err = sta_info_reinsert(sta);
1560 sta = NULL; 1588 sta = NULL;
1561 if (err) { 1589 if (err) {
1562 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1590 printk(KERN_DEBUG "%s: failed to insert STA entry for"
@@ -1584,7 +1612,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1584 (sdata->local->hw.queues >= 4) && 1612 (sdata->local->hw.queues >= 4) &&
1585 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1613 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1586 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1614 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1587 cbss->bssid, ap_ht_cap_flags); 1615 cbss->bssid, ap_ht_cap_flags,
1616 false);
1588 1617
1589 /* set AID and assoc capability, 1618 /* set AID and assoc capability,
1590 * ieee80211_set_associated() will tell the driver */ 1619 * ieee80211_set_associated() will tell the driver */
@@ -1918,7 +1947,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1918 rcu_read_unlock(); 1947 rcu_read_unlock();
1919 1948
1920 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1949 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1921 bssid, ap_ht_cap_flags); 1950 bssid, ap_ht_cap_flags, true);
1922 } 1951 }
1923 1952
1924 /* Note: country IE parsing is done for us by cfg80211 */ 1953 /* Note: country IE parsing is done for us by cfg80211 */
@@ -2429,6 +2458,29 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2429 return 0; 2458 return 0;
2430} 2459}
2431 2460
2461/* create and insert a dummy station entry */
2462static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
2463 u8 *bssid) {
2464 struct sta_info *sta;
2465 int err;
2466
2467 sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
2468 if (!sta)
2469 return -ENOMEM;
2470
2471 sta->dummy = true;
2472
2473 err = sta_info_insert(sta);
2474 sta = NULL;
2475 if (err) {
2476 printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
2477 " the AP (error %d)\n", sdata->name, err);
2478 return err;
2479 }
2480
2481 return 0;
2482}
2483
2432static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, 2484static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2433 struct sk_buff *skb) 2485 struct sk_buff *skb)
2434{ 2486{
@@ -2436,9 +2488,11 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2436 struct ieee80211_mgmt *mgmt; 2488 struct ieee80211_mgmt *mgmt;
2437 struct ieee80211_rx_status *rx_status; 2489 struct ieee80211_rx_status *rx_status;
2438 struct ieee802_11_elems elems; 2490 struct ieee802_11_elems elems;
2491 struct cfg80211_bss *cbss = wk->assoc.bss;
2439 u16 status; 2492 u16 status;
2440 2493
2441 if (!skb) { 2494 if (!skb) {
2495 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2442 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); 2496 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
2443 goto destroy; 2497 goto destroy;
2444 } 2498 }
@@ -2468,12 +2522,16 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2468 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { 2522 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
2469 mutex_unlock(&wk->sdata->u.mgd.mtx); 2523 mutex_unlock(&wk->sdata->u.mgd.mtx);
2470 /* oops -- internal error -- send timeout for now */ 2524 /* oops -- internal error -- send timeout for now */
2525 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2471 cfg80211_send_assoc_timeout(wk->sdata->dev, 2526 cfg80211_send_assoc_timeout(wk->sdata->dev,
2472 wk->filter_ta); 2527 wk->filter_ta);
2473 return WORK_DONE_DESTROY; 2528 return WORK_DONE_DESTROY;
2474 } 2529 }
2475 2530
2476 mutex_unlock(&wk->sdata->u.mgd.mtx); 2531 mutex_unlock(&wk->sdata->u.mgd.mtx);
2532 } else {
2533 /* assoc failed - destroy the dummy station entry */
2534 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2477 } 2535 }
2478 2536
2479 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); 2537 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@ -2492,7 +2550,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2492 struct ieee80211_bss *bss = (void *)req->bss->priv; 2550 struct ieee80211_bss *bss = (void *)req->bss->priv;
2493 struct ieee80211_work *wk; 2551 struct ieee80211_work *wk;
2494 const u8 *ssid; 2552 const u8 *ssid;
2495 int i; 2553 int i, err;
2496 2554
2497 mutex_lock(&ifmgd->mtx); 2555 mutex_lock(&ifmgd->mtx);
2498 if (ifmgd->associated) { 2556 if (ifmgd->associated) {
@@ -2517,6 +2575,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2517 if (!wk) 2575 if (!wk)
2518 return -ENOMEM; 2576 return -ENOMEM;
2519 2577
2578 /*
2579 * create a dummy station info entry in order
2580 * to start accepting incoming EAPOL packets from the station
2581 */
2582 err = ieee80211_pre_assoc(sdata, req->bss->bssid);
2583 if (err) {
2584 kfree(wk);
2585 return err;
2586 }
2587
2520 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 2588 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
2521 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 2589 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2522 2590
@@ -2674,7 +2742,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2674 req->reason_code, cookie, 2742 req->reason_code, cookie,
2675 !req->local_state_change); 2743 !req->local_state_change);
2676 if (assoc_bss) 2744 if (assoc_bss)
2677 sta_info_destroy_addr(sdata, bssid); 2745 sta_info_flush(sdata->local, sdata);
2678 2746
2679 mutex_lock(&sdata->local->mtx); 2747 mutex_lock(&sdata->local->mtx);
2680 ieee80211_recalc_idle(sdata->local); 2748 ieee80211_recalc_idle(sdata->local);
@@ -2714,7 +2782,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2714 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 2782 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
2715 IEEE80211_STYPE_DISASSOC, req->reason_code, 2783 IEEE80211_STYPE_DISASSOC, req->reason_code,
2716 cookie, !req->local_state_change); 2784 cookie, !req->local_state_change);
2717 sta_info_destroy_addr(sdata, bssid); 2785 sta_info_flush(sdata->local, sdata);
2718 2786
2719 mutex_lock(&sdata->local->mtx); 2787 mutex_lock(&sdata->local->mtx);
2720 ieee80211_recalc_idle(sdata->local); 2788 ieee80211_recalc_idle(sdata->local);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 6326d3439861..9ee7164b207c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -42,7 +42,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
42 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 42 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
43 mutex_lock(&local->sta_mtx); 43 mutex_lock(&local->sta_mtx);
44 list_for_each_entry(sta, &local->sta_list, list) { 44 list_for_each_entry(sta, &local->sta_list, list) {
45 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 45 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
46 ieee80211_sta_tear_down_BA_sessions(sta, true); 46 ieee80211_sta_tear_down_BA_sessions(sta, true);
47 } 47 }
48 mutex_unlock(&local->sta_mtx); 48 mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3d5a2cb835c4..ff5c3aa48a15 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -199,7 +199,7 @@ static void rate_control_release(struct kref *kref)
199 kfree(ctrl_ref); 199 kfree(ctrl_ref);
200} 200}
201 201
202static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc) 202static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
203{ 203{
204 struct sk_buff *skb = txrc->skb; 204 struct sk_buff *skb = txrc->skb;
205 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 205 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -208,7 +208,9 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
208 208
209 fc = hdr->frame_control; 209 fc = hdr->frame_control;
210 210
211 return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc); 211 return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
212 IEEE80211_TX_CTL_USE_MINRATE)) ||
213 !ieee80211_is_data(fc);
212} 214}
213 215
214static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, 216static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
@@ -233,6 +235,27 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
233 /* could not find a basic rate; use original selection */ 235 /* could not find a basic rate; use original selection */
234} 236}
235 237
238static inline s8
239rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
240 struct ieee80211_sta *sta)
241{
242 int i;
243
244 for (i = 0; i < sband->n_bitrates; i++) {
245 struct ieee80211_rate *srate = &sband->bitrates[i];
246 if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
247 (srate->bitrate == 55) || (srate->bitrate == 110))
248 continue;
249
250 if (rate_supported(sta, sband->band, i))
251 return i;
252 }
253
254 /* No matching rate found */
255 return 0;
256}
257
258
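rate_lowest_non_cck_index() above filters out the four 2.4 GHz CCK rates by their bitrate values, which mac80211 stores in units of 100 kbit/s: 10, 20, 55 and 110 for 1, 2, 5.5 and 11 Mbit/s. A standalone run of that filter over a typical 2.4 GHz rate table (sketch):

#include <stdio.h>

static int is_cck(int bitrate_100kbps)
{
	return bitrate_100kbps == 10 || bitrate_100kbps == 20 ||
	       bitrate_100kbps == 55 || bitrate_100kbps == 110;
}

int main(void)
{
	/* 1, 2, 5.5, 11 Mbit/s CCK followed by 6, 9 Mbit/s OFDM */
	static const int rates[] = { 10, 20, 55, 110, 60, 90 };
	int i;

	for (i = 0; i < (int)(sizeof(rates) / sizeof(rates[0])); i++)
		if (!is_cck(rates[i])) {
			printf("lowest non-CCK index: %d (%d.%d Mbit/s)\n",
			       i, rates[i] / 10, rates[i] % 10);
			break;
		}
	return 0;	/* prints index 4 (6.0 Mbit/s) */
}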
236bool rate_control_send_low(struct ieee80211_sta *sta, 259bool rate_control_send_low(struct ieee80211_sta *sta,
237 void *priv_sta, 260 void *priv_sta,
238 struct ieee80211_tx_rate_control *txrc) 261 struct ieee80211_tx_rate_control *txrc)
@@ -241,8 +264,14 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
241 struct ieee80211_supported_band *sband = txrc->sband; 264 struct ieee80211_supported_band *sband = txrc->sband;
242 int mcast_rate; 265 int mcast_rate;
243 266
244 if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) { 267 if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
245 info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta); 268 if ((sband->band != IEEE80211_BAND_2GHZ) ||
269 !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
270 info->control.rates[0].idx =
271 rate_lowest_index(txrc->sband, sta);
272 else
273 info->control.rates[0].idx =
274 rate_lowest_non_cck_index(txrc->sband, sta);
246 info->control.rates[0].count = 275 info->control.rates[0].count =
247 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 276 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
248 1 : txrc->hw->max_rate_tries; 277 1 : txrc->hw->max_rate_tries;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 66a1eeb279c6..cdb28535716b 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -281,6 +281,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
281 281
282 mr = minstrel_get_ratestats(mi, mg->max_tp_rate); 282 mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
283 if (cur_tp < mr->cur_tp) { 283 if (cur_tp < mr->cur_tp) {
284 mi->max_tp_rate2 = mi->max_tp_rate;
285 cur_tp2 = cur_tp;
284 mi->max_tp_rate = mg->max_tp_rate; 286 mi->max_tp_rate = mg->max_tp_rate;
285 cur_tp = mr->cur_tp; 287 cur_tp = mr->cur_tp;
286 } 288 }
@@ -452,7 +454,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
452 454
453 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 455 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
454 minstrel_ht_update_stats(mp, mi); 456 minstrel_ht_update_stats(mp, mi);
455 minstrel_aggr_check(mp, sta, skb); 457 if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
458 minstrel_aggr_check(mp, sta, skb);
456 } 459 }
457} 460}
458 461
@@ -608,7 +611,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
608 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); 611 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
609 612
610 info->flags |= mi->tx_flags; 613 info->flags |= mi->tx_flags;
611 sample_idx = minstrel_get_sample_rate(mp, mi); 614
615 /* Don't use EAPOL frames for sampling on non-mrr hw */
616 if (mp->hw->max_rates == 1 &&
617 txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
618 sample_idx = -1;
619 else
620 sample_idx = minstrel_get_sample_rate(mp, mi);
612 621
613#ifdef CONFIG_MAC80211_DEBUGFS 622#ifdef CONFIG_MAC80211_DEBUGFS
614 /* use fixed index if set */ 623 /* use fixed index if set */
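The minstrel_ht hunk above exempts EAPOL frames from rate sampling on single-rate hardware by comparing skb->protocol against ETH_P_PAE (0x888E), kept in network byte order. A minimal sketch of that test (toy frame struct, not the skb API):

#include <arpa/inet.h>	/* htons */
#include <stdbool.h>
#include <stdint.h>

#define ETH_P_PAE 0x888E	/* 802.1X Port Access Entity (EAPOL) */

struct toy_frame { uint16_t protocol; /* network byte order */ };

static bool is_eapol(const struct toy_frame *f)
{
	return f->protocol == htons(ETH_P_PAE);
}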
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fe2c2a717793..b867bd55de7a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -476,7 +476,6 @@ static ieee80211_rx_result
476ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 476ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
477{ 477{
478 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 478 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
479 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
480 char *dev_addr = rx->sdata->vif.addr; 479 char *dev_addr = rx->sdata->vif.addr;
481 480
482 if (ieee80211_is_data(hdr->frame_control)) { 481 if (ieee80211_is_data(hdr->frame_control)) {
@@ -524,14 +523,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
524 523
525 } 524 }
526 525
527#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
528
529 if (ieee80211_is_data(hdr->frame_control) &&
530 is_multicast_ether_addr(hdr->addr1) &&
531 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
532 return RX_DROP_MONITOR;
533#undef msh_h_get
534
535 return RX_CONTINUE; 526 return RX_CONTINUE;
536} 527}
537 528
@@ -850,8 +841,21 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
850 ieee80211_is_pspoll(hdr->frame_control)) && 841 ieee80211_is_pspoll(hdr->frame_control)) &&
851 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 842 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
852 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 843 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
853 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) 844 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
845 if (rx->sta && rx->sta->dummy &&
846 ieee80211_is_data_present(hdr->frame_control)) {
847 u16 ethertype;
848 u8 *payload;
849
850 payload = rx->skb->data +
851 ieee80211_hdrlen(hdr->frame_control);
852 ethertype = (payload[6] << 8) | payload[7];
853 if (cpu_to_be16(ethertype) ==
854 rx->sdata->control_port_protocol)
855 return RX_CONTINUE;
856 }
854 return RX_DROP_MONITOR; 857 return RX_DROP_MONITOR;
858 }
855 859
856 return RX_CONTINUE; 860 return RX_CONTINUE;
857} 861}
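The dummy-station branch above reads the ethertype directly out of the LLC/SNAP header that follows the 802.11 MAC header: bytes 6 and 7 of the payload, big-endian, matched against the interface's control-port protocol (normally EAPOL). A sketch assuming an RFC 1042 SNAP header (not the kernel helpers):

#include <stdint.h>

/* payload = first byte after the 802.11 MAC header:
 *   AA AA 03 00 00 00 <ethertype hi> <ethertype lo> ...
 */
static uint16_t snap_ethertype(const uint8_t *payload)
{
	return (uint16_t)((payload[6] << 8) | payload[7]);
}
/* e.g. snap_ethertype() == 0x888E selects EAPOL control-port frames */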
@@ -1106,7 +1110,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
1106 struct ieee80211_local *local = sdata->local; 1110 struct ieee80211_local *local = sdata->local;
1107 1111
1108 atomic_inc(&sdata->bss->num_sta_ps); 1112 atomic_inc(&sdata->bss->num_sta_ps);
1109 set_sta_flags(sta, WLAN_STA_PS_STA); 1113 set_sta_flag(sta, WLAN_STA_PS_STA);
1110 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1114 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1111 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1115 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1112#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1116#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -1126,7 +1130,7 @@ static void ap_sta_ps_end(struct sta_info *sta)
 		sdata->name, sta->sta.addr, sta->sta.aid);
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 
-	if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
+	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
 		       sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1145,7 +1149,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
 	WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
 
 	/* Don't let the same PS state be set twice */
-	in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
 	if ((start && in_ps) || (!start && !in_ps))
 		return -EINVAL;
 
@@ -1159,6 +1163,81 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
 
 static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
+{
+	struct ieee80211_sub_if_data *sdata = rx->sdata;
+	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+	int tid, ac;
+
+	if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+		return RX_CONTINUE;
+
+	if (sdata->vif.type != NL80211_IFTYPE_AP &&
+	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+		return RX_CONTINUE;
+
+	/*
+	 * The device handles station powersave, so don't do anything about
+	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
+	 * it to mac80211 since they're handled.)
+	 */
+	if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+		return RX_CONTINUE;
+
+	/*
+	 * Don't do anything if the station isn't already asleep. In
+	 * the uAPSD case, the station will probably be marked asleep,
+	 * in the PS-Poll case the station must be confused ...
+	 */
+	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
+		return RX_CONTINUE;
+
+	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
+		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
+			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+				ieee80211_sta_ps_deliver_poll_response(rx->sta);
+			else
+				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
+		}
+
+		/* Free PS Poll skb here instead of returning RX_DROP that would
+		 * count as a dropped frame. */
+		dev_kfree_skb(rx->skb);
+
+		return RX_QUEUED;
+	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
+		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
+		   ieee80211_has_pm(hdr->frame_control) &&
+		   (ieee80211_is_data_qos(hdr->frame_control) ||
+		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
+		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+		ac = ieee802_1d_to_ac[tid & 7];
+
+		/*
+		 * If this AC is not trigger-enabled do nothing.
+		 *
+		 * NB: This could/should check a separate bitmap of trigger-
+		 * enabled queues, but for now we only implement uAPSD w/o
+		 * TSPEC changes to the ACs, so they're always the same.
+		 */
+		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
+			return RX_CONTINUE;
+
+		/* if we are in a service period, do nothing */
+		if (test_sta_flag(rx->sta, WLAN_STA_SP))
+			return RX_CONTINUE;
+
+		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+			ieee80211_sta_ps_deliver_uapsd(rx->sta);
+		else
+			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
+	}
+
+	return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 {
 	struct sta_info *sta = rx->sta;
@@ -1216,7 +1295,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
-		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
+		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			/*
 			 * Ignore doze->wake transitions that are
 			 * indicated by non-data frames, the standard
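
For reference, the TID-to-AC lookup used by the new handler above (ieee802_1d_to_ac[tid & 7]) is the inverse of ieee80211_tids_for_ac() introduced in the sta_info.c hunks further down: TIDs 6-7 map to VO, 4-5 to VI, 0 and 3 to BE, and 1-2 to BK. A standalone sketch, assuming mac80211's enum order (VO=0 ... BK=3):

	#include <stdio.h>

	enum { AC_VO, AC_VI, AC_BE, AC_BK };	/* assumed enum order */

	/* 802.1D priority (= TID 0..7) -> access category */
	static const int tid_to_ac[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
	};

	int main(void)
	{
		for (int tid = 0; tid < 8; tid++)
			printf("TID %d -> AC %d\n", tid, tid_to_ac[tid & 7]);
		return 0;
	}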
@@ -1469,33 +1548,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
-{
-	struct ieee80211_sub_if_data *sdata = rx->sdata;
-	__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
-	if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
-		   !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
-		return RX_CONTINUE;
-
-	if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
-	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
-		return RX_DROP_UNUSABLE;
-
-	if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
-		ieee80211_sta_ps_deliver_poll_response(rx->sta);
-	else
-		set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
-
-	/* Free PS Poll skb here instead of returning RX_DROP that would
-	 * count as a dropped frame. */
-	dev_kfree_skb(rx->skb);
-
-	return RX_QUEUED;
-}
-
-static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
 {
 	u8 *data = rx->skb->data;
@@ -1518,7 +1570,7 @@ static int
 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 {
 	if (unlikely(!rx->sta ||
-		     !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
+		     !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
 		return -EACCES;
 
 	return 0;
@@ -1561,7 +1613,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
 	if (status->flag & RX_FLAG_DECRYPTED)
 		return 0;
 
-	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
+	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
 		if (unlikely(!ieee80211_has_protected(fc) &&
 			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
 			     rx->key)) {
@@ -1827,6 +1879,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
 
+	/* frame is in RMC, don't forward */
+	if (ieee80211_is_data(hdr->frame_control) &&
+	    is_multicast_ether_addr(hdr->addr1) &&
+	    mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+		return RX_DROP_MONITOR;
+
 	if (!ieee80211_is_data(hdr->frame_control))
 		return RX_CONTINUE;
 
@@ -1834,6 +1892,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 		/* illegal frame */
 		return RX_DROP_MONITOR;
 
+	if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
+		IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+						dropped_frames_congestion);
+		return RX_DROP_MONITOR;
+	}
+
 	if (mesh_hdr->flags & MESH_FLAGS_AE) {
 		struct mesh_path *mppath;
 		char *proxied_addr;
@@ -1889,13 +1953,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 		memset(info, 0, sizeof(*info));
 		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
 		info->control.vif = &rx->sdata->vif;
-		skb_set_queue_mapping(skb,
-				ieee80211_select_queue(rx->sdata, fwd_skb));
-		ieee80211_set_qos_hdr(local, skb);
-		if (is_multicast_ether_addr(fwd_hdr->addr1))
+		if (is_multicast_ether_addr(fwd_hdr->addr1)) {
 			IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
 							fwded_mcast);
-		else {
+			skb_set_queue_mapping(fwd_skb,
+					      ieee80211_select_queue(sdata, fwd_skb));
+			ieee80211_set_qos_hdr(sdata, fwd_skb);
+		} else {
 			int err;
 			/*
 			 * Save TA to addr1 to send TA a path error if a
@@ -2220,12 +2284,29 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 			goto handled;
 		}
 		break;
+	case WLAN_CATEGORY_SELF_PROTECTED:
+		switch (mgmt->u.action.u.self_prot.action_code) {
+		case WLAN_SP_MESH_PEERING_OPEN:
+		case WLAN_SP_MESH_PEERING_CLOSE:
+		case WLAN_SP_MESH_PEERING_CONFIRM:
+			if (!ieee80211_vif_is_mesh(&sdata->vif))
+				goto invalid;
+			if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+				/* userspace handles this frame */
+				break;
+			goto queue;
+		case WLAN_SP_MGK_INFORM:
+		case WLAN_SP_MGK_ACK:
+			if (!ieee80211_vif_is_mesh(&sdata->vif))
+				goto invalid;
+			break;
+		}
+		break;
 	case WLAN_CATEGORY_MESH_ACTION:
 		if (!ieee80211_vif_is_mesh(&sdata->vif))
 			break;
-		goto queue;
-	case WLAN_CATEGORY_MESH_PATH_SEL:
-		if (!mesh_path_sel_is_hwmp(sdata))
+		if (mesh_action_is_path_sel(mgmt) &&
+		    (!mesh_path_sel_is_hwmp(sdata)))
 			break;
 		goto queue;
 	}
@@ -2534,17 +2615,17 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 
 		CALL_RXH(ieee80211_rx_h_decrypt)
 		CALL_RXH(ieee80211_rx_h_check_more_data)
+		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
 		CALL_RXH(ieee80211_rx_h_sta_process)
 		CALL_RXH(ieee80211_rx_h_defragment)
-		CALL_RXH(ieee80211_rx_h_ps_poll)
 		CALL_RXH(ieee80211_rx_h_michael_mic_verify)
 		/* must be after MMIC verify so header is counted in MPDU mic */
-		CALL_RXH(ieee80211_rx_h_remove_qos_control)
-		CALL_RXH(ieee80211_rx_h_amsdu)
 #ifdef CONFIG_MAC80211_MESH
 		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
 			CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
+		CALL_RXH(ieee80211_rx_h_remove_qos_control)
+		CALL_RXH(ieee80211_rx_h_amsdu)
 		CALL_RXH(ieee80211_rx_h_data)
 		CALL_RXH(ieee80211_rx_h_ctrl);
 		CALL_RXH(ieee80211_rx_h_mgmt_check)
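
The reordering above matters because the RX handlers form an early-exit chain: CALL_RXH() (defined earlier in rx.c, roughly "run the handler; if it returns anything but RX_CONTINUE, stop") aborts the remaining handlers, so uapsd_and_pspoll must run before sta_process, and mesh forwarding must now see the frame before the QoS header is stripped. A self-contained sketch of the pattern (illustrative handler names, not mac80211 APIs):

	#include <stdio.h>

	enum rx_result { RX_CONTINUE, RX_QUEUED, RX_DROP };

	static enum rx_result h_decrypt(int *frame) { (void)frame; return RX_CONTINUE; }
	static enum rx_result h_ps_poll(int *frame) { return *frame ? RX_QUEUED : RX_CONTINUE; }
	static enum rx_result h_deliver(int *frame) { (void)frame; return RX_QUEUED; }

	/* each handler either lets the frame continue or ends the chain */
	#define CALL_RXH(rxh)				\
		do {					\
			res = rxh(&frame);		\
			if (res != RX_CONTINUE)		\
				goto done;		\
		} while (0)

	int main(void)
	{
		int frame = 1;			/* pretend: a PS-Poll-style frame */
		enum rx_result res = RX_CONTINUE;

		CALL_RXH(h_decrypt);
		CALL_RXH(h_ps_poll);		/* consumes it; later handlers never run */
		CALL_RXH(h_deliver);
	done:
		printf("chain result: %d\n", res);
		return 0;
	}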
@@ -2686,7 +2767,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
 			} else if (!ieee80211_bssid_match(bssid,
 						sdata->vif.addr)) {
 				if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-				    !ieee80211_is_beacon(hdr->frame_control))
+				    !ieee80211_is_beacon(hdr->frame_control) &&
+				    !(ieee80211_is_action(hdr->frame_control) &&
+				      sdata->vif.p2p))
 					return 0;
 				status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
 			}
@@ -2791,7 +2874,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	if (ieee80211_is_data(fc)) {
 		prev_sta = NULL;
 
-		for_each_sta_info(local, hdr->addr2, sta, tmp) {
+		for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
 			if (!prev_sta) {
 				prev_sta = sta;
 				continue;
@@ -2835,7 +2918,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 			continue;
 		}
 
-		rx.sta = sta_info_get_bss(prev, hdr->addr2);
+		rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
 		rx.sdata = prev;
 		ieee80211_prepare_and_rx_handle(&rx, skb, false);
 
@@ -2843,7 +2926,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	}
 
 	if (prev) {
-		rx.sta = sta_info_get_bss(prev, hdr->addr2);
+		rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
 		rx.sdata = prev;
 
 		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 6f09eca01112..83a0b050b374 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,7 +14,7 @@
 
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
@@ -254,6 +254,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
 					 req->ie, req->ie_len, band,
 					 req->rates[band], 0);
 	local->hw_scan_req->ie_len = ielen;
+	local->hw_scan_req->no_cck = req->no_cck;
 
 	return true;
 }
@@ -660,7 +661,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
 			local->scan_req->ssids[i].ssid,
 			local->scan_req->ssids[i].ssid_len,
 			local->scan_req->ie, local->scan_req->ie_len,
-			local->scan_req->rates[band], false);
+			local->scan_req->rates[band], false,
+			local->scan_req->no_cck);
 
 	/*
 	 * After sending probe requests, wait for probe responses
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 7733f66ee2c4..578eea3fc04d 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
 
 	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
 				sizeof(struct ieee80211_msrment_ie));
-
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer for "
-				"measurement report frame\n", sdata->name);
+	if (!skb)
 		return;
-	}
 
 	skb_reserve(skb, local->hw.extra_tx_headroom);
 	msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 21070e9bc8d0..ce962d2c8782 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -24,6 +24,7 @@
 #include "sta_info.h"
 #include "debugfs_sta.h"
 #include "mesh.h"
+#include "wme.h"
 
 /**
  * DOC: STA information lifetime rules
@@ -72,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
 	if (!s)
 		return -ENOENT;
 	if (s == sta) {
-		rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
+		RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
 				   s->hnext);
 		return 0;
 	}
@@ -82,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
 		s = rcu_dereference_protected(s->hnext,
 					lockdep_is_held(&local->sta_lock));
 	if (rcu_access_pointer(s->hnext)) {
-		rcu_assign_pointer(s->hnext, sta->hnext);
+		RCU_INIT_POINTER(s->hnext, sta->hnext);
 		return 0;
 	}
 
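
A note on the rcu_assign_pointer() to RCU_INIT_POINTER() conversions in this file: rcu_assign_pointer() includes a write barrier that orders initialization of a newly published object against publication of its pointer; RCU_INIT_POINTER() omits the barrier and is only appropriate when no freshly initialized memory becomes reachable, as in the unlink paths here where everything a reader can now reach was published long ago. A userspace analogue of the distinction using C11 atomics (own illustration, not from this patch):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct node { int val; struct node *_Atomic next; };

	static struct node *_Atomic head;

	static void publish(int val)
	{
		struct node *n = malloc(sizeof(*n));

		n->val = val;
		atomic_store_explicit(&n->next,
				      atomic_load_explicit(&head, memory_order_relaxed),
				      memory_order_relaxed);
		/* like rcu_assign_pointer(): order n's init before publication */
		atomic_store_explicit(&head, n, memory_order_release);
	}

	static void unlink_head(void)
	{
		struct node *n = atomic_load_explicit(&head, memory_order_relaxed);

		if (!n)
			return;
		/* like RCU_INIT_POINTER(): the successor was published long ago */
		atomic_store_explicit(&head,
				      atomic_load_explicit(&n->next, memory_order_relaxed),
				      memory_order_relaxed);
		/* real RCU code would defer freeing n until a grace period */
	}

	int main(void)
	{
		publish(1);
		publish(2);
		unlink_head();
		return 0;
	}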
@@ -100,6 +101,27 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
 				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
+		if (sta->sdata == sdata && !sta->dummy &&
+		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+			break;
+		sta = rcu_dereference_check(sta->hnext,
+					    lockdep_is_held(&local->sta_lock) ||
+					    lockdep_is_held(&local->sta_mtx));
+	}
+	return sta;
+}
+
+/* get a station info entry even if it is a dummy station */
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+				 const u8 *addr)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+				    lockdep_is_held(&local->sta_lock) ||
+				    lockdep_is_held(&local->sta_mtx));
+	while (sta) {
 		if (sta->sdata == sdata &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
@@ -126,6 +148,32 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
 	while (sta) {
 		if ((sta->sdata == sdata ||
 		     (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
+		    !sta->dummy &&
+		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+			break;
+		sta = rcu_dereference_check(sta->hnext,
+					    lockdep_is_held(&local->sta_lock) ||
+					    lockdep_is_held(&local->sta_mtx));
+	}
+	return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans (including dummy stations)
+ */
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+				     const u8 *addr)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+				    lockdep_is_held(&local->sta_lock) ||
+				    lockdep_is_held(&local->sta_mtx));
+	while (sta) {
+		if ((sta->sdata == sdata ||
+		     (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
@@ -184,7 +232,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
 			      struct sta_info *sta)
 {
 	sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
-	rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+	RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
 }
 
 static void sta_unblock(struct work_struct *wk)
@@ -196,13 +244,22 @@ static void sta_unblock(struct work_struct *wk)
 	if (sta->dead)
 		return;
 
-	if (!test_sta_flags(sta, WLAN_STA_PS_STA))
+	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
 		ieee80211_sta_ps_deliver_wakeup(sta);
-	else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
-		clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+		local_bh_disable();
 		ieee80211_sta_ps_deliver_poll_response(sta);
+		local_bh_enable();
+	} else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
+		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+		local_bh_disable();
+		ieee80211_sta_ps_deliver_uapsd(sta);
+		local_bh_enable();
 	} else
-		clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
 }
 
 static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -235,7 +292,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 		return NULL;
 
 	spin_lock_init(&sta->lock);
-	spin_lock_init(&sta->flaglock);
 	INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
 	mutex_init(&sta->ampdu_mlme.mtx);
@@ -262,8 +318,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 		 */
 		sta->timer_to_tid[i] = i;
 	}
-	skb_queue_head_init(&sta->ps_tx_buf);
-	skb_queue_head_init(&sta->tx_filtered);
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		skb_queue_head_init(&sta->ps_tx_buf[i]);
+		skb_queue_head_init(&sta->tx_filtered[i]);
+	}
 
 	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
 		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
@@ -280,7 +338,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	return sta;
 }
 
-static int sta_info_finish_insert(struct sta_info *sta, bool async)
+static int sta_info_finish_insert(struct sta_info *sta,
+				  bool async, bool dummy_reinsert)
 {
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,50 +349,58 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
 
 	lockdep_assert_held(&local->sta_mtx);
 
-	/* notify driver */
-	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-		sdata = container_of(sdata->bss,
-				     struct ieee80211_sub_if_data,
-				     u.ap);
-	err = drv_sta_add(local, sdata, &sta->sta);
-	if (err) {
-		if (!async)
-			return err;
-		printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
-		       " - keeping it anyway.\n",
-		       sdata->name, sta->sta.addr, err);
-	} else {
-		sta->uploaded = true;
+	if (!sta->dummy || dummy_reinsert) {
+		/* notify driver */
+		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+			sdata = container_of(sdata->bss,
+					     struct ieee80211_sub_if_data,
+					     u.ap);
+		err = drv_sta_add(local, sdata, &sta->sta);
+		if (err) {
+			if (!async)
+				return err;
+			printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+			       "driver (%d) - keeping it anyway.\n",
+			       sdata->name, sta->sta.addr, err);
+		} else {
+			sta->uploaded = true;
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		if (async)
-			wiphy_debug(local->hw.wiphy,
-				    "Finished adding IBSS STA %pM\n",
-				    sta->sta.addr);
+			if (async)
+				wiphy_debug(local->hw.wiphy,
+					    "Finished adding IBSS STA %pM\n",
+					    sta->sta.addr);
 #endif
+		}
+
+		sdata = sta->sdata;
 	}
 
-	sdata = sta->sdata;
+	if (!dummy_reinsert) {
+		if (!async) {
+			local->num_sta++;
+			local->sta_generation++;
+			smp_mb();
 
-	if (!async) {
-		local->num_sta++;
-		local->sta_generation++;
-		smp_mb();
+			/* make the station visible */
+			spin_lock_irqsave(&local->sta_lock, flags);
+			sta_info_hash_add(local, sta);
+			spin_unlock_irqrestore(&local->sta_lock, flags);
+		}
 
-		/* make the station visible */
-		spin_lock_irqsave(&local->sta_lock, flags);
-		sta_info_hash_add(local, sta);
-		spin_unlock_irqrestore(&local->sta_lock, flags);
+		list_add(&sta->list, &local->sta_list);
+	} else {
+		sta->dummy = false;
 	}
 
-	list_add(&sta->list, &local->sta_list);
-
-	ieee80211_sta_debugfs_add(sta);
-	rate_control_add_sta_debugfs(sta);
-
-	sinfo.filled = 0;
-	sinfo.generation = local->sta_generation;
-	cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+	if (!sta->dummy) {
+		ieee80211_sta_debugfs_add(sta);
+		rate_control_add_sta_debugfs(sta);
+
+		memset(&sinfo, 0, sizeof(sinfo));
+		sinfo.filled = 0;
+		sinfo.generation = local->sta_generation;
+		cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+	}
 
 	return 0;
 }
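
The dummy_reinsert plumbing above supports a two-phase station insertion; its callers are outside this hunk, so what follows is only a userspace model of the apparent intended flow, not mac80211 code: a "dummy" entry becomes visible for RX matching first, and a later re-insert completes it (debugfs entries and the cfg80211 notification in the real code).

	#include <stdbool.h>
	#include <stdio.h>

	struct sta { bool inserted, dummy, notified; };

	static int sta_insert(struct sta *s)
	{
		if (s->inserted && !s->dummy)
			return -1;		/* -EEXIST */
		s->inserted = true;
		if (!s->dummy)
			s->notified = true;	/* cfg80211_new_sta() in the real code */
		return 0;
	}

	static int sta_reinsert(struct sta *s)
	{
		s->dummy = false;		/* flip the flag ... */
		return sta_insert(s);		/* ... and finish the insert */
	}

	int main(void)
	{
		struct sta s = { .dummy = true };

		sta_insert(&s);			/* e.g. at authentication: partial entry */
		printf("after insert: dummy=%d notified=%d\n", s.dummy, s.notified);
		sta_reinsert(&s);		/* e.g. at association: full entry */
		printf("after reinsert: dummy=%d notified=%d\n", s.dummy, s.notified);
		return 0;
	}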
@@ -350,7 +417,7 @@ static void sta_info_finish_pending(struct ieee80211_local *local)
 		list_del(&sta->list);
 		spin_unlock_irqrestore(&local->sta_lock, flags);
 
-		sta_info_finish_insert(sta, true);
+		sta_info_finish_insert(sta, true, false);
 
 		spin_lock_irqsave(&local->sta_lock, flags);
 	}
@@ -367,106 +434,117 @@ static void sta_info_finish_work(struct work_struct *work)
 	mutex_unlock(&local->sta_mtx);
 }
 
-int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_check(struct sta_info *sta)
 {
-	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	unsigned long flags;
-	int err = 0;
 
 	/*
 	 * Can't be a WARN_ON because it can be triggered through a race:
 	 * something inserts a STA (on one CPU) without holding the RTNL
 	 * and another CPU turns off the net device.
 	 */
-	if (unlikely(!ieee80211_sdata_running(sdata))) {
-		err = -ENETDOWN;
-		rcu_read_lock();
-		goto out_free;
-	}
+	if (unlikely(!ieee80211_sdata_running(sdata)))
+		return -ENETDOWN;
 
 	if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
-		    is_multicast_ether_addr(sta->sta.addr))) {
-		err = -EINVAL;
+		    is_multicast_ether_addr(sta->sta.addr)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
+{
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->sta_lock, flags);
+	/* check if STA exists already */
+	if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
+		spin_unlock_irqrestore(&local->sta_lock, flags);
 		rcu_read_lock();
-		goto out_free;
+		return -EEXIST;
 	}
 
-	/*
-	 * In ad-hoc mode, we sometimes need to insert stations
-	 * from tasklet context from the RX path. To avoid races,
-	 * always do so in that case -- see the comment below.
-	 */
-	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
-		spin_lock_irqsave(&local->sta_lock, flags);
-		/* check if STA exists already */
-		if (sta_info_get_bss(sdata, sta->sta.addr)) {
-			spin_unlock_irqrestore(&local->sta_lock, flags);
-			rcu_read_lock();
-			err = -EEXIST;
-			goto out_free;
-		}
-
-		local->num_sta++;
-		local->sta_generation++;
-		smp_mb();
-		sta_info_hash_add(local, sta);
+	local->num_sta++;
+	local->sta_generation++;
+	smp_mb();
+	sta_info_hash_add(local, sta);
 
-		list_add_tail(&sta->list, &local->sta_pending_list);
+	list_add_tail(&sta->list, &local->sta_pending_list);
 
-		rcu_read_lock();
-		spin_unlock_irqrestore(&local->sta_lock, flags);
+	rcu_read_lock();
+	spin_unlock_irqrestore(&local->sta_lock, flags);
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
-			    sta->sta.addr);
+	wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
+		    sta->sta.addr);
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
-		ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+	ieee80211_queue_work(&local->hw, &local->sta_finish_work);
 
-		return 0;
-	}
+	return 0;
+}
+
+/*
+ * should be called with sta_mtx locked
+ * this function replaces the mutex lock
+ * with an RCU lock
+ */
+static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+{
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	unsigned long flags;
+	struct sta_info *exist_sta;
+	bool dummy_reinsert = false;
+	int err = 0;
+
+	lockdep_assert_held(&local->sta_mtx);
 
 	/*
 	 * On first glance, this will look racy, because the code
-	 * below this point, which inserts a station with sleeping,
+	 * in this function, which inserts a station with sleeping,
 	 * unlocks the sta_lock between checking existence in the
 	 * hash table and inserting into it.
 	 *
 	 * However, it is not racy against itself because it keeps
-	 * the mutex locked. It still seems to race against the
-	 * above code that atomically inserts the station... That,
-	 * however, is not true because the above code can only
-	 * be invoked for IBSS interfaces, and the below code will
-	 * not be -- and the two do not race against each other as
-	 * the hash table also keys off the interface.
+	 * the mutex locked.
 	 */
 
-	might_sleep();
-
-	mutex_lock(&local->sta_mtx);
-
 	spin_lock_irqsave(&local->sta_lock, flags);
-	/* check if STA exists already */
-	if (sta_info_get_bss(sdata, sta->sta.addr)) {
-		spin_unlock_irqrestore(&local->sta_lock, flags);
-		mutex_unlock(&local->sta_mtx);
-		rcu_read_lock();
-		err = -EEXIST;
-		goto out_free;
+	/*
+	 * check if STA exists already.
+	 * only accept a scenario of a second call to sta_info_insert_non_ibss
+	 * with a dummy station entry that was inserted earlier
+	 * in that case - assume that the dummy station flag should
+	 * be removed.
+	 */
+	exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
+	if (exist_sta) {
+		if (exist_sta == sta && sta->dummy) {
+			dummy_reinsert = true;
+		} else {
+			spin_unlock_irqrestore(&local->sta_lock, flags);
+			mutex_unlock(&local->sta_mtx);
+			rcu_read_lock();
+			return -EEXIST;
+		}
 	}
 
 	spin_unlock_irqrestore(&local->sta_lock, flags);
 
-	err = sta_info_finish_insert(sta, false);
+	err = sta_info_finish_insert(sta, false, dummy_reinsert);
 	if (err) {
 		mutex_unlock(&local->sta_mtx);
 		rcu_read_lock();
-		goto out_free;
+		return err;
 	}
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
+	wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
+		    sta->dummy ? "dummy " : "", sta->sta.addr);
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
 	/* move reference to rcu-protected */
@@ -477,6 +555,51 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 	mesh_accept_plinks_update(sdata);
 
 	return 0;
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+{
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	int err = 0;
+
+	err = sta_info_insert_check(sta);
+	if (err) {
+		rcu_read_lock();
+		goto out_free;
+	}
+
+	/*
+	 * In ad-hoc mode, we sometimes need to insert stations
+	 * from tasklet context from the RX path. To avoid races,
+	 * always do so in that case -- see the comment below.
+	 */
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		err = sta_info_insert_ibss(sta);
+		if (err)
+			goto out_free;
+
+		return 0;
+	}
+
+	/*
+	 * It might seem that the function called below is in a race against
+	 * the function call above that atomically inserts the station... That,
+	 * however, is not true because the above code can only
+	 * be invoked for IBSS interfaces, and the below code will
+	 * not be -- and the two do not race against each other as
+	 * the hash table also keys off the interface.
+	 */
+
+	might_sleep();
+
+	mutex_lock(&local->sta_mtx);
+
+	err = sta_info_insert_non_ibss(sta);
+	if (err)
+		goto out_free;
+
+	return 0;
  out_free:
 	BUG_ON(!err);
 	__sta_info_free(local, sta);
@@ -492,6 +615,25 @@ int sta_info_insert(struct sta_info *sta)
 	return err;
 }
 
+/* Caller must hold sta->local->sta_mtx */
+int sta_info_reinsert(struct sta_info *sta)
+{
+	struct ieee80211_local *local = sta->local;
+	int err = 0;
+
+	err = sta_info_insert_check(sta);
+	if (err) {
+		mutex_unlock(&local->sta_mtx);
+		return err;
+	}
+
+	might_sleep();
+
+	err = sta_info_insert_non_ibss(sta);
+	rcu_read_unlock();
+	return err;
+}
+
 static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
 {
 	/*
@@ -510,64 +652,93 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
 	bss->tim[aid / 8] &= ~(1 << (aid % 8));
 }
 
-static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
-				   struct sta_info *sta)
+static unsigned long ieee80211_tids_for_ac(int ac)
 {
-	BUG_ON(!bss);
-
-	__bss_tim_set(bss, sta->sta.aid);
-
-	if (sta->local->ops->set_tim) {
-		sta->local->tim_in_locked_section = true;
-		drv_set_tim(sta->local, &sta->sta, true);
-		sta->local->tim_in_locked_section = false;
+	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
+	switch (ac) {
+	case IEEE80211_AC_VO:
+		return BIT(6) | BIT(7);
+	case IEEE80211_AC_VI:
+		return BIT(4) | BIT(5);
+	case IEEE80211_AC_BE:
+		return BIT(0) | BIT(3);
+	case IEEE80211_AC_BK:
+		return BIT(1) | BIT(2);
+	default:
+		WARN_ON(1);
+		return 0;
 	}
 }
 
-void sta_info_set_tim_bit(struct sta_info *sta)
+void sta_info_recalc_tim(struct sta_info *sta)
 {
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_if_ap *bss = sta->sdata->bss;
 	unsigned long flags;
+	bool indicate_tim = false;
+	u8 ignore_for_tim = sta->sta.uapsd_queues;
+	int ac;
 
-	BUG_ON(!sta->sdata->bss);
+	if (WARN_ON_ONCE(!sta->sdata->bss))
+		return;
 
-	spin_lock_irqsave(&sta->local->sta_lock, flags);
-	__sta_info_set_tim_bit(sta->sdata->bss, sta);
-	spin_unlock_irqrestore(&sta->local->sta_lock, flags);
-}
+	/* No need to do anything if the driver does all */
+	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+		return;
 
-static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
-				     struct sta_info *sta)
-{
-	BUG_ON(!bss);
+	if (sta->dead)
+		goto done;
+
+	/*
+	 * If all ACs are delivery-enabled then we should build
+	 * the TIM bit for all ACs anyway; if only some are then
+	 * we ignore those and build the TIM bit using only the
+	 * non-enabled ones.
+	 */
+	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
+		ignore_for_tim = 0;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		unsigned long tids;
 
-	__bss_tim_clear(bss, sta->sta.aid);
+		if (ignore_for_tim & BIT(ac))
+			continue;
+
+		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
+				!skb_queue_empty(&sta->ps_tx_buf[ac]);
+		if (indicate_tim)
+			break;
 
-	if (sta->local->ops->set_tim) {
-		sta->local->tim_in_locked_section = true;
-		drv_set_tim(sta->local, &sta->sta, false);
-		sta->local->tim_in_locked_section = false;
+		tids = ieee80211_tids_for_ac(ac);
+
+		indicate_tim |=
+			sta->driver_buffered_tids & tids;
 	}
-}
 
-void sta_info_clear_tim_bit(struct sta_info *sta)
-{
-	unsigned long flags;
+ done:
+	spin_lock_irqsave(&local->sta_lock, flags);
 
-	BUG_ON(!sta->sdata->bss);
+	if (indicate_tim)
+		__bss_tim_set(bss, sta->sta.aid);
+	else
+		__bss_tim_clear(bss, sta->sta.aid);
 
-	spin_lock_irqsave(&sta->local->sta_lock, flags);
-	__sta_info_clear_tim_bit(sta->sdata->bss, sta);
-	spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+	if (local->ops->set_tim) {
+		local->tim_in_locked_section = true;
+		drv_set_tim(local, &sta->sta, indicate_tim);
+		local->tim_in_locked_section = false;
+	}
+
+	spin_unlock_irqrestore(&local->sta_lock, flags);
 }
 
-static int sta_info_buffer_expired(struct sta_info *sta,
-				   struct sk_buff *skb)
+static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info;
 	int timeout;
 
 	if (!skb)
-		return 0;
+		return false;
 
 	info = IEEE80211_SKB_CB(skb);
 
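
For context, the bitmap arithmetic behind __bss_tim_set() and __bss_tim_clear() above is one bit per association ID: byte aid / 8, bit aid % 8. sta_info_recalc_tim() reduces "does this station have any deliverable traffic" to exactly one such bit. A runnable userspace sketch of the same math:

	#include <stdint.h>
	#include <stdio.h>

	static void tim_set(uint8_t *tim, unsigned int aid)
	{
		tim[aid / 8] |= 1 << (aid % 8);
	}

	static void tim_clear(uint8_t *tim, unsigned int aid)
	{
		tim[aid / 8] &= ~(1 << (aid % 8));
	}

	int main(void)
	{
		uint8_t tim[256 / 8] = { 0 };

		tim_set(tim, 42);
		printf("byte 5 after set aid=42: 0x%02x\n", tim[5]);	/* bit 2 -> 0x04 */
		tim_clear(tim, 42);
		printf("byte 5 after clear: 0x%02x\n", tim[5]);
		return 0;
	}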
@@ -581,24 +752,59 @@ static int sta_info_buffer_expired(struct sta_info *sta,
 }
 
 
-static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
-					     struct sta_info *sta)
+static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
+						struct sta_info *sta, int ac)
 {
 	unsigned long flags;
 	struct sk_buff *skb;
 
-	if (skb_queue_empty(&sta->ps_tx_buf))
-		return false;
+	/*
+	 * First check for frames that should expire on the filtered
+	 * queue. Frames here were rejected by the driver and are on
+	 * a separate queue to avoid reordering with normal PS-buffered
+	 * frames. They also aren't accounted for right now in the
+	 * total_ps_buffered counter.
+	 */
+	for (;;) {
+		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+		skb = skb_peek(&sta->tx_filtered[ac]);
+		if (sta_info_buffer_expired(sta, skb))
+			skb = __skb_dequeue(&sta->tx_filtered[ac]);
+		else
+			skb = NULL;
+		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
 
+		/*
+		 * Frames are queued in order, so if this one
+		 * hasn't expired yet we can stop testing. If
+		 * we actually reached the end of the queue we
+		 * also need to stop, of course.
+		 */
+		if (!skb)
+			break;
+		dev_kfree_skb(skb);
+	}
+
+	/*
+	 * Now also check the normal PS-buffered queue, this will
+	 * only find something if the filtered queue was emptied
+	 * since the filtered frames are all before the normal PS
+	 * buffered frames.
+	 */
 	for (;;) {
-		spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
-		skb = skb_peek(&sta->ps_tx_buf);
+		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+		skb = skb_peek(&sta->ps_tx_buf[ac]);
 		if (sta_info_buffer_expired(sta, skb))
-			skb = __skb_dequeue(&sta->ps_tx_buf);
+			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
 		else
 			skb = NULL;
-		spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
+		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
 
+		/*
+		 * frames are queued in order, so if this one
+		 * hasn't expired yet (or we reached the end of
+		 * the queue) we can stop testing
+		 */
 		if (!skb)
 			break;
 
@@ -608,22 +814,47 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
 		       sta->sta.addr);
 #endif
 		dev_kfree_skb(skb);
-
-		if (skb_queue_empty(&sta->ps_tx_buf) &&
-		    !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
-			sta_info_clear_tim_bit(sta);
 	}
 
-	return true;
+	/*
+	 * Finally, recalculate the TIM bit for this station -- it might
+	 * now be clear because the station was too slow to retrieve its
+	 * frames.
+	 */
+	sta_info_recalc_tim(sta);
+
+	/*
+	 * Return whether there are any frames still buffered, this is
+	 * used to check whether the cleanup timer still needs to run,
+	 * if there are no frames we don't need to rearm the timer.
+	 */
+	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
+		 skb_queue_empty(&sta->tx_filtered[ac]));
+}
+
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+					     struct sta_info *sta)
+{
+	bool have_buffered = false;
+	int ac;
+
+	/* This is only necessary for stations on BSS interfaces */
+	if (!sta->sdata->bss)
+		return false;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		have_buffered |=
+			sta_info_cleanup_expire_buffered_ac(local, sta, ac);
+
+	return have_buffered;
 }
 
 static int __must_check __sta_info_destroy(struct sta_info *sta)
 {
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
-	struct sk_buff *skb;
 	unsigned long flags;
-	int ret, i;
+	int ret, i, ac;
 
 	might_sleep();
 
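
The expiry loops above rely on frames being queued in arrival order, so the scan can stop at the first unexpired head instead of walking the whole queue. A simplified, runnable sketch of the peek-then-dequeue idiom (a singly linked list stands in for sk_buff_head, a plain counter for jiffies):

	#include <stdio.h>

	struct frame { unsigned long ts; struct frame *next; };

	static unsigned long now = 100, timeout = 10;

	static struct frame *expire_head(struct frame **q)
	{
		struct frame *head = *q;

		if (!head || now - head->ts < timeout)
			return NULL;	/* head not expired: stop scanning */
		*q = head->next;
		return head;
	}

	int main(void)
	{
		struct frame f2 = { 95, NULL }, f1 = { 80, &f2 };
		struct frame *q = &f1, *skb;

		while ((skb = expire_head(&q)))
			printf("expired frame ts=%lu\n", skb->ts);
		printf("queue head is now ts=%lu\n", q ? q->ts : 0);
		return 0;
	}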
@@ -639,7 +870,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	 * sessions -- block that to make sure the tear-down
 	 * will be sufficient.
 	 */
-	set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 	ieee80211_sta_tear_down_BA_sessions(sta, true);
 
 	spin_lock_irqsave(&local->sta_lock, flags);
@@ -660,19 +891,22 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 
 	sta->dead = true;
 
-	if (test_and_clear_sta_flags(sta,
-				WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+	    test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
 		BUG_ON(!sdata->bss);
 
+		clear_sta_flag(sta, WLAN_STA_PS_STA);
+		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
 		atomic_dec(&sdata->bss->num_sta_ps);
-		sta_info_clear_tim_bit(sta);
+		sta_info_recalc_tim(sta);
 	}
 
 	local->num_sta--;
 	local->sta_generation++;
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-		rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
 
 	if (sta->uploaded) {
 		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -691,6 +925,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	 */
 	synchronize_rcu();
 
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+		__skb_queue_purge(&sta->ps_tx_buf[ac]);
+		__skb_queue_purge(&sta->tx_filtered[ac]);
+	}
+
 #ifdef CONFIG_MAC80211_MESH
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_accept_plinks_update(sdata);
@@ -713,14 +953,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	}
 #endif
 
-	while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
-		local->total_ps_buffered--;
-		dev_kfree_skb_any(skb);
-	}
-
-	while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
-		dev_kfree_skb_any(skb);
-
 	__sta_info_free(local, sta);
 
 	return 0;
@@ -732,7 +964,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	int ret;
 
 	mutex_lock(&sdata->local->sta_mtx);
-	sta = sta_info_get(sdata, addr);
+	sta = sta_info_get_rx(sdata, addr);
 	ret = __sta_info_destroy(sta);
 	mutex_unlock(&sdata->local->sta_mtx);
 
@@ -746,7 +978,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
 	int ret;
 
 	mutex_lock(&sdata->local->sta_mtx);
-	sta = sta_info_get_bss(sdata, addr);
+	sta = sta_info_get_bss_rx(sdata, addr);
 	ret = __sta_info_destroy(sta);
 	mutex_unlock(&sdata->local->sta_mtx);
 
@@ -886,7 +1118,8 @@ static void clear_sta_ps_flags(void *_sta)
 {
 	struct sta_info *sta = _sta;
 
-	clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
+	clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+	clear_sta_flag(sta, WLAN_STA_PS_STA);
 }
 
 /* powersave support code */
@@ -894,88 +1127,341 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
894{ 1127{
895 struct ieee80211_sub_if_data *sdata = sta->sdata; 1128 struct ieee80211_sub_if_data *sdata = sta->sdata;
896 struct ieee80211_local *local = sdata->local; 1129 struct ieee80211_local *local = sdata->local;
897 int sent, buffered; 1130 struct sk_buff_head pending;
1131 int filtered = 0, buffered = 0, ac;
1132
1133 clear_sta_flag(sta, WLAN_STA_SP);
1134
1135 BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
1136 sta->driver_buffered_tids = 0;
898 1137
899 clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
900 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1138 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
901 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1139 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
902 1140
903 if (!skb_queue_empty(&sta->ps_tx_buf)) 1141 skb_queue_head_init(&pending);
904 sta_info_clear_tim_bit(sta);
905 1142
906 /* Send all buffered frames to the station */ 1143 /* Send all buffered frames to the station */
907 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); 1144 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
908 buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf, 1145 int count = skb_queue_len(&pending), tmp;
909 clear_sta_ps_flags, sta); 1146
910 sent += buffered; 1147 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
1148 tmp = skb_queue_len(&pending);
1149 filtered += tmp - count;
1150 count = tmp;
1151
1152 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
1153 tmp = skb_queue_len(&pending);
1154 buffered += tmp - count;
1155 }
1156
1157 ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
1158
911 local->total_ps_buffered -= buffered; 1159 local->total_ps_buffered -= buffered;
912 1160
1161 sta_info_recalc_tim(sta);
1162
913#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1163#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
914 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 1164 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
915 "since STA not sleeping anymore\n", sdata->name, 1165 "since STA not sleeping anymore\n", sdata->name,
916 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 1166 sta->sta.addr, sta->sta.aid, filtered, buffered);
917#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1167#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
918} 1168}
919 1169
920void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 1170static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1171 struct sta_info *sta, int tid,
1172 enum ieee80211_frame_release_type reason)
921{ 1173{
922 struct ieee80211_sub_if_data *sdata = sta->sdata;
923 struct ieee80211_local *local = sdata->local; 1174 struct ieee80211_local *local = sdata->local;
1175 struct ieee80211_qos_hdr *nullfunc;
924 struct sk_buff *skb; 1176 struct sk_buff *skb;
925 int no_pending_pkts; 1177 int size = sizeof(*nullfunc);
1178 __le16 fc;
1179 bool qos = test_sta_flag(sta, WLAN_STA_WME);
1180 struct ieee80211_tx_info *info;
926 1181
927 skb = skb_dequeue(&sta->tx_filtered); 1182 if (qos) {
928 if (!skb) { 1183 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
929 skb = skb_dequeue(&sta->ps_tx_buf); 1184 IEEE80211_STYPE_QOS_NULLFUNC |
930 if (skb) 1185 IEEE80211_FCTL_FROMDS);
931 local->total_ps_buffered--; 1186 } else {
1187 size -= 2;
1188 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1189 IEEE80211_STYPE_NULLFUNC |
1190 IEEE80211_FCTL_FROMDS);
1191 }
1192
1193 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
1194 if (!skb)
1195 return;
1196
1197 skb_reserve(skb, local->hw.extra_tx_headroom);
1198
1199 nullfunc = (void *) skb_put(skb, size);
1200 nullfunc->frame_control = fc;
1201 nullfunc->duration_id = 0;
1202 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1203 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1204 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1205
1206 skb->priority = tid;
1207 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
1208 if (qos) {
1209 nullfunc->qos_ctrl = cpu_to_le16(tid);
1210
1211 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
1212 nullfunc->qos_ctrl |=
1213 cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
932 } 1214 }
933 no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
934 skb_queue_empty(&sta->ps_tx_buf);
935 1215
936 if (skb) { 1216 info = IEEE80211_SKB_CB(skb);
937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1217
938 struct ieee80211_hdr *hdr = 1218 /*
939 (struct ieee80211_hdr *) skb->data; 1219 * Tell TX path to send this frame even though the
1220 * STA may still remain is PS mode after this frame
1221 * exchange. Also set EOSP to indicate this packet
1222 * ends the poll/service period.
1223 */
1224 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE |
1225 IEEE80211_TX_STATUS_EOSP |
1226 IEEE80211_TX_CTL_REQ_TX_STATUS;
1227
1228 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
1229
1230 ieee80211_xmit(sdata, skb);
1231}
1232
1233static void
1234ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1235 int n_frames, u8 ignored_acs,
1236 enum ieee80211_frame_release_type reason)
1237{
1238 struct ieee80211_sub_if_data *sdata = sta->sdata;
1239 struct ieee80211_local *local = sdata->local;
1240 bool found = false;
1241 bool more_data = false;
1242 int ac;
1243 unsigned long driver_release_tids = 0;
1244 struct sk_buff_head frames;
1245
1246 /* Service or PS-Poll period starts */
1247 set_sta_flag(sta, WLAN_STA_SP);
1248
1249 __skb_queue_head_init(&frames);
1250
1251 /*
1252 * Get response frame(s) and more data bit for it.
1253 */
1254 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1255 unsigned long tids;
1256
1257 if (ignored_acs & BIT(ac))
1258 continue;
1259
1260 tids = ieee80211_tids_for_ac(ac);
1261
1262 if (!found) {
1263 driver_release_tids = sta->driver_buffered_tids & tids;
1264 if (driver_release_tids) {
1265 found = true;
1266 } else {
1267 struct sk_buff *skb;
1268
1269 while (n_frames > 0) {
1270 skb = skb_dequeue(&sta->tx_filtered[ac]);
1271 if (!skb) {
1272 skb = skb_dequeue(
1273 &sta->ps_tx_buf[ac]);
1274 if (skb)
1275 local->total_ps_buffered--;
1276 }
1277 if (!skb)
1278 break;
1279 n_frames--;
1280 found = true;
1281 __skb_queue_tail(&frames, skb);
1282 }
1283 }
1284
1285 /*
1286 * If the driver has data on more than one TID then
1287 * certainly there's more data if we release just a
1288 * single frame now (from a single TID).
1289 */
1290 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
1291 hweight16(driver_release_tids) > 1) {
1292 more_data = true;
1293 driver_release_tids =
1294 BIT(ffs(driver_release_tids) - 1);
1295 break;
1296 }
1297 }
1298
1299 if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1300 !skb_queue_empty(&sta->ps_tx_buf[ac])) {
1301 more_data = true;
1302 break;
1303 }
1304 }
1305
1306 if (!found) {
1307 int tid;
940 1308
941 /* 1309 /*
942 * Tell TX path to send this frame even though the STA may 1310 * For PS-Poll, this can only happen due to a race condition
943 * still remain is PS mode after this frame exchange. 1311 * when we set the TIM bit and the station notices it, but
1312 * before it can poll for the frame we expire it.
1313 *
1314 * For uAPSD, this is said in the standard (11.2.1.5 h):
1315 * At each unscheduled SP for a non-AP STA, the AP shall
1316 * attempt to transmit at least one MSDU or MMPDU, but no
1317 * more than the value specified in the Max SP Length field
1318 * in the QoS Capability element from delivery-enabled ACs,
1319 * that are destined for the non-AP STA.
1320 *
1321 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
944 */ 1322 */
945 info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
946 1323
947#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1324 /* This will evaluate to 1, 3, 5 or 7. */
948 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", 1325 tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
949 sta->sta.addr, sta->sta.aid,
950 skb_queue_len(&sta->ps_tx_buf));
951#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
952 1326
953 /* Use MoreData flag to indicate whether there are more 1327 ieee80211_send_null_response(sdata, sta, tid, reason);
954 * buffered frames for this STA */ 1328 return;
955 if (no_pending_pkts) 1329 }
956 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 1330
957 else 1331 if (!driver_release_tids) {
958 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1332 struct sk_buff_head pending;
1333 struct sk_buff *skb;
1334 int num = 0;
1335 u16 tids = 0;
1336
1337 skb_queue_head_init(&pending);
1338
1339 while ((skb = __skb_dequeue(&frames))) {
1340 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1341 struct ieee80211_hdr *hdr = (void *) skb->data;
1342 u8 *qoshdr = NULL;
1343
1344 num++;
1345
1346 /*
1347 * Tell TX path to send this frame even though the
1348 * STA may still remain is PS mode after this frame
1349 * exchange.
1350 */
1351 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE;
1352
1353 /*
1354 * Use MoreData flag to indicate whether there are
1355 * more buffered frames for this STA
1356 */
1357 if (!more_data)
1358 hdr->frame_control &=
1359 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1363
1364 if (ieee80211_is_data_qos(hdr->frame_control) ||
1365 ieee80211_is_qos_nullfunc(hdr->frame_control))
1366 qoshdr = ieee80211_get_qos_ctl(hdr);
1367
1368 /* set EOSP for the frame */
1369 if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
1370 qoshdr && skb_queue_empty(&frames))
1371 *qoshdr |= IEEE80211_QOS_CTL_EOSP;
1372
1373 info->flags |= IEEE80211_TX_STATUS_EOSP |
1374 IEEE80211_TX_CTL_REQ_TX_STATUS;
1375
1376 if (qoshdr)
1377 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
1378 else
1379 tids |= BIT(0);
1380
1381 __skb_queue_tail(&pending, skb);
1382 }
959 1383
960 ieee80211_add_pending_skb(local, skb); 1384 drv_allow_buffered_frames(local, sta, tids, num,
1385 reason, more_data);
961 1386
962 if (no_pending_pkts) 1387 ieee80211_add_pending_skbs(local, &pending);
963 sta_info_clear_tim_bit(sta); 1388
964#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1389 sta_info_recalc_tim(sta);
965 } else { 1390 } else {
966 /* 1391 /*
967 * FIXME: This can be the result of a race condition between 1392 * We need to release a frame that is buffered somewhere in the
968 * us expiring a frame and the station polling for it. 1393 * driver ... it'll have to handle that.
969 * Should we send it a null-func frame indicating we 1394 * Note that, as per the comment above, it'll also have to see
970 * have nothing buffered for it? 1395 * if there is more than just one frame on the specific TID that
1396 * we're releasing from, and it needs to set the more-data bit
1397 * accordingly if we tell it that there's no more data. If we do
1398 * tell it there's more data, then of course the more-data bit
1399 * needs to be set anyway.
1400 */
1401 drv_release_buffered_frames(local, sta, driver_release_tids,
1402 n_frames, reason, more_data);
1403
1404 /*
1405 * Note that we don't recalculate the TIM bit here as it would
1406 * most likely have no effect at all unless the driver told us
1407 * that the TID became empty before returning here from the
1408 * release function.
1409 * Either way, however, when the driver tells us that the TID
1410 * became empty we'll do the TIM recalculation.
971 */ 1411 */
972 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
973 "though there are no buffered frames for it\n",
974 sdata->name, sta->sta.addr);
975#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
976 } 1412 }
977} 1413}
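
The QoS-null TID computation above, tid = 7 - ((ffs(~ignored_acs) - 1) << 1), is worth a worked example. Assuming mac80211's AC numbering (IEEE80211_AC_VO = 0 through IEEE80211_AC_BK = 3), ffs(~ignored_acs) - 1 selects the highest-priority AC that is not ignored, and 7 - 2*ac is a TID that the 802.1d rules map back onto exactly that AC, hence the "1, 3, 5 or 7" comment. A standalone sketch:

#include <stdio.h>
#include <strings.h>

int main(void)
{
        unsigned int ignored_acs;

        /* the caller guarantees at least one usable AC, so stop at 7 */
        for (ignored_acs = 0; ignored_acs < 8; ignored_acs++) {
                int ac = ffs(~ignored_acs) - 1;  /* first non-ignored AC */
                int tid = 7 - (ac << 1);         /* VO->7 VI->5 BE->3 BK->1 */

                printf("ignored_acs=0x%x -> ac=%d tid=%d\n",
                       ignored_acs, ac, tid);
        }
        return 0;
}
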
978 1414
1415void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
1416{
1417 u8 ignore_for_response = sta->sta.uapsd_queues;
1418
1419 /*
1420 * If all ACs are delivery-enabled then we should reply
 1421 * from any of them; if only some are enabled we reply
1422 * only from the non-enabled ones.
1423 */
1424 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
1425 ignore_for_response = 0;
1426
1427 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
1428 IEEE80211_FRAME_RELEASE_PSPOLL);
1429}
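
The mask logic here reduces to one rule: uAPSD delivery-enabled ACs are excluded from a PS-Poll response, unless the station made all four ACs delivery-enabled, in which case the exclusion mask is cleared and any AC may answer the poll. A userspace sketch of just that rule:

#include <stdio.h>

#define NUM_ACS 4

int main(void)
{
        unsigned int uapsd_queues;

        for (uapsd_queues = 0; uapsd_queues <= 0xf; uapsd_queues++) {
                unsigned int ignore = uapsd_queues;

                /* all ACs delivery-enabled: reply from any of them */
                if (ignore == (1U << NUM_ACS) - 1)
                        ignore = 0;

                printf("uapsd=0x%x -> ignore=0x%x\n", uapsd_queues, ignore);
        }
        return 0;
}
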
1430
1431void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
1432{
1433 int n_frames = sta->sta.max_sp;
1434 u8 delivery_enabled = sta->sta.uapsd_queues;
1435
1436 /*
1437 * If we ever grow support for TSPEC this might happen if
1438 * the TSPEC update from hostapd comes in between a trigger
1439 * frame setting WLAN_STA_UAPSD in the RX path and this
1440 * actually getting called.
1441 */
1442 if (!delivery_enabled)
1443 return;
1444
1445 switch (sta->sta.max_sp) {
1446 case 1:
1447 n_frames = 2;
1448 break;
1449 case 2:
1450 n_frames = 4;
1451 break;
1452 case 3:
1453 n_frames = 6;
1454 break;
1455 case 0:
1456 /* XXX: what is a good value? */
1457 n_frames = 8;
1458 break;
1459 }
1460
1461 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
1462 IEEE80211_FRAME_RELEASE_UAPSD);
1463}
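
The switch above implements the Max SP Length encoding from the QoS Capability element: values 1, 2 and 3 allow at most 2, 4 and 6 frames per service period, while 0 means "all buffered frames", which this code caps at 8; as the XXX comment concedes, that cap is an implementation choice, not a value from the standard. The same mapping, table-driven:

#include <stdio.h>

static const int max_sp_to_frames[4] = { 8 /* 0 = "all", capped */, 2, 4, 6 };

int main(void)
{
        int max_sp;

        for (max_sp = 0; max_sp < 4; max_sp++)
                printf("max_sp=%d -> up to %d frames per SP\n",
                       max_sp, max_sp_to_frames[max_sp]);
        return 0;
}
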
1464
979void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 1465void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
980 struct ieee80211_sta *pubsta, bool block) 1466 struct ieee80211_sta *pubsta, bool block)
981{ 1467{
@@ -984,17 +1470,50 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
984 trace_api_sta_block_awake(sta->local, pubsta, block); 1470 trace_api_sta_block_awake(sta->local, pubsta, block);
985 1471
986 if (block) 1472 if (block)
987 set_sta_flags(sta, WLAN_STA_PS_DRIVER); 1473 set_sta_flag(sta, WLAN_STA_PS_DRIVER);
988 else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) 1474 else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
989 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 1475 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
990} 1476}
991EXPORT_SYMBOL(ieee80211_sta_block_awake); 1477EXPORT_SYMBOL(ieee80211_sta_block_awake);
992 1478
993void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta) 1479void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
994{ 1480{
995 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1481 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1482 struct ieee80211_local *local = sta->local;
1483 struct sk_buff *skb;
1484 struct skb_eosp_msg_data *data;
1485
1486 trace_api_eosp(local, pubsta);
1487
1488 skb = alloc_skb(0, GFP_ATOMIC);
1489 if (!skb) {
1490 /* too bad ... but race is better than loss */
1491 clear_sta_flag(sta, WLAN_STA_SP);
1492 return;
1493 }
1494
1495 data = (void *)skb->cb;
1496 memcpy(data->sta, pubsta->addr, ETH_ALEN);
1497 memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
1498 skb->pkt_type = IEEE80211_EOSP_MSG;
1499 skb_queue_tail(&local->skb_queue, skb);
1500 tasklet_schedule(&local->tasklet);
1501}
1502EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe);
1503
1504void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1505 u8 tid, bool buffered)
1506{
1507 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1508
1509 if (WARN_ON(tid >= STA_TID_NUM))
1510 return;
1511
1512 if (buffered)
1513 set_bit(tid, &sta->driver_buffered_tids);
1514 else
1515 clear_bit(tid, &sta->driver_buffered_tids);
996 1516
997 set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF); 1517 sta_info_recalc_tim(sta);
998 sta_info_set_tim_bit(sta);
999} 1518}
1000EXPORT_SYMBOL(ieee80211_sta_set_tim); 1519EXPORT_SYMBOL(ieee80211_sta_set_buffered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 28beb78e601e..8c8ce05ad26f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -19,7 +19,8 @@
19/** 19/**
 20 * enum ieee80211_sta_info_flags - Station flags 20 * enum ieee80211_sta_info_flags - Station flags
21 * 21 *
22 * These flags are used with &struct sta_info's @flags member. 22 * These flags are used with &struct sta_info's @flags member, but
23 * only indirectly with set_sta_flag() and friends.
23 * 24 *
24 * @WLAN_STA_AUTH: Station is authenticated. 25 * @WLAN_STA_AUTH: Station is authenticated.
25 * @WLAN_STA_ASSOC: Station is associated. 26 * @WLAN_STA_ASSOC: Station is associated.
@@ -43,24 +44,33 @@
43 * be in the queues 44 * be in the queues
44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 45 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
45 * station in power-save mode, reply when the driver unblocks. 46 * station in power-save mode, reply when the driver unblocks.
46 * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal 47 * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
47 * buffers. Automatically cleared on station wake-up. 48 * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
49 * packets. This means the link is enabled.
50 * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
51 * keeping station in power-save mode, reply when the driver
52 * unblocks the station.
53 * @WLAN_STA_SP: Station is in a service period, so don't try to
54 * reply to other uAPSD trigger frames or PS-Poll.
48 */ 55 */
49enum ieee80211_sta_info_flags { 56enum ieee80211_sta_info_flags {
50 WLAN_STA_AUTH = 1<<0, 57 WLAN_STA_AUTH,
51 WLAN_STA_ASSOC = 1<<1, 58 WLAN_STA_ASSOC,
52 WLAN_STA_PS_STA = 1<<2, 59 WLAN_STA_PS_STA,
53 WLAN_STA_AUTHORIZED = 1<<3, 60 WLAN_STA_AUTHORIZED,
54 WLAN_STA_SHORT_PREAMBLE = 1<<4, 61 WLAN_STA_SHORT_PREAMBLE,
55 WLAN_STA_ASSOC_AP = 1<<5, 62 WLAN_STA_ASSOC_AP,
56 WLAN_STA_WME = 1<<6, 63 WLAN_STA_WME,
57 WLAN_STA_WDS = 1<<7, 64 WLAN_STA_WDS,
58 WLAN_STA_CLEAR_PS_FILT = 1<<9, 65 WLAN_STA_CLEAR_PS_FILT,
59 WLAN_STA_MFP = 1<<10, 66 WLAN_STA_MFP,
60 WLAN_STA_BLOCK_BA = 1<<11, 67 WLAN_STA_BLOCK_BA,
61 WLAN_STA_PS_DRIVER = 1<<12, 68 WLAN_STA_PS_DRIVER,
62 WLAN_STA_PSPOLL = 1<<13, 69 WLAN_STA_PSPOLL,
63 WLAN_STA_PS_DRIVER_BUF = 1<<14, 70 WLAN_STA_TDLS_PEER,
71 WLAN_STA_TDLS_PEER_AUTH,
72 WLAN_STA_UAPSD,
73 WLAN_STA_SP,
64}; 74};
65 75
66#define STA_TID_NUM 16 76#define STA_TID_NUM 16
@@ -86,6 +96,8 @@ enum ieee80211_sta_info_flags {
86 * @stop_initiator: initiator of a session stop 96 * @stop_initiator: initiator of a session stop
87 * @tx_stop: TX DelBA frame when stopping 97 * @tx_stop: TX DelBA frame when stopping
88 * @buf_size: reorder buffer size at receiver 98 * @buf_size: reorder buffer size at receiver
99 * @failed_bar_ssn: ssn of the last failed BAR tx attempt
100 * @bar_pending: BAR needs to be re-sent
89 * 101 *
90 * This structure's lifetime is managed by RCU, assignments to 102 * This structure's lifetime is managed by RCU, assignments to
91 * the array holding it must hold the aggregation mutex. 103 * the array holding it must hold the aggregation mutex.
@@ -106,6 +118,9 @@ struct tid_ampdu_tx {
106 u8 stop_initiator; 118 u8 stop_initiator;
107 bool tx_stop; 119 bool tx_stop;
108 u8 buf_size; 120 u8 buf_size;
121
122 u16 failed_bar_ssn;
123 bool bar_pending;
109}; 124};
110 125
111/** 126/**
@@ -198,15 +213,16 @@ struct sta_ampdu_mlme {
198 * @last_rx_rate_flag: rx status flag of the last data packet 213 * @last_rx_rate_flag: rx status flag of the last data packet
199 * @lock: used for locking all fields that require locking, see comments 214 * @lock: used for locking all fields that require locking, see comments
200 * in the header file. 215 * in the header file.
201 * @flaglock: spinlock for flags accesses
202 * @drv_unblock_wk: used for driver PS unblocking 216 * @drv_unblock_wk: used for driver PS unblocking
203 * @listen_interval: listen interval of this station, when we're acting as AP 217 * @listen_interval: listen interval of this station, when we're acting as AP
204 * @flags: STA flags, see &enum ieee80211_sta_info_flags 218 * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
205 * @ps_tx_buf: buffer of frames to transmit to this station 219 * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
206 * when it leaves power saving state 220 * when it leaves power saving state or polls
207 * @tx_filtered: buffer of frames we already tried to transmit 221 * @tx_filtered: buffers (per AC) of frames we already tried to
208 * but were filtered by hardware due to STA having entered 222 * transmit but were filtered by hardware due to STA having
209 * power saving state 223 * entered power saving state, these are also delivered to
224 * the station when it leaves powersave or polls for frames
225 * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
210 * @rx_packets: Number of MSDUs received from this STA 226 * @rx_packets: Number of MSDUs received from this STA
211 * @rx_bytes: Number of bytes received from this STA 227 * @rx_bytes: Number of bytes received from this STA
212 * @wep_weak_iv_count: number of weak WEP IVs received from this station 228 * @wep_weak_iv_count: number of weak WEP IVs received from this station
@@ -238,10 +254,12 @@ struct sta_ampdu_mlme {
238 * @plink_timer: peer link watch timer 254 * @plink_timer: peer link watch timer
239 * @plink_timer_was_running: used by suspend/resume to restore timers 255 * @plink_timer_was_running: used by suspend/resume to restore timers
240 * @debugfs: debug filesystem info 256 * @debugfs: debug filesystem info
241 * @sta: station information we share with the driver
242 * @dead: set to true when sta is unlinked 257 * @dead: set to true when sta is unlinked
243 * @uploaded: set to true when sta is uploaded to the driver 258 * @uploaded: set to true when sta is uploaded to the driver
244 * @lost_packets: number of consecutive lost packets 259 * @lost_packets: number of consecutive lost packets
260 * @dummy: indicate a dummy station created for receiving
261 * EAP frames before association
262 * @sta: station information we share with the driver
245 */ 263 */
246struct sta_info { 264struct sta_info {
247 /* General information, mostly static */ 265 /* General information, mostly static */
@@ -254,7 +272,6 @@ struct sta_info {
254 struct rate_control_ref *rate_ctrl; 272 struct rate_control_ref *rate_ctrl;
255 void *rate_ctrl_priv; 273 void *rate_ctrl_priv;
256 spinlock_t lock; 274 spinlock_t lock;
257 spinlock_t flaglock;
258 275
259 struct work_struct drv_unblock_wk; 276 struct work_struct drv_unblock_wk;
260 277
@@ -264,18 +281,16 @@ struct sta_info {
264 281
265 bool uploaded; 282 bool uploaded;
266 283
267 /* 284 /* use the accessors defined below */
268 * frequently updated, locked with own spinlock (flaglock), 285 unsigned long _flags;
269 * use the accessors defined below
270 */
271 u32 flags;
272 286
273 /* 287 /*
274 * STA powersave frame queues, no more than the internal 288 * STA powersave frame queues, no more than the internal
275 * locking required. 289 * locking required.
276 */ 290 */
277 struct sk_buff_head ps_tx_buf; 291 struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
278 struct sk_buff_head tx_filtered; 292 struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
293 unsigned long driver_buffered_tids;
279 294
280 /* Updated from RX path only, no locking requirements */ 295 /* Updated from RX path only, no locking requirements */
281 unsigned long rx_packets, rx_bytes; 296 unsigned long rx_packets, rx_bytes;
@@ -336,6 +351,9 @@ struct sta_info {
336 351
337 unsigned int lost_packets; 352 unsigned int lost_packets;
338 353
354 /* should be right in front of sta to be in the same cache line */
355 bool dummy;
356
339 /* keep last! */ 357 /* keep last! */
340 struct ieee80211_sta sta; 358 struct ieee80211_sta sta;
341}; 359};
@@ -348,60 +366,28 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
348 return NL80211_PLINK_LISTEN; 366 return NL80211_PLINK_LISTEN;
349} 367}
350 368
351static inline void set_sta_flags(struct sta_info *sta, const u32 flags) 369static inline void set_sta_flag(struct sta_info *sta,
370 enum ieee80211_sta_info_flags flag)
352{ 371{
353 unsigned long irqfl; 372 set_bit(flag, &sta->_flags);
354
355 spin_lock_irqsave(&sta->flaglock, irqfl);
356 sta->flags |= flags;
357 spin_unlock_irqrestore(&sta->flaglock, irqfl);
358} 373}
359 374
360static inline void clear_sta_flags(struct sta_info *sta, const u32 flags) 375static inline void clear_sta_flag(struct sta_info *sta,
376 enum ieee80211_sta_info_flags flag)
361{ 377{
362 unsigned long irqfl; 378 clear_bit(flag, &sta->_flags);
363
364 spin_lock_irqsave(&sta->flaglock, irqfl);
365 sta->flags &= ~flags;
366 spin_unlock_irqrestore(&sta->flaglock, irqfl);
367} 379}
368 380
369static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) 381static inline int test_sta_flag(struct sta_info *sta,
382 enum ieee80211_sta_info_flags flag)
370{ 383{
371 u32 ret; 384 return test_bit(flag, &sta->_flags);
372 unsigned long irqfl;
373
374 spin_lock_irqsave(&sta->flaglock, irqfl);
375 ret = sta->flags & flags;
376 spin_unlock_irqrestore(&sta->flaglock, irqfl);
377
378 return ret;
379}
380
381static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
382 const u32 flags)
383{
384 u32 ret;
385 unsigned long irqfl;
386
387 spin_lock_irqsave(&sta->flaglock, irqfl);
388 ret = sta->flags & flags;
389 sta->flags &= ~flags;
390 spin_unlock_irqrestore(&sta->flaglock, irqfl);
391
392 return ret;
393} 385}
394 386
395static inline u32 get_sta_flags(struct sta_info *sta) 387static inline int test_and_clear_sta_flag(struct sta_info *sta,
388 enum ieee80211_sta_info_flags flag)
396{ 389{
397 u32 ret; 390 return test_and_clear_bit(flag, &sta->_flags);
398 unsigned long irqfl;
399
400 spin_lock_irqsave(&sta->flaglock, irqfl);
401 ret = sta->flags;
402 spin_unlock_irqrestore(&sta->flaglock, irqfl);
403
404 return ret;
405} 391}
406 392
407void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, 393void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
@@ -419,8 +405,8 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
419#define STA_HASH(sta) (sta[5]) 405#define STA_HASH(sta) (sta[5])
420 406
421 407
422/* Maximum number of frames to buffer per power saving station */ 408/* Maximum number of frames to buffer per power saving station per AC */
423#define STA_MAX_TX_BUFFER 128 409#define STA_MAX_TX_BUFFER 64
424 410
425/* Minimum buffered frame expiry time. If STA uses listen interval that is 411/* Minimum buffered frame expiry time. If STA uses listen interval that is
426 * smaller than this value, the minimum value here is used instead. */ 412 * smaller than this value, the minimum value here is used instead. */
@@ -436,9 +422,15 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
436struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 422struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
437 const u8 *addr); 423 const u8 *addr);
438 424
425struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
426 const u8 *addr);
427
439struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 428struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
440 const u8 *addr); 429 const u8 *addr);
441 430
431struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
432 const u8 *addr);
433
442static inline 434static inline
443void for_each_sta_info_type_check(struct ieee80211_local *local, 435void for_each_sta_info_type_check(struct ieee80211_local *local,
444 const u8 *addr, 436 const u8 *addr,
@@ -459,6 +451,22 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
459 _sta = nxt, \ 451 _sta = nxt, \
460 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ 452 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
461 ) \ 453 ) \
454 /* run code only if address matches and it's not a dummy sta */ \
455 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
456 !_sta->dummy)
457
458#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
459 for ( /* initialise loop */ \
460 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
461 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
462 /* typecheck */ \
463 for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
464 /* continue condition */ \
465 _sta; \
466 /* advance loop */ \
467 _sta = nxt, \
468 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
469 ) \
462 /* compare address and run code only if it matches */ \ 470 /* compare address and run code only if it matches */ \
463 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0) 471 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
464 472
@@ -484,14 +492,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
484int sta_info_insert(struct sta_info *sta); 492int sta_info_insert(struct sta_info *sta);
485int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); 493int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
486int sta_info_insert_atomic(struct sta_info *sta); 494int sta_info_insert_atomic(struct sta_info *sta);
495int sta_info_reinsert(struct sta_info *sta);
487 496
488int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, 497int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
489 const u8 *addr); 498 const u8 *addr);
490int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 499int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
491 const u8 *addr); 500 const u8 *addr);
492 501
493void sta_info_set_tim_bit(struct sta_info *sta); 502void sta_info_recalc_tim(struct sta_info *sta);
494void sta_info_clear_tim_bit(struct sta_info *sta);
495 503
496void sta_info_init(struct ieee80211_local *local); 504void sta_info_init(struct ieee80211_local *local);
497void sta_info_stop(struct ieee80211_local *local); 505void sta_info_stop(struct ieee80211_local *local);
@@ -502,5 +510,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
502 510
503void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); 511void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
504void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); 512void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
513void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
505 514
506#endif /* STA_INFO_H */ 515#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1658efaa2e8e..df643cedf9b9 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -14,6 +14,7 @@
14#include "rate.h" 14#include "rate.h"
15#include "mesh.h" 15#include "mesh.h"
16#include "led.h" 16#include "led.h"
17#include "wme.h"
17 18
18 19
19void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 20void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -43,6 +44,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
43 struct sk_buff *skb) 44 struct sk_buff *skb)
44{ 45{
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 46 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
47 struct ieee80211_hdr *hdr = (void *)skb->data;
48 int ac;
46 49
47 /* 50 /*
48 * This skb 'survived' a round-trip through the driver, and 51 * This skb 'survived' a round-trip through the driver, and
@@ -63,11 +66,37 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
63 sta->tx_filtered_count++; 66 sta->tx_filtered_count++;
64 67
65 /* 68 /*
69 * Clear more-data bit on filtered frames, it might be set
70 * but later frames might time out so it might have to be
71 * clear again ... It's all rather unlikely (this frame
72 * should time out first, right?) but let's not confuse
73 * peers unnecessarily.
74 */
75 if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
76 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
77
78 if (ieee80211_is_data_qos(hdr->frame_control)) {
79 u8 *p = ieee80211_get_qos_ctl(hdr);
80 int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
81
82 /*
83 * Clear EOSP if set, this could happen e.g.
84 * if an absence period (us being a P2P GO)
85 * shortens the SP.
86 */
87 if (*p & IEEE80211_QOS_CTL_EOSP)
88 *p &= ~IEEE80211_QOS_CTL_EOSP;
89 ac = ieee802_1d_to_ac[tid & 7];
90 } else {
91 ac = IEEE80211_AC_BE;
92 }
93
94 /*
66 * Clear the TX filter mask for this STA when sending the next 95 * Clear the TX filter mask for this STA when sending the next
67 * packet. If the STA went to power save mode, this will happen 96 * packet. If the STA went to power save mode, this will happen
68 * when it wakes up for the next time. 97 * when it wakes up for the next time.
69 */ 98 */
70 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT); 99 set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
71 100
72 /* 101 /*
73 * This code races in the following way: 102 * This code races in the following way:
@@ -103,13 +132,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
103 * changes before calling TX status events if ordering can be 132 * changes before calling TX status events if ordering can be
104 * unknown. 133 * unknown.
105 */ 134 */
106 if (test_sta_flags(sta, WLAN_STA_PS_STA) && 135 if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
107 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 136 skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
108 skb_queue_tail(&sta->tx_filtered, skb); 137 skb_queue_tail(&sta->tx_filtered[ac], skb);
138 sta_info_recalc_tim(sta);
139
140 if (!timer_pending(&local->sta_cleanup))
141 mod_timer(&local->sta_cleanup,
142 round_jiffies(jiffies +
143 STA_INFO_CLEANUP_INTERVAL));
109 return; 144 return;
110 } 145 }
111 146
112 if (!test_sta_flags(sta, WLAN_STA_PS_STA) && 147 if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
113 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { 148 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
114 /* Software retry the packet once */ 149 /* Software retry the packet once */
115 info->flags |= IEEE80211_TX_INTFL_RETRIED; 150 info->flags |= IEEE80211_TX_INTFL_RETRIED;
@@ -121,18 +156,38 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
121 if (net_ratelimit()) 156 if (net_ratelimit())
122 wiphy_debug(local->hw.wiphy, 157 wiphy_debug(local->hw.wiphy,
123 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", 158 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
124 skb_queue_len(&sta->tx_filtered), 159 skb_queue_len(&sta->tx_filtered[ac]),
125 !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies); 160 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
126#endif 161#endif
127 dev_kfree_skb(skb); 162 dev_kfree_skb(skb);
128} 163}
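
The per-AC filtered queue is selected by pushing the frame's TID through ieee802_1d_to_ac[]. That table encodes the 802.1d user-priority rules (priorities 1 and 2 are background, 0 and 3 best effort, 4 and 5 video, 6 and 7 voice) under mac80211's numbering VO = 0, VI = 1, BE = 2, BK = 3. A sketch whose table mirrors cfg80211's, reproduced here on that assumption:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };

static const int tid_to_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

int main(void)
{
        int tid;

        /* only the low three bits of a TID select the AC */
        for (tid = 0; tid < 16; tid++)
                printf("tid %2d -> ac %d\n", tid, tid_to_ac[tid & 7]);
        return 0;
}
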
129 164
165static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
166{
167 struct tid_ampdu_tx *tid_tx;
168
169 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
170 if (!tid_tx || !tid_tx->bar_pending)
171 return;
172
173 tid_tx->bar_pending = false;
174 ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
175}
176
130static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) 177static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
131{ 178{
132 struct ieee80211_mgmt *mgmt = (void *) skb->data; 179 struct ieee80211_mgmt *mgmt = (void *) skb->data;
133 struct ieee80211_local *local = sta->local; 180 struct ieee80211_local *local = sta->local;
134 struct ieee80211_sub_if_data *sdata = sta->sdata; 181 struct ieee80211_sub_if_data *sdata = sta->sdata;
135 182
183 if (ieee80211_is_data_qos(mgmt->frame_control)) {
184 struct ieee80211_hdr *hdr = (void *) skb->data;
185 u8 *qc = ieee80211_get_qos_ctl(hdr);
186 u16 tid = qc[0] & 0xf;
187
188 ieee80211_check_pending_bar(sta, hdr->addr1, tid);
189 }
190
136 if (ieee80211_is_action(mgmt->frame_control) && 191 if (ieee80211_is_action(mgmt->frame_control) &&
137 sdata->vif.type == NL80211_IFTYPE_STATION && 192 sdata->vif.type == NL80211_IFTYPE_STATION &&
138 mgmt->u.action.category == WLAN_CATEGORY_HT && 193 mgmt->u.action.category == WLAN_CATEGORY_HT &&
@@ -161,6 +216,114 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
161 } 216 }
162} 217}
163 218
219static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
220{
221 struct tid_ampdu_tx *tid_tx;
222
223 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
224 if (!tid_tx)
225 return;
226
227 tid_tx->failed_bar_ssn = ssn;
228 tid_tx->bar_pending = true;
229}
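
Taken together, ieee80211_set_bar_pending() and ieee80211_check_pending_bar() form a small two-step state machine: a Block Ack Request that was not acked parks its start sequence number in the per-TID state, and the next acked QoS frame on the same TID triggers the resend. Stripped to its essentials (hypothetical names, no locking or RCU):

#include <stdbool.h>
#include <stdio.h>

struct tid_state {
        unsigned short failed_bar_ssn;
        bool bar_pending;
};

static void bar_failed(struct tid_state *t, unsigned short ssn)
{
        t->failed_bar_ssn = ssn;
        t->bar_pending = true;
}

static void frame_acked(struct tid_state *t)
{
        if (!t->bar_pending)
                return;
        t->bar_pending = false;
        printf("re-sending BAR, ssn=%u\n", t->failed_bar_ssn);
}

int main(void)
{
        struct tid_state t = { 0 };

        bar_failed(&t, 42);     /* BAR tx status: not acked */
        frame_acked(&t);        /* next acked frame on this TID resends */
        frame_acked(&t);        /* nothing pending any more */
        return 0;
}
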
230
231static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
232{
233 int len = sizeof(struct ieee80211_radiotap_header);
234
235 /* IEEE80211_RADIOTAP_RATE rate */
236 if (info->status.rates[0].idx >= 0 &&
237 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
238 len += 2;
239
240 /* IEEE80211_RADIOTAP_TX_FLAGS */
241 len += 2;
242
243 /* IEEE80211_RADIOTAP_DATA_RETRIES */
244 len += 1;
245
246 /* IEEE80211_TX_RC_MCS */
247 if (info->status.rates[0].idx >= 0 &&
248 info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
249 len += 3;
250
251 return len;
252}
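
Since the radiotap TX-status header is now variable length, this function and ieee80211_add_tx_radiotap_header() below have to agree field for field: an 8-byte base header, 2 bytes when a legacy RATE is present (1 byte of rate plus 1 byte of padding so the following TX_FLAGS stays 2-byte aligned), 2 bytes of TX_FLAGS, 1 byte of DATA_RETRIES, and 3 bytes of MCS for HT rates. The same accounting, standalone:

#include <stdbool.h>
#include <stdio.h>

static int radiotap_tx_len(bool have_legacy_rate, bool have_mcs)
{
        int len = 8;    /* sizeof(struct ieee80211_radiotap_header) */

        if (have_legacy_rate)
                len += 2;       /* RATE byte plus alignment padding */
        len += 2;               /* TX_FLAGS */
        len += 1;               /* DATA_RETRIES */
        if (have_mcs)
                len += 3;       /* MCS: known, flags, index */
        return len;
}

int main(void)
{
        /* prints: legacy rate: 13 bytes, HT rate: 14 bytes */
        printf("legacy rate: %d bytes, HT rate: %d bytes\n",
               radiotap_tx_len(true, false), radiotap_tx_len(false, true));
        return 0;
}
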
253
254static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
255 *sband, struct sk_buff *skb,
256 int retry_count, int rtap_len)
257{
258 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
259 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
260 struct ieee80211_radiotap_header *rthdr;
261 unsigned char *pos;
262 __le16 txflags;
263
264 rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
265
266 memset(rthdr, 0, rtap_len);
267 rthdr->it_len = cpu_to_le16(rtap_len);
268 rthdr->it_present =
269 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
270 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
271 pos = (unsigned char *)(rthdr + 1);
272
273 /*
274 * XXX: Once radiotap gets the bitmap reset thing the vendor
275 * extensions proposal contains, we can actually report
276 * the whole set of tries we did.
277 */
278
279 /* IEEE80211_RADIOTAP_RATE */
280 if (info->status.rates[0].idx >= 0 &&
281 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
282 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
283 *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
284 /* padding for tx flags */
285 pos += 2;
286 }
287
288 /* IEEE80211_RADIOTAP_TX_FLAGS */
289 txflags = 0;
290 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
291 !is_multicast_ether_addr(hdr->addr1))
292 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
293
294 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
295 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
296 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
297 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
298 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
299
300 put_unaligned_le16(txflags, pos);
301 pos += 2;
302
303 /* IEEE80211_RADIOTAP_DATA_RETRIES */
304 /* for now report the total retry_count */
305 *pos = retry_count;
306 pos++;
307
308 /* IEEE80211_TX_RC_MCS */
309 if (info->status.rates[0].idx >= 0 &&
310 info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
311 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
312 pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
313 IEEE80211_RADIOTAP_MCS_HAVE_GI |
314 IEEE80211_RADIOTAP_MCS_HAVE_BW;
315 if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
316 pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
317 if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
318 pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
319 if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
320 pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
321 pos[2] = info->status.rates[0].idx;
322 pos += 3;
323 }
324
325}
326
164/* 327/*
165 * Use a static threshold for now, best value to be determined 328 * Use a static threshold for now, best value to be determined
166 * by testing ... 329 * by testing ...
@@ -179,7 +342,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
179 u16 frag, type; 342 u16 frag, type;
180 __le16 fc; 343 __le16 fc;
181 struct ieee80211_supported_band *sband; 344 struct ieee80211_supported_band *sband;
182 struct ieee80211_tx_status_rtap_hdr *rthdr;
183 struct ieee80211_sub_if_data *sdata; 345 struct ieee80211_sub_if_data *sdata;
184 struct net_device *prev_dev = NULL; 346 struct net_device *prev_dev = NULL;
185 struct sta_info *sta, *tmp; 347 struct sta_info *sta, *tmp;
@@ -187,6 +349,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
187 int rates_idx = -1; 349 int rates_idx = -1;
188 bool send_to_cooked; 350 bool send_to_cooked;
189 bool acked; 351 bool acked;
352 struct ieee80211_bar *bar;
353 u16 tid;
354 int rtap_len;
190 355
191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 356 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
192 if (info->status.rates[i].idx < 0) { 357 if (info->status.rates[i].idx < 0) {
@@ -215,8 +380,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
215 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN)) 380 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
216 continue; 381 continue;
217 382
383 if (info->flags & IEEE80211_TX_STATUS_EOSP)
384 clear_sta_flag(sta, WLAN_STA_SP);
385
218 acked = !!(info->flags & IEEE80211_TX_STAT_ACK); 386 acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
219 if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) { 387 if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
220 /* 388 /*
221 * The STA is in power save mode, so assume 389 * The STA is in power save mode, so assume
222 * that this TX packet failed because of that. 390 * that this TX packet failed because of that.
@@ -239,10 +407,31 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
239 tid = qc[0] & 0xf; 407 tid = qc[0] & 0xf;
240 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) 408 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
241 & IEEE80211_SCTL_SEQ); 409 & IEEE80211_SCTL_SEQ);
242 ieee80211_send_bar(sta->sdata, hdr->addr1, 410 ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
243 tid, ssn); 411 tid, ssn);
244 } 412 }
245 413
414 if (!acked && ieee80211_is_back_req(fc)) {
415 u16 control;
416
417 /*
418 * BAR failed, store the last SSN and retry sending
419 * the BAR when the next unicast transmission on the
420 * same TID succeeds.
421 */
422 bar = (struct ieee80211_bar *) skb->data;
423 control = le16_to_cpu(bar->control);
424 if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
425 u16 ssn = le16_to_cpu(bar->start_seq_num);
426
427 tid = (control &
428 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
429 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
430
431 ieee80211_set_bar_pending(sta, tid, ssn);
432 }
433 }
434
246 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { 435 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
247 ieee80211_handle_filtered_frame(local, sta, skb); 436 ieee80211_handle_filtered_frame(local, sta, skb);
248 rcu_read_unlock(); 437 rcu_read_unlock();
@@ -336,7 +525,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
336 continue; 525 continue;
337 if (wk->offchan_tx.frame != skb) 526 if (wk->offchan_tx.frame != skb)
338 continue; 527 continue;
339 wk->offchan_tx.frame = NULL; 528 wk->offchan_tx.status = true;
340 break; 529 break;
341 } 530 }
342 rcu_read_unlock(); 531 rcu_read_unlock();
@@ -345,9 +534,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
345 local->hw_roc_skb_for_status = NULL; 534 local->hw_roc_skb_for_status = NULL;
346 } 535 }
347 536
348 if (cookie == local->hw_offchan_tx_cookie)
349 local->hw_offchan_tx_cookie = 0;
350
351 cfg80211_mgmt_tx_status( 537 cfg80211_mgmt_tx_status(
352 skb->dev, cookie, skb->data, skb->len, 538 skb->dev, cookie, skb->data, skb->len,
353 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 539 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
@@ -370,44 +556,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
370 } 556 }
371 557
372 /* send frame to monitor interfaces now */ 558 /* send frame to monitor interfaces now */
373 559 rtap_len = ieee80211_tx_radiotap_len(info);
374 if (skb_headroom(skb) < sizeof(*rthdr)) { 560 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
375 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n"); 561 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
376 dev_kfree_skb(skb); 562 dev_kfree_skb(skb);
377 return; 563 return;
378 } 564 }
379 565 ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
380 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
381 skb_push(skb, sizeof(*rthdr));
382
383 memset(rthdr, 0, sizeof(*rthdr));
384 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
385 rthdr->hdr.it_present =
386 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
387 (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
388 (1 << IEEE80211_RADIOTAP_RATE));
389
390 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
391 !is_multicast_ether_addr(hdr->addr1))
392 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
393
394 /*
395 * XXX: Once radiotap gets the bitmap reset thing the vendor
396 * extensions proposal contains, we can actually report
397 * the whole set of tries we did.
398 */
399 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
400 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
401 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
402 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
403 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
404 if (info->status.rates[0].idx >= 0 &&
405 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
406 rthdr->rate = sband->bitrates[
407 info->status.rates[0].idx].bitrate / 5;
408
409 /* for now report the total retry_count */
410 rthdr->data_retries = retry_count;
411 566
412 /* XXX: is this sufficient for BPF? */ 567 /* XXX: is this sufficient for BPF? */
413 skb_set_mac_header(skb, 0); 568 skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8cb0d2d0ac69..48bbb96d8edb 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -253,7 +253,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
253 253
254 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 254 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
255 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 255 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
256 u32 sta_flags; 256 bool assoc = false;
257 257
258 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) 258 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
259 return TX_CONTINUE; 259 return TX_CONTINUE;
@@ -284,10 +284,11 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
284 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 284 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
285 return TX_CONTINUE; 285 return TX_CONTINUE;
286 286
287 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; 287 if (tx->sta)
288 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
288 289
289 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 290 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
290 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 291 if (unlikely(!assoc &&
291 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 292 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
292 ieee80211_is_data(hdr->frame_control))) { 293 ieee80211_is_data(hdr->frame_control))) {
293#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 294#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -343,13 +344,22 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
343 total += skb_queue_len(&ap->ps_bc_buf); 344 total += skb_queue_len(&ap->ps_bc_buf);
344 } 345 }
345 346
347 /*
348 * Drop one frame from each station from the lowest-priority
349 * AC that has frames at all.
350 */
346 list_for_each_entry_rcu(sta, &local->sta_list, list) { 351 list_for_each_entry_rcu(sta, &local->sta_list, list) {
347 skb = skb_dequeue(&sta->ps_tx_buf); 352 int ac;
348 if (skb) { 353
349 purged++; 354 for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
350 dev_kfree_skb(skb); 355 skb = skb_dequeue(&sta->ps_tx_buf[ac]);
356 total += skb_queue_len(&sta->ps_tx_buf[ac]);
357 if (skb) {
358 purged++;
359 dev_kfree_skb(skb);
360 break;
361 }
351 } 362 }
352 total += skb_queue_len(&sta->ps_tx_buf);
353 } 363 }
354 364
355 rcu_read_unlock(); 365 rcu_read_unlock();
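
The rewritten purge loop walks the ACs from IEEE80211_AC_BK (3) down to IEEE80211_AC_VO (0) and frees one frame from the first non-empty queue, so once the global buffer limit is hit, background traffic is sacrificed before voice. A toy model of that policy:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

int main(void)
{
        int qlen[NUM_ACS] = { 2, 0, 5, 3 };     /* buffered frames per AC */
        int ac;

        for (ac = AC_BK; ac >= AC_VO; ac--) {
                if (qlen[ac]) {
                        qlen[ac]--;     /* dev_kfree_skb(oldest frame) */
                        printf("dropped one frame from AC %d\n", ac);
                        break;
                }
        }
        return 0;
}
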
@@ -418,7 +428,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
418 if (!ieee80211_is_mgmt(fc)) 428 if (!ieee80211_is_mgmt(fc))
419 return 0; 429 return 0;
420 430
421 if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP)) 431 if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
422 return 0; 432 return 0;
423 433
424 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) 434 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
@@ -435,7 +445,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
435 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 445 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
436 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 446 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
437 struct ieee80211_local *local = tx->local; 447 struct ieee80211_local *local = tx->local;
438 u32 staflags;
439 448
440 if (unlikely(!sta || 449 if (unlikely(!sta ||
441 ieee80211_is_probe_resp(hdr->frame_control) || 450 ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -444,57 +453,52 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
444 ieee80211_is_reassoc_resp(hdr->frame_control))) 453 ieee80211_is_reassoc_resp(hdr->frame_control)))
445 return TX_CONTINUE; 454 return TX_CONTINUE;
446 455
447 staflags = get_sta_flags(sta); 456 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
457 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
458 !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) {
459 int ac = skb_get_queue_mapping(tx->skb);
448 460
449 if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
450 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
451#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 461#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
452 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " 462 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
453 "before %d)\n", 463 sta->sta.addr, sta->sta.aid, ac);
454 sta->sta.addr, sta->sta.aid,
455 skb_queue_len(&sta->ps_tx_buf));
456#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 464#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
457 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 465 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
458 purge_old_ps_buffers(tx->local); 466 purge_old_ps_buffers(tx->local);
459 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 467 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
460 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); 468 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
461#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 469#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
462 if (net_ratelimit()) { 470 if (net_ratelimit())
463 printk(KERN_DEBUG "%s: STA %pM TX " 471 printk(KERN_DEBUG "%s: STA %pM TX buffer for "
464 "buffer full - dropping oldest frame\n", 472 "AC %d full - dropping oldest frame\n",
465 tx->sdata->name, sta->sta.addr); 473 tx->sdata->name, sta->sta.addr, ac);
466 }
467#endif 474#endif
468 dev_kfree_skb(old); 475 dev_kfree_skb(old);
469 } else 476 } else
470 tx->local->total_ps_buffered++; 477 tx->local->total_ps_buffered++;
471 478
472 /*
473 * Queue frame to be sent after STA wakes up/polls,
474 * but don't set the TIM bit if the driver is blocking
475 * wakeup or poll response transmissions anyway.
476 */
477 if (skb_queue_empty(&sta->ps_tx_buf) &&
478 !(staflags & WLAN_STA_PS_DRIVER))
479 sta_info_set_tim_bit(sta);
480
481 info->control.jiffies = jiffies; 479 info->control.jiffies = jiffies;
482 info->control.vif = &tx->sdata->vif; 480 info->control.vif = &tx->sdata->vif;
483 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 481 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
484 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 482 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
485 483
486 if (!timer_pending(&local->sta_cleanup)) 484 if (!timer_pending(&local->sta_cleanup))
487 mod_timer(&local->sta_cleanup, 485 mod_timer(&local->sta_cleanup,
488 round_jiffies(jiffies + 486 round_jiffies(jiffies +
489 STA_INFO_CLEANUP_INTERVAL)); 487 STA_INFO_CLEANUP_INTERVAL));
490 488
489 /*
490 * We queued up some frames, so the TIM bit might
491 * need to be set, recalculate it.
492 */
493 sta_info_recalc_tim(sta);
494
491 return TX_QUEUED; 495 return TX_QUEUED;
492 } 496 }
493#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 497#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
494 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 498 else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
495 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 499 printk(KERN_DEBUG
496 "set -> send frame\n", tx->sdata->name, 500 "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
497 sta->sta.addr); 501 tx->sdata->name, sta->sta.addr);
498 } 502 }
499#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 503#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
500 504
@@ -552,7 +556,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
552 !(info->flags & IEEE80211_TX_CTL_INJECTED) && 556 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
553 (!ieee80211_is_robust_mgmt_frame(hdr) || 557 (!ieee80211_is_robust_mgmt_frame(hdr) ||
554 (ieee80211_is_action(hdr->frame_control) && 558 (ieee80211_is_action(hdr->frame_control) &&
555 tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) { 559 tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
556 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 560 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
557 return TX_DROP; 561 return TX_DROP;
558 } else 562 } else
@@ -611,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
611 u32 len; 615 u32 len;
612 bool inval = false, rts = false, short_preamble = false; 616 bool inval = false, rts = false, short_preamble = false;
613 struct ieee80211_tx_rate_control txrc; 617 struct ieee80211_tx_rate_control txrc;
614 u32 sta_flags; 618 bool assoc = false;
615 619
616 memset(&txrc, 0, sizeof(txrc)); 620 memset(&txrc, 0, sizeof(txrc));
617 621
@@ -647,17 +651,17 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
647 */ 651 */
648 if (tx->sdata->vif.bss_conf.use_short_preamble && 652 if (tx->sdata->vif.bss_conf.use_short_preamble &&
649 (ieee80211_is_data(hdr->frame_control) || 653 (ieee80211_is_data(hdr->frame_control) ||
650 (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) 654 (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
651 txrc.short_preamble = short_preamble = true; 655 txrc.short_preamble = short_preamble = true;
652 656
653 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; 657 if (tx->sta)
658 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
654 659
655 /* 660 /*
656 * Lets not bother rate control if we're associated and cannot 661 * Lets not bother rate control if we're associated and cannot
657 * talk to the sta. This should not happen. 662 * talk to the sta. This should not happen.
658 */ 663 */
659 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && 664 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
660 (sta_flags & WLAN_STA_ASSOC) &&
661 !rate_usable_index_exists(sband, &tx->sta->sta), 665 !rate_usable_index_exists(sband, &tx->sta->sta),
662 "%s: Dropped data frame as no usable bitrate found while " 666 "%s: Dropped data frame as no usable bitrate found while "
663 "scanning and associated. Target station: " 667 "scanning and associated. Target station: "
@@ -800,6 +804,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
800 if (ieee80211_hdrlen(hdr->frame_control) < 24) 804 if (ieee80211_hdrlen(hdr->frame_control) < 24)
801 return TX_CONTINUE; 805 return TX_CONTINUE;
802 806
807 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
808 return TX_CONTINUE;
809
803 /* 810 /*
804 * Anything but QoS data that has a sequence number field 811 * Anything but QoS data that has a sequence number field
805 * (is long enough) gets a sequence number from the global 812 * (is long enough) gets a sequence number from the global
@@ -891,7 +898,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
891 int hdrlen; 898 int hdrlen;
892 int fragnum; 899 int fragnum;
893 900
894 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 901 if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
902 return TX_CONTINUE;
903
904 if (tx->local->ops->set_frag_threshold)
895 return TX_CONTINUE; 905 return TX_CONTINUE;
896 906
897 /* 907 /*
@@ -904,7 +914,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
904 914
905 hdrlen = ieee80211_hdrlen(hdr->frame_control); 915 hdrlen = ieee80211_hdrlen(hdr->frame_control);
906 916
907 /* internal error, why is TX_FRAGMENTED set? */ 917 /* internal error, why isn't DONTFRAG set? */
908 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) 918 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
909 return TX_DROP; 919 return TX_DROP;
910 920
@@ -1025,100 +1035,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
1025 1035
1026/* actual transmit path */ 1036/* actual transmit path */
1027 1037
1028/*
1029 * deal with packet injection down monitor interface
1030 * with Radiotap Header -- only called for monitor mode interface
1031 */
1032static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1033 struct sk_buff *skb)
1034{
1035 /*
1036 * this is the moment to interpret and discard the radiotap header that
1037 * must be at the start of the packet injected in Monitor mode
1038 *
1039 * Need to take some care with endian-ness since radiotap
1040 * args are little-endian
1041 */
1042
1043 struct ieee80211_radiotap_iterator iterator;
1044 struct ieee80211_radiotap_header *rthdr =
1045 (struct ieee80211_radiotap_header *) skb->data;
1046 bool hw_frag;
1047 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1048 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1049 NULL);
1050
1051 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1052 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1053
1054 /* packet is fragmented in HW if we have a non-NULL driver callback */
1055 hw_frag = (tx->local->ops->set_frag_threshold != NULL);
1056
1057 /*
1058 * for every radiotap entry that is present
1059 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1060 * entries present, or -EINVAL on error)
1061 */
1062
1063 while (!ret) {
1064 ret = ieee80211_radiotap_iterator_next(&iterator);
1065
1066 if (ret)
1067 continue;
1068
1069 /* see if this argument is something we can use */
1070 switch (iterator.this_arg_index) {
1071 /*
1072 * You must take care when dereferencing iterator.this_arg
1073 * for multibyte types... the pointer is not aligned. Use
1074 * get_unaligned((type *)iterator.this_arg) to dereference
1075 * iterator.this_arg for type "type" safely on all arches.
1076 */
1077 case IEEE80211_RADIOTAP_FLAGS:
1078 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1079 /*
1080 * this indicates that the skb we have been
1081 * handed has the 32-bit FCS CRC at the end...
1082 * we should react to that by snipping it off
1083 * because it will be recomputed and added
1084 * on transmission
1085 */
1086 if (skb->len < (iterator._max_length + FCS_LEN))
1087 return false;
1088
1089 skb_trim(skb, skb->len - FCS_LEN);
1090 }
1091 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1092 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1093 if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
1094 !hw_frag)
1095 tx->flags |= IEEE80211_TX_FRAGMENTED;
1096 break;
1097
1098 /*
1099 * Please update the file
1100 * Documentation/networking/mac80211-injection.txt
1101 * when parsing new fields here.
1102 */
1103
1104 default:
1105 break;
1106 }
1107 }
1108
1109 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1110 return false;
1111
1112 /*
1113 * remove the radiotap header
1114 * iterator->_max_length was sanity-checked against
1115 * skb->len by iterator init
1116 */
1117 skb_pull(skb, iterator._max_length);
1118
1119 return true;
1120}
1121
1122static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, 1038static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1123 struct sk_buff *skb, 1039 struct sk_buff *skb,
1124 struct ieee80211_tx_info *info, 1040 struct ieee80211_tx_info *info,
@@ -1183,7 +1099,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1183 struct ieee80211_local *local = sdata->local; 1099 struct ieee80211_local *local = sdata->local;
1184 struct ieee80211_hdr *hdr; 1100 struct ieee80211_hdr *hdr;
1185 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1186 int hdrlen, tid; 1102 int tid;
1187 u8 *qc; 1103 u8 *qc;
1188 1104
1189 memset(tx, 0, sizeof(*tx)); 1105 memset(tx, 0, sizeof(*tx));
@@ -1191,26 +1107,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1191 tx->local = local; 1107 tx->local = local;
1192 tx->sdata = sdata; 1108 tx->sdata = sdata;
1193 tx->channel = local->hw.conf.channel; 1109 tx->channel = local->hw.conf.channel;
1194 /*
1195 * Set this flag (used below to indicate "automatic fragmentation"),
1196 * it will be cleared/left by radiotap as desired.
1197 * Only valid when fragmentation is done by the stack.
1198 */
1199 if (!local->ops->set_frag_threshold)
1200 tx->flags |= IEEE80211_TX_FRAGMENTED;
1201
1202 /* process and remove the injection radiotap header */
1203 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1204 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1205 return TX_DROP;
1206
1207 /*
1208 * __ieee80211_parse_tx_radiotap has now removed
1209 * the radiotap header that was present and pre-filled
1210 * 'tx' with tx control information.
1211 */
1212 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1213 }
1214 1110
1215 /* 1111 /*
1216 * If this flag is set to true anywhere, and we get here, 1112 * If this flag is set to true anywhere, and we get here,
@@ -1232,7 +1128,9 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1232 tx->sta = sta_info_get(sdata, hdr->addr1); 1128 tx->sta = sta_info_get(sdata, hdr->addr1);
1233 1129
1234 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1130 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1235 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1131 !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
1132 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
1133 !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
1236 struct tid_ampdu_tx *tid_tx; 1134 struct tid_ampdu_tx *tid_tx;
1237 1135
1238 qc = ieee80211_get_qos_ctl(hdr); 1136 qc = ieee80211_get_qos_ctl(hdr);
@@ -1257,29 +1155,25 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1257 tx->flags |= IEEE80211_TX_UNICAST; 1155 tx->flags |= IEEE80211_TX_UNICAST;
1258 if (unlikely(local->wifi_wme_noack_test)) 1156 if (unlikely(local->wifi_wme_noack_test))
1259 info->flags |= IEEE80211_TX_CTL_NO_ACK; 1157 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1260 else 1158 /*
1261 info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 1159 * Flags are initialized to 0. Hence, no need to
1160 * explicitly unset IEEE80211_TX_CTL_NO_ACK since
1161 * it might already be set for injected frames.
1162 */
1262 } 1163 }
1263 1164
1264 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 1165 if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
1265 if ((tx->flags & IEEE80211_TX_UNICAST) && 1166 if (!(tx->flags & IEEE80211_TX_UNICAST) ||
1266 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold && 1167 skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
1267 !(info->flags & IEEE80211_TX_CTL_AMPDU)) 1168 info->flags & IEEE80211_TX_CTL_AMPDU)
1268 tx->flags |= IEEE80211_TX_FRAGMENTED; 1169 info->flags |= IEEE80211_TX_CTL_DONTFRAG;
1269 else
1270 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1271 } 1170 }
1272 1171
1273 if (!tx->sta) 1172 if (!tx->sta)
1274 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1173 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1275 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 1174 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1276 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1175 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1277 1176
1278 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1279 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1280 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1281 tx->ethertype = (pos[0] << 8) | pos[1];
1282 }
1283 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; 1177 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1284 1178
1285 return TX_CONTINUE; 1179 return TX_CONTINUE;
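
This hunk inverts the fragmentation decision: instead of opting in with IEEE80211_TX_FRAGMENTED, frames now opt out through IEEE80211_TX_CTL_DONTFRAG. A frame is left whole when it is not unicast, already fits under the fragmentation threshold with its FCS included, or belongs to an A-MPDU (and, per the earlier hunk, whenever the hardware fragments via set_frag_threshold). The condition as a boolean function:

#include <stdbool.h>
#include <stdio.h>

#define FCS_LEN 4

static bool dont_frag(bool unicast, unsigned int len,
                      unsigned int frag_threshold, bool ampdu)
{
        return !unicast || len + FCS_LEN <= frag_threshold || ampdu;
}

int main(void)
{
        printf("%d\n", dont_frag(true, 3000, 1500, false)); /* 0: fragment */
        printf("%d\n", dont_frag(true, 400, 1500, false));  /* 1: fits */
        printf("%d\n", dont_frag(true, 3000, 1500, true));  /* 1: A-MPDU */
        return 0;
}
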
@@ -1490,11 +1384,6 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1490 tail_need = max_t(int, tail_need, 0); 1384 tail_need = max_t(int, tail_need, 0);
1491 } 1385 }
1492 1386
1493 if (head_need || tail_need) {
1494 /* Sorry. Can't account for this any more */
1495 skb_orphan(skb);
1496 }
1497
1498 if (skb_cloned(skb)) 1387 if (skb_cloned(skb))
1499 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1388 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1500 else if (head_need || tail_need) 1389 else if (head_need || tail_need)
@@ -1508,67 +1397,19 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1508 return -ENOMEM; 1397 return -ENOMEM;
1509 } 1398 }
1510 1399
1511 /* update truesize too */
1512 skb->truesize += head_need + tail_need;
1513
1514 return 0; 1400 return 0;
1515} 1401}
1516 1402
1517static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1403void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1518 struct sk_buff *skb)
1519{ 1404{
1520 struct ieee80211_local *local = sdata->local; 1405 struct ieee80211_local *local = sdata->local;
1521 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1406 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1522 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1523 struct ieee80211_sub_if_data *tmp_sdata;
1524 int headroom; 1408 int headroom;
1525 bool may_encrypt; 1409 bool may_encrypt;
1526 1410
1527 rcu_read_lock(); 1411 rcu_read_lock();
1528 1412
1529 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1530 int hdrlen;
1531 u16 len_rthdr;
1532
1533 info->flags |= IEEE80211_TX_CTL_INJECTED |
1534 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1535
1536 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1537 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1538 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1539
1540 /* check the header is complete in the frame */
1541 if (likely(skb->len >= len_rthdr + hdrlen)) {
1542 /*
1543 * We process outgoing injected frames that have a
1544 * local address we handle as though they are our
1545 * own frames.
1546 * This code here isn't entirely correct, the local
1547 * MAC address is not necessarily enough to find
1548 * the interface to use; for that proper VLAN/WDS
1549 * support we will need a different mechanism.
1550 */
1551
1552 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1553 list) {
1554 if (!ieee80211_sdata_running(tmp_sdata))
1555 continue;
1556 if (tmp_sdata->vif.type ==
1557 NL80211_IFTYPE_MONITOR ||
1558 tmp_sdata->vif.type ==
1559 NL80211_IFTYPE_AP_VLAN ||
1560 tmp_sdata->vif.type ==
1561 NL80211_IFTYPE_WDS)
1562 continue;
1563 if (compare_ether_addr(tmp_sdata->vif.addr,
1564 hdr->addr2) == 0) {
1565 sdata = tmp_sdata;
1566 break;
1567 }
1568 }
1569 }
1570 }
1571
1572 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); 1413 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1573 1414
1574 headroom = local->tx_headroom; 1415 headroom = local->tx_headroom;
@@ -1595,11 +1436,94 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1595 return; 1436 return;
1596 } 1437 }
1597 1438
1598 ieee80211_set_qos_hdr(local, skb); 1439 ieee80211_set_qos_hdr(sdata, skb);
1599 ieee80211_tx(sdata, skb, false); 1440 ieee80211_tx(sdata, skb, false);
1600 rcu_read_unlock(); 1441 rcu_read_unlock();
1601} 1442}
1602 1443
1444static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
1445{
1446 struct ieee80211_radiotap_iterator iterator;
1447 struct ieee80211_radiotap_header *rthdr =
1448 (struct ieee80211_radiotap_header *) skb->data;
1449 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1450 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1451 NULL);
1452 u16 txflags;
1453
1454 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
1455 IEEE80211_TX_CTL_DONTFRAG;
1456
1457 /*
1458 * for every radiotap entry that is present
1459 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1460 * entries present, or -EINVAL on error)
1461 */
1462
1463 while (!ret) {
1464 ret = ieee80211_radiotap_iterator_next(&iterator);
1465
1466 if (ret)
1467 continue;
1468
1469 /* see if this argument is something we can use */
1470 switch (iterator.this_arg_index) {
1471 /*
1472 * You must take care when dereferencing iterator.this_arg
1473 * for multibyte types... the pointer is not aligned. Use
1474 * get_unaligned((type *)iterator.this_arg) to dereference
1475 * iterator.this_arg for type "type" safely on all arches.
1476 */
1477 case IEEE80211_RADIOTAP_FLAGS:
1478 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1479 /*
1480 * this indicates that the skb we have been
1481 * handed has the 32-bit FCS CRC at the end...
1482 * we should react to that by snipping it off
1483 * because it will be recomputed and added
1484 * on transmission
1485 */
1486 if (skb->len < (iterator._max_length + FCS_LEN))
1487 return false;
1488
1489 skb_trim(skb, skb->len - FCS_LEN);
1490 }
1491 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1492 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1493 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
1494 info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
1495 break;
1496
1497 case IEEE80211_RADIOTAP_TX_FLAGS:
1498 txflags = get_unaligned_le16(iterator.this_arg);
1499 if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
1500 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1501 break;
1502
1503 /*
1504 * Please update the file
1505 * Documentation/networking/mac80211-injection.txt
1506 * when parsing new fields here.
1507 */
1508
1509 default:
1510 break;
1511 }
1512 }
1513
1514 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1515 return false;
1516
1517 /*
1518 * remove the radiotap header
1519 * iterator->_max_length was sanity-checked against
1520 * skb->len by iterator init
1521 */
1522 skb_pull(skb, iterator._max_length);
1523
1524 return true;
1525}
1526
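The alignment comment in the switch above is the key constraint when extending this parser: radiotap arguments are aligned only relative to the start of the radiotap header, so a multibyte argument can land at an odd address. A sketch of the safe access pattern (iterator naming as in the diff):

	/* May fault on strict-alignment architectures: */
	/* u16 v = *(u16 *)iterator.this_arg; */

	/* Portable, and decodes radiotap's little-endian wire format: */
	u16 v = get_unaligned_le16(iterator.this_arg);
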
1603netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, 1527netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1604 struct net_device *dev) 1528 struct net_device *dev)
1605{ 1529{
@@ -1608,7 +1532,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1608 struct ieee80211_radiotap_header *prthdr = 1532 struct ieee80211_radiotap_header *prthdr =
1609 (struct ieee80211_radiotap_header *)skb->data; 1533 (struct ieee80211_radiotap_header *)skb->data;
1610 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1534 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1535 struct ieee80211_hdr *hdr;
1536 struct ieee80211_sub_if_data *tmp_sdata, *sdata;
1611 u16 len_rthdr; 1537 u16 len_rthdr;
1538 int hdrlen;
1612 1539
1613 /* 1540 /*
1614 * Frame injection is not allowed if beaconing is not allowed 1541 * Frame injection is not allowed if beaconing is not allowed
@@ -1659,12 +1586,65 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1659 skb_set_network_header(skb, len_rthdr); 1586 skb_set_network_header(skb, len_rthdr);
1660 skb_set_transport_header(skb, len_rthdr); 1587 skb_set_transport_header(skb, len_rthdr);
1661 1588
1589 if (skb->len < len_rthdr + 2)
1590 goto fail;
1591
1592 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1593 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1594
1595 if (skb->len < len_rthdr + hdrlen)
1596 goto fail;
1597
1598 /*
1599 * Initialize skb->protocol if the injected frame is a data frame
1600 * carrying a rfc1042 header
1601 */
1602 if (ieee80211_is_data(hdr->frame_control) &&
1603 skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
1604 u8 *payload = (u8 *)hdr + hdrlen;
1605
1606 if (compare_ether_addr(payload, rfc1042_header) == 0)
1607 skb->protocol = cpu_to_be16((payload[6] << 8) |
1608 payload[7]);
1609 }
1610
1662 memset(info, 0, sizeof(*info)); 1611 memset(info, 0, sizeof(*info));
1663 1612
1664 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1613 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
1614 IEEE80211_TX_CTL_INJECTED;
1615
1616 /* process and remove the injection radiotap header */
1617 if (!ieee80211_parse_tx_radiotap(skb))
1618 goto fail;
1619
1620 rcu_read_lock();
1621
1622 /*
1623 * We process outgoing injected frames that have a local address
1624 * we handle as though they are non-injected frames.
1625 * This code here isn't entirely correct, the local MAC address
1626 * isn't always enough to find the interface to use; for proper
1627 * VLAN/WDS support we will need a different mechanism (which
1628 * likely isn't going to be monitor interfaces).
1629 */
1630 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1631
1632 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
1633 if (!ieee80211_sdata_running(tmp_sdata))
1634 continue;
1635 if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1636 tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1637 tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
1638 continue;
1639 if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
1640 sdata = tmp_sdata;
1641 break;
1642 }
1643 }
1644
1645 ieee80211_xmit(sdata, skb);
1646 rcu_read_unlock();
1665 1647
1666 /* pass the radiotap header up to xmit */
1667 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1668 return NETDEV_TX_OK; 1648 return NETDEV_TX_OK;
1669 1649
1670fail: 1650fail:
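
The new validation above also derives skb->protocol for injected data frames by looking for an RFC 1042 (LLC/SNAP) header immediately after the 802.11 header. A condensed sketch of that extraction (length checks elided; the helper name is hypothetical):

	/* rfc1042_header is AA AA 03 00 00 00; the ethertype follows it. */
	static __be16 snap_ethertype(const u8 *payload)
	{
		/* payload points just past the 802.11 header */
		return cpu_to_be16((payload[6] << 8) | payload[7]);
	}
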
@@ -1703,8 +1683,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1703 int encaps_len, skip_header_bytes; 1683 int encaps_len, skip_header_bytes;
1704 int nh_pos, h_pos; 1684 int nh_pos, h_pos;
1705 struct sta_info *sta = NULL; 1685 struct sta_info *sta = NULL;
1706 u32 sta_flags = 0; 1686 bool wme_sta = false, authorized = false, tdls_auth = false;
1707 struct sk_buff *tmp_skb; 1687 struct sk_buff *tmp_skb;
1688 bool tdls_direct = false;
1708 1689
1709 if (unlikely(skb->len < ETH_HLEN)) { 1690 if (unlikely(skb->len < ETH_HLEN)) {
1710 ret = NETDEV_TX_OK; 1691 ret = NETDEV_TX_OK;
@@ -1728,7 +1709,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1728 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1709 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1729 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1710 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1730 hdrlen = 30; 1711 hdrlen = 30;
1731 sta_flags = get_sta_flags(sta); 1712 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1713 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1732 } 1714 }
1733 rcu_read_unlock(); 1715 rcu_read_unlock();
1734 if (sta) 1716 if (sta)
@@ -1816,11 +1798,50 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1816 break; 1798 break;
1817#endif 1799#endif
1818 case NL80211_IFTYPE_STATION: 1800 case NL80211_IFTYPE_STATION:
1819 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); 1801 if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1820 if (sdata->u.mgd.use_4addr && 1802 bool tdls_peer = false;
1821 cpu_to_be16(ethertype) != sdata->control_port_protocol) { 1803
1822 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1804 rcu_read_lock();
1805 sta = sta_info_get(sdata, skb->data);
1806 if (sta) {
1807 authorized = test_sta_flag(sta,
1808 WLAN_STA_AUTHORIZED);
1809 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1810 tdls_peer = test_sta_flag(sta,
1811 WLAN_STA_TDLS_PEER);
1812 tdls_auth = test_sta_flag(sta,
1813 WLAN_STA_TDLS_PEER_AUTH);
1814 }
1815 rcu_read_unlock();
1816
1817 /*
1818 * If the TDLS link is enabled, send everything
1819 * directly. Otherwise, allow TDLS setup frames
1820 * to be transmitted indirectly.
1821 */
1822 tdls_direct = tdls_peer && (tdls_auth ||
1823 !(ethertype == ETH_P_TDLS && skb->len > 14 &&
1824 skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
1825 }
1826
1827 if (tdls_direct) {
1828 /* link during setup - throw out frames to peer */
1829 if (!tdls_auth) {
1830 ret = NETDEV_TX_OK;
1831 goto fail;
1832 }
1833
1834 /* DA SA BSSID */
1835 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1836 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1837 memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
1838 hdrlen = 24;
1839 } else if (sdata->u.mgd.use_4addr &&
1840 cpu_to_be16(ethertype) != sdata->control_port_protocol) {
1841 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
1842 IEEE80211_FCTL_TODS);
1823 /* RA TA DA SA */ 1843 /* RA TA DA SA */
1844 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1824 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); 1845 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1825 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1846 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1826 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1847 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1828,6 +1849,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1828 } else { 1849 } else {
1829 fc |= cpu_to_le16(IEEE80211_FCTL_TODS); 1850 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1830 /* BSSID SA DA */ 1851 /* BSSID SA DA */
1852 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1831 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1853 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1832 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1854 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1833 hdrlen = 24; 1855 hdrlen = 24;
@@ -1853,13 +1875,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1853 if (!is_multicast_ether_addr(hdr.addr1)) { 1875 if (!is_multicast_ether_addr(hdr.addr1)) {
1854 rcu_read_lock(); 1876 rcu_read_lock();
1855 sta = sta_info_get(sdata, hdr.addr1); 1877 sta = sta_info_get(sdata, hdr.addr1);
1856 if (sta)
1857 sta_flags = get_sta_flags(sta);
1878 if (sta) {
1879 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1880 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1881 }
1858 rcu_read_unlock(); 1882 rcu_read_unlock();
1859 } 1883 }
1860 1884
1885 /* For mesh, the use of the QoS header is mandatory */
1886 if (ieee80211_vif_is_mesh(&sdata->vif))
1887 wme_sta = true;
1888
1861 /* receiver and we are QoS enabled, use a QoS type frame */ 1889 /* receiver and we are QoS enabled, use a QoS type frame */
1862 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { 1890 if (wme_sta && local->hw.queues >= 4) {
1863 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1891 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1864 hdrlen += 2; 1892 hdrlen += 2;
1865 } 1893 }
@@ -1868,12 +1896,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1868 * Drop unicast frames to unauthorised stations unless they are 1896 * Drop unicast frames to unauthorised stations unless they are
1869 * EAPOL frames from the local station. 1897 * EAPOL frames from the local station.
1870 */ 1898 */
1871 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1872 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1873 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1874 !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
1875 compare_ether_addr(sdata->vif.addr,
1876 skb->data + ETH_ALEN) == 0))) {
1899 if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
1900 !is_multicast_ether_addr(hdr.addr1) && !authorized &&
1901 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1902 compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1877#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1903#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1878 if (net_ratelimit()) 1904 if (net_ratelimit())
1879 printk(KERN_DEBUG "%s: dropped frame to %pM" 1905 printk(KERN_DEBUG "%s: dropped frame to %pM"
@@ -2275,13 +2301,23 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2275 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 2301 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2276 mgmt->u.beacon.beacon_int = 2302 mgmt->u.beacon.beacon_int =
2277 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2303 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2278 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
2304 mgmt->u.beacon.capab_info |= cpu_to_le16(
2305 sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
2279 2306
2280 pos = skb_put(skb, 2); 2307 pos = skb_put(skb, 2);
2281 *pos++ = WLAN_EID_SSID; 2308 *pos++ = WLAN_EID_SSID;
2282 *pos++ = 0x0; 2309 *pos++ = 0x0;
2283 2310
2284 mesh_mgmt_ies_add(skb, sdata); 2311 if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
2312 mesh_add_ds_params_ie(skb, sdata) ||
2313 ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
2314 mesh_add_rsn_ie(skb, sdata) ||
2315 mesh_add_meshid_ie(skb, sdata) ||
2316 mesh_add_meshconf_ie(skb, sdata) ||
2317 mesh_add_vendor_ies(skb, sdata)) {
2318 pr_err("o11s: couldn't add ies!\n");
2319 goto out;
2320 }
2285 } else { 2321 } else {
2286 WARN_ON(1); 2322 WARN_ON(1);
2287 goto out; 2323 goto out;
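
The mesh beacon path above now assembles its elements through per-IE helpers; every 802.11 information element is a one-byte ID, a one-byte length, then the payload. A sketch of the pattern the helpers share (hypothetical wrapper; tailroom is assumed to have been checked, as ieee80211_add_srates_ie does below):

	static void put_ie(struct sk_buff *skb, u8 eid, const u8 *data, u8 len)
	{
		u8 *pos = skb_put(skb, 2 + len);	/* caller checked tailroom */

		*pos++ = eid;	/* element ID */
		*pos++ = len;	/* element length */
		memcpy(pos, data, len);
	}
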
@@ -2335,11 +2371,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2335 local = sdata->local; 2371 local = sdata->local;
2336 2372
2337 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 2373 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2338 if (!skb) { 2374 if (!skb)
2339 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2340 "pspoll template\n", sdata->name);
2341 return NULL; 2375 return NULL;
2342 } 2376
2343 skb_reserve(skb, local->hw.extra_tx_headroom); 2377 skb_reserve(skb, local->hw.extra_tx_headroom);
2344 2378
2345 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); 2379 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
@@ -2375,11 +2409,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2375 local = sdata->local; 2409 local = sdata->local;
2376 2410
2377 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); 2411 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2378 if (!skb) { 2412 if (!skb)
2379 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2380 "template\n", sdata->name);
2381 return NULL; 2413 return NULL;
2382 } 2414
2383 skb_reserve(skb, local->hw.extra_tx_headroom); 2415 skb_reserve(skb, local->hw.extra_tx_headroom);
2384 2416
2385 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, 2417 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
@@ -2414,11 +2446,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2414 2446
2415 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + 2447 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2416 ie_ssid_len + ie_len); 2448 ie_ssid_len + ie_len);
2417 if (!skb) { 2449 if (!skb)
2418 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2419 "request template\n", sdata->name);
2420 return NULL; 2450 return NULL;
2421 }
2422 2451
2423 skb_reserve(skb, local->hw.extra_tx_headroom); 2452 skb_reserve(skb, local->hw.extra_tx_headroom);
2424 2453
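
To summarize the TDLS branch added to the STATION case above: frames to an authorized TDLS peer use the direct three-address form (DA SA BSSID), while during link setup only TDLS setup frames may still travel through the AP and other frames to the peer are dropped. A condensed sketch of the decision (helper and parameter names are hypothetical; the flag semantics are as in the diff):

	static bool tdls_send_direct(bool tdls_peer, bool tdls_auth,
				     bool is_setup_frame)
	{
		/*
		 * Authorized link: everything goes direct. During setup,
		 * setup frames go via the AP; other traffic to the peer
		 * is selected here and then dropped by the caller.
		 */
		return tdls_peer && (tdls_auth || !is_setup_frame);
	}
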
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ddeb1b998383..7439d26bf5f9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -19,7 +19,6 @@
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/bitmap.h> 21#include <linux/bitmap.h>
22#include <linux/crc32.h>
23#include <net/net_namespace.h> 22#include <net/net_namespace.h>
24#include <net/cfg80211.h> 23#include <net/cfg80211.h>
25#include <net/rtnetlink.h> 24#include <net/rtnetlink.h>
@@ -368,14 +367,14 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
368 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 367 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
369} 368}
370 369
371int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 370void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
372 struct sk_buff_head *skbs, 371 struct sk_buff_head *skbs,
373 void (*fn)(void *data), void *data) 372 void (*fn)(void *data), void *data)
374{ 373{
375 struct ieee80211_hw *hw = &local->hw; 374 struct ieee80211_hw *hw = &local->hw;
376 struct sk_buff *skb; 375 struct sk_buff *skb;
377 unsigned long flags; 376 unsigned long flags;
378 int queue, ret = 0, i; 377 int queue, i;
379 378
380 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 379 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
381 for (i = 0; i < hw->queues; i++) 380 for (i = 0; i < hw->queues; i++)
@@ -390,7 +389,6 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
390 continue; 389 continue;
391 } 390 }
392 391
393 ret++;
394 queue = skb_get_queue_mapping(skb); 392 queue = skb_get_queue_mapping(skb);
395 __skb_queue_tail(&local->pending[queue], skb); 393 __skb_queue_tail(&local->pending[queue], skb);
396 } 394 }
@@ -402,14 +400,12 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
402 __ieee80211_wake_queue(hw, i, 400 __ieee80211_wake_queue(hw, i,
403 IEEE80211_QUEUE_STOP_REASON_SKB_ADD); 401 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
404 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 402 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
405
406 return ret;
407} 403}
408 404
409int ieee80211_add_pending_skbs(struct ieee80211_local *local, 405void ieee80211_add_pending_skbs(struct ieee80211_local *local,
410 struct sk_buff_head *skbs) 406 struct sk_buff_head *skbs)
411{ 407{
412 return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); 408 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
413} 409}
414 410
415void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 411void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
@@ -573,172 +569,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
573 ieee802_11_parse_elems_crc(start, len, elems, 0, 0); 569 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
574} 570}
575 571
576u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
577 struct ieee802_11_elems *elems,
578 u64 filter, u32 crc)
579{
580 size_t left = len;
581 u8 *pos = start;
582 bool calc_crc = filter != 0;
583
584 memset(elems, 0, sizeof(*elems));
585 elems->ie_start = start;
586 elems->total_len = len;
587
588 while (left >= 2) {
589 u8 id, elen;
590
591 id = *pos++;
592 elen = *pos++;
593 left -= 2;
594
595 if (elen > left)
596 break;
597
598 if (calc_crc && id < 64 && (filter & (1ULL << id)))
599 crc = crc32_be(crc, pos - 2, elen + 2);
600
601 switch (id) {
602 case WLAN_EID_SSID:
603 elems->ssid = pos;
604 elems->ssid_len = elen;
605 break;
606 case WLAN_EID_SUPP_RATES:
607 elems->supp_rates = pos;
608 elems->supp_rates_len = elen;
609 break;
610 case WLAN_EID_FH_PARAMS:
611 elems->fh_params = pos;
612 elems->fh_params_len = elen;
613 break;
614 case WLAN_EID_DS_PARAMS:
615 elems->ds_params = pos;
616 elems->ds_params_len = elen;
617 break;
618 case WLAN_EID_CF_PARAMS:
619 elems->cf_params = pos;
620 elems->cf_params_len = elen;
621 break;
622 case WLAN_EID_TIM:
623 if (elen >= sizeof(struct ieee80211_tim_ie)) {
624 elems->tim = (void *)pos;
625 elems->tim_len = elen;
626 }
627 break;
628 case WLAN_EID_IBSS_PARAMS:
629 elems->ibss_params = pos;
630 elems->ibss_params_len = elen;
631 break;
632 case WLAN_EID_CHALLENGE:
633 elems->challenge = pos;
634 elems->challenge_len = elen;
635 break;
636 case WLAN_EID_VENDOR_SPECIFIC:
637 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
638 pos[2] == 0xf2) {
639 /* Microsoft OUI (00:50:F2) */
640
641 if (calc_crc)
642 crc = crc32_be(crc, pos - 2, elen + 2);
643
644 if (pos[3] == 1) {
645 /* OUI Type 1 - WPA IE */
646 elems->wpa = pos;
647 elems->wpa_len = elen;
648 } else if (elen >= 5 && pos[3] == 2) {
649 /* OUI Type 2 - WMM IE */
650 if (pos[4] == 0) {
651 elems->wmm_info = pos;
652 elems->wmm_info_len = elen;
653 } else if (pos[4] == 1) {
654 elems->wmm_param = pos;
655 elems->wmm_param_len = elen;
656 }
657 }
658 }
659 break;
660 case WLAN_EID_RSN:
661 elems->rsn = pos;
662 elems->rsn_len = elen;
663 break;
664 case WLAN_EID_ERP_INFO:
665 elems->erp_info = pos;
666 elems->erp_info_len = elen;
667 break;
668 case WLAN_EID_EXT_SUPP_RATES:
669 elems->ext_supp_rates = pos;
670 elems->ext_supp_rates_len = elen;
671 break;
672 case WLAN_EID_HT_CAPABILITY:
673 if (elen >= sizeof(struct ieee80211_ht_cap))
674 elems->ht_cap_elem = (void *)pos;
675 break;
676 case WLAN_EID_HT_INFORMATION:
677 if (elen >= sizeof(struct ieee80211_ht_info))
678 elems->ht_info_elem = (void *)pos;
679 break;
680 case WLAN_EID_MESH_ID:
681 elems->mesh_id = pos;
682 elems->mesh_id_len = elen;
683 break;
684 case WLAN_EID_MESH_CONFIG:
685 if (elen >= sizeof(struct ieee80211_meshconf_ie))
686 elems->mesh_config = (void *)pos;
687 break;
688 case WLAN_EID_PEER_LINK:
689 elems->peer_link = pos;
690 elems->peer_link_len = elen;
691 break;
692 case WLAN_EID_PREQ:
693 elems->preq = pos;
694 elems->preq_len = elen;
695 break;
696 case WLAN_EID_PREP:
697 elems->prep = pos;
698 elems->prep_len = elen;
699 break;
700 case WLAN_EID_PERR:
701 elems->perr = pos;
702 elems->perr_len = elen;
703 break;
704 case WLAN_EID_RANN:
705 if (elen >= sizeof(struct ieee80211_rann_ie))
706 elems->rann = (void *)pos;
707 break;
708 case WLAN_EID_CHANNEL_SWITCH:
709 elems->ch_switch_elem = pos;
710 elems->ch_switch_elem_len = elen;
711 break;
712 case WLAN_EID_QUIET:
713 if (!elems->quiet_elem) {
714 elems->quiet_elem = pos;
715 elems->quiet_elem_len = elen;
716 }
717 elems->num_of_quiet_elem++;
718 break;
719 case WLAN_EID_COUNTRY:
720 elems->country_elem = pos;
721 elems->country_elem_len = elen;
722 break;
723 case WLAN_EID_PWR_CONSTRAINT:
724 elems->pwr_constr_elem = pos;
725 elems->pwr_constr_elem_len = elen;
726 break;
727 case WLAN_EID_TIMEOUT_INTERVAL:
728 elems->timeout_int = pos;
729 elems->timeout_int_len = elen;
730 break;
731 default:
732 break;
733 }
734
735 left -= elen;
736 pos += elen;
737 }
738
739 return crc;
740}
741
742void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 572void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
743{ 573{
744 struct ieee80211_local *local = sdata->local; 574 struct ieee80211_local *local = sdata->local;
@@ -799,8 +629,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
799 629
800 qparam.uapsd = false; 630 qparam.uapsd = false;
801 631
802 local->tx_conf[queue] = qparam; 632 sdata->tx_conf[queue] = qparam;
803 drv_conf_tx(local, queue, &qparam); 633 drv_conf_tx(local, sdata, queue, &qparam);
804 } 634 }
805 635
806 /* after reinitialize QoS TX queues setting to default, 636 /* after reinitialize QoS TX queues setting to default,
@@ -874,11 +704,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
874 704
875 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 705 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
876 sizeof(*mgmt) + 6 + extra_len); 706 sizeof(*mgmt) + 6 + extra_len);
877 if (!skb) { 707 if (!skb)
878 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
879 "frame\n", sdata->name);
880 return; 708 return;
881 } 709
882 skb_reserve(skb, local->hw.extra_tx_headroom); 710 skb_reserve(skb, local->hw.extra_tx_headroom);
883 711
884 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 712 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
@@ -1031,11 +859,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1031 859
1032 /* FIXME: come up with a proper value */ 860 /* FIXME: come up with a proper value */
1033 buf = kmalloc(200 + ie_len, GFP_KERNEL); 861 buf = kmalloc(200 + ie_len, GFP_KERNEL);
1034 if (!buf) { 862 if (!buf)
1035 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1036 "buffer\n", sdata->name);
1037 return NULL; 863 return NULL;
1038 }
1039 864
1040 /* 865 /*
1041 * Do not send DS Channel parameter for directed probe requests 866 * Do not send DS Channel parameter for directed probe requests
@@ -1071,14 +896,18 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1071void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 896void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1072 const u8 *ssid, size_t ssid_len, 897 const u8 *ssid, size_t ssid_len,
1073 const u8 *ie, size_t ie_len, 898 const u8 *ie, size_t ie_len,
1074 u32 ratemask, bool directed) 899 u32 ratemask, bool directed, bool no_cck)
1075{ 900{
1076 struct sk_buff *skb; 901 struct sk_buff *skb;
1077 902
1078 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len, 903 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
1079 ie, ie_len, directed); 904 ie, ie_len, directed);
1080 if (skb) 905 if (skb) {
906 if (no_cck)
907 IEEE80211_SKB_CB(skb)->flags |=
908 IEEE80211_TX_CTL_NO_CCK_RATE;
1081 ieee80211_tx_skb(sdata, skb); 909 ieee80211_tx_skb(sdata, skb);
910 }
1082} 911}
1083 912
1084u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 913u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1205,14 +1034,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1205 struct ieee80211_sub_if_data, 1034 struct ieee80211_sub_if_data,
1206 u.ap); 1035 u.ap);
1207 1036
1037 memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
1208 WARN_ON(drv_sta_add(local, sdata, &sta->sta)); 1038 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1209 } 1039 }
1210 } 1040 }
1211 mutex_unlock(&local->sta_mtx); 1041 mutex_unlock(&local->sta_mtx);
1212 1042
1213 /* reconfigure tx conf */ 1043 /* reconfigure tx conf */
1214 for (i = 0; i < hw->queues; i++)
1215 drv_conf_tx(local, i, &local->tx_conf[i]);
1044 list_for_each_entry(sdata, &local->interfaces, list) {
1045 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1046 sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1047 !ieee80211_sdata_running(sdata))
1048 continue;
1049
1050 for (i = 0; i < hw->queues; i++)
1051 drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
1052 }
1216 1053
1217 /* reconfigure hardware */ 1054 /* reconfigure hardware */
1218 ieee80211_hw_config(local, ~0); 1055 ieee80211_hw_config(local, ~0);
@@ -1248,6 +1085,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1248 changed |= BSS_CHANGED_IBSS; 1085 changed |= BSS_CHANGED_IBSS;
1249 /* fall through */ 1086 /* fall through */
1250 case NL80211_IFTYPE_AP: 1087 case NL80211_IFTYPE_AP:
1088 changed |= BSS_CHANGED_SSID;
1089 /* fall through */
1251 case NL80211_IFTYPE_MESH_POINT: 1090 case NL80211_IFTYPE_MESH_POINT:
1252 changed |= BSS_CHANGED_BEACON | 1091 changed |= BSS_CHANGED_BEACON |
1253 BSS_CHANGED_BEACON_ENABLED; 1092 BSS_CHANGED_BEACON_ENABLED;
@@ -1283,7 +1122,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1283 1122
1284 list_for_each_entry(sta, &local->sta_list, list) { 1123 list_for_each_entry(sta, &local->sta_list, list) {
1285 ieee80211_sta_tear_down_BA_sessions(sta, true); 1124 ieee80211_sta_tear_down_BA_sessions(sta, true);
1286 clear_sta_flags(sta, WLAN_STA_BLOCK_BA); 1125 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
1287 } 1126 }
1288 1127
1289 mutex_unlock(&local->sta_mtx); 1128 mutex_unlock(&local->sta_mtx);
@@ -1522,3 +1361,60 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
1522 _ieee80211_enable_rssi_reports(sdata, 0, 0); 1361 _ieee80211_enable_rssi_reports(sdata, 0, 0);
1523} 1362}
1524EXPORT_SYMBOL(ieee80211_disable_rssi_reports); 1363EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
1364
1365int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1366{
1367 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1368 struct ieee80211_local *local = sdata->local;
1369 struct ieee80211_supported_band *sband;
1370 int rate;
1371 u8 i, rates, *pos;
1372
1373 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1374 rates = sband->n_bitrates;
1375 if (rates > 8)
1376 rates = 8;
1377
1378 if (skb_tailroom(skb) < rates + 2)
1379 return -ENOMEM;
1380
1381 pos = skb_put(skb, rates + 2);
1382 *pos++ = WLAN_EID_SUPP_RATES;
1383 *pos++ = rates;
1384 for (i = 0; i < rates; i++) {
1385 rate = sband->bitrates[i].bitrate;
1386 *pos++ = (u8) (rate / 5);
1387 }
1388
1389 return 0;
1390}
1391
1392int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1393{
1394 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1395 struct ieee80211_local *local = sdata->local;
1396 struct ieee80211_supported_band *sband;
1397 int rate;
1398 u8 i, exrates, *pos;
1399
1400 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1401 exrates = sband->n_bitrates;
1402 if (exrates > 8)
1403 exrates -= 8;
1404 else
1405 exrates = 0;
1406
1407 if (skb_tailroom(skb) < exrates + 2)
1408 return -ENOMEM;
1409
1410 if (exrates) {
1411 pos = skb_put(skb, exrates + 2);
1412 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1413 *pos++ = exrates;
1414 for (i = 8; i < sband->n_bitrates; i++) {
1415 rate = sband->bitrates[i].bitrate;
1416 *pos++ = (u8) (rate / 5);
1417 }
1418 }
1419 return 0;
1420}
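
The pair of helpers above splits a band's rate set at eight entries: the first eight go into the Supported Rates IE, the remainder into Extended Supported Rates. Each entry holds the rate in 500 kb/s units, which is why cfg80211's 100 kb/s bitrate values are divided by five. A worked example of the encoding:

	/* cfg80211 stores 54 Mb/s as 540 (units of 100 kb/s). */
	int bitrate = 540;
	u8 encoded = (u8)(bitrate / 5);	/* 108, i.e. 108 * 500 kb/s = 54 Mb/s */
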
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 7a49532f14cb..fd52e695c071 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -72,7 +72,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
72 case NL80211_IFTYPE_AP_VLAN: 72 case NL80211_IFTYPE_AP_VLAN:
73 sta = rcu_dereference(sdata->u.vlan.sta); 73 sta = rcu_dereference(sdata->u.vlan.sta);
74 if (sta) { 74 if (sta) {
75 qos = get_sta_flags(sta) & WLAN_STA_WME; 75 qos = test_sta_flag(sta, WLAN_STA_WME);
76 break; 76 break;
77 } 77 }
78 case NL80211_IFTYPE_AP: 78 case NL80211_IFTYPE_AP:
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
83 break; 83 break;
84#ifdef CONFIG_MAC80211_MESH 84#ifdef CONFIG_MAC80211_MESH
85 case NL80211_IFTYPE_MESH_POINT: 85 case NL80211_IFTYPE_MESH_POINT:
86 /*
87 * XXX: This is clearly broken ... but already was before,
88 * because ieee80211_fill_mesh_addresses() would clear A1
89 * except for multicast addresses.
90 */
86 ra = skb->data;
91 break; 87 break;
92#endif 88#endif
93 case NL80211_IFTYPE_STATION: 89 case NL80211_IFTYPE_STATION:
@@ -103,7 +99,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
103 if (!sta && ra && !is_multicast_ether_addr(ra)) { 99 if (!sta && ra && !is_multicast_ether_addr(ra)) {
104 sta = sta_info_get(sdata, ra); 100 sta = sta_info_get(sdata, ra);
105 if (sta) 101 if (sta)
106 qos = get_sta_flags(sta) & WLAN_STA_WME; 102 qos = test_sta_flag(sta, WLAN_STA_WME);
107 } 103 }
108 rcu_read_unlock(); 104 rcu_read_unlock();
109 105
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
139 return ieee802_1d_to_ac[skb->priority]; 135 return ieee802_1d_to_ac[skb->priority];
140} 136}
141 137
142void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
138void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
139 struct sk_buff *skb)
143{ 140{
144 struct ieee80211_hdr *hdr = (void *)skb->data; 141 struct ieee80211_hdr *hdr = (void *)skb->data;
145 142
@@ -150,10 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
150 147
151 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 148 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
152 149
153 if (unlikely(local->wifi_wme_noack_test)) 150 if (unlikely(sdata->local->wifi_wme_noack_test))
154 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; 151 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
155 /* qos header is 2 bytes, second reserved */ 152 /* qos header is 2 bytes */
156 *p++ = ack_policy | tid; 153 *p++ = ack_policy | tid;
157 *p = 0;
154 *p = ieee80211_vif_is_mesh(&sdata->vif) ?
155 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
158 } 156 }
159} 157}
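
A subtlety in the QoS-header hunk above: IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT is defined against the full 16-bit QoS Control field, hence the >> 8 when it is stored into the second byte. A sketch of the layout, assuming the usual little-endian two-byte encoding:

	u16 qos = ack_policy | tid;
	if (is_mesh)
		qos |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;	/* bit 8 */
	p[0] = qos & 0xff;	/* ack policy + TID */
	p[1] = qos >> 8;	/* mesh-control-present flag */
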
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index faead6d02026..34e166fbf4d4 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -17,7 +17,8 @@ extern const int ieee802_1d_to_ac[8];
17 17
18u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, 18u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
19 struct sk_buff *skb); 19 struct sk_buff *skb);
20void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
20void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
21 struct sk_buff *skb);
21u16 ieee80211_downgrade_queue(struct ieee80211_local *local, 22u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
22 struct sk_buff *skb); 23 struct sk_buff *skb);
23 24
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 380b9a7462b6..94472eb34d76 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -229,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
229 wk->ie_len + /* extra IEs */ 229 wk->ie_len + /* extra IEs */
230 9, /* WMM */ 230 9, /* WMM */
231 GFP_KERNEL); 231 GFP_KERNEL);
232 if (!skb) { 232 if (!skb)
233 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
234 "frame\n", sdata->name);
235 return; 233 return;
236 } 234
237 skb_reserve(skb, local->hw.extra_tx_headroom); 235 skb_reserve(skb, local->hw.extra_tx_headroom);
238 236
239 capab = WLAN_CAPABILITY_ESS; 237 capab = WLAN_CAPABILITY_ESS;
@@ -460,7 +458,7 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
460 */ 458 */
461 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid, 459 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
462 wk->probe_auth.ssid_len, NULL, 0, 460 wk->probe_auth.ssid_len, NULL, 0,
463 (u32) -1, true); 461 (u32) -1, true, false);
464 462
465 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 463 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
466 run_again(local, wk->timeout); 464 run_again(local, wk->timeout);
@@ -579,7 +577,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
579 /* 577 /*
580 * After this, offchan_tx.frame remains but now is no 578 * After this, offchan_tx.frame remains but now is no
581 * longer a valid pointer -- we still need it as the 579 * longer a valid pointer -- we still need it as the
582 * cookie for canceling this work. 580 * cookie for canceling this work/status matching.
583 */ 581 */
584 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame); 582 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
585 583
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7bc8702808fa..f614ce7bb6e3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -53,7 +53,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
53 } 53 }
54 54
55 if (info->control.hw_key && 55 if (info->control.hw_key &&
56 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 56 (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
57 tx->local->ops->set_frag_threshold) &&
57 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { 58 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
58 /* hwaccel - with no need for SW-generated MMIC */ 59 /* hwaccel - with no need for SW-generated MMIC */
59 return TX_CONTINUE; 60 return TX_CONTINUE;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d86cb2..8260b13d93c9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -505,7 +505,7 @@ config NETFILTER_XT_TARGET_LED
505 echo netfilter-ssh > /sys/class/leds/<ledname>/trigger 505 echo netfilter-ssh > /sys/class/leds/<ledname>/trigger
506 506
507 For more information on the LEDs available on your system, see 507 For more information on the LEDs available on your system, see
508 Documentation/leds-class.txt 508 Documentation/leds/leds-class.txt
509 509
510config NETFILTER_XT_TARGET_MARK 510config NETFILTER_XT_TARGET_MARK
511 tristate '"MARK" target support' 511 tristate '"MARK" target support'
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c0ff5d..3346829ea07f 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo)
37 err = mutex_lock_interruptible(&afinfo_mutex); 37 err = mutex_lock_interruptible(&afinfo_mutex);
38 if (err < 0) 38 if (err < 0)
39 return err; 39 return err;
40 rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); 40 RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
41 mutex_unlock(&afinfo_mutex); 41 mutex_unlock(&afinfo_mutex);
42 return 0; 42 return 0;
43} 43}
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo);
46void nf_unregister_afinfo(const struct nf_afinfo *afinfo) 46void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
47{ 47{
48 mutex_lock(&afinfo_mutex); 48 mutex_lock(&afinfo_mutex);
49 rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); 49 RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
50 mutex_unlock(&afinfo_mutex); 50 mutex_unlock(&afinfo_mutex);
51 synchronize_rcu(); 51 synchronize_rcu();
52} 52}
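
The RCU conversions in this and the following netfilter files follow one rule: rcu_assign_pointer() includes the write barrier that orders an object's initialization before its publication, so it is only needed when readers may dereference the new value; storing NULL, or storing before any reader can see the pointer, can use the plain RCU_INIT_POINTER(). A hedged sketch (obj, init_obj and global_ptr are hypothetical):

	/* Publishing a freshly initialized object: barrier required. */
	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	init_obj(obj);
	rcu_assign_pointer(global_ptr, obj);

	/* Unpublishing, or storing before readers exist: plain store. */
	RCU_INIT_POINTER(global_ptr, NULL);
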
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5290ac353a5e..e3be48bf4dcd 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2283 struct ip_vs_service *svc; 2283 struct ip_vs_service *svc;
2284 struct ip_vs_dest_user *udest_compat; 2284 struct ip_vs_dest_user *udest_compat;
2285 struct ip_vs_dest_user_kern udest; 2285 struct ip_vs_dest_user_kern udest;
2286 struct netns_ipvs *ipvs = net_ipvs(net);
2286 2287
2287 if (!capable(CAP_NET_ADMIN)) 2288 if (!capable(CAP_NET_ADMIN))
2288 return -EPERM; 2289 return -EPERM;
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2303 /* increase the module use count */ 2304 /* increase the module use count */
2304 ip_vs_use_count_inc(); 2305 ip_vs_use_count_inc();
2305 2306
2307 /* Handle daemons since they have another lock */
2308 if (cmd == IP_VS_SO_SET_STARTDAEMON ||
2309 cmd == IP_VS_SO_SET_STOPDAEMON) {
2310 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2311
2312 if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
2313 ret = -ERESTARTSYS;
2314 goto out_dec;
2315 }
2316 if (cmd == IP_VS_SO_SET_STARTDAEMON)
2317 ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
2318 dm->syncid);
2319 else
2320 ret = stop_sync_thread(net, dm->state);
2321 mutex_unlock(&ipvs->sync_mutex);
2322 goto out_dec;
2323 }
2324
2306 if (mutex_lock_interruptible(&__ip_vs_mutex)) { 2325 if (mutex_lock_interruptible(&__ip_vs_mutex)) {
2307 ret = -ERESTARTSYS; 2326 ret = -ERESTARTSYS;
2308 goto out_dec; 2327 goto out_dec;
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2316 /* Set timeout values for (tcp tcpfin udp) */ 2335 /* Set timeout values for (tcp tcpfin udp) */
2317 ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); 2336 ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
2318 goto out_unlock; 2337 goto out_unlock;
2319 } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
2320 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2321 ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
2322 dm->syncid);
2323 goto out_unlock;
2324 } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
2325 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
2326 ret = stop_sync_thread(net, dm->state);
2327 goto out_unlock;
2328 } 2338 }
2329 2339
2330 usvc_compat = (struct ip_vs_service_user *)arg; 2340 usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2584 2594
2585 if (copy_from_user(arg, user, copylen) != 0) 2595 if (copy_from_user(arg, user, copylen) != 0)
2586 return -EFAULT; 2596 return -EFAULT;
2597 /*
2598 * Handle daemons first since it has its own locking
2599 */
2600 if (cmd == IP_VS_SO_GET_DAEMON) {
2601 struct ip_vs_daemon_user d[2];
2602
2603 memset(&d, 0, sizeof(d));
2604 if (mutex_lock_interruptible(&ipvs->sync_mutex))
2605 return -ERESTARTSYS;
2606
2607 if (ipvs->sync_state & IP_VS_STATE_MASTER) {
2608 d[0].state = IP_VS_STATE_MASTER;
2609 strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
2610 sizeof(d[0].mcast_ifn));
2611 d[0].syncid = ipvs->master_syncid;
2612 }
2613 if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
2614 d[1].state = IP_VS_STATE_BACKUP;
2615 strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
2616 sizeof(d[1].mcast_ifn));
2617 d[1].syncid = ipvs->backup_syncid;
2618 }
2619 if (copy_to_user(user, &d, sizeof(d)) != 0)
2620 ret = -EFAULT;
2621 mutex_unlock(&ipvs->sync_mutex);
2622 return ret;
2623 }
2587 2624
2588 if (mutex_lock_interruptible(&__ip_vs_mutex)) 2625 if (mutex_lock_interruptible(&__ip_vs_mutex))
2589 return -ERESTARTSYS; 2626 return -ERESTARTSYS;
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2681 } 2718 }
2682 break; 2719 break;
2683 2720
2684 case IP_VS_SO_GET_DAEMON:
2685 {
2686 struct ip_vs_daemon_user d[2];
2687
2688 memset(&d, 0, sizeof(d));
2689 if (ipvs->sync_state & IP_VS_STATE_MASTER) {
2690 d[0].state = IP_VS_STATE_MASTER;
2691 strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
2692 sizeof(d[0].mcast_ifn));
2693 d[0].syncid = ipvs->master_syncid;
2694 }
2695 if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
2696 d[1].state = IP_VS_STATE_BACKUP;
2697 strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
2698 sizeof(d[1].mcast_ifn));
2699 d[1].syncid = ipvs->backup_syncid;
2700 }
2701 if (copy_to_user(user, &d, sizeof(d)) != 0)
2702 ret = -EFAULT;
2703 }
2704 break;
2705
2706 default: 2721 default:
2707 ret = -EINVAL; 2722 ret = -EINVAL;
2708 } 2723 }
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
3205 struct net *net = skb_sknet(skb); 3220 struct net *net = skb_sknet(skb);
3206 struct netns_ipvs *ipvs = net_ipvs(net); 3221 struct netns_ipvs *ipvs = net_ipvs(net);
3207 3222
3208 mutex_lock(&__ip_vs_mutex); 3223 mutex_lock(&ipvs->sync_mutex);
3209 if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { 3224 if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
3210 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, 3225 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
3211 ipvs->master_mcast_ifn, 3226 ipvs->master_mcast_ifn,
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
3225 } 3240 }
3226 3241
3227nla_put_failure: 3242nla_put_failure:
3228 mutex_unlock(&__ip_vs_mutex); 3243 mutex_unlock(&ipvs->sync_mutex);
3229 3244
3230 return skb->len; 3245 return skb->len;
3231} 3246}
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
3271 return ip_vs_set_timeout(net, &t); 3286 return ip_vs_set_timeout(net, &t);
3272} 3287}
3273 3288
3274static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) 3289static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
3275{ 3290{
3276 struct ip_vs_service *svc = NULL;
3277 struct ip_vs_service_user_kern usvc;
3278 struct ip_vs_dest_user_kern udest;
3279 int ret = 0, cmd; 3291 int ret = 0, cmd;
3280 int need_full_svc = 0, need_full_dest = 0;
3281 struct net *net; 3292 struct net *net;
3282 struct netns_ipvs *ipvs; 3293 struct netns_ipvs *ipvs;
3283 3294
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3285 ipvs = net_ipvs(net); 3296 ipvs = net_ipvs(net);
3286 cmd = info->genlhdr->cmd; 3297 cmd = info->genlhdr->cmd;
3287 3298
3288 mutex_lock(&__ip_vs_mutex);
3299 if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
3289
3290 if (cmd == IPVS_CMD_FLUSH) {
3291 ret = ip_vs_flush(net);
3292 goto out;
3293 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3294 ret = ip_vs_genl_set_config(net, info->attrs);
3295 goto out;
3296 } else if (cmd == IPVS_CMD_NEW_DAEMON ||
3297 cmd == IPVS_CMD_DEL_DAEMON) {
3298
3299 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; 3300 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
3300 3301
3302 mutex_lock(&ipvs->sync_mutex);
3301 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || 3303 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
3302 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, 3304 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
3303 info->attrs[IPVS_CMD_ATTR_DAEMON], 3305 info->attrs[IPVS_CMD_ATTR_DAEMON],
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3310 ret = ip_vs_genl_new_daemon(net, daemon_attrs); 3312 ret = ip_vs_genl_new_daemon(net, daemon_attrs);
3311 else 3313 else
3312 ret = ip_vs_genl_del_daemon(net, daemon_attrs); 3314 ret = ip_vs_genl_del_daemon(net, daemon_attrs);
3315out:
3316 mutex_unlock(&ipvs->sync_mutex);
3317 }
3318 return ret;
3319}
3320
3321static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3322{
3323 struct ip_vs_service *svc = NULL;
3324 struct ip_vs_service_user_kern usvc;
3325 struct ip_vs_dest_user_kern udest;
3326 int ret = 0, cmd;
3327 int need_full_svc = 0, need_full_dest = 0;
3328 struct net *net;
3329 struct netns_ipvs *ipvs;
3330
3331 net = skb_sknet(skb);
3332 ipvs = net_ipvs(net);
3333 cmd = info->genlhdr->cmd;
3334
3335 mutex_lock(&__ip_vs_mutex);
3336
3337 if (cmd == IPVS_CMD_FLUSH) {
3338 ret = ip_vs_flush(net);
3339 goto out;
3340 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3341 ret = ip_vs_genl_set_config(net, info->attrs);
3313 goto out; 3342 goto out;
3314 } else if (cmd == IPVS_CMD_ZERO && 3343 } else if (cmd == IPVS_CMD_ZERO &&
3315 !info->attrs[IPVS_CMD_ATTR_SERVICE]) { 3344 !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3536 .cmd = IPVS_CMD_NEW_DAEMON, 3565 .cmd = IPVS_CMD_NEW_DAEMON,
3537 .flags = GENL_ADMIN_PERM, 3566 .flags = GENL_ADMIN_PERM,
3538 .policy = ip_vs_cmd_policy, 3567 .policy = ip_vs_cmd_policy,
3539 .doit = ip_vs_genl_set_cmd, 3568 .doit = ip_vs_genl_set_daemon,
3540 }, 3569 },
3541 { 3570 {
3542 .cmd = IPVS_CMD_DEL_DAEMON, 3571 .cmd = IPVS_CMD_DEL_DAEMON,
3543 .flags = GENL_ADMIN_PERM, 3572 .flags = GENL_ADMIN_PERM,
3544 .policy = ip_vs_cmd_policy, 3573 .policy = ip_vs_cmd_policy,
3545 .doit = ip_vs_genl_set_cmd, 3574 .doit = ip_vs_genl_set_daemon,
3546 }, 3575 },
3547 { 3576 {
3548 .cmd = IPVS_CMD_GET_DAEMON, 3577 .cmd = IPVS_CMD_GET_DAEMON,
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7ee7215b8ba0..3cdd479f9b5d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -61,6 +61,7 @@
61 61
62#define SYNC_PROTO_VER 1 /* Protocol version in header */ 62#define SYNC_PROTO_VER 1 /* Protocol version in header */
63 63
64static struct lock_class_key __ipvs_sync_key;
64/* 65/*
65 * IPVS sync connection entry 66 * IPVS sync connection entry
66 * Version 0, i.e. original version. 67 * Version 0, i.e. original version.
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1545 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", 1546 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1546 sizeof(struct ip_vs_sync_conn_v0)); 1547 sizeof(struct ip_vs_sync_conn_v0));
1547 1548
1549
1548 if (state == IP_VS_STATE_MASTER) { 1550 if (state == IP_VS_STATE_MASTER) {
1549 if (ipvs->master_thread) 1551 if (ipvs->master_thread)
1550 return -EEXIST; 1552 return -EEXIST;
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net)
1667{ 1669{
1668 struct netns_ipvs *ipvs = net_ipvs(net); 1670 struct netns_ipvs *ipvs = net_ipvs(net);
1669 1671
1672 __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
1670 INIT_LIST_HEAD(&ipvs->sync_queue); 1673 INIT_LIST_HEAD(&ipvs->sync_queue);
1671 spin_lock_init(&ipvs->sync_lock); 1674 spin_lock_init(&ipvs->sync_lock);
1672 spin_lock_init(&ipvs->sync_buff_lock); 1675 spin_lock_init(&ipvs->sync_buff_lock);
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net)
1680void ip_vs_sync_net_cleanup(struct net *net) 1683void ip_vs_sync_net_cleanup(struct net *net)
1681{ 1684{
1682 int retc; 1685 int retc;
1686 struct netns_ipvs *ipvs = net_ipvs(net);
1683 1687
1688 mutex_lock(&ipvs->sync_mutex);
1684 retc = stop_sync_thread(net, IP_VS_STATE_MASTER); 1689 retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
1685 if (retc && retc != -ESRCH) 1690 if (retc && retc != -ESRCH)
1686 pr_err("Failed to stop Master Daemon\n"); 1691 pr_err("Failed to stop Master Daemon\n");
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net)
1688 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); 1693 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
1689 if (retc && retc != -ESRCH) 1694 if (retc && retc != -ESRCH)
1690 pr_err("Failed to stop Backup Daemon\n"); 1695 pr_err("Failed to stop Backup Daemon\n");
1696 mutex_unlock(&ipvs->sync_mutex);
1691} 1697}
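
The sync_mutex added above is set up with __mutex_init() and one file-static lock_class_key, so every network namespace's instance shares a single lockdep class rather than minting a new one per mutex. A sketch of the same pattern with hypothetical names:

	static struct lock_class_key my_key;

	static void my_ns_init(struct my_state *st)
	{
		/* all per-netns instances map to one lockdep class */
		__mutex_init(&st->lock, "my_state->lock", &my_key);
	}
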
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b866017..5acfaf59a9c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -779,7 +779,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
779 if (exp->helper) { 779 if (exp->helper) {
780 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 780 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
781 if (help) 781 if (help)
782 rcu_assign_pointer(help->helper, exp->helper); 782 RCU_INIT_POINTER(help->helper, exp->helper);
783 } 783 }
784 784
785#ifdef CONFIG_NF_CONNTRACK_MARK 785#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -1317,7 +1317,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
1317void nf_conntrack_cleanup(struct net *net) 1317void nf_conntrack_cleanup(struct net *net)
1318{ 1318{
1319 if (net_eq(net, &init_net)) 1319 if (net_eq(net, &init_net))
1320 rcu_assign_pointer(ip_ct_attach, NULL); 1320 RCU_INIT_POINTER(ip_ct_attach, NULL);
1321 1321
1322 /* This makes sure all current packets have passed through 1322 /* This makes sure all current packets have passed through
1323 netfilter framework. Roll on, two-stage module 1323 netfilter framework. Roll on, two-stage module
@@ -1327,7 +1327,7 @@ void nf_conntrack_cleanup(struct net *net)
1327 nf_conntrack_cleanup_net(net); 1327 nf_conntrack_cleanup_net(net);
1328 1328
1329 if (net_eq(net, &init_net)) { 1329 if (net_eq(net, &init_net)) {
1330 rcu_assign_pointer(nf_ct_destroy, NULL); 1330 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1331 nf_conntrack_cleanup_init_net(); 1331 nf_conntrack_cleanup_init_net();
1332 } 1332 }
1333} 1333}
@@ -1576,11 +1576,11 @@ int nf_conntrack_init(struct net *net)
1576 1576
1577 if (net_eq(net, &init_net)) { 1577 if (net_eq(net, &init_net)) {
1578 /* For use by REJECT target */ 1578 /* For use by REJECT target */
1579 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); 1579 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1580 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); 1580 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1581 1581
1582 /* Howto get NAT offsets */ 1582 /* Howto get NAT offsets */
1583 rcu_assign_pointer(nf_ct_nat_offset, NULL); 1583 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1584 } 1584 }
1585 return 0; 1585 return 0;
1586 1586
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 63a1b915a7e4..3add99439059 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -94,7 +94,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
94 ret = -EBUSY; 94 ret = -EBUSY;
95 goto out_unlock; 95 goto out_unlock;
96 } 96 }
97 rcu_assign_pointer(nf_conntrack_event_cb, new); 97 RCU_INIT_POINTER(nf_conntrack_event_cb, new);
98 mutex_unlock(&nf_ct_ecache_mutex); 98 mutex_unlock(&nf_ct_ecache_mutex);
99 return ret; 99 return ret;
100 100
@@ -112,7 +112,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
112 notify = rcu_dereference_protected(nf_conntrack_event_cb, 112 notify = rcu_dereference_protected(nf_conntrack_event_cb,
113 lockdep_is_held(&nf_ct_ecache_mutex)); 113 lockdep_is_held(&nf_ct_ecache_mutex));
114 BUG_ON(notify != new); 114 BUG_ON(notify != new);
115 rcu_assign_pointer(nf_conntrack_event_cb, NULL); 115 RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
116 mutex_unlock(&nf_ct_ecache_mutex); 116 mutex_unlock(&nf_ct_ecache_mutex);
117} 117}
118EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 118EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -129,7 +129,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
129 ret = -EBUSY; 129 ret = -EBUSY;
130 goto out_unlock; 130 goto out_unlock;
131 } 131 }
132 rcu_assign_pointer(nf_expect_event_cb, new); 132 RCU_INIT_POINTER(nf_expect_event_cb, new);
133 mutex_unlock(&nf_ct_ecache_mutex); 133 mutex_unlock(&nf_ct_ecache_mutex);
134 return ret; 134 return ret;
135 135
@@ -147,7 +147,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
147 notify = rcu_dereference_protected(nf_expect_event_cb, 147 notify = rcu_dereference_protected(nf_expect_event_cb,
148 lockdep_is_held(&nf_ct_ecache_mutex)); 148 lockdep_is_held(&nf_ct_ecache_mutex));
149 BUG_ON(notify != new); 149 BUG_ON(notify != new);
150 rcu_assign_pointer(nf_expect_event_cb, NULL); 150 RCU_INIT_POINTER(nf_expect_event_cb, NULL);
151 mutex_unlock(&nf_ct_ecache_mutex); 151 mutex_unlock(&nf_ct_ecache_mutex);
152} 152}
153EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 153EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 05ecdc281a53..4605c947dcc4 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
169 before updating alloc_size */ 169 before updating alloc_size */
170 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align) 170 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
171 + type->len; 171 + type->len;
172 rcu_assign_pointer(nf_ct_ext_types[type->id], type); 172 RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
173 update_alloc_size(type); 173 update_alloc_size(type);
174out: 174out:
175 mutex_unlock(&nf_ct_ext_type_mutex); 175 mutex_unlock(&nf_ct_ext_type_mutex);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register);
181void nf_ct_extend_unregister(struct nf_ct_ext_type *type) 181void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
182{ 182{
183 mutex_lock(&nf_ct_ext_type_mutex); 183 mutex_lock(&nf_ct_ext_type_mutex);
184 rcu_assign_pointer(nf_ct_ext_types[type->id], NULL); 184 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
185 update_alloc_size(type); 185 update_alloc_size(type);
186 mutex_unlock(&nf_ct_ext_type_mutex); 186 mutex_unlock(&nf_ct_ext_type_mutex);
187 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 187 rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 1bdfea357955..93c4bdbfc1ae 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
131 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 131 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
132 if (helper == NULL) { 132 if (helper == NULL) {
133 if (help) 133 if (help)
134 rcu_assign_pointer(help->helper, NULL); 134 RCU_INIT_POINTER(help->helper, NULL);
135 goto out; 135 goto out;
136 } 136 }
137 137
@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
145 memset(&help->help, 0, sizeof(help->help)); 145 memset(&help->help, 0, sizeof(help->help));
146 } 146 }
147 147
148 rcu_assign_pointer(help->helper, helper); 148 RCU_INIT_POINTER(help->helper, helper);
149out: 149out:
150 return ret; 150 return ret;
151} 151}
@@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
162 lockdep_is_held(&nf_conntrack_lock) 162 lockdep_is_held(&nf_conntrack_lock)
163 ) == me) { 163 ) == me) {
164 nf_conntrack_event(IPCT_HELPER, ct); 164 nf_conntrack_event(IPCT_HELPER, ct);
165 rcu_assign_pointer(help->helper, NULL); 165 RCU_INIT_POINTER(help->helper, NULL);
166 } 166 }
167 return 0; 167 return 0;
168} 168}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7dec88a1755b..e58aa9b1fe8a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1125 if (help && help->helper) { 1125 if (help && help->helper) {
1126 /* we had a helper before ... */ 1126 /* we had a helper before ... */
1127 nf_ct_remove_expectations(ct); 1127 nf_ct_remove_expectations(ct);
1128 rcu_assign_pointer(help->helper, NULL); 1128 RCU_INIT_POINTER(help->helper, NULL);
1129 } 1129 }
1130 1130
1131 return 0; 1131 return 0;
@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1163 return -EOPNOTSUPP; 1163 return -EOPNOTSUPP;
1164 } 1164 }
1165 1165
1166 rcu_assign_pointer(help->helper, helper); 1166 RCU_INIT_POINTER(help->helper, helper);
1167 1167
1168 return 0; 1168 return 0;
1169} 1169}
@@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1386 } 1386 }
1387 1387
1388 /* not in hash table yet so not strictly necessary */ 1388 /* not in hash table yet so not strictly necessary */
1389 rcu_assign_pointer(help->helper, helper); 1389 RCU_INIT_POINTER(help->helper, helper);
1390 } 1390 }
1391 } else { 1391 } else {
1392 /* try an implicit helper assignation */ 1392 /* try an implicit helper assignation */
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index cf616e55ca41..d69facdd9a7a 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct,
241 nf_ct_refresh_acct(ct, ctinfo, skb, 241 nf_ct_refresh_acct(ct, ctinfo, skb,
242 ct->proto.gre.stream_timeout); 242 ct->proto.gre.stream_timeout);
243 /* Also, more likely to be important, and not a probe. */ 243 /* Also, more likely to be important, and not a probe. */
244 set_bit(IPS_ASSURED_BIT, &ct->status); 244 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
245 nf_conntrack_event_cache(IPCT_ASSURED, ct); 245 nf_conntrack_event_cache(IPCT_ASSURED, ct);
246 } else 246 } else
247 nf_ct_refresh_acct(ct, ctinfo, skb, 247 nf_ct_refresh_acct(ct, ctinfo, skb,
248 ct->proto.gre.timeout); 248 ct->proto.gre.timeout);
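
The gre_packet() change is about event duplication: set_bit() plus an unconditional nf_conntrack_event_cache() would record IPCT_ASSURED for every packet that refreshes the connection, while test_and_set_bit() returns the bit's previous value, so the event now fires only on the first 0-to-1 transition. The same idiom in isolation, with a hypothetical flag word:

#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long status;		/* hypothetical flag word */

static void mark_assured(void)
{
	/* test_and_set_bit() returns the old value: non-zero means the
	 * bit was already set, so the body runs exactly once */
	if (!test_and_set_bit(0, &status))
		pr_info("became assured\n");
}
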
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20714edf6cd2..ce0c406f58a8 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
55 llog = rcu_dereference_protected(nf_loggers[pf], 55 llog = rcu_dereference_protected(nf_loggers[pf],
56 lockdep_is_held(&nf_log_mutex)); 56 lockdep_is_held(&nf_log_mutex));
57 if (llog == NULL) 57 if (llog == NULL)
58 rcu_assign_pointer(nf_loggers[pf], logger); 58 RCU_INIT_POINTER(nf_loggers[pf], logger);
59 } 59 }
60 60
61 mutex_unlock(&nf_log_mutex); 61 mutex_unlock(&nf_log_mutex);
@@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger)
74 c_logger = rcu_dereference_protected(nf_loggers[i], 74 c_logger = rcu_dereference_protected(nf_loggers[i],
75 lockdep_is_held(&nf_log_mutex)); 75 lockdep_is_held(&nf_log_mutex));
76 if (c_logger == logger) 76 if (c_logger == logger)
77 rcu_assign_pointer(nf_loggers[i], NULL); 77 RCU_INIT_POINTER(nf_loggers[i], NULL);
78 list_del(&logger->list[i]); 78 list_del(&logger->list[i]);
79 } 79 }
80 mutex_unlock(&nf_log_mutex); 80 mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
92 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
93 return -ENOENT; 93 return -ENOENT;
94 } 94 }
95 rcu_assign_pointer(nf_loggers[pf], logger); 95 RCU_INIT_POINTER(nf_loggers[pf], logger);
96 mutex_unlock(&nf_log_mutex); 96 mutex_unlock(&nf_log_mutex);
97 return 0; 97 return 0;
98} 98}
@@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf)
103 if (pf >= ARRAY_SIZE(nf_loggers)) 103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return; 104 return;
105 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
106 rcu_assign_pointer(nf_loggers[pf], NULL); 106 RCU_INIT_POINTER(nf_loggers[pf], NULL);
107 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
108} 108}
109EXPORT_SYMBOL(nf_log_unbind_pf); 109EXPORT_SYMBOL(nf_log_unbind_pf);
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
250 mutex_unlock(&nf_log_mutex); 250 mutex_unlock(&nf_log_mutex);
251 return -ENOENT; 251 return -ENOENT;
252 } 252 }
253 rcu_assign_pointer(nf_loggers[tindex], logger); 253 RCU_INIT_POINTER(nf_loggers[tindex], logger);
254 mutex_unlock(&nf_log_mutex); 254 mutex_unlock(&nf_log_mutex);
255 } else { 255 } else {
256 mutex_lock(&nf_log_mutex); 256 mutex_lock(&nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 84d0fd47636a..99ffd2885088 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
40 else if (old) 40 else if (old)
41 ret = -EBUSY; 41 ret = -EBUSY;
42 else { 42 else {
43 rcu_assign_pointer(queue_handler[pf], qh); 43 RCU_INIT_POINTER(queue_handler[pf], qh);
44 ret = 0; 44 ret = 0;
45 } 45 }
46 mutex_unlock(&queue_handler_mutex); 46 mutex_unlock(&queue_handler_mutex);
@@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
65 return -EINVAL; 65 return -EINVAL;
66 } 66 }
67 67
68 rcu_assign_pointer(queue_handler[pf], NULL); 68 RCU_INIT_POINTER(queue_handler[pf], NULL);
69 mutex_unlock(&queue_handler_mutex); 69 mutex_unlock(&queue_handler_mutex);
70 70
71 synchronize_rcu(); 71 synchronize_rcu();
@@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
84 queue_handler[pf], 84 queue_handler[pf],
85 lockdep_is_held(&queue_handler_mutex) 85 lockdep_is_held(&queue_handler_mutex)
86 ) == qh) 86 ) == qh)
87 rcu_assign_pointer(queue_handler[pf], NULL); 87 RCU_INIT_POINTER(queue_handler[pf], NULL);
88 } 88 }
89 mutex_unlock(&queue_handler_mutex); 89 mutex_unlock(&queue_handler_mutex);
90 90
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 1905976b5135..c879c1a2370e 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
59 nfnl_unlock(); 59 nfnl_unlock();
60 return -EBUSY; 60 return -EBUSY;
61 } 61 }
62 rcu_assign_pointer(subsys_table[n->subsys_id], n); 62 RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
63 nfnl_unlock(); 63 nfnl_unlock();
64 64
65 return 0; 65 return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
210 if (!nfnl) 210 if (!nfnl)
211 return -ENOMEM; 211 return -ENOMEM;
212 net->nfnl_stash = nfnl; 212 net->nfnl_stash = nfnl;
213 rcu_assign_pointer(net->nfnl, nfnl); 213 RCU_INIT_POINTER(net->nfnl, nfnl);
214 return 0; 214 return 0;
215} 215}
216 216
@@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
219 struct net *net; 219 struct net *net;
220 220
221 list_for_each_entry(net, net_exit_list, exit_list) 221 list_for_each_entry(net, net_exit_list, exit_list)
222 rcu_assign_pointer(net->nfnl, NULL); 222 RCU_INIT_POINTER(net->nfnl, NULL);
223 synchronize_net(); 223 synchronize_net();
224 list_for_each_entry(net, net_exit_list, exit_list) 224 list_for_each_entry(net, net_exit_list, exit_list)
225 netlink_kernel_release(net->nfnl_stash); 225 netlink_kernel_release(net->nfnl_stash);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index b0869fe3633b..71441b934ffd 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -776,12 +776,11 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
776 776
777 size = sizeof(void **) * nr_cpu_ids; 777 size = sizeof(void **) * nr_cpu_ids;
778 if (size > PAGE_SIZE) 778 if (size > PAGE_SIZE)
779 i->jumpstack = vmalloc(size); 779 i->jumpstack = vzalloc(size);
780 else 780 else
781 i->jumpstack = kmalloc(size, GFP_KERNEL); 781 i->jumpstack = kzalloc(size, GFP_KERNEL);
782 if (i->jumpstack == NULL) 782 if (i->jumpstack == NULL)
783 return -ENOMEM; 783 return -ENOMEM;
784 memset(i->jumpstack, 0, size);
785 784
786 i->stacksize *= xt_jumpstack_multiplier; 785 i->stacksize *= xt_jumpstack_multiplier;
787 size = sizeof(void *) * i->stacksize; 786 size = sizeof(void *) * i->stacksize;
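
The x_tables hunk is a straight simplification: vzalloc() and kzalloc() hand back pre-zeroed memory, so the separate memset() goes away. The size-based allocator split it keeps could be written as a helper like the sketch below (hypothetical, not in the tree); callers must still pair vfree() or kfree() with whichever branch was taken:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_zeroed_by_size(size_t size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);		/* large: vmalloc space */
	return kzalloc(size, GFP_KERNEL);	/* small: slab allocator */
}
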
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7d8083cde34f..3f905e5370c2 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
282 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); 282 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
283 283
284 spin_lock(&netlbl_domhsh_lock); 284 spin_lock(&netlbl_domhsh_lock);
285 rcu_assign_pointer(netlbl_domhsh, hsh_tbl); 285 RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
286 spin_unlock(&netlbl_domhsh_lock); 286 spin_unlock(&netlbl_domhsh_lock);
287 287
288 return 0; 288 return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
330 &rcu_dereference(netlbl_domhsh)->tbl[bkt]); 330 &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
331 } else { 331 } else {
332 INIT_LIST_HEAD(&entry->list); 332 INIT_LIST_HEAD(&entry->list);
333 rcu_assign_pointer(netlbl_domhsh_def, entry); 333 RCU_INIT_POINTER(netlbl_domhsh_def, entry);
334 } 334 }
335 335
336 if (entry->type == NETLBL_NLTYPE_ADDRSELECT) { 336 if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
@@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
451 if (entry != rcu_dereference(netlbl_domhsh_def)) 451 if (entry != rcu_dereference(netlbl_domhsh_def))
452 list_del_rcu(&entry->list); 452 list_del_rcu(&entry->list);
453 else 453 else
454 rcu_assign_pointer(netlbl_domhsh_def, NULL); 454 RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
455 } else 455 } else
456 ret_val = -ENOENT; 456 ret_val = -ENOENT;
457 spin_unlock(&netlbl_domhsh_lock); 457 spin_unlock(&netlbl_domhsh_lock);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e6e823656f9d..e251c2c88521 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
354 INIT_LIST_HEAD(&iface->list); 354 INIT_LIST_HEAD(&iface->list);
355 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) 355 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
356 goto add_iface_failure; 356 goto add_iface_failure;
357 rcu_assign_pointer(netlbl_unlhsh_def, iface); 357 RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
358 } 358 }
359 spin_unlock(&netlbl_unlhsh_lock); 359 spin_unlock(&netlbl_unlhsh_lock);
360 360
@@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
621 if (iface->ifindex > 0) 621 if (iface->ifindex > 0)
622 list_del_rcu(&iface->list); 622 list_del_rcu(&iface->list);
623 else 623 else
624 rcu_assign_pointer(netlbl_unlhsh_def, NULL); 624 RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
625 spin_unlock(&netlbl_unlhsh_lock); 625 spin_unlock(&netlbl_unlhsh_lock);
626 626
627 call_rcu(&iface->rcu, netlbl_unlhsh_free_iface); 627 call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size)
1449 1449
1450 rcu_read_lock(); 1450 rcu_read_lock();
1451 spin_lock(&netlbl_unlhsh_lock); 1451 spin_lock(&netlbl_unlhsh_lock);
1452 rcu_assign_pointer(netlbl_unlhsh, hsh_tbl); 1452 RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
1453 spin_unlock(&netlbl_unlhsh_lock); 1453 spin_unlock(&netlbl_unlhsh_lock);
1454 rcu_read_unlock(); 1454 rcu_read_unlock();
1455 1455
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a4db0211da0..1201b6d4183d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1324,10 +1324,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1324 if (msg->msg_flags&MSG_OOB) 1324 if (msg->msg_flags&MSG_OOB)
1325 return -EOPNOTSUPP; 1325 return -EOPNOTSUPP;
1326 1326
1327 if (NULL == siocb->scm) { 1327 if (NULL == siocb->scm)
1328 siocb->scm = &scm; 1328 siocb->scm = &scm;
1329 memset(&scm, 0, sizeof(scm)); 1329
1330 }
1331 err = scm_send(sock, msg, siocb->scm); 1330 err = scm_send(sock, msg, siocb->scm);
1332 if (err < 0) 1331 if (err < 0)
1333 return err; 1332 return err;
@@ -1578,7 +1577,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1578 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); 1577 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1579 if (!new) 1578 if (!new)
1580 return -ENOMEM; 1579 return -ENOMEM;
1581 old = rcu_dereference_raw(tbl->listeners); 1580 old = rcu_dereference_protected(tbl->listeners, 1);
1582 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); 1581 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1583 rcu_assign_pointer(tbl->listeners, new); 1582 rcu_assign_pointer(tbl->listeners, new);
1584 1583
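
The listeners change replaces rcu_dereference_raw(), which documents nothing, with rcu_dereference_protected(): its second argument states the condition under which the access needs no rcu_read_lock(), and lockdep can check it. Passing the constant 1, as above, asserts update-side exclusion unconditionally; where a real lock guards the pointer, the self-documenting form looks like this sketch (the cfg names are hypothetical):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct cfg { int n; };

static DEFINE_MUTEX(cfg_lock);		/* hypothetical update-side lock */
static struct cfg __rcu *active_cfg;

/* caller must hold cfg_lock */
static struct cfg *cfg_deref_locked(void)
{
	return rcu_dereference_protected(active_cfg,
					 lockdep_is_held(&cfg_lock));
}
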
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 33e095b124b3..58cddadf8e8e 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -13,4 +13,6 @@ menuconfig NFC
13 To compile this support as a module, choose M here: the module will 13 To compile this support as a module, choose M here: the module will
14 be called nfc. 14 be called nfc.
15 15
16source "net/nfc/nci/Kconfig"
17
16source "drivers/nfc/Kconfig" 18source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 16250c353851..fbb550f2377b 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -3,5 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_NFC) += nfc.o 5obj-$(CONFIG_NFC) += nfc.o
6obj-$(CONFIG_NFC_NCI) += nci/
6 7
7nfc-objs := core.o netlink.o af_nfc.o rawsock.o 8nfc-objs := core.o netlink.o af_nfc.o rawsock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index b6fd4e1f2057..47e02c1b8c02 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -53,6 +53,80 @@ int nfc_printk(const char *level, const char *format, ...)
53EXPORT_SYMBOL(nfc_printk); 53EXPORT_SYMBOL(nfc_printk);
54 54
55/** 55/**
56 * nfc_dev_up - turn on the NFC device
57 *
58 * @dev: The nfc device to be turned on
59 *
60 * The device remains up until the nfc_dev_down function is called.
61 */
62int nfc_dev_up(struct nfc_dev *dev)
63{
64 int rc = 0;
65
66 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
67
68 device_lock(&dev->dev);
69
70 if (!device_is_registered(&dev->dev)) {
71 rc = -ENODEV;
72 goto error;
73 }
74
75 if (dev->dev_up) {
76 rc = -EALREADY;
77 goto error;
78 }
79
80 if (dev->ops->dev_up)
81 rc = dev->ops->dev_up(dev);
82
83 if (!rc)
84 dev->dev_up = true;
85
86error:
87 device_unlock(&dev->dev);
88 return rc;
89}
90
91/**
92 * nfc_dev_down - turn off the NFC device
93 *
94 * @dev: The nfc device to be turned off
95 */
96int nfc_dev_down(struct nfc_dev *dev)
97{
98 int rc = 0;
99
100 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
101
102 device_lock(&dev->dev);
103
104 if (!device_is_registered(&dev->dev)) {
105 rc = -ENODEV;
106 goto error;
107 }
108
109 if (!dev->dev_up) {
110 rc = -EALREADY;
111 goto error;
112 }
113
114 if (dev->polling || dev->remote_activated) {
115 rc = -EBUSY;
116 goto error;
117 }
118
119 if (dev->ops->dev_down)
120 dev->ops->dev_down(dev);
121
122 dev->dev_up = false;
123
124error:
125 device_unlock(&dev->dev);
126 return rc;
127}
128
129/**
56 * nfc_start_poll - start polling for nfc targets 130 * nfc_start_poll - start polling for nfc targets
57 * 131 *
58 * @dev: The nfc device that must start polling 132 * @dev: The nfc device that must start polling
@@ -144,6 +218,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
144 } 218 }
145 219
146 rc = dev->ops->activate_target(dev, target_idx, protocol); 220 rc = dev->ops->activate_target(dev, target_idx, protocol);
221 if (!rc)
222 dev->remote_activated = true;
147 223
148error: 224error:
149 device_unlock(&dev->dev); 225 device_unlock(&dev->dev);
@@ -170,6 +246,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
170 } 246 }
171 247
172 dev->ops->deactivate_target(dev, target_idx); 248 dev->ops->deactivate_target(dev, target_idx);
249 dev->remote_activated = false;
173 250
174error: 251error:
175 device_unlock(&dev->dev); 252 device_unlock(&dev->dev);
@@ -322,7 +399,9 @@ struct nfc_dev *nfc_get_device(unsigned idx)
322 * @supported_protocols: NFC protocols supported by the device 399 * @supported_protocols: NFC protocols supported by the device
323 */ 400 */
324struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, 401struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
325 u32 supported_protocols) 402 u32 supported_protocols,
403 int tx_headroom,
404 int tx_tailroom)
326{ 405{
327 static atomic_t dev_no = ATOMIC_INIT(0); 406 static atomic_t dev_no = ATOMIC_INIT(0);
328 struct nfc_dev *dev; 407 struct nfc_dev *dev;
@@ -345,6 +424,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
345 424
346 dev->ops = ops; 425 dev->ops = ops;
347 dev->supported_protocols = supported_protocols; 426 dev->supported_protocols = supported_protocols;
427 dev->tx_headroom = tx_headroom;
428 dev->tx_tailroom = tx_tailroom;
348 429
349 spin_lock_init(&dev->targets_lock); 430 spin_lock_init(&dev->targets_lock);
350 nfc_genl_data_init(&dev->genl_data); 431 nfc_genl_data_init(&dev->genl_data);
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
new file mode 100644
index 000000000000..decdc49b26d8
--- /dev/null
+++ b/net/nfc/nci/Kconfig
@@ -0,0 +1,10 @@
1config NFC_NCI
2 depends on NFC && EXPERIMENTAL
3 tristate "NCI protocol support (EXPERIMENTAL)"
4 default n
5 help
6 NCI (NFC Controller Interface) is a communication protocol between
7 an NFC Controller (NFCC) and a Device Host (DH).
8
9 Say Y here to compile NCI support into the kernel or say M to
10 compile it as a module (nci).
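
Since the new entry depends on NFC && EXPERIMENTAL, a .config fragment that builds the whole stack as modules would look roughly like:

CONFIG_EXPERIMENTAL=y
CONFIG_NFC=m
CONFIG_NFC_NCI=m
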
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
new file mode 100644
index 000000000000..cdb3a2e44471
--- /dev/null
+++ b/net/nfc/nci/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Linux NFC NCI layer.
3#
4
5obj-$(CONFIG_NFC_NCI) += nci.o
6
7nci-objs := core.o data.o lib.o ntf.o rsp.o
\ No newline at end of file
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
new file mode 100644
index 000000000000..4047e29acb3b
--- /dev/null
+++ b/net/nfc/nci/core.c
@@ -0,0 +1,797 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_core.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/workqueue.h>
30#include <linux/completion.h>
31#include <linux/sched.h>
32#include <linux/bitops.h>
33#include <linux/skbuff.h>
34
35#include "../nfc.h"
36#include <net/nfc/nci.h>
37#include <net/nfc/nci_core.h>
38#include <linux/nfc.h>
39
40static void nci_cmd_work(struct work_struct *work);
41static void nci_rx_work(struct work_struct *work);
42static void nci_tx_work(struct work_struct *work);
43
44/* ---- NCI requests ---- */
45
46void nci_req_complete(struct nci_dev *ndev, int result)
47{
48 if (ndev->req_status == NCI_REQ_PEND) {
49 ndev->req_result = result;
50 ndev->req_status = NCI_REQ_DONE;
51 complete(&ndev->req_completion);
52 }
53}
54
55static void nci_req_cancel(struct nci_dev *ndev, int err)
56{
57 if (ndev->req_status == NCI_REQ_PEND) {
58 ndev->req_result = err;
59 ndev->req_status = NCI_REQ_CANCELED;
60 complete(&ndev->req_completion);
61 }
62}
63
64/* Execute request and wait for completion. */
65static int __nci_request(struct nci_dev *ndev,
66 void (*req)(struct nci_dev *ndev, unsigned long opt),
67 unsigned long opt,
68 __u32 timeout)
69{
70 int rc = 0;
71 long completion_rc;
72
73 ndev->req_status = NCI_REQ_PEND;
74
75 init_completion(&ndev->req_completion);
76 req(ndev, opt);
77 completion_rc = wait_for_completion_interruptible_timeout(
78 &ndev->req_completion,
79 timeout);
80
81 nfc_dbg("wait_for_completion return %ld", completion_rc);
82
83 if (completion_rc > 0) {
84 switch (ndev->req_status) {
85 case NCI_REQ_DONE:
86 rc = nci_to_errno(ndev->req_result);
87 break;
88
89 case NCI_REQ_CANCELED:
90 rc = -ndev->req_result;
91 break;
92
93 default:
94 rc = -ETIMEDOUT;
95 break;
96 }
97 } else {
98 nfc_err("wait_for_completion_interruptible_timeout failed %ld",
99 completion_rc);
100
101 rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
102 }
103
104 ndev->req_status = ndev->req_result = 0;
105
106 return rc;
107}
108
109static inline int nci_request(struct nci_dev *ndev,
110 void (*req)(struct nci_dev *ndev, unsigned long opt),
111 unsigned long opt, __u32 timeout)
112{
113 int rc;
114
115 if (!test_bit(NCI_UP, &ndev->flags))
116 return -ENETDOWN;
117
118 /* Serialize all requests */
119 mutex_lock(&ndev->req_lock);
120 rc = __nci_request(ndev, req, opt, timeout);
121 mutex_unlock(&ndev->req_lock);
122
123 return rc;
124}
125
126static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
127{
128 nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
129}
130
131static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
132{
133 nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
134}
135
136static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
137{
138 struct nci_core_conn_create_cmd conn_cmd;
139 struct nci_rf_disc_map_cmd cmd;
140 struct disc_map_config *cfg = cmd.mapping_configs;
141 __u8 *num = &cmd.num_mapping_configs;
142 int i;
143
144 /* create static rf connection */
145 conn_cmd.target_handle = 0;
146 conn_cmd.num_target_specific_params = 0;
147 nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
148
149 /* set rf mapping configurations */
150 *num = 0;
151
152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
154 if (ndev->supported_rf_interfaces[i] ==
155 NCI_RF_INTERFACE_ISO_DEP) {
156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
157 cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
158 cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
159 (*num)++;
160 } else if (ndev->supported_rf_interfaces[i] ==
161 NCI_RF_INTERFACE_NFC_DEP) {
162 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
163 cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
164 cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
165 (*num)++;
166 }
167
168 if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
169 break;
170 }
171
172 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
173 (1 + ((*num)*sizeof(struct disc_map_config))),
174 &cmd);
175}
176
177static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
178{
179 struct nci_rf_disc_cmd cmd;
180 __u32 protocols = opt;
181
182 cmd.num_disc_configs = 0;
183
184 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
185 (protocols & NFC_PROTO_JEWEL_MASK
186 || protocols & NFC_PROTO_MIFARE_MASK
187 || protocols & NFC_PROTO_ISO14443_MASK
188 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
189 cmd.disc_configs[cmd.num_disc_configs].type =
190 NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
191 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
192 cmd.num_disc_configs++;
193 }
194
195 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
196 (protocols & NFC_PROTO_ISO14443_MASK)) {
197 cmd.disc_configs[cmd.num_disc_configs].type =
198 NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
199 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
200 cmd.num_disc_configs++;
201 }
202
203 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
204 (protocols & NFC_PROTO_FELICA_MASK
205 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
206 cmd.disc_configs[cmd.num_disc_configs].type =
207 NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
208 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
209 cmd.num_disc_configs++;
210 }
211
212 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
213 (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
214 &cmd);
215}
216
217static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
218{
219 struct nci_rf_deactivate_cmd cmd;
220
221 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
222
223 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
224 sizeof(struct nci_rf_deactivate_cmd),
225 &cmd);
226}
227
228static int nci_open_device(struct nci_dev *ndev)
229{
230 int rc = 0;
231
232 mutex_lock(&ndev->req_lock);
233
234 if (test_bit(NCI_UP, &ndev->flags)) {
235 rc = -EALREADY;
236 goto done;
237 }
238
239 if (ndev->ops->open(ndev)) {
240 rc = -EIO;
241 goto done;
242 }
243
244 atomic_set(&ndev->cmd_cnt, 1);
245
246 set_bit(NCI_INIT, &ndev->flags);
247
248 rc = __nci_request(ndev, nci_reset_req, 0,
249 msecs_to_jiffies(NCI_RESET_TIMEOUT));
250
251 if (!rc) {
252 rc = __nci_request(ndev, nci_init_req, 0,
253 msecs_to_jiffies(NCI_INIT_TIMEOUT));
254 }
255
256 if (!rc) {
257 rc = __nci_request(ndev, nci_init_complete_req, 0,
258 msecs_to_jiffies(NCI_INIT_TIMEOUT));
259 }
260
261 clear_bit(NCI_INIT, &ndev->flags);
262
263 if (!rc) {
264 set_bit(NCI_UP, &ndev->flags);
265 } else {
266 /* Init failed, cleanup */
267 skb_queue_purge(&ndev->cmd_q);
268 skb_queue_purge(&ndev->rx_q);
269 skb_queue_purge(&ndev->tx_q);
270
271 ndev->ops->close(ndev);
272 ndev->flags = 0;
273 }
274
275done:
276 mutex_unlock(&ndev->req_lock);
277 return rc;
278}
279
280static int nci_close_device(struct nci_dev *ndev)
281{
282 nci_req_cancel(ndev, ENODEV);
283 mutex_lock(&ndev->req_lock);
284
285 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
286 del_timer_sync(&ndev->cmd_timer);
287 mutex_unlock(&ndev->req_lock);
288 return 0;
289 }
290
291 /* Drop RX and TX queues */
292 skb_queue_purge(&ndev->rx_q);
293 skb_queue_purge(&ndev->tx_q);
294
295 /* Flush RX and TX wq */
296 flush_workqueue(ndev->rx_wq);
297 flush_workqueue(ndev->tx_wq);
298
299 /* Reset device */
300 skb_queue_purge(&ndev->cmd_q);
301 atomic_set(&ndev->cmd_cnt, 1);
302
303 set_bit(NCI_INIT, &ndev->flags);
304 __nci_request(ndev, nci_reset_req, 0,
305 msecs_to_jiffies(NCI_RESET_TIMEOUT));
306 clear_bit(NCI_INIT, &ndev->flags);
307
308 /* Flush cmd wq */
309 flush_workqueue(ndev->cmd_wq);
310
311 /* After this point our queues are empty
312 * and no work is scheduled. */
313 ndev->ops->close(ndev);
314
315 /* Clear flags */
316 ndev->flags = 0;
317
318 mutex_unlock(&ndev->req_lock);
319
320 return 0;
321}
322
323/* NCI command timer function */
324static void nci_cmd_timer(unsigned long arg)
325{
326 struct nci_dev *ndev = (void *) arg;
327
328 nfc_dbg("entry");
329
330 atomic_set(&ndev->cmd_cnt, 1);
331 queue_work(ndev->cmd_wq, &ndev->cmd_work);
332}
333
334static int nci_dev_up(struct nfc_dev *nfc_dev)
335{
336 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
337
338 nfc_dbg("entry");
339
340 return nci_open_device(ndev);
341}
342
343static int nci_dev_down(struct nfc_dev *nfc_dev)
344{
345 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
346
347 nfc_dbg("entry");
348
349 return nci_close_device(ndev);
350}
351
352static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
353{
354 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
355 int rc;
356
357 nfc_dbg("entry");
358
359 if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
360 nfc_err("unable to start poll, since poll is already active");
361 return -EBUSY;
362 }
363
364 if (ndev->target_active_prot) {
365 nfc_err("there is an active target");
366 return -EBUSY;
367 }
368
369 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
370 nfc_dbg("target is active, implicitly deactivate...");
371
372 rc = nci_request(ndev, nci_rf_deactivate_req, 0,
373 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
374 if (rc)
375 return -EBUSY;
376 }
377
378 rc = nci_request(ndev, nci_rf_discover_req, protocols,
379 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
380
381 if (!rc)
382 ndev->poll_prots = protocols;
383
384 return rc;
385}
386
387static void nci_stop_poll(struct nfc_dev *nfc_dev)
388{
389 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
390
391 nfc_dbg("entry");
392
393 if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
394 nfc_err("unable to stop poll, since poll is not active");
395 return;
396 }
397
398 nci_request(ndev, nci_rf_deactivate_req, 0,
399 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
400}
401
402static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
403 __u32 protocol)
404{
405 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
406
407 nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
408
409 if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
410 nfc_err("there is no available target to activate");
411 return -EINVAL;
412 }
413
414 if (ndev->target_active_prot) {
415 nfc_err("there is already an active target");
416 return -EBUSY;
417 }
418
419 if (!(ndev->target_available_prots & (1 << protocol))) {
420 nfc_err("target does not support the requested protocol 0x%x",
421 protocol);
422 return -EINVAL;
423 }
424
425 ndev->target_active_prot = protocol;
426 ndev->target_available_prots = 0;
427
428 return 0;
429}
430
431static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
432{
433 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
434
435 nfc_dbg("entry, target_idx %d", target_idx);
436
437 if (!ndev->target_active_prot) {
438 nfc_err("unable to deactivate target, no active target");
439 return;
440 }
441
442 ndev->target_active_prot = 0;
443
444 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
445 nci_request(ndev, nci_rf_deactivate_req, 0,
446 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
447 }
448}
449
450static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
451 struct sk_buff *skb,
452 data_exchange_cb_t cb,
453 void *cb_context)
454{
455 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
456 int rc;
457
458 nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
459
460 if (!ndev->target_active_prot) {
461 nfc_err("unable to exchange data, no active target");
462 return -EINVAL;
463 }
464
465 if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
466 return -EBUSY;
467
468 /* store cb and context to be used on receiving data */
469 ndev->data_exchange_cb = cb;
470 ndev->data_exchange_cb_context = cb_context;
471
472 rc = nci_send_data(ndev, ndev->conn_id, skb);
473 if (rc)
474 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
475
476 return rc;
477}
478
479static struct nfc_ops nci_nfc_ops = {
480 .dev_up = nci_dev_up,
481 .dev_down = nci_dev_down,
482 .start_poll = nci_start_poll,
483 .stop_poll = nci_stop_poll,
484 .activate_target = nci_activate_target,
485 .deactivate_target = nci_deactivate_target,
486 .data_exchange = nci_data_exchange,
487};
488
489/* ---- Interface to NCI drivers ---- */
490
491/**
492 * nci_allocate_device - allocate a new nci device
493 *
494 * @ops: device operations
495 * @supported_protocols: NFC protocols supported by the device
496 */
497struct nci_dev *nci_allocate_device(struct nci_ops *ops,
498 __u32 supported_protocols,
499 int tx_headroom,
500 int tx_tailroom)
501{
502 struct nci_dev *ndev;
503
504 nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
505
506 if (!ops->open || !ops->close || !ops->send)
507 return NULL;
508
509 if (!supported_protocols)
510 return NULL;
511
512 ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
513 if (!ndev)
514 return NULL;
515
516 ndev->ops = ops;
517 ndev->tx_headroom = tx_headroom;
518 ndev->tx_tailroom = tx_tailroom;
519
520 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
521 supported_protocols,
522 tx_headroom + NCI_DATA_HDR_SIZE,
523 tx_tailroom);
524 if (!ndev->nfc_dev)
525 goto free_exit;
526
527 nfc_set_drvdata(ndev->nfc_dev, ndev);
528
529 return ndev;
530
531free_exit:
532 kfree(ndev);
533 return NULL;
534}
535EXPORT_SYMBOL(nci_allocate_device);
536
537/**
538 * nci_free_device - deallocate nci device
539 *
540 * @ndev: The nci device to deallocate
541 */
542void nci_free_device(struct nci_dev *ndev)
543{
544 nfc_dbg("entry");
545
546 nfc_free_device(ndev->nfc_dev);
547 kfree(ndev);
548}
549EXPORT_SYMBOL(nci_free_device);
550
551/**
552 * nci_register_device - register an nci device in the nfc subsystem
553 *
554 * @ndev: The nci device to register
555 */
556int nci_register_device(struct nci_dev *ndev)
557{
558 int rc;
559 struct device *dev = &ndev->nfc_dev->dev;
560 char name[32];
561
562 nfc_dbg("entry");
563
564 rc = nfc_register_device(ndev->nfc_dev);
565 if (rc)
566 goto exit;
567
568 ndev->flags = 0;
569
570 INIT_WORK(&ndev->cmd_work, nci_cmd_work);
571 snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
572 ndev->cmd_wq = create_singlethread_workqueue(name);
573 if (!ndev->cmd_wq) {
574 rc = -ENOMEM;
575 goto unreg_exit;
576 }
577
578 INIT_WORK(&ndev->rx_work, nci_rx_work);
579 snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
580 ndev->rx_wq = create_singlethread_workqueue(name);
581 if (!ndev->rx_wq) {
582 rc = -ENOMEM;
583 goto destroy_cmd_wq_exit;
584 }
585
586 INIT_WORK(&ndev->tx_work, nci_tx_work);
587 snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
588 ndev->tx_wq = create_singlethread_workqueue(name);
589 if (!ndev->tx_wq) {
590 rc = -ENOMEM;
591 goto destroy_rx_wq_exit;
592 }
593
594 skb_queue_head_init(&ndev->cmd_q);
595 skb_queue_head_init(&ndev->rx_q);
596 skb_queue_head_init(&ndev->tx_q);
597
598 setup_timer(&ndev->cmd_timer, nci_cmd_timer,
599 (unsigned long) ndev);
600
601 mutex_init(&ndev->req_lock);
602
603 goto exit;
604
605destroy_rx_wq_exit:
606 destroy_workqueue(ndev->rx_wq);
607
608destroy_cmd_wq_exit:
609 destroy_workqueue(ndev->cmd_wq);
610
611unreg_exit:
612 nfc_unregister_device(ndev->nfc_dev);
613
614exit:
615 return rc;
616}
617EXPORT_SYMBOL(nci_register_device);
618
619/**
620 * nci_unregister_device - unregister an nci device from the nfc subsystem
621 *
622 * @ndev: The nci device to unregister
623 */
624void nci_unregister_device(struct nci_dev *ndev)
625{
626 nfc_dbg("entry");
627
628 nci_close_device(ndev);
629
630 destroy_workqueue(ndev->cmd_wq);
631 destroy_workqueue(ndev->rx_wq);
632 destroy_workqueue(ndev->tx_wq);
633
634 nfc_unregister_device(ndev->nfc_dev);
635}
636EXPORT_SYMBOL(nci_unregister_device);
637
638/**
639 * nci_recv_frame - receive frame from NCI drivers
640 *
641 * @skb: The sk_buff to receive
642 */
643int nci_recv_frame(struct sk_buff *skb)
644{
645 struct nci_dev *ndev = (struct nci_dev *) skb->dev;
646
647 nfc_dbg("entry, len %d", skb->len);
648
649 if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
650 && !test_bit(NCI_INIT, &ndev->flags))) {
651 kfree_skb(skb);
652 return -ENXIO;
653 }
654
655 /* Queue frame for rx worker thread */
656 skb_queue_tail(&ndev->rx_q, skb);
657 queue_work(ndev->rx_wq, &ndev->rx_work);
658
659 return 0;
660}
661EXPORT_SYMBOL(nci_recv_frame);
662
663static int nci_send_frame(struct sk_buff *skb)
664{
665 struct nci_dev *ndev = (struct nci_dev *) skb->dev;
666
667 nfc_dbg("entry, len %d", skb->len);
668
669 if (!ndev) {
670 kfree_skb(skb);
671 return -ENODEV;
672 }
673
674 /* Get rid of skb owner, prior to sending to the driver. */
675 skb_orphan(skb);
676
677 return ndev->ops->send(skb);
678}
679
680/* Send NCI command */
681int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
682{
683 struct nci_ctrl_hdr *hdr;
684 struct sk_buff *skb;
685
686 nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
687
688 skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
689 if (!skb) {
690 nfc_err("no memory for command");
691 return -ENOMEM;
692 }
693
694 hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
695 hdr->gid = nci_opcode_gid(opcode);
696 hdr->oid = nci_opcode_oid(opcode);
697 hdr->plen = plen;
698
699 nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
700 nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
701
702 if (plen)
703 memcpy(skb_put(skb, plen), payload, plen);
704
705 skb->dev = (void *) ndev;
706
707 skb_queue_tail(&ndev->cmd_q, skb);
708 queue_work(ndev->cmd_wq, &ndev->cmd_work);
709
710 return 0;
711}
712
713/* ---- NCI TX Data worker thread ---- */
714
715static void nci_tx_work(struct work_struct *work)
716{
717 struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
718 struct sk_buff *skb;
719
720 nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
721
722 /* Send queued tx data */
723 while (atomic_read(&ndev->credits_cnt)) {
724 skb = skb_dequeue(&ndev->tx_q);
725 if (!skb)
726 return;
727
728 atomic_dec(&ndev->credits_cnt);
729
730 nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
731 nci_pbf(skb->data),
732 nci_conn_id(skb->data),
733 nci_plen(skb->data));
734
735 nci_send_frame(skb);
736 }
737}
738
739/* ----- NCI RX worker thread (data & control) ----- */
740
741static void nci_rx_work(struct work_struct *work)
742{
743 struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
744 struct sk_buff *skb;
745
746 while ((skb = skb_dequeue(&ndev->rx_q))) {
747 /* Process frame */
748 switch (nci_mt(skb->data)) {
749 case NCI_MT_RSP_PKT:
750 nci_rsp_packet(ndev, skb);
751 break;
752
753 case NCI_MT_NTF_PKT:
754 nci_ntf_packet(ndev, skb);
755 break;
756
757 case NCI_MT_DATA_PKT:
758 nci_rx_data_packet(ndev, skb);
759 break;
760
761 default:
762 nfc_err("unknown MT 0x%x", nci_mt(skb->data));
763 kfree_skb(skb);
764 break;
765 }
766 }
767}
768
769/* ----- NCI TX CMD worker thread ----- */
770
771static void nci_cmd_work(struct work_struct *work)
772{
773 struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
774 struct sk_buff *skb;
775
776 nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
777
778 /* Send queued command */
779 if (atomic_read(&ndev->cmd_cnt)) {
780 skb = skb_dequeue(&ndev->cmd_q);
781 if (!skb)
782 return;
783
784 atomic_dec(&ndev->cmd_cnt);
785
786 nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
787 nci_pbf(skb->data),
788 nci_opcode_gid(nci_opcode(skb->data)),
789 nci_opcode_oid(nci_opcode(skb->data)),
790 nci_plen(skb->data));
791
792 nci_send_frame(skb);
793
794 mod_timer(&ndev->cmd_timer,
795 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
796 }
797}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
new file mode 100644
index 000000000000..e5ed90fc1a9c
--- /dev/null
+++ b/net/nfc/nci/data.c
@@ -0,0 +1,247 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/types.h>
25#include <linux/interrupt.h>
26#include <linux/wait.h>
27#include <linux/bitops.h>
28#include <linux/skbuff.h>
29
30#include "../nfc.h"
31#include <net/nfc/nci.h>
32#include <net/nfc/nci_core.h>
33#include <linux/nfc.h>
34
35/* Complete data exchange transaction and forward skb to nfc core */
36void nci_data_exchange_complete(struct nci_dev *ndev,
37 struct sk_buff *skb,
38 int err)
39{
40 data_exchange_cb_t cb = ndev->data_exchange_cb;
41 void *cb_context = ndev->data_exchange_cb_context;
42
43 nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
44
45 if (cb) {
46 ndev->data_exchange_cb = NULL;
47 ndev->data_exchange_cb_context = NULL;
48
49 /* forward skb to nfc core */
50 cb(cb_context, skb, err);
51 } else if (skb) {
52 nfc_err("no rx callback, dropping rx data...");
53
54 /* no waiting callback, free skb */
55 kfree_skb(skb);
56 }
57
58 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
59}
60
61/* ----------------- NCI TX Data ----------------- */
62
63static inline void nci_push_data_hdr(struct nci_dev *ndev,
64 __u8 conn_id,
65 struct sk_buff *skb,
66 __u8 pbf)
67{
68 struct nci_data_hdr *hdr;
69 int plen = skb->len;
70
71 hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE);
72 hdr->conn_id = conn_id;
73 hdr->rfu = 0;
74 hdr->plen = plen;
75
76 nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
77 nci_pbf_set((__u8 *)hdr, pbf);
78
79 skb->dev = (void *) ndev;
80}
81
82static int nci_queue_tx_data_frags(struct nci_dev *ndev,
83 __u8 conn_id,
84 struct sk_buff *skb) {
85 int total_len = skb->len;
86 unsigned char *data = skb->data;
87 unsigned long flags;
88 struct sk_buff_head frags_q;
89 struct sk_buff *skb_frag;
90 int frag_len;
91 int rc = 0;
92
93 nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
94
95 __skb_queue_head_init(&frags_q);
96
97 while (total_len) {
98 frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
99
100 skb_frag = nci_skb_alloc(ndev,
101 (NCI_DATA_HDR_SIZE + frag_len),
102 GFP_KERNEL);
103 if (skb_frag == NULL) {
104 rc = -ENOMEM;
105 goto free_exit;
106 }
107 skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
108
109 /* first, copy the data */
110 memcpy(skb_put(skb_frag, frag_len), data, frag_len);
111
112 /* second, set the header */
113 nci_push_data_hdr(ndev, conn_id, skb_frag,
114 ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));
115
116 __skb_queue_tail(&frags_q, skb_frag);
117
118 data += frag_len;
119 total_len -= frag_len;
120
121 nfc_dbg("frag_len %d, remaining total_len %d",
122 frag_len, total_len);
123 }
124
125 /* queue all fragments atomically */
126 spin_lock_irqsave(&ndev->tx_q.lock, flags);
127
128 while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
129 __skb_queue_tail(&ndev->tx_q, skb_frag);
130
131 spin_unlock_irqrestore(&ndev->tx_q.lock, flags);
132
133 /* free the original skb */
134 kfree_skb(skb);
135
136 goto exit;
137
138free_exit:
139 while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
140 kfree_skb(skb_frag);
141
142exit:
143 return rc;
144}
145
146/* Send NCI data */
147int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
148{
149 int rc = 0;
150
151 nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
152
153 /* check if the packet needs to be fragmented */
154 if (skb->len <= ndev->max_pkt_payload_size) {
155 /* no need to fragment packet */
156 nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
157
158 skb_queue_tail(&ndev->tx_q, skb);
159 } else {
160 /* fragment packet and queue the fragments */
161 rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
162 if (rc) {
163 nfc_err("failed to fragment tx data packet");
164 goto free_exit;
165 }
166 }
167
168 queue_work(ndev->tx_wq, &ndev->tx_work);
169
170 goto exit;
171
172free_exit:
173 kfree_skb(skb);
174
175exit:
176 return rc;
177}
178
179/* ----------------- NCI RX Data ----------------- */
180
181static void nci_add_rx_data_frag(struct nci_dev *ndev,
182 struct sk_buff *skb,
183 __u8 pbf)
184{
185 int reassembly_len;
186 int err = 0;
187
188 if (ndev->rx_data_reassembly) {
189 reassembly_len = ndev->rx_data_reassembly->len;
190
191 /* first, make enough room for the already accumulated data */
192 if (skb_cow_head(skb, reassembly_len)) {
193 nfc_err("error adding room for accumulated rx data");
194
195 kfree_skb(skb);
196 skb = NULL;
197
198 kfree_skb(ndev->rx_data_reassembly);
199 ndev->rx_data_reassembly = NULL;
200
201 err = -ENOMEM;
202 goto exit;
203 }
204
205 /* second, combine the two fragments */
206 memcpy(skb_push(skb, reassembly_len),
207 ndev->rx_data_reassembly->data,
208 reassembly_len);
209
210 /* third, free old reassembly */
211 kfree_skb(ndev->rx_data_reassembly);
212 ndev->rx_data_reassembly = NULL;
213 }
214
215 if (pbf == NCI_PBF_CONT) {
216 /* need to wait for next fragment, store skb and exit */
217 ndev->rx_data_reassembly = skb;
218 return;
219 }
220
221exit:
222 nci_data_exchange_complete(ndev, skb, err);
223}
224
225/* Rx Data packet */
226void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
227{
228 __u8 pbf = nci_pbf(skb->data);
229
230 nfc_dbg("entry, len %d", skb->len);
231
232 nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
233 nci_pbf(skb->data),
234 nci_conn_id(skb->data),
235 nci_plen(skb->data));
236
237 /* strip the nci data header */
238 skb_pull(skb, NCI_DATA_HDR_SIZE);
239
240 if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
241 /* frame I/F => remove the status byte */
242 nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
243 skb_trim(skb, (skb->len - 1));
244 }
245
246 nci_add_rx_data_frag(ndev, skb, pbf);
247}
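
A worked example of the split in nci_queue_tx_data_frags(): with a max_pkt_payload_size of 251, a 700-byte payload becomes three fragments of 251, 251 and 198 bytes; the first two carry NCI_PBF_CONT and the last NCI_PBF_LAST, which is the flag nci_add_rx_data_frag() keys on when reassembling the peer's fragments. The fragment count is just a ceiling division (helper name hypothetical):

#include <linux/kernel.h>

static unsigned int nci_tx_frag_count(unsigned int total_len,
				      unsigned int max_payload)
{
	/* e.g. DIV_ROUND_UP(700, 251) == 3 */
	return DIV_ROUND_UP(total_len, max_payload);
}
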
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
new file mode 100644
index 000000000000..b19dc2fa90e1
--- /dev/null
+++ b/net/nfc/nci/lib.c
@@ -0,0 +1,94 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on lib.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/errno.h>
32
33#include <net/nfc/nci.h>
34
35/* NCI status codes to Unix errno mapping */
36int nci_to_errno(__u8 code)
37{
38 switch (code) {
39 case NCI_STATUS_OK:
40 return 0;
41
42 case NCI_STATUS_REJECTED:
43 return -EBUSY;
44
45 case NCI_STATUS_MESSAGE_CORRUPTED:
46 return -EBADMSG;
47
48 case NCI_STATUS_BUFFER_FULL:
49 return -ENOBUFS;
50
51 case NCI_STATUS_NOT_INITIALIZED:
52 return -EHOSTDOWN;
53
54 case NCI_STATUS_SYNTAX_ERROR:
55 case NCI_STATUS_SEMANTIC_ERROR:
56 case NCI_STATUS_INVALID_PARAM:
57 case NCI_STATUS_RF_PROTOCOL_ERROR:
58 case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
59 return -EPROTO;
60
61 case NCI_STATUS_UNKNOWN_GID:
62 case NCI_STATUS_UNKNOWN_OID:
63 return -EBADRQC;
64
65 case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
66 return -EMSGSIZE;
67
68 case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
69 return -EALREADY;
70
71 case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
72 case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
73 return -ECONNREFUSED;
74
75 case NCI_STATUS_RF_TRANSMISSION_ERROR:
76 case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
77 return -ECOMM;
78
79 case NCI_STATUS_RF_TIMEOUT_ERROR:
80 case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
81 return -ETIMEDOUT;
82
83 case NCI_STATUS_RF_LINK_LOSS_ERROR:
84 return -ENOLINK;
85
86 case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
87 return -EDQUOT;
88
89 case NCI_STATUS_FAILED:
90 default:
91 return -ENOSYS;
92 }
93}
94EXPORT_SYMBOL(nci_to_errno);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
new file mode 100644
index 000000000000..96633f5cda4f
--- /dev/null
+++ b/net/nfc/nci/ntf.c
@@ -0,0 +1,258 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_event.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/bitops.h>
31#include <linux/skbuff.h>
32
33#include "../nfc.h"
34#include <net/nfc/nci.h>
35#include <net/nfc/nci_core.h>
36#include <linux/nfc.h>
37
38/* Handle NCI Notification packets */
39
40static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
41 struct sk_buff *skb)
42{
43 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
44 int i;
45
46 nfc_dbg("entry, num_entries %d", ntf->num_entries);
47
48 if (ntf->num_entries > NCI_MAX_NUM_CONN)
49 ntf->num_entries = NCI_MAX_NUM_CONN;
50
51 /* update the credits */
52 for (i = 0; i < ntf->num_entries; i++) {
53 nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
54 ntf->conn_entries[i].conn_id,
55 ntf->conn_entries[i].credits);
56
57 if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
58 /* found static rf connection */
59 atomic_add(ntf->conn_entries[i].credits,
60 &ndev->credits_cnt);
61 }
62 }
63
64 /* trigger the next tx */
65 if (!skb_queue_empty(&ndev->tx_q))
66 queue_work(ndev->tx_wq, &ndev->tx_work);
67}
68
69static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
70 struct sk_buff *skb)
71{
72 struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
73
74 nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
75}
76
77static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
78 struct nci_rf_activate_ntf *ntf, __u8 *data)
79{
80 struct rf_tech_specific_params_nfca_poll *nfca_poll;
81 struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
82
83 nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
84 nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
85
86 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
87 data += 2;
88
89 nfca_poll->nfcid1_len = *data++;
90
91 nfc_dbg("sens_res 0x%x, nfcid1_len %d",
92 nfca_poll->sens_res,
93 nfca_poll->nfcid1_len);
94
95 memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
96 data += nfca_poll->nfcid1_len;
97
98 nfca_poll->sel_res_len = *data++;
99
100 if (nfca_poll->sel_res_len != 0)
101 nfca_poll->sel_res = *data++;
102
103 ntf->rf_interface_type = *data++;
104 ntf->activation_params_len = *data++;
105
106 nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
107 nfca_poll->sel_res_len,
108 nfca_poll->sel_res,
109 ntf->rf_interface_type,
110 ntf->activation_params_len);
111
112 switch (ntf->rf_interface_type) {
113 case NCI_RF_INTERFACE_ISO_DEP:
114 nfca_poll_iso_dep->rats_res_len = *data++;
115 if (nfca_poll_iso_dep->rats_res_len > 0) {
116 memcpy(nfca_poll_iso_dep->rats_res,
117 data,
118 nfca_poll_iso_dep->rats_res_len);
119 }
120 break;
121
122 case NCI_RF_INTERFACE_FRAME:
123 /* no activation params */
124 break;
125
126 default:
127 nfc_err("unsupported rf_interface_type 0x%x",
128 ntf->rf_interface_type);
129 return -EPROTO;
130 }
131
132 return 0;
133}
134
135static void nci_target_found(struct nci_dev *ndev,
136 struct nci_rf_activate_ntf *ntf)
137{
138 struct nfc_target nfc_tgt = {};
139
140 if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */
141 nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
142 else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
143 nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
144
145 nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
146 nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
147
148 if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
149 nfc_dbg("the target found does not have the desired protocol");
150 return;
151 }
152
153 nfc_dbg("new target found, supported_protocols 0x%x",
154 nfc_tgt.supported_protocols);
155
156 ndev->target_available_prots = nfc_tgt.supported_protocols;
157
158 nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
159}
160
161static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
162 struct sk_buff *skb)
163{
164 struct nci_rf_activate_ntf ntf;
165 __u8 *data = skb->data;
166 int rc = -1;
167
168 clear_bit(NCI_DISCOVERY, &ndev->flags);
169 set_bit(NCI_POLL_ACTIVE, &ndev->flags);
170
171 ntf.target_handle = *data++;
172 ntf.rf_protocol = *data++;
173 ntf.rf_tech_and_mode = *data++;
174 ntf.rf_tech_specific_params_len = *data++;
175
176 nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
177 ntf.target_handle,
178 ntf.rf_protocol,
179 ntf.rf_tech_and_mode,
180 ntf.rf_tech_specific_params_len);
181
182 switch (ntf.rf_tech_and_mode) {
183 case NCI_NFC_A_PASSIVE_POLL_MODE:
184 rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
185 data);
186 break;
187
188 default:
189 nfc_err("unsupported rf_tech_and_mode 0x%x",
190 ntf.rf_tech_and_mode);
191 return;
192 }
193
194 if (!rc)
195 nci_target_found(ndev, &ntf);
196}
197
198static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
199 struct sk_buff *skb)
200{
201 __u8 type = skb->data[0];
202
203 nfc_dbg("entry, type 0x%x", type);
204
205 clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
206 ndev->target_active_prot = 0;
207
208 /* drop tx data queue */
209 skb_queue_purge(&ndev->tx_q);
210
211 /* drop partial rx data packet */
212 if (ndev->rx_data_reassembly) {
213 kfree_skb(ndev->rx_data_reassembly);
214 ndev->rx_data_reassembly = NULL;
215 }
216
217 /* complete the data exchange transaction, if one exists */
218 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
219 nci_data_exchange_complete(ndev, NULL, -EIO);
220}
221
222void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
223{
224 __u16 ntf_opcode = nci_opcode(skb->data);
225
226 nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
227 nci_pbf(skb->data),
228 nci_opcode_gid(ntf_opcode),
229 nci_opcode_oid(ntf_opcode),
230 nci_plen(skb->data));
231
232 /* strip the nci control header */
233 skb_pull(skb, NCI_CTRL_HDR_SIZE);
234
235 switch (ntf_opcode) {
236 case NCI_OP_CORE_CONN_CREDITS_NTF:
237 nci_core_conn_credits_ntf_packet(ndev, skb);
238 break;
239
240 case NCI_OP_RF_FIELD_INFO_NTF:
241 nci_rf_field_info_ntf_packet(ndev, skb);
242 break;
243
244 case NCI_OP_RF_ACTIVATE_NTF:
245 nci_rf_activate_ntf_packet(ndev, skb);
246 break;
247
248 case NCI_OP_RF_DEACTIVATE_NTF:
249 nci_rf_deactivate_ntf_packet(ndev, skb);
250 break;
251
252 default:
253 nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
254 break;
255 }
256
257 kfree_skb(skb);
258}
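
For reference when reading the dispatch above and in rsp.c: nci_mt(), nci_pbf(), nci_opcode_gid() and nci_opcode_oid() decode a three-octet control header. Per the NCI draft this code targets (the authoritative macros live in <net/nfc/nci.h>), the layout is, schematically:

/*
 *  octet 0: MT (bits 7..5) | PBF (bit 4) | GID (bits 3..0)
 *  octet 1: OID (bits 5..0)
 *  octet 2: payload length (plen)
 *
 * An opcode is the (GID, OID) pair; PBF set to NCI_PBF_CONT means more
 * fragments of the same message follow, NCI_PBF_LAST ends it.
 */
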
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
new file mode 100644
index 000000000000..0403d4cd0917
--- /dev/null
+++ b/net/nfc/nci/rsp.c
@@ -0,0 +1,226 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_event.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/bitops.h>
31#include <linux/skbuff.h>
32
33#include "../nfc.h"
34#include <net/nfc/nci.h>
35#include <net/nfc/nci_core.h>
36
37/* Handle NCI Response packets */
38
39static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
40{
41 struct nci_core_reset_rsp *rsp = (void *) skb->data;
42
43 nfc_dbg("entry, status 0x%x", rsp->status);
44
45 if (rsp->status == NCI_STATUS_OK)
46 ndev->nci_ver = rsp->nci_ver;
47
48 nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
49
50 nci_req_complete(ndev, rsp->status);
51}
52
53static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
54{
55 struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
56 struct nci_core_init_rsp_2 *rsp_2;
57
58 nfc_dbg("entry, status 0x%x", rsp_1->status);
59
60 if (rsp_1->status != NCI_STATUS_OK)
61 return;
62
63 ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
64 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
65
66 if (ndev->num_supported_rf_interfaces >
67 NCI_MAX_SUPPORTED_RF_INTERFACES) {
68 ndev->num_supported_rf_interfaces =
69 NCI_MAX_SUPPORTED_RF_INTERFACES;
70 }
71
72 memcpy(ndev->supported_rf_interfaces,
73 rsp_1->supported_rf_interfaces,
74 ndev->num_supported_rf_interfaces);
75
76 rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
77
78 ndev->max_logical_connections =
79 rsp_2->max_logical_connections;
80 ndev->max_routing_table_size =
81 __le16_to_cpu(rsp_2->max_routing_table_size);
82 ndev->max_control_packet_payload_length =
83 rsp_2->max_control_packet_payload_length;
84 ndev->rf_sending_buffer_size =
85 __le16_to_cpu(rsp_2->rf_sending_buffer_size);
86 ndev->rf_receiving_buffer_size =
87 __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
88 ndev->manufacturer_id =
89 __le16_to_cpu(rsp_2->manufacturer_id);
90
91 nfc_dbg("nfcc_features 0x%x",
92 ndev->nfcc_features);
93 nfc_dbg("num_supported_rf_interfaces %d",
94 ndev->num_supported_rf_interfaces);
95 nfc_dbg("supported_rf_interfaces[0] 0x%x",
96 ndev->supported_rf_interfaces[0]);
97 nfc_dbg("supported_rf_interfaces[1] 0x%x",
98 ndev->supported_rf_interfaces[1]);
99 nfc_dbg("supported_rf_interfaces[2] 0x%x",
100 ndev->supported_rf_interfaces[2]);
101 nfc_dbg("supported_rf_interfaces[3] 0x%x",
102 ndev->supported_rf_interfaces[3]);
103 nfc_dbg("max_logical_connections %d",
104 ndev->max_logical_connections);
105 nfc_dbg("max_routing_table_size %d",
106 ndev->max_routing_table_size);
107 nfc_dbg("max_control_packet_payload_length %d",
108 ndev->max_control_packet_payload_length);
109 nfc_dbg("rf_sending_buffer_size %d",
110 ndev->rf_sending_buffer_size);
111 nfc_dbg("rf_receiving_buffer_size %d",
112 ndev->rf_receiving_buffer_size);
113 nfc_dbg("manufacturer_id 0x%x",
114 ndev->manufacturer_id);
115
116 nci_req_complete(ndev, rsp_1->status);
117}
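
The rsp_2 pointer math above works because the fixed part of the CORE_INIT response occupies exactly 6 bytes before the variable-length interface list. A sketch of the assumed wire layout (field names mirror the parsing code; the packing itself is an inference):

/* assumed CORE_INIT response layout, per nci_core_init_rsp_packet() */
struct init_rsp_1_sketch {
	__u8	status;				/* offset 0 */
	__le32	nfcc_features;			/* offsets 1-4 */
	__u8	num_supported_rf_interfaces;	/* offset 5 */
	__u8	supported_rf_interfaces[0];	/* offset 6, n bytes */
} __packed;
/* hence rsp_2 = skb->data + 6 + n */
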
118
119static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
120 struct sk_buff *skb)
121{
122 struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
123
124 nfc_dbg("entry, status 0x%x", rsp->status);
125
126 if (rsp->status != NCI_STATUS_OK)
127 return;
128
129 ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
130 ndev->initial_num_credits = rsp->initial_num_credits;
131 ndev->conn_id = rsp->conn_id;
132
133 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
134
135 nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
136 nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
137 nfc_dbg("conn_id %d", ndev->conn_id);
138}
139
140static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
141 struct sk_buff *skb)
142{
143 __u8 status = skb->data[0];
144
145 nfc_dbg("entry, status 0x%x", status);
146
147 nci_req_complete(ndev, status);
148}
149
150static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
151{
152 __u8 status = skb->data[0];
153
154 nfc_dbg("entry, status 0x%x", status);
155
156 if (status == NCI_STATUS_OK)
157 set_bit(NCI_DISCOVERY, &ndev->flags);
158
159 nci_req_complete(ndev, status);
160}
161
162static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
163 struct sk_buff *skb)
164{
165 __u8 status = skb->data[0];
166
167 nfc_dbg("entry, status 0x%x", status);
168
169 clear_bit(NCI_DISCOVERY, &ndev->flags);
170
171 nci_req_complete(ndev, status);
172}
173
174void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
175{
176 __u16 rsp_opcode = nci_opcode(skb->data);
177
178 /* we got a rsp, stop the cmd timer */
179 del_timer(&ndev->cmd_timer);
180
181 nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
182 nci_pbf(skb->data),
183 nci_opcode_gid(rsp_opcode),
184 nci_opcode_oid(rsp_opcode),
185 nci_plen(skb->data));
186
187 /* strip the nci control header */
188 skb_pull(skb, NCI_CTRL_HDR_SIZE);
189
190 switch (rsp_opcode) {
191 case NCI_OP_CORE_RESET_RSP:
192 nci_core_reset_rsp_packet(ndev, skb);
193 break;
194
195 case NCI_OP_CORE_INIT_RSP:
196 nci_core_init_rsp_packet(ndev, skb);
197 break;
198
199 case NCI_OP_CORE_CONN_CREATE_RSP:
200 nci_core_conn_create_rsp_packet(ndev, skb);
201 break;
202
203 case NCI_OP_RF_DISCOVER_MAP_RSP:
204 nci_rf_disc_map_rsp_packet(ndev, skb);
205 break;
206
207 case NCI_OP_RF_DISCOVER_RSP:
208 nci_rf_disc_rsp_packet(ndev, skb);
209 break;
210
211 case NCI_OP_RF_DEACTIVATE_RSP:
212 nci_rf_deactivate_rsp_packet(ndev, skb);
213 break;
214
215 default:
216 nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
217 break;
218 }
219
220 kfree_skb(skb);
221
222 /* trigger the next cmd */
223 atomic_set(&ndev->cmd_cnt, 1);
224 if (!skb_queue_empty(&ndev->cmd_q))
225 queue_work(ndev->cmd_wq, &ndev->cmd_work);
226}
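
NCI permits only a single outstanding control command, which is why this function deletes cmd_timer on entry and re-arms the credit (cmd_cnt) plus the workqueue on exit. A hedged sketch of what the matching consumer on cmd_wq presumably looks like — the ops->send hook and NCI_CMD_TIMEOUT name are assumptions, not shown in this hunk:

/* sketch: one command in flight at a time, paced by nci_rsp_packet() */
static void nci_cmd_work_sketch(struct nci_dev *ndev)
{
	struct sk_buff *skb;

	if (!atomic_read(&ndev->cmd_cnt))
		return;			/* still waiting for a response */

	skb = skb_dequeue(&ndev->cmd_q);
	if (!skb)
		return;

	atomic_dec(&ndev->cmd_cnt);	/* consume the single credit */
	ndev->ops->send(skb);		/* assumed driver hook */

	/* arm the timer that nci_rsp_packet() deletes on entry */
	mod_timer(&ndev->cmd_timer,
		  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
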
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ccdff7953f7d..03f8818e1f16 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -367,6 +367,52 @@ out_putdev:
367 return rc; 367 return rc;
368} 368}
369 369
370static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
371{
372 struct nfc_dev *dev;
373 int rc;
374 u32 idx;
375
376 nfc_dbg("entry");
377
378 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
379 return -EINVAL;
380
381 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
382
383 dev = nfc_get_device(idx);
384 if (!dev)
385 return -ENODEV;
386
387 rc = nfc_dev_up(dev);
388
389 nfc_put_device(dev);
390 return rc;
391}
392
393static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
394{
395 struct nfc_dev *dev;
396 int rc;
397 u32 idx;
398
399 nfc_dbg("entry");
400
401 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
402 return -EINVAL;
403
404 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
405
406 dev = nfc_get_device(idx);
407 if (!dev)
408 return -ENODEV;
409
410 rc = nfc_dev_down(dev);
411
412 nfc_put_device(dev);
413 return rc;
414}
415
370static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) 416static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
371{ 417{
372 struct nfc_dev *dev; 418 struct nfc_dev *dev;
@@ -441,6 +487,16 @@ static struct genl_ops nfc_genl_ops[] = {
441 .policy = nfc_genl_policy, 487 .policy = nfc_genl_policy,
442 }, 488 },
443 { 489 {
490 .cmd = NFC_CMD_DEV_UP,
491 .doit = nfc_genl_dev_up,
492 .policy = nfc_genl_policy,
493 },
494 {
495 .cmd = NFC_CMD_DEV_DOWN,
496 .doit = nfc_genl_dev_down,
497 .policy = nfc_genl_policy,
498 },
499 {
444 .cmd = NFC_CMD_START_POLL, 500 .cmd = NFC_CMD_START_POLL,
445 .doit = nfc_genl_start_poll, 501 .doit = nfc_genl_start_poll,
446 .policy = nfc_genl_policy, 502 .policy = nfc_genl_policy,
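
From user space the two new commands are plain generic-netlink requests. A minimal libnl-3 sketch (the "nfc" family name and a device index of 0 are assumptions; link with -lnl-3 -lnl-genl-3):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nfc");	/* assumed genl family name */

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
		    0, 0, NFC_CMD_DEV_UP, 1);
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, 0);	/* first NFC device */

	nl_send_auto(sk, msg);		/* kernel side runs nfc_genl_dev_up() */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
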
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index aaf9832298f3..d86583f4831d 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -24,10 +24,10 @@
24#ifndef __LOCAL_NFC_H 24#ifndef __LOCAL_NFC_H
25#define __LOCAL_NFC_H 25#define __LOCAL_NFC_H
26 26
27#include <net/nfc.h> 27#include <net/nfc/nfc.h>
28#include <net/sock.h> 28#include <net/sock.h>
29 29
30__attribute__((format (printf, 2, 3))) 30__printf(2, 3)
31int nfc_printk(const char *level, const char *fmt, ...); 31int nfc_printk(const char *level, const char *fmt, ...);
32 32
33#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg) 33#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
@@ -101,6 +101,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
101 class_dev_iter_exit(iter); 101 class_dev_iter_exit(iter);
102} 102}
103 103
104int nfc_dev_up(struct nfc_dev *dev);
105
106int nfc_dev_down(struct nfc_dev *dev);
107
104int nfc_start_poll(struct nfc_dev *dev, u32 protocols); 108int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
105 109
106int nfc_stop_poll(struct nfc_dev *dev); 110int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 52de84a55115..9fd652a51424 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -123,11 +123,7 @@ error:
123 123
124static int rawsock_add_header(struct sk_buff *skb) 124static int rawsock_add_header(struct sk_buff *skb)
125{ 125{
126 126 *skb_push(skb, NFC_HEADER_SIZE) = 0;
127 if (skb_cow_head(skb, 1))
128 return -ENOMEM;
129
130 *skb_push(skb, 1) = 0;
131 127
132 return 0; 128 return 0;
133} 129}
@@ -197,6 +193,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
197 struct msghdr *msg, size_t len) 193 struct msghdr *msg, size_t len)
198{ 194{
199 struct sock *sk = sock->sk; 195 struct sock *sk = sock->sk;
196 struct nfc_dev *dev = nfc_rawsock(sk)->dev;
200 struct sk_buff *skb; 197 struct sk_buff *skb;
201 int rc; 198 int rc;
202 199
@@ -208,11 +205,13 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
208 if (sock->state != SS_CONNECTED) 205 if (sock->state != SS_CONNECTED)
209 return -ENOTCONN; 206 return -ENOTCONN;
210 207
211 skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, 208 skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
212 &rc); 209 msg->msg_flags & MSG_DONTWAIT, &rc);
213 if (!skb) 210 if (!skb)
214 return rc; 211 return rc;
215 212
213 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
214
216 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 215 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
217 if (rc < 0) { 216 if (rc < 0) {
218 kfree_skb(skb); 217 kfree_skb(skb);
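
The sendmsg change above sizes the allocation so that nothing downstream needs to reallocate: the device headroom and the one-byte NFC header are reserved up front, the payload goes in with skb_put(), and rawsock_add_header() later skb_push()es into space that is guaranteed to exist. A sketch of the resulting buffer arithmetic (a restatement of the code above, not new behaviour):

/* layout after rawsock_sendmsg() + rawsock_add_header(), schematically:
 *
 *  |<- tx_headroom ->|<- NFC_HEADER_SIZE ->|<-- len -->|<- tx_tailroom ->|
 *
 * sock_alloc_send_skb() allocates the whole span;
 * skb_reserve(skb, tx_headroom + NFC_HEADER_SIZE) leaves data empty;
 * skb_put(skb, len) appends the payload;
 * *skb_push(skb, NFC_HEADER_SIZE) = 0 claims the reserved header byte,
 * and the driver still owns tx_headroom/tx_tailroom on either side.
 */
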
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fabb4fafa281..03bb45adf2fc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -40,6 +40,10 @@
40 * byte arrays at the end of sockaddr_ll 40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq. 41 * and packet_mreq.
42 * Johann Baudy : Added TX RING. 42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
43 * 47 *
44 * This program is free software; you can redistribute it and/or 48 * This program is free software; you can redistribute it and/or
45 * modify it under the terms of the GNU General Public License 49 * modify it under the terms of the GNU General Public License
@@ -161,9 +165,56 @@ struct packet_mreq_max {
161 unsigned char mr_address[MAX_ADDR_LEN]; 165 unsigned char mr_address[MAX_ADDR_LEN];
162}; 166};
163 167
164static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 168static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
165 int closing, int tx_ring); 169 int closing, int tx_ring);
166 170
171
172#define V3_ALIGNMENT (8)
173
174#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
175
176#define BLK_PLUS_PRIV(sz_of_priv) \
177 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
178
179/* kbdq - kernel block descriptor queue */
180struct tpacket_kbdq_core {
181 struct pgv *pkbdq;
182 unsigned int feature_req_word;
183 unsigned int hdrlen;
184 unsigned char reset_pending_on_curr_blk;
185 unsigned char delete_blk_timer;
186 unsigned short kactive_blk_num;
187 unsigned short blk_sizeof_priv;
188
189 /* last_kactive_blk_num:
190 * trick to see if user-space has caught up
191 * in order to avoid refreshing timer when every single pkt arrives.
192 */
193 unsigned short last_kactive_blk_num;
194
195 char *pkblk_start;
196 char *pkblk_end;
197 int kblk_size;
198 unsigned int knum_blocks;
199 uint64_t knxt_seq_num;
200 char *prev;
201 char *nxt_offset;
202 struct sk_buff *skb;
203
204 atomic_t blk_fill_in_prog;
205
206 /* Default is set to 8ms */
207#define DEFAULT_PRB_RETIRE_TOV (8)
208
209 unsigned short retire_blk_tov;
210 unsigned short version;
211 unsigned long tov_in_jiffies;
212
213 /* timer to retire an outstanding block */
214 struct timer_list retire_blk_timer;
215};
216
217#define PGV_FROM_VMALLOC 1
167struct pgv { 218struct pgv {
168 char *buffer; 219 char *buffer;
169}; 220};
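
These macros pin down a V3 block's geometry: an 8-byte-aligned block descriptor, an optional aligned private area, then packet data. A small user-space demo of the arithmetic (ALIGN is re-defined here with what are assumed to be the kernel.h semantics):

#include <stdio.h>

#define V3_ALIGNMENT	8
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* assumed kernel semantics */

int main(void)
{
	unsigned int hdr = ALIGN(48, V3_ALIGNMENT);	/* stand-in for sizeof(struct tpacket_block_desc) */
	unsigned int priv = 13;				/* a user's tp_sizeof_priv request */

	/* BLK_PLUS_PRIV(): first packet lands after header + aligned priv */
	printf("offset_to_first_pkt = %u\n", hdr + ALIGN(priv, V3_ALIGNMENT));
	return 0;
}
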
@@ -179,12 +230,44 @@ struct packet_ring_buffer {
179 unsigned int pg_vec_pages; 230 unsigned int pg_vec_pages;
180 unsigned int pg_vec_len; 231 unsigned int pg_vec_len;
181 232
233 struct tpacket_kbdq_core prb_bdqc;
182 atomic_t pending; 234 atomic_t pending;
183}; 235};
184 236
237#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
238#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
239#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
240#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
241#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
242#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
243#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
244
185struct packet_sock; 245struct packet_sock;
186static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 246static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
187 247
248static void *packet_previous_frame(struct packet_sock *po,
249 struct packet_ring_buffer *rb,
250 int status);
251static void packet_increment_head(struct packet_ring_buffer *buff);
252static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
253 struct tpacket_block_desc *);
254static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
255 struct packet_sock *);
256static void prb_retire_current_block(struct tpacket_kbdq_core *,
257 struct packet_sock *, unsigned int status);
258static int prb_queue_frozen(struct tpacket_kbdq_core *);
259static void prb_open_block(struct tpacket_kbdq_core *,
260 struct tpacket_block_desc *);
261static void prb_retire_rx_blk_timer_expired(unsigned long);
262static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
263static void prb_init_blk_timer(struct packet_sock *,
264 struct tpacket_kbdq_core *,
265 void (*func) (unsigned long));
266static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
267static void prb_clear_rxhash(struct tpacket_kbdq_core *,
268 struct tpacket3_hdr *);
269static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
270 struct tpacket3_hdr *);
188static void packet_flush_mclist(struct sock *sk); 271static void packet_flush_mclist(struct sock *sk);
189 272
190struct packet_fanout; 273struct packet_fanout;
@@ -193,6 +276,7 @@ struct packet_sock {
193 struct sock sk; 276 struct sock sk;
194 struct packet_fanout *fanout; 277 struct packet_fanout *fanout;
195 struct tpacket_stats stats; 278 struct tpacket_stats stats;
279 union tpacket_stats_u stats_u;
196 struct packet_ring_buffer rx_ring; 280 struct packet_ring_buffer rx_ring;
197 struct packet_ring_buffer tx_ring; 281 struct packet_ring_buffer tx_ring;
198 int copy_thresh; 282 int copy_thresh;
@@ -242,6 +326,15 @@ struct packet_skb_cb {
242 326
243#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 327#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
244 328
329#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
330#define GET_PBLOCK_DESC(x, bid) \
331 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
332#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
333 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
334#define GET_NEXT_PRB_BLK_NUM(x) \
335 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
336 ((x)->kactive_blk_num+1) : 0)
337
245static inline struct packet_sock *pkt_sk(struct sock *sk) 338static inline struct packet_sock *pkt_sk(struct sock *sk)
246{ 339{
247 return (struct packet_sock *)sk; 340 return (struct packet_sock *)sk;
@@ -325,8 +418,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
325 h.h2->tp_status = status; 418 h.h2->tp_status = status;
326 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 419 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
327 break; 420 break;
421 case TPACKET_V3:
328 default: 422 default:
329 pr_err("TPACKET version not supported\n"); 423 WARN(1, "TPACKET version not supported.\n");
330 BUG(); 424 BUG();
331 } 425 }
332 426
@@ -351,8 +445,9 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
351 case TPACKET_V2: 445 case TPACKET_V2:
352 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 446 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
353 return h.h2->tp_status; 447 return h.h2->tp_status;
448 case TPACKET_V3:
354 default: 449 default:
355 pr_err("TPACKET version not supported\n"); 450 WARN(1, "TPACKET version not supported.\n");
356 BUG(); 451 BUG();
357 return 0; 452 return 0;
358 } 453 }
@@ -389,6 +484,670 @@ static inline void *packet_current_frame(struct packet_sock *po,
389 return packet_lookup_frame(po, rb, rb->head, status); 484 return packet_lookup_frame(po, rb, rb->head, status);
390} 485}
391 486
487static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
488{
489 del_timer_sync(&pkc->retire_blk_timer);
490}
491
492static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
493 int tx_ring,
494 struct sk_buff_head *rb_queue)
495{
496 struct tpacket_kbdq_core *pkc;
497
498 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
499
500 spin_lock(&rb_queue->lock);
501 pkc->delete_blk_timer = 1;
502 spin_unlock(&rb_queue->lock);
503
504 prb_del_retire_blk_timer(pkc);
505}
506
507static void prb_init_blk_timer(struct packet_sock *po,
508 struct tpacket_kbdq_core *pkc,
509 void (*func) (unsigned long))
510{
511 init_timer(&pkc->retire_blk_timer);
512 pkc->retire_blk_timer.data = (long)po;
513 pkc->retire_blk_timer.function = func;
514 pkc->retire_blk_timer.expires = jiffies;
515}
516
517static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
518{
519 struct tpacket_kbdq_core *pkc;
520
521 if (tx_ring)
522 BUG();
523
524 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
525 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
526}
527
528static int prb_calc_retire_blk_tmo(struct packet_sock *po,
529 int blk_size_in_bytes)
530{
531 struct net_device *dev;
532 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
533 struct ethtool_cmd ecmd;
534 int err;
535
536 rtnl_lock();
537 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
538 if (unlikely(!dev)) {
539 rtnl_unlock();
540 return DEFAULT_PRB_RETIRE_TOV;
541 }
542 err = __ethtool_get_settings(dev, &ecmd);
543 rtnl_unlock();
544 if (!err) {
545 switch (ecmd.speed) {
546 case SPEED_10000:
547 msec = 1;
548 div = 10000/1000;
549 break;
550 case SPEED_1000:
551 msec = 1;
552 div = 1000/1000;
553 break;
554 /*
555 * If the link speed is so slow you don't really
556 * need to worry about perf anyway
557 */
558 case SPEED_100:
559 case SPEED_10:
560 default:
561 return DEFAULT_PRB_RETIRE_TOV;
562 }
563 }
564
565 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
566
567 if (div)
568 mbits /= div;
569
570 tmo = mbits * msec;
571
572 if (div)
573 return tmo+1;
574 return tmo;
575}
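
Worked through, this lands on the ~8 ms figure quoted in the timer comment below: a 1 MB block is 8 Mbit, i.e. ~8 ms of wire time at 1 Gbps (msec = 1, div = 1), so tmo = 8 and the +1 makes it 9 ms; at 10 Gbps the integer division truncates to 0 and the result is 1 ms. A standalone replica of the computation:

#include <stdio.h>

/* mirrors prb_calc_retire_blk_tmo() above for a known link speed */
static unsigned int retire_tmo_msec(unsigned int blk_bytes, unsigned int gbps)
{
	unsigned int msec = 1, div = gbps;	/* 1000/1000, 10000/1000, ... */
	unsigned int mbits = (blk_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;
	return mbits * msec + (div ? 1 : 0);
}

int main(void)
{
	printf("1MB @  1Gbps -> %u ms\n", retire_tmo_msec(1 << 20, 1));	/* 9 */
	printf("1MB @ 10Gbps -> %u ms\n", retire_tmo_msec(1 << 20, 10));	/* 1 */
	return 0;
}
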
576
577static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
578 union tpacket_req_u *req_u)
579{
580 p1->feature_req_word = req_u->req3.tp_feature_req_word;
581}
582
583static void init_prb_bdqc(struct packet_sock *po,
584 struct packet_ring_buffer *rb,
585 struct pgv *pg_vec,
586 union tpacket_req_u *req_u, int tx_ring)
587{
588 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
589 struct tpacket_block_desc *pbd;
590
591 memset(p1, 0x0, sizeof(*p1));
592
593 p1->knxt_seq_num = 1;
594 p1->pkbdq = pg_vec;
595 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
596 p1->pkblk_start = (char *)pg_vec[0].buffer;
597 p1->kblk_size = req_u->req3.tp_block_size;
598 p1->knum_blocks = req_u->req3.tp_block_nr;
599 p1->hdrlen = po->tp_hdrlen;
600 p1->version = po->tp_version;
601 p1->last_kactive_blk_num = 0;
602 po->stats_u.stats3.tp_freeze_q_cnt = 0;
603 if (req_u->req3.tp_retire_blk_tov)
604 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
605 else
606 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
607 req_u->req3.tp_block_size);
608 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
609 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
610
611 prb_init_ft_ops(p1, req_u);
612 prb_setup_retire_blk_timer(po, tx_ring);
613 prb_open_block(p1, pbd);
614}
615
616/* Do NOT update the last_blk_num first.
617 * Assumes sk_buff_head lock is held.
618 */
619static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
620{
621 mod_timer(&pkc->retire_blk_timer,
622 jiffies + pkc->tov_in_jiffies);
623 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
624}
625
626/*
627 * Timer logic:
628 * 1) We refresh the timer only when we open a block.
629 * By doing this we don't waste cycles refreshing the timer
630 * on a packet-by-packet basis.
631 *
632 * With a 1MB block-size, on a 1Gbps line, it will take
633 * i) ~8 ms to fill a block + ii) memcpy etc.
634 * In this version we are not accounting for the memcpy time.
635 *
636 * So, if the user sets the 'tmo' to 10ms then the timer
637 * will never fire while the block is still getting filled
638 * (which is what we want). However, the user could choose
639 * to close a block early and that's fine.
640 *
641 * But when the timer does fire, we check whether or not to refresh it.
642 * Since the tmo granularity is in msecs, it is not too expensive
643 * to refresh the timer, let's say every 8 msecs.
644 * Either the user can set the 'tmo' or we can derive it based on
645 * a) line-speed and b) block-size.
646 * prb_calc_retire_blk_tmo() calculates the tmo.
647 *
648 */
649static void prb_retire_rx_blk_timer_expired(unsigned long data)
650{
651 struct packet_sock *po = (struct packet_sock *)data;
652 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
653 unsigned int frozen;
654 struct tpacket_block_desc *pbd;
655
656 spin_lock(&po->sk.sk_receive_queue.lock);
657
658 frozen = prb_queue_frozen(pkc);
659 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
660
661 if (unlikely(pkc->delete_blk_timer))
662 goto out;
663
664 /* We only need to plug the race when the block is partially filled.
665 * tpacket_rcv:
666 * lock(); increment BLOCK_NUM_PKTS; unlock()
667 * copy_bits() is in progress ...
668 * timer fires on other cpu:
669 * we can't retire the current block because copy_bits
670 * is in progress.
671 *
672 */
673 if (BLOCK_NUM_PKTS(pbd)) {
674 while (atomic_read(&pkc->blk_fill_in_prog)) {
675 /* Waiting for skb_copy_bits to finish... */
676 cpu_relax();
677 }
678 }
679
680 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
681 if (!frozen) {
682 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
683 if (!prb_dispatch_next_block(pkc, po))
684 goto refresh_timer;
685 else
686 goto out;
687 } else {
688 /* Case 1. Queue was frozen because user-space was
689 * lagging behind.
690 */
691 if (prb_curr_blk_in_use(pkc, pbd)) {
692 /*
693 * Ok, user-space is still behind.
694 * So just refresh the timer.
695 */
696 goto refresh_timer;
697 } else {
698 /* Case 2. The queue was frozen, user-space caught up,
699 * then the link went idle and the timer fired.
700 * We don't have a block to close, so we open this
701 * block and restart the timer.
702 * Opening a block thaws the queue and restarts the timer;
703 * the thaw and timer-refresh are side effects.
704 */
705 prb_open_block(pkc, pbd);
706 goto out;
707 }
708 }
709 }
710
711refresh_timer:
712 _prb_refresh_rx_retire_blk_timer(pkc);
713
714out:
715 spin_unlock(&po->sk.sk_receive_queue.lock);
716}
717
718static inline void prb_flush_block(struct tpacket_kbdq_core *pkc1,
719 struct tpacket_block_desc *pbd1, __u32 status)
720{
721 /* Flush everything minus the block header */
722
723#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
724 u8 *start, *end;
725
726 start = (u8 *)pbd1;
727
728 /* Skip the block header (we know the header will fit in 4K) */
729 start += PAGE_SIZE;
730
731 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
732 for (; start < end; start += PAGE_SIZE)
733 flush_dcache_page(pgv_to_page(start));
734
735 smp_wmb();
736#endif
737
738 /* Now update the block status. */
739
740 BLOCK_STATUS(pbd1) = status;
741
742 /* Flush the block header */
743
744#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
745 start = (u8 *)pbd1;
746 flush_dcache_page(pgv_to_page(start));
747
748 smp_wmb();
749#endif
750}
751
752/*
753 * Side effect:
754 *
755 * 1) flush the block
756 * 2) Increment active_blk_num
757 *
758 * Note: we deliberately do NOT refresh the timer, because
759 * almost always the next block will be opened right away.
760 */
761static void prb_close_block(struct tpacket_kbdq_core *pkc1,
762 struct tpacket_block_desc *pbd1,
763 struct packet_sock *po, unsigned int stat)
764{
765 __u32 status = TP_STATUS_USER | stat;
766
767 struct tpacket3_hdr *last_pkt;
768 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
769
770 if (po->stats.tp_drops)
771 status |= TP_STATUS_LOSING;
772
773 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
774 last_pkt->tp_next_offset = 0;
775
776 /* Get the ts of the last pkt */
777 if (BLOCK_NUM_PKTS(pbd1)) {
778 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
779 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
780 } else {
781 /* Ok, we tmo'd - so get the current time */
782 struct timespec ts;
783 getnstimeofday(&ts);
784 h1->ts_last_pkt.ts_sec = ts.tv_sec;
785 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
786 }
787
788 smp_wmb();
789
790 /* Flush the block */
791 prb_flush_block(pkc1, pbd1, status);
792
793 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
794}
795
796static inline void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
797{
798 pkc->reset_pending_on_curr_blk = 0;
799}
800
801/*
802 * Side effect of opening a block:
803 *
804 * 1) prb_queue is thawed.
805 * 2) retire_blk_timer is refreshed.
806 *
807 */
808static void prb_open_block(struct tpacket_kbdq_core *pkc1,
809 struct tpacket_block_desc *pbd1)
810{
811 struct timespec ts;
812 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
813
814 smp_rmb();
815
816 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
817
818 /* We could have just memset this but we will lose the
819 * flexibility of making the priv area sticky
820 */
821 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
822 BLOCK_NUM_PKTS(pbd1) = 0;
823 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
824 getnstimeofday(&ts);
825 h1->ts_first_pkt.ts_sec = ts.tv_sec;
826 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
827 pkc1->pkblk_start = (char *)pbd1;
828 pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
829 BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
830 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
831 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
832 pbd1->version = pkc1->version;
833 pkc1->prev = pkc1->nxt_offset;
834 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
835 prb_thaw_queue(pkc1);
836 _prb_refresh_rx_retire_blk_timer(pkc1);
837
838 smp_wmb();
839
840 return;
841 }
842
843 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
844 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
845 dump_stack();
846 BUG();
847}
848
849/*
850 * Queue freeze logic:
851 * 1) Assume tp_block_nr = 8 blocks.
852 * 2) At time 't0', user opens Rx ring.
853 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
854 * 4) user-space is either sleeping or processing block '0'.
855 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
856 * it will close block-7, loop around and try to fill block '0'.
857 * call-flow:
858 * __packet_lookup_frame_in_block
859 * prb_retire_current_block()
860 * prb_dispatch_next_block()
861 * |->(BLOCK_STATUS == USER) evaluates to true
862 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
863 * 6) Now there are two cases:
864 * 6.1) Link goes idle right after the queue is frozen.
865 * But remember, the last open_block() refreshed the timer.
866 * When this timer expires, it will refresh itself so that we can
867 * re-open block-0 in the near future.
868 * 6.2) Link is busy and keeps on receiving packets. This is a simple
869 * case and __packet_lookup_frame_in_block will check if block-0
870 * is free and can now be re-used.
871 */
872static inline void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
873 struct packet_sock *po)
874{
875 pkc->reset_pending_on_curr_blk = 1;
876 po->stats_u.stats3.tp_freeze_q_cnt++;
877}
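
The freeze/thaw protocol implies a specific user-space loop: wait for a block to flip to TP_STATUS_USER, walk its packets via tp_next_offset, then write TP_STATUS_KERNEL back so the kernel can reuse (and, if needed, thaw onto) the block. A minimal consumer sketch — socket creation and error handling elided, and the ring geometry below is purely illustrative:

#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

static int setup_v3_ring(int fd, struct tpacket_req3 *req, void **map)
{
	int ver = TPACKET_V3;

	memset(req, 0, sizeof(*req));
	req->tp_block_size = 1 << 20;	/* 1 MB blocks: ~8-9 ms derived tmo */
	req->tp_block_nr = 8;
	req->tp_frame_size = 2048;	/* must divide tp_block_size */
	req->tp_frame_nr = req->tp_block_size / req->tp_frame_size * req->tp_block_nr;
	req->tp_retire_blk_tov = 0;	/* 0: let the kernel derive the timeout */

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));

	*map = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return *map == MAP_FAILED ? -1 : 0;
}

static void consume_block(struct tpacket_block_desc *pbd)
{
	struct tpacket3_hdr *ppd;
	unsigned int i;

	if (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
		return;			/* kernel still owns this block */

	ppd = (struct tpacket3_hdr *)((char *)pbd +
				      pbd->hdr.bh1.offset_to_first_pkt);
	for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
		/* frame data starts at (char *)ppd + ppd->tp_mac */
		ppd = (struct tpacket3_hdr *)((char *)ppd + ppd->tp_next_offset);
	}

	/* hand the block back; this is what lets a frozen queue thaw */
	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
}
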
878
879#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
880
881/*
882 * If the next block is free then we will dispatch it
883 * and return a good offset.
884 * Else, we will freeze the queue.
885 * So, caller must check the return value.
886 */
887static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
888 struct packet_sock *po)
889{
890 struct tpacket_block_desc *pbd;
891
892 smp_rmb();
893
894 /* 1. Get current block num */
895 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
896
897 /* 2. If this block is currently in_use then freeze the queue */
898 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
899 prb_freeze_queue(pkc, po);
900 return NULL;
901 }
902
903 /*
904 * 3.
905 * open this block and return the offset where the first packet
906 * needs to get stored.
907 */
908 prb_open_block(pkc, pbd);
909 return (void *)pkc->nxt_offset;
910}
911
912static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
913 struct packet_sock *po, unsigned int status)
914{
915 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
916
917 /* retire/close the current block */
918 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
919 /*
920 * Plug the case where copy_bits() is in progress on
921 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
922 * have space to copy the pkt in the current block and
923 * called prb_retire_current_block()
924 *
925 * We don't need to worry about the TMO case because
926 * the timer-handler already handled this case.
927 */
928 if (!(status & TP_STATUS_BLK_TMO)) {
929 while (atomic_read(&pkc->blk_fill_in_prog)) {
930 /* Waiting for skb_copy_bits to finish... */
931 cpu_relax();
932 }
933 }
934 prb_close_block(pkc, pbd, po, status);
935 return;
936 }
937
938 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
939 dump_stack();
940 BUG();
941}
942
943static inline int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
944 struct tpacket_block_desc *pbd)
945{
946 return TP_STATUS_USER & BLOCK_STATUS(pbd);
947}
948
949static inline int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
950{
951 return pkc->reset_pending_on_curr_blk;
952}
953
954static inline void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
955{
956 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
957 atomic_dec(&pkc->blk_fill_in_prog);
958}
959
960static inline void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
961 struct tpacket3_hdr *ppd)
962{
963 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
964}
965
966static inline void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
967 struct tpacket3_hdr *ppd)
968{
969 ppd->hv1.tp_rxhash = 0;
970}
971
972static inline void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
973 struct tpacket3_hdr *ppd)
974{
975 if (vlan_tx_tag_present(pkc->skb)) {
976 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
977 ppd->tp_status = TP_STATUS_VLAN_VALID;
978 } else {
979 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
980 }
981}
982
983static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
984 struct tpacket3_hdr *ppd)
985{
986 prb_fill_vlan_info(pkc, ppd);
987
988 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
989 prb_fill_rxhash(pkc, ppd);
990 else
991 prb_clear_rxhash(pkc, ppd);
992}
993
994static inline void prb_fill_curr_block(char *curr,
995 struct tpacket_kbdq_core *pkc,
996 struct tpacket_block_desc *pbd,
997 unsigned int len)
998{
999 struct tpacket3_hdr *ppd;
1000
1001 ppd = (struct tpacket3_hdr *)curr;
1002 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1003 pkc->prev = curr;
1004 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1005 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1006 BLOCK_NUM_PKTS(pbd) += 1;
1007 atomic_inc(&pkc->blk_fill_in_prog);
1008 prb_run_all_ft_ops(pkc, ppd);
1009}
1010
1011/* Assumes caller has the sk->rx_queue.lock */
1012static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1013 struct sk_buff *skb,
1014 int status,
1015 unsigned int len
1016 )
1017{
1018 struct tpacket_kbdq_core *pkc;
1019 struct tpacket_block_desc *pbd;
1020 char *curr, *end;
1021
1022 pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
1023 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1024
1025 /* Queue is frozen when user space is lagging behind */
1026 if (prb_queue_frozen(pkc)) {
1027 /*
1028 * Check if that last block which caused the queue to freeze,
1029 * is still in_use by user-space.
1030 */
1031 if (prb_curr_blk_in_use(pkc, pbd)) {
1032 /* Can't record this packet */
1033 return NULL;
1034 } else {
1035 /*
1036 * Ok, the block was released by user-space.
1037 * Now let's open that block.
1038 * opening a block also thaws the queue.
1039 * Thawing is a side effect.
1040 */
1041 prb_open_block(pkc, pbd);
1042 }
1043 }
1044
1045 smp_mb();
1046 curr = pkc->nxt_offset;
1047 pkc->skb = skb;
1048 end = (char *) ((char *)pbd + pkc->kblk_size);
1049
1050 /* first try the current block */
1051 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1052 prb_fill_curr_block(curr, pkc, pbd, len);
1053 return (void *)curr;
1054 }
1055
1056 /* Ok, close the current block */
1057 prb_retire_current_block(pkc, po, 0);
1058
1059 /* Now, try to dispatch the next block */
1060 curr = (char *)prb_dispatch_next_block(pkc, po);
1061 if (curr) {
1062 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1063 prb_fill_curr_block(curr, pkc, pbd, len);
1064 return (void *)curr;
1065 }
1066
1067 /*
1068 * No free blocks are available. User-space hasn't caught up yet.
1069 * Queue was just frozen and now this packet will get dropped.
1070 */
1071 return NULL;
1072}
1073
1074static inline void *packet_current_rx_frame(struct packet_sock *po,
1075 struct sk_buff *skb,
1076 int status, unsigned int len)
1077{
1078 char *curr = NULL;
1079 switch (po->tp_version) {
1080 case TPACKET_V1:
1081 case TPACKET_V2:
1082 curr = packet_lookup_frame(po, &po->rx_ring,
1083 po->rx_ring.head, status);
1084 return curr;
1085 case TPACKET_V3:
1086 return __packet_lookup_frame_in_block(po, skb, status, len);
1087 default:
1088 WARN(1, "TPACKET version not supported\n");
1089 BUG();
1090 return 0;
1091 }
1092}
1093
1094static inline void *prb_lookup_block(struct packet_sock *po,
1095 struct packet_ring_buffer *rb,
1096 unsigned int previous,
1097 int status)
1098{
1099 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1100 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
1101
1102 if (status != BLOCK_STATUS(pbd))
1103 return NULL;
1104 return pbd;
1105}
1106
1107static inline int prb_previous_blk_num(struct packet_ring_buffer *rb)
1108{
1109 unsigned int prev;
1110 if (rb->prb_bdqc.kactive_blk_num)
1111 prev = rb->prb_bdqc.kactive_blk_num-1;
1112 else
1113 prev = rb->prb_bdqc.knum_blocks-1;
1114 return prev;
1115}
1116
1117/* Assumes caller has held the rx_queue.lock */
1118static inline void *__prb_previous_block(struct packet_sock *po,
1119 struct packet_ring_buffer *rb,
1120 int status)
1121{
1122 unsigned int previous = prb_previous_blk_num(rb);
1123 return prb_lookup_block(po, rb, previous, status);
1124}
1125
1126static inline void *packet_previous_rx_frame(struct packet_sock *po,
1127 struct packet_ring_buffer *rb,
1128 int status)
1129{
1130 if (po->tp_version <= TPACKET_V2)
1131 return packet_previous_frame(po, rb, status);
1132
1133 return __prb_previous_block(po, rb, status);
1134}
1135
1136static inline void packet_increment_rx_head(struct packet_sock *po,
1137 struct packet_ring_buffer *rb)
1138{
1139 switch (po->tp_version) {
1140 case TPACKET_V1:
1141 case TPACKET_V2:
1142 return packet_increment_head(rb);
1143 case TPACKET_V3:
1144 default:
1145 WARN(1, "TPACKET version not supported.\n");
1146 BUG();
1147 return;
1148 }
1149}
1150
392static inline void *packet_previous_frame(struct packet_sock *po, 1151static inline void *packet_previous_frame(struct packet_sock *po,
393 struct packet_ring_buffer *rb, 1152 struct packet_ring_buffer *rb,
394 int status) 1153 int status)
@@ -454,43 +1213,6 @@ static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *sk
454 return f->arr[cpu % num]; 1213 return f->arr[cpu % num];
455} 1214}
456 1215
457static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
458{
459#ifdef CONFIG_INET
460 const struct iphdr *iph;
461 u32 len;
462
463 if (skb->protocol != htons(ETH_P_IP))
464 return skb;
465
466 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
467 return skb;
468
469 iph = ip_hdr(skb);
470 if (iph->ihl < 5 || iph->version != 4)
471 return skb;
472 if (!pskb_may_pull(skb, iph->ihl*4))
473 return skb;
474 iph = ip_hdr(skb);
475 len = ntohs(iph->tot_len);
476 if (skb->len < len || len < (iph->ihl * 4))
477 return skb;
478
479 if (ip_is_fragment(ip_hdr(skb))) {
480 skb = skb_share_check(skb, GFP_ATOMIC);
481 if (skb) {
482 if (pskb_trim_rcsum(skb, len))
483 return skb;
484 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
485 if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
486 return NULL;
487 skb->rxhash = 0;
488 }
489 }
490#endif
491 return skb;
492}
493
494static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1216static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
495 struct packet_type *pt, struct net_device *orig_dev) 1217 struct packet_type *pt, struct net_device *orig_dev)
496{ 1218{
@@ -509,7 +1231,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
509 case PACKET_FANOUT_HASH: 1231 case PACKET_FANOUT_HASH:
510 default: 1232 default:
511 if (f->defrag) { 1233 if (f->defrag) {
512 skb = fanout_check_defrag(skb); 1234 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
513 if (!skb) 1235 if (!skb)
514 return 0; 1236 return 0;
515 } 1237 }
@@ -985,12 +1707,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
985 union { 1707 union {
986 struct tpacket_hdr *h1; 1708 struct tpacket_hdr *h1;
987 struct tpacket2_hdr *h2; 1709 struct tpacket2_hdr *h2;
1710 struct tpacket3_hdr *h3;
988 void *raw; 1711 void *raw;
989 } h; 1712 } h;
990 u8 *skb_head = skb->data; 1713 u8 *skb_head = skb->data;
991 int skb_len = skb->len; 1714 int skb_len = skb->len;
992 unsigned int snaplen, res; 1715 unsigned int snaplen, res;
993 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; 1716 unsigned long status = TP_STATUS_USER;
994 unsigned short macoff, netoff, hdrlen; 1717 unsigned short macoff, netoff, hdrlen;
995 struct sk_buff *copy_skb = NULL; 1718 struct sk_buff *copy_skb = NULL;
996 struct timeval tv; 1719 struct timeval tv;
@@ -1036,37 +1759,46 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1036 po->tp_reserve; 1759 po->tp_reserve;
1037 macoff = netoff - maclen; 1760 macoff = netoff - maclen;
1038 } 1761 }
1039 1762 if (po->tp_version <= TPACKET_V2) {
1040 if (macoff + snaplen > po->rx_ring.frame_size) { 1763 if (macoff + snaplen > po->rx_ring.frame_size) {
1041 if (po->copy_thresh && 1764 if (po->copy_thresh &&
1042 atomic_read(&sk->sk_rmem_alloc) + skb->truesize < 1765 atomic_read(&sk->sk_rmem_alloc) + skb->truesize
1043 (unsigned)sk->sk_rcvbuf) { 1766 < (unsigned)sk->sk_rcvbuf) {
1044 if (skb_shared(skb)) { 1767 if (skb_shared(skb)) {
1045 copy_skb = skb_clone(skb, GFP_ATOMIC); 1768 copy_skb = skb_clone(skb, GFP_ATOMIC);
1046 } else { 1769 } else {
1047 copy_skb = skb_get(skb); 1770 copy_skb = skb_get(skb);
1048 skb_head = skb->data; 1771 skb_head = skb->data;
1772 }
1773 if (copy_skb)
1774 skb_set_owner_r(copy_skb, sk);
1049 } 1775 }
1050 if (copy_skb) 1776 snaplen = po->rx_ring.frame_size - macoff;
1051 skb_set_owner_r(copy_skb, sk); 1777 if ((int)snaplen < 0)
1778 snaplen = 0;
1052 } 1779 }
1053 snaplen = po->rx_ring.frame_size - macoff;
1054 if ((int)snaplen < 0)
1055 snaplen = 0;
1056 } 1780 }
1057
1058 spin_lock(&sk->sk_receive_queue.lock); 1781 spin_lock(&sk->sk_receive_queue.lock);
1059 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL); 1782 h.raw = packet_current_rx_frame(po, skb,
1783 TP_STATUS_KERNEL, (macoff+snaplen));
1060 if (!h.raw) 1784 if (!h.raw)
1061 goto ring_is_full; 1785 goto ring_is_full;
1062 packet_increment_head(&po->rx_ring); 1786 if (po->tp_version <= TPACKET_V2) {
1787 packet_increment_rx_head(po, &po->rx_ring);
1788 /*
1789 * LOSING will be reported till you read the stats,
1790 * because it's COR - Clear On Read.
1791 * Anyways, moving it for V1/V2 only as V3 doesn't need this
1792 * at packet level.
1793 */
1794 if (po->stats.tp_drops)
1795 status |= TP_STATUS_LOSING;
1796 }
1063 po->stats.tp_packets++; 1797 po->stats.tp_packets++;
1064 if (copy_skb) { 1798 if (copy_skb) {
1065 status |= TP_STATUS_COPY; 1799 status |= TP_STATUS_COPY;
1066 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 1800 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1067 } 1801 }
1068 if (!po->stats.tp_drops)
1069 status &= ~TP_STATUS_LOSING;
1070 spin_unlock(&sk->sk_receive_queue.lock); 1802 spin_unlock(&sk->sk_receive_queue.lock);
1071 1803
1072 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 1804 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
@@ -1117,6 +1849,29 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1117 h.h2->tp_padding = 0; 1849 h.h2->tp_padding = 0;
1118 hdrlen = sizeof(*h.h2); 1850 hdrlen = sizeof(*h.h2);
1119 break; 1851 break;
1852 case TPACKET_V3:
1853 /* tp_next_offset and the vlan info are already populated
1854 * above, so don't clear those fields here.
1855 */
1856 h.h3->tp_status |= status;
1857 h.h3->tp_len = skb->len;
1858 h.h3->tp_snaplen = snaplen;
1859 h.h3->tp_mac = macoff;
1860 h.h3->tp_net = netoff;
1861 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1862 && shhwtstamps->syststamp.tv64)
1863 ts = ktime_to_timespec(shhwtstamps->syststamp);
1864 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1865 && shhwtstamps->hwtstamp.tv64)
1866 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1867 else if (skb->tstamp.tv64)
1868 ts = ktime_to_timespec(skb->tstamp);
1869 else
1870 getnstimeofday(&ts);
1871 h.h3->tp_sec = ts.tv_sec;
1872 h.h3->tp_nsec = ts.tv_nsec;
1873 hdrlen = sizeof(*h.h3);
1874 break;
1120 default: 1875 default:
1121 BUG(); 1876 BUG();
1122 } 1877 }
@@ -1137,13 +1892,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1137 { 1892 {
1138 u8 *start, *end; 1893 u8 *start, *end;
1139 1894
1140 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen); 1895 if (po->tp_version <= TPACKET_V2) {
1141 for (start = h.raw; start < end; start += PAGE_SIZE) 1896 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1142 flush_dcache_page(pgv_to_page(start)); 1897 + macoff + snaplen);
1898 for (start = h.raw; start < end; start += PAGE_SIZE)
1899 flush_dcache_page(pgv_to_page(start));
1900 }
1143 smp_wmb(); 1901 smp_wmb();
1144 } 1902 }
1145#endif 1903#endif
1146 __packet_set_status(po, h.raw, status); 1904 if (po->tp_version <= TPACKET_V2)
1905 __packet_set_status(po, h.raw, status);
1906 else
1907 prb_clear_blk_fill_status(&po->rx_ring);
1147 1908
1148 sk->sk_data_ready(sk, 0); 1909 sk->sk_data_ready(sk, 0);
1149 1910
@@ -1170,8 +1931,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
1170 struct packet_sock *po = pkt_sk(skb->sk); 1931 struct packet_sock *po = pkt_sk(skb->sk);
1171 void *ph; 1932 void *ph;
1172 1933
1173 BUG_ON(skb == NULL);
1174
1175 if (likely(po->tx_ring.pg_vec)) { 1934 if (likely(po->tx_ring.pg_vec)) {
1176 ph = skb_shinfo(skb)->destructor_arg; 1935 ph = skb_shinfo(skb)->destructor_arg;
1177 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); 1936 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
@@ -1634,7 +2393,7 @@ static int packet_release(struct socket *sock)
1634 struct sock *sk = sock->sk; 2393 struct sock *sk = sock->sk;
1635 struct packet_sock *po; 2394 struct packet_sock *po;
1636 struct net *net; 2395 struct net *net;
1637 struct tpacket_req req; 2396 union tpacket_req_u req_u;
1638 2397
1639 if (!sk) 2398 if (!sk)
1640 return 0; 2399 return 0;
@@ -1657,13 +2416,13 @@ static int packet_release(struct socket *sock)
1657 2416
1658 packet_flush_mclist(sk); 2417 packet_flush_mclist(sk);
1659 2418
1660 memset(&req, 0, sizeof(req)); 2419 memset(&req_u, 0, sizeof(req_u));
1661 2420
1662 if (po->rx_ring.pg_vec) 2421 if (po->rx_ring.pg_vec)
1663 packet_set_ring(sk, &req, 1, 0); 2422 packet_set_ring(sk, &req_u, 1, 0);
1664 2423
1665 if (po->tx_ring.pg_vec) 2424 if (po->tx_ring.pg_vec)
1666 packet_set_ring(sk, &req, 1, 1); 2425 packet_set_ring(sk, &req_u, 1, 1);
1667 2426
1668 fanout_release(sk); 2427 fanout_release(sk);
1669 2428
@@ -2283,15 +3042,27 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2283 case PACKET_RX_RING: 3042 case PACKET_RX_RING:
2284 case PACKET_TX_RING: 3043 case PACKET_TX_RING:
2285 { 3044 {
2286 struct tpacket_req req; 3045 union tpacket_req_u req_u;
3046 int len;
2287 3047
2288 if (optlen < sizeof(req)) 3048 switch (po->tp_version) {
3049 case TPACKET_V1:
3050 case TPACKET_V2:
3051 len = sizeof(req_u.req);
3052 break;
3053 case TPACKET_V3:
3054 default:
3055 len = sizeof(req_u.req3);
3056 break;
3057 }
3058 if (optlen < len)
2289 return -EINVAL; 3059 return -EINVAL;
2290 if (pkt_sk(sk)->has_vnet_hdr) 3060 if (pkt_sk(sk)->has_vnet_hdr)
2291 return -EINVAL; 3061 return -EINVAL;
2292 if (copy_from_user(&req, optval, sizeof(req))) 3062 if (copy_from_user(&req_u.req, optval, len))
2293 return -EFAULT; 3063 return -EFAULT;
2294 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 3064 return packet_set_ring(sk, &req_u, 0,
3065 optname == PACKET_TX_RING);
2295 } 3066 }
2296 case PACKET_COPY_THRESH: 3067 case PACKET_COPY_THRESH:
2297 { 3068 {
@@ -2318,6 +3089,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2318 switch (val) { 3089 switch (val) {
2319 case TPACKET_V1: 3090 case TPACKET_V1:
2320 case TPACKET_V2: 3091 case TPACKET_V2:
3092 case TPACKET_V3:
2321 po->tp_version = val; 3093 po->tp_version = val;
2322 return 0; 3094 return 0;
2323 default: 3095 default:
@@ -2427,6 +3199,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2427 struct packet_sock *po = pkt_sk(sk); 3199 struct packet_sock *po = pkt_sk(sk);
2428 void *data; 3200 void *data;
2429 struct tpacket_stats st; 3201 struct tpacket_stats st;
3202 union tpacket_stats_u st_u;
2430 3203
2431 if (level != SOL_PACKET) 3204 if (level != SOL_PACKET)
2432 return -ENOPROTOOPT; 3205 return -ENOPROTOOPT;
@@ -2439,15 +3212,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2439 3212
2440 switch (optname) { 3213 switch (optname) {
2441 case PACKET_STATISTICS: 3214 case PACKET_STATISTICS:
2442 if (len > sizeof(struct tpacket_stats)) 3215 if (po->tp_version == TPACKET_V3) {
2443 len = sizeof(struct tpacket_stats); 3216 len = sizeof(struct tpacket_stats_v3);
3217 } else {
3218 if (len > sizeof(struct tpacket_stats))
3219 len = sizeof(struct tpacket_stats);
3220 }
2444 spin_lock_bh(&sk->sk_receive_queue.lock); 3221 spin_lock_bh(&sk->sk_receive_queue.lock);
2445 st = po->stats; 3222 if (po->tp_version == TPACKET_V3) {
3223 memcpy(&st_u.stats3, &po->stats,
3224 sizeof(struct tpacket_stats));
3225 st_u.stats3.tp_freeze_q_cnt =
3226 po->stats_u.stats3.tp_freeze_q_cnt;
3227 st_u.stats3.tp_packets += po->stats.tp_drops;
3228 data = &st_u.stats3;
3229 } else {
3230 st = po->stats;
3231 st.tp_packets += st.tp_drops;
3232 data = &st;
3233 }
2446 memset(&po->stats, 0, sizeof(st)); 3234 memset(&po->stats, 0, sizeof(st));
2447 spin_unlock_bh(&sk->sk_receive_queue.lock); 3235 spin_unlock_bh(&sk->sk_receive_queue.lock);
2448 st.tp_packets += st.tp_drops;
2449
2450 data = &st;
2451 break; 3236 break;
2452 case PACKET_AUXDATA: 3237 case PACKET_AUXDATA:
2453 if (len > sizeof(int)) 3238 if (len > sizeof(int))
@@ -2488,6 +3273,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2488 case TPACKET_V2: 3273 case TPACKET_V2:
2489 val = sizeof(struct tpacket2_hdr); 3274 val = sizeof(struct tpacket2_hdr);
2490 break; 3275 break;
3276 case TPACKET_V3:
3277 val = sizeof(struct tpacket3_hdr);
3278 break;
2491 default: 3279 default:
2492 return -EINVAL; 3280 return -EINVAL;
2493 } 3281 }
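
User space retrieves the new counter through the same PACKET_STATISTICS option; on a V3 socket the kernel copies out the larger struct, as above. A short sketch (assuming tpacket_stats_v3 exposes tp_packets, tp_drops and tp_freeze_q_cnt, matching the fields touched here):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void print_v3_stats(int fd)
{
	struct tpacket_stats_v3 st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		/* counters are clear-on-read, per the LOSING comment above */
		printf("pkts %u drops %u queue-freezes %u\n",
		       st.tp_packets, st.tp_drops, st.tp_freeze_q_cnt);
}
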
@@ -2644,7 +3432,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
2644 3432
2645 spin_lock_bh(&sk->sk_receive_queue.lock); 3433 spin_lock_bh(&sk->sk_receive_queue.lock);
2646 if (po->rx_ring.pg_vec) { 3434 if (po->rx_ring.pg_vec) {
2647 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) 3435 if (!packet_previous_rx_frame(po, &po->rx_ring,
3436 TP_STATUS_KERNEL))
2648 mask |= POLLIN | POLLRDNORM; 3437 mask |= POLLIN | POLLRDNORM;
2649 } 3438 }
2650 spin_unlock_bh(&sk->sk_receive_queue.lock); 3439 spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2763,7 +3552,7 @@ out_free_pgvec:
2763 goto out; 3552 goto out;
2764} 3553}
2765 3554
2766static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 3555static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2767 int closing, int tx_ring) 3556 int closing, int tx_ring)
2768{ 3557{
2769 struct pgv *pg_vec = NULL; 3558 struct pgv *pg_vec = NULL;
@@ -2772,7 +3561,15 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2772 struct packet_ring_buffer *rb; 3561 struct packet_ring_buffer *rb;
2773 struct sk_buff_head *rb_queue; 3562 struct sk_buff_head *rb_queue;
2774 __be16 num; 3563 __be16 num;
2775 int err; 3564 int err = -EINVAL;
3565 /* Added to avoid minimal code churn */
3566 struct tpacket_req *req = &req_u->req;
3567
3568 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3569 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3570 WARN(1, "Tx-ring is not supported.\n");
3571 goto out;
3572 }
2776 3573
2777 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 3574 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
2778 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 3575 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@ -2798,6 +3595,9 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2798 case TPACKET_V2: 3595 case TPACKET_V2:
2799 po->tp_hdrlen = TPACKET2_HDRLEN; 3596 po->tp_hdrlen = TPACKET2_HDRLEN;
2800 break; 3597 break;
3598 case TPACKET_V3:
3599 po->tp_hdrlen = TPACKET3_HDRLEN;
3600 break;
2801 } 3601 }
2802 3602
2803 err = -EINVAL; 3603 err = -EINVAL;
@@ -2823,6 +3623,17 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2823 pg_vec = alloc_pg_vec(req, order); 3623 pg_vec = alloc_pg_vec(req, order);
2824 if (unlikely(!pg_vec)) 3624 if (unlikely(!pg_vec))
2825 goto out; 3625 goto out;
3626 switch (po->tp_version) {
3627 case TPACKET_V3:
3628 /* Transmit path is not supported. We checked
3629 * it above but just being paranoid
3630 */
3631 if (!tx_ring)
3632 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3633 break;
3634 default:
3635 break;
3636 }
2826 } 3637 }
2827 /* Done */ 3638 /* Done */
2828 else { 3639 else {
@@ -2875,7 +3686,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2875 register_prot_hook(sk); 3686 register_prot_hook(sk);
2876 } 3687 }
2877 spin_unlock(&po->bind_lock); 3688 spin_unlock(&po->bind_lock);
2878 3689 if (closing && (po->tp_version > TPACKET_V2)) {
3690 /* Because we don't support block-based V3 on tx-ring */
3691 if (!tx_ring)
3692 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3693 }
2879 release_sock(sk); 3694 release_sock(sk);
2880 3695
2881 if (pg_vec) 3696 if (pg_vec)
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index c6fffd946d42..bf10ea8fbbf9 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
480 if (proto_tab[protocol]) 480 if (proto_tab[protocol])
481 err = -EBUSY; 481 err = -EBUSY;
482 else 482 else
483 rcu_assign_pointer(proto_tab[protocol], pp); 483 RCU_INIT_POINTER(proto_tab[protocol], pp);
484 mutex_unlock(&proto_tab_lock); 484 mutex_unlock(&proto_tab_lock);
485 485
486 return err; 486 return err;
@@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
491{ 491{
492 mutex_lock(&proto_tab_lock); 492 mutex_lock(&proto_tab_lock);
493 BUG_ON(proto_tab[protocol] != pp); 493 BUG_ON(proto_tab[protocol] != pp);
494 rcu_assign_pointer(proto_tab[protocol], NULL); 494 RCU_INIT_POINTER(proto_tab[protocol], NULL);
495 mutex_unlock(&proto_tab_lock); 495 mutex_unlock(&proto_tab_lock);
496 synchronize_rcu(); 496 synchronize_rcu();
497 proto_unregister(pp->prot); 497 proto_unregister(pp->prot);
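
This file (and the phonet/rds hunks below) swaps rcu_assign_pointer() for RCU_INIT_POINTER() where the publish needs no ordering: RCU_INIT_POINTER() skips the write barrier, which is safe when storing NULL or when no reader can yet observe the pointer. A hedged illustration of the rule of thumb:

/* storing NULL: nothing to order, the cheap form is fine */
mutex_lock(&proto_tab_lock);
RCU_INIT_POINTER(proto_tab[protocol], NULL);
mutex_unlock(&proto_tab_lock);

/* publishing a live object to concurrent readers: keep the barrier so
 * the object's fields are visible before the pointer is */
rcu_assign_pointer(some_table[idx], new_obj);
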
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index d2df8f33160b..c5827614376b 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev)
276 mutex_lock(&pnn->routes.lock); 276 mutex_lock(&pnn->routes.lock);
277 for (i = 0; i < 64; i++) 277 for (i = 0; i < 64; i++)
278 if (dev == pnn->routes.table[i]) { 278 if (dev == pnn->routes.table[i]) {
279 rcu_assign_pointer(pnn->routes.table[i], NULL); 279 RCU_INIT_POINTER(pnn->routes.table[i], NULL);
280 set_bit(i, deleted); 280 set_bit(i, deleted);
281 } 281 }
282 mutex_unlock(&pnn->routes.lock); 282 mutex_unlock(&pnn->routes.lock);
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
390 daddr = daddr >> 2; 390 daddr = daddr >> 2;
391 mutex_lock(&routes->lock); 391 mutex_lock(&routes->lock);
392 if (routes->table[daddr] == NULL) { 392 if (routes->table[daddr] == NULL) {
393 rcu_assign_pointer(routes->table[daddr], dev); 393 RCU_INIT_POINTER(routes->table[daddr], dev);
394 dev_hold(dev); 394 dev_hold(dev);
395 err = 0; 395 err = 0;
396 } 396 }
@@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
406 daddr = daddr >> 2; 406 daddr = daddr >> 2;
407 mutex_lock(&routes->lock); 407 mutex_lock(&routes->lock);
408 if (dev == routes->table[daddr]) 408 if (dev == routes->table[daddr])
409 rcu_assign_pointer(routes->table[daddr], NULL); 409 RCU_INIT_POINTER(routes->table[daddr], NULL);
410 else 410 else
411 dev = NULL; 411 dev = NULL;
412 mutex_unlock(&routes->lock); 412 mutex_unlock(&routes->lock);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ab07711cf2f4..676d18dc75b7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -679,7 +679,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
679 mutex_lock(&resource_mutex); 679 mutex_lock(&resource_mutex);
680 if (pnres.sk[res] == NULL) { 680 if (pnres.sk[res] == NULL) {
681 sock_hold(sk); 681 sock_hold(sk);
682 rcu_assign_pointer(pnres.sk[res], sk); 682 RCU_INIT_POINTER(pnres.sk[res], sk);
683 ret = 0; 683 ret = 0;
684 } 684 }
685 mutex_unlock(&resource_mutex); 685 mutex_unlock(&resource_mutex);
@@ -695,7 +695,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
695 695
696 mutex_lock(&resource_mutex); 696 mutex_lock(&resource_mutex);
697 if (pnres.sk[res] == sk) { 697 if (pnres.sk[res] == sk) {
698 rcu_assign_pointer(pnres.sk[res], NULL); 698 RCU_INIT_POINTER(pnres.sk[res], NULL);
699 ret = 0; 699 ret = 0;
700 } 700 }
701 mutex_unlock(&resource_mutex); 701 mutex_unlock(&resource_mutex);
@@ -714,7 +714,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
714 mutex_lock(&resource_mutex); 714 mutex_lock(&resource_mutex);
715 for (res = 0; res < 256; res++) { 715 for (res = 0; res < 256; res++) {
716 if (pnres.sk[res] == sk) { 716 if (pnres.sk[res] == sk) {
717 rcu_assign_pointer(pnres.sk[res], NULL); 717 RCU_INIT_POINTER(pnres.sk[res], NULL);
718 match++; 718 match++;
719 } 719 }
720 } 720 }
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index ec753b3ae72a..4cf6dc7910e4 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,6 +9,7 @@ config RDS
 
 config RDS_RDMA
 	tristate "RDS over Infiniband and iWARP"
+	select LLIST
 	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index cd67026be2d5..51c868923f64 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -375,23 +375,21 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
         goto out;
     }
 
-    ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+    ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
                                ibdev_to_node(dev));
     if (!ic->i_sends) {
         ret = -ENOMEM;
         rdsdebug("send allocation failed\n");
         goto out;
     }
-    memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
 
-    ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+    ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
                                ibdev_to_node(dev));
     if (!ic->i_recvs) {
         ret = -ENOMEM;
         rdsdebug("recv allocation failed\n");
         goto out;
     }
-    memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
 
     rds_ib_recv_init_ack(ic);
 
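
These two hunks fold a vmalloc_node() + memset() pair into vzalloc_node(), which returns zeroed, virtually contiguous memory allocated on the requested NUMA node. An illustrative sketch (struct ring_entry and ring_alloc() are hypothetical):

#include <linux/vmalloc.h>

struct ring_entry { u64 token; };

/* One call allocates and zeroes the whole ring on 'node'. */
static struct ring_entry *ring_alloc(size_t nr, int node)
{
    return vzalloc_node(nr * sizeof(struct ring_entry), node);
}

/* Pair with vfree(ring) on teardown. */
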
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 819c35a0d9cb..e8fdb172adbb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -33,10 +33,10 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/rculist.h>
+#include <linux/llist.h>
 
 #include "rds.h"
 #include "ib.h"
-#include "xlist.h"
 
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
@@ -49,7 +49,7 @@ struct rds_ib_mr {
     struct rds_ib_mr_pool *pool;
     struct ib_fmr *fmr;
 
-    struct xlist_head xlist;
+    struct llist_node llnode;
 
     /* unmap_list is for freeing */
     struct list_head unmap_list;
@@ -71,9 +71,9 @@ struct rds_ib_mr_pool {
     atomic_t item_count;    /* total # of MRs */
     atomic_t dirty_count;   /* # dirty of MRs */
 
-    struct xlist_head drop_list;    /* MRs that have reached their max_maps limit */
-    struct xlist_head free_list;    /* unused MRs */
-    struct xlist_head clean_list;   /* global unused & unamapped MRs */
+    struct llist_head drop_list;    /* MRs that have reached their max_maps limit */
+    struct llist_head free_list;    /* unused MRs */
+    struct llist_head clean_list;   /* global unused & unamapped MRs */
     wait_queue_head_t flush_wait;
 
     atomic_t free_pinned;   /* memory pinned by free MRs */
@@ -220,9 +220,9 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
     if (!pool)
         return ERR_PTR(-ENOMEM);
 
-    INIT_XLIST_HEAD(&pool->free_list);
-    INIT_XLIST_HEAD(&pool->drop_list);
-    INIT_XLIST_HEAD(&pool->clean_list);
+    init_llist_head(&pool->free_list);
+    init_llist_head(&pool->drop_list);
+    init_llist_head(&pool->clean_list);
     mutex_init(&pool->flush_lock);
     init_waitqueue_head(&pool->flush_wait);
     INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
@@ -260,26 +260,18 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
     kfree(pool);
 }
 
-static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
-                         struct rds_ib_mr **ibmr_ret)
-{
-    struct xlist_head *ibmr_xl;
-    ibmr_xl = xlist_del_head_fast(xl);
-    *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
-}
-
 static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
 {
     struct rds_ib_mr *ibmr = NULL;
-    struct xlist_head *ret;
+    struct llist_node *ret;
     unsigned long *flag;
 
     preempt_disable();
     flag = &__get_cpu_var(clean_list_grace);
     set_bit(CLEAN_LIST_BUSY_BIT, flag);
-    ret = xlist_del_head(&pool->clean_list);
+    ret = llist_del_first(&pool->clean_list);
     if (ret)
-        ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+        ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
 
     clear_bit(CLEAN_LIST_BUSY_BIT, flag);
     preempt_enable();
@@ -529,46 +521,44 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
 }
 
 /*
- * given an xlist of mrs, put them all into the list_head for more processing
+ * given an llist of mrs, put them all into the list_head for more processing
  */
-static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
 {
     struct rds_ib_mr *ibmr;
-    struct xlist_head splice;
-    struct xlist_head *cur;
-    struct xlist_head *next;
-
-    splice.next = NULL;
-    xlist_splice(xlist, &splice);
-    cur = splice.next;
-    while (cur) {
-        next = cur->next;
-        ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+    struct llist_node *node;
+    struct llist_node *next;
+
+    node = llist_del_all(llist);
+    while (node) {
+        next = node->next;
+        ibmr = llist_entry(node, struct rds_ib_mr, llnode);
         list_add_tail(&ibmr->unmap_list, list);
-        cur = next;
+        node = next;
     }
 }
 
 /*
- * this takes a list head of mrs and turns it into an xlist of clusters.
- * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
- * reuse.
+ * this takes a list head of mrs and turns it into linked llist nodes
+ * of clusters. Each cluster has linked llist nodes of
+ * MR_CLUSTER_SIZE mrs that are ready for reuse.
  */
-static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
-                                 struct list_head *list, struct xlist_head *xlist,
-                                 struct xlist_head **tail_ret)
+static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
+                                struct list_head *list,
+                                struct llist_node **nodes_head,
+                                struct llist_node **nodes_tail)
 {
     struct rds_ib_mr *ibmr;
-    struct xlist_head *cur_mr = xlist;
-    struct xlist_head *tail_mr = NULL;
+    struct llist_node *cur = NULL;
+    struct llist_node **next = nodes_head;
 
     list_for_each_entry(ibmr, list, unmap_list) {
-        tail_mr = &ibmr->xlist;
-        tail_mr->next = NULL;
-        cur_mr->next = tail_mr;
-        cur_mr = tail_mr;
+        cur = &ibmr->llnode;
+        *next = cur;
+        next = &cur->next;
     }
-    *tail_ret = tail_mr;
+    *next = NULL;
+    *nodes_tail = cur;
 }
 
 /*
@@ -581,8 +571,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                 int free_all, struct rds_ib_mr **ibmr_ret)
 {
     struct rds_ib_mr *ibmr, *next;
-    struct xlist_head clean_xlist;
-    struct xlist_head *clean_tail;
+    struct llist_node *clean_nodes;
+    struct llist_node *clean_tail;
     LIST_HEAD(unmap_list);
     LIST_HEAD(fmr_list);
     unsigned long unpinned = 0;
@@ -603,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 
             prepare_to_wait(&pool->flush_wait, &wait,
                             TASK_UNINTERRUPTIBLE);
-            if (xlist_empty(&pool->clean_list))
+            if (llist_empty(&pool->clean_list))
                 schedule();
 
             ibmr = rds_ib_reuse_fmr(pool);
@@ -628,10 +618,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
     /* Get the list of all MRs to be dropped. Ordering matters -
      * we want to put drop_list ahead of free_list.
      */
-    xlist_append_to_list(&pool->drop_list, &unmap_list);
-    xlist_append_to_list(&pool->free_list, &unmap_list);
+    llist_append_to_list(&pool->drop_list, &unmap_list);
+    llist_append_to_list(&pool->free_list, &unmap_list);
     if (free_all)
-        xlist_append_to_list(&pool->clean_list, &unmap_list);
+        llist_append_to_list(&pool->clean_list, &unmap_list);
 
     free_goal = rds_ib_flush_goal(pool, free_all);
 
@@ -663,22 +653,22 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
     if (!list_empty(&unmap_list)) {
         /* we have to make sure that none of the things we're about
          * to put on the clean list would race with other cpus trying
-         * to pull items off. The xlist would explode if we managed to
+         * to pull items off. The llist would explode if we managed to
          * remove something from the clean list and then add it back again
-         * while another CPU was spinning on that same item in xlist_del_head.
+         * while another CPU was spinning on that same item in llist_del_first.
          *
-         * This is pretty unlikely, but just in case wait for an xlist grace period
+         * This is pretty unlikely, but just in case wait for an llist grace period
          * here before adding anything back into the clean list.
          */
         wait_clean_list_grace();
 
-        list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+        list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
         if (ibmr_ret)
-            refill_local(pool, &clean_xlist, ibmr_ret);
+            *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
 
-        /* refill_local may have emptied our list */
-        if (!xlist_empty(&clean_xlist))
-            xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+        /* more than one entry in llist nodes */
+        if (clean_nodes->next)
+            llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
 
     }
 
@@ -711,9 +701,9 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 
     /* Return it to the pool's free list */
     if (ibmr->remap_count >= pool->fmr_attr.max_maps)
-        xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
+        llist_add(&ibmr->llnode, &pool->drop_list);
     else
-        xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
+        llist_add(&ibmr->llnode, &pool->free_list);
 
     atomic_add(ibmr->sg_len, &pool->free_pinned);
     atomic_inc(&pool->dirty_count);
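
The whole ib_rdma.c conversion follows the standard <linux/llist.h> split: any context may push with llist_add() (a lock-free cmpxchg loop), while a consumer either detaches the entire list atomically with llist_del_all() and walks it privately, or pops one node with llist_del_first(), which is the only operation that needs the clean_list_grace protection against the reuse race described in the comment above. A self-contained sketch of the push/drain halves (struct mr and the function names are illustrative):

#include <linux/llist.h>

struct mr {
    struct llist_node llnode;
    int id;
};

static LLIST_HEAD(free_mrs);

/* Producer: lock-free push, safe from any context. */
static void mr_free(struct mr *m)
{
    llist_add(&m->llnode, &free_mrs);
}

/* Consumer: detach everything at once, then walk without contention. */
static void mr_drain(void)
{
    struct llist_node *node = llist_del_all(&free_mrs);

    while (node) {
        struct llist_node *next = node->next;
        struct mr *m = llist_entry(node, struct mr, llnode);

        /* ... unmap or recycle m ... */
        node = next;
    }
}
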
diff --git a/net/rds/rds.h b/net/rds/rds.h
index da8adac2bf06..7eaba1831f0d 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -36,8 +36,8 @@
 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
 #else
 /* sigh, pr_debug() causes unused variable warnings */
-static inline void __attribute__ ((format (printf, 1, 2)))
-rdsdebug(char *fmt, ...)
+static inline __printf(1, 2)
+void rdsdebug(char *fmt, ...)
 {
 }
 #endif
@@ -625,8 +625,8 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                             struct rds_info_lengths *lens,
                             int (*visitor)(struct rds_connection *, void *),
                             size_t item_len);
-void __rds_conn_error(struct rds_connection *conn, const char *, ...)
-    __attribute__ ((format (printf, 2, 3)));
+__printf(2, 3)
+void __rds_conn_error(struct rds_connection *conn, const char *, ...);
 #define rds_conn_error(conn, fmt...) \
     __rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
 
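
__printf(a, b) is the kernel's shorthand for __attribute__((format(printf, a, b))): argument a is the position of the format string, and arguments from position b onward are type-checked against it. Placing the annotation before the return type also keeps declarations on one grepable line. A hedged example with a hypothetical logger:

#include <linux/compiler.h>

struct conn;

__printf(2, 3)
void conn_log(struct conn *c, const char *fmt, ...);

/*
 * gcc now checks call sites; for example
 *     conn_log(c, "%s", 42);
 * warns about the int passed for %s.
 */
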
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
deleted file mode 100644
index e6b5190daddd..000000000000
--- a/net/rds/xlist.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _LINUX_XLIST_H
-#define _LINUX_XLIST_H
-
-#include <linux/stddef.h>
-#include <linux/poison.h>
-#include <linux/prefetch.h>
-#include <asm/system.h>
-
-struct xlist_head {
-    struct xlist_head *next;
-};
-
-static inline void INIT_XLIST_HEAD(struct xlist_head *list)
-{
-    list->next = NULL;
-}
-
-static inline int xlist_empty(struct xlist_head *head)
-{
-    return head->next == NULL;
-}
-
-static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
-                             struct xlist_head *head)
-{
-    struct xlist_head *cur;
-    struct xlist_head *check;
-
-    while (1) {
-        cur = head->next;
-        tail->next = cur;
-        check = cmpxchg(&head->next, cur, new);
-        if (check == cur)
-            break;
-    }
-}
-
-static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
-{
-    struct xlist_head *cur;
-    struct xlist_head *check;
-    struct xlist_head *next;
-
-    while (1) {
-        cur = head->next;
-        if (!cur)
-            goto out;
-
-        next = cur->next;
-        check = cmpxchg(&head->next, cur, next);
-        if (check == cur)
-            goto out;
-    }
-out:
-    return cur;
-}
-
-static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
-{
-    struct xlist_head *cur;
-
-    cur = head->next;
-    if (!cur)
-        return NULL;
-
-    head->next = cur->next;
-    return cur;
-}
-
-static inline void xlist_splice(struct xlist_head *list,
-                                struct xlist_head *head)
-{
-    struct xlist_head *cur;
-
-    WARN_ON(head->next);
-    cur = xchg(&list->next, NULL);
-    head->next = cur;
-}
-
-#endif
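
For reference, the deleted xlist was a hand-rolled Treiber stack: push retries a cmpxchg on the head until no other CPU raced in, and the pop in xlist_del_head carries the classic ABA hazard, which is exactly why ib_rdma.c needs its clean_list_grace dance. The generic llist implements the same idea once, centrally. The push loop expressed in portable C11 atomics (userspace, illustrative only):

#include <stdatomic.h>
#include <stddef.h>

struct node {
    struct node *next;
};

struct stack {
    _Atomic(struct node *) head;
};

/* Lock-free push: retry the head swap until no one raced us. */
static void push(struct stack *s, struct node *n)
{
    struct node *cur = atomic_load(&s->head);

    do {
        n->next = cur;
    } while (!atomic_compare_exchange_weak(&s->head, &cur, n));
}
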
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a2774..5be19575c340 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -235,7 +235,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
     else
         rfkill->state &= ~RFKILL_BLOCK_HW;
     *change = prev != blocked;
-    any = rfkill->state & RFKILL_BLOCK_ANY;
+    any = !!(rfkill->state & RFKILL_BLOCK_ANY);
     spin_unlock_irqrestore(&rfkill->lock, flags);
 
     rfkill_led_trigger_event(rfkill);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 256c5ddd2d72..128677d69056 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -101,6 +101,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
     if (!rfkill)
         return -ENOMEM;
 
+    if (pdata->gpio_runtime_setup) {
+        ret = pdata->gpio_runtime_setup(pdev);
+        if (ret) {
+            pr_warn("%s: can't set up gpio\n", __func__);
+            return ret;
+        }
+    }
+
     rfkill->pdata = pdata;
 
     len = strlen(pdata->name);
@@ -182,7 +190,10 @@ fail_alloc:
 static int rfkill_gpio_remove(struct platform_device *pdev)
 {
     struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+    struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
 
+    if (pdata->gpio_runtime_close)
+        pdata->gpio_runtime_close(pdev);
     rfkill_unregister(rfkill->rfkill_dev);
     rfkill_destroy(rfkill->rfkill_dev);
     if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 18dc512a10f3..3ca7277a3c36 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -90,7 +90,6 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
                           pdata->type,
                           &rfkill_regulator_ops, rfkill_data);
     if (rf_kill == NULL) {
-        dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
         ret = -ENOMEM;
         goto err_rfkill_alloc;
     }
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6994214db8f8..9e087d885675 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -65,132 +65,134 @@ static inline u32 addr_fold(void *addr)
     return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
 }
 
-static u32 flow_get_src(struct sk_buff *skb)
+static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
 {
+    __be32 *data = NULL, hdata;
+
     switch (skb->protocol) {
     case htons(ETH_P_IP):
-        if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
-            return ntohl(ip_hdr(skb)->saddr);
+        data = skb_header_pointer(skb,
+                                  nhoff + offsetof(struct iphdr,
+                                                   saddr),
+                                  4, &hdata);
         break;
     case htons(ETH_P_IPV6):
-        if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
-            return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
+        data = skb_header_pointer(skb,
+                                  nhoff + offsetof(struct ipv6hdr,
+                                                   saddr.s6_addr32[3]),
+                                  4, &hdata);
         break;
     }
 
+    if (data)
+        return ntohl(*data);
     return addr_fold(skb->sk);
 }
 
-static u32 flow_get_dst(struct sk_buff *skb)
+static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
 {
+    __be32 *data = NULL, hdata;
+
     switch (skb->protocol) {
     case htons(ETH_P_IP):
-        if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
-            return ntohl(ip_hdr(skb)->daddr);
+        data = skb_header_pointer(skb,
                                   nhoff + offsetof(struct iphdr,
                                                    daddr),
                                   4, &hdata);
         break;
     case htons(ETH_P_IPV6):
-        if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
-            return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
+        data = skb_header_pointer(skb,
                                   nhoff + offsetof(struct ipv6hdr,
                                                    daddr.s6_addr32[3]),
                                   4, &hdata);
         break;
     }
 
+    if (data)
+        return ntohl(*data);
     return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
-static u32 flow_get_proto(struct sk_buff *skb)
+static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
 {
+    __u8 *data = NULL, hdata;
+
     switch (skb->protocol) {
     case htons(ETH_P_IP):
-        return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
-               ip_hdr(skb)->protocol : 0;
+        data = skb_header_pointer(skb,
                                   nhoff + offsetof(struct iphdr,
                                                    protocol),
                                   1, &hdata);
+        break;
     case htons(ETH_P_IPV6):
-        return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
-               ipv6_hdr(skb)->nexthdr : 0;
-    default:
-        return 0;
+        data = skb_header_pointer(skb,
                                   nhoff + offsetof(struct ipv6hdr,
                                                    nexthdr),
                                   1, &hdata);
+        break;
     }
+    if (data)
+        return *data;
+    return 0;
 }
 
-static u32 flow_get_proto_src(struct sk_buff *skb)
+/* helper function to get either src or dst port */
+static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
+                                     __be16 *_port, int dst)
 {
+    __be16 *port = NULL;
+    int poff;
+
     switch (skb->protocol) {
     case htons(ETH_P_IP): {
-        struct iphdr *iph;
-        int poff;
+        struct iphdr *iph, _iph;
 
-        if (!pskb_network_may_pull(skb, sizeof(*iph)))
+        iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+        if (!iph)
             break;
-        iph = ip_hdr(skb);
         if (ip_is_fragment(iph))
             break;
         poff = proto_ports_offset(iph->protocol);
-        if (poff >= 0 &&
-            pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
-            iph = ip_hdr(skb);
-            return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
-                                     poff));
-        }
+        if (poff >= 0)
+            port = skb_header_pointer(skb,
                                       nhoff + iph->ihl * 4 + poff + dst,
                                       sizeof(*_port), _port);
         break;
     }
     case htons(ETH_P_IPV6): {
-        struct ipv6hdr *iph;
-        int poff;
+        struct ipv6hdr *iph, _iph;
 
-        if (!pskb_network_may_pull(skb, sizeof(*iph)))
+        iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+        if (!iph)
             break;
-        iph = ipv6_hdr(skb);
         poff = proto_ports_offset(iph->nexthdr);
-        if (poff >= 0 &&
-            pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
-            iph = ipv6_hdr(skb);
-            return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
-                                     poff));
-        }
+        if (poff >= 0)
+            port = skb_header_pointer(skb,
                                       nhoff + sizeof(*iph) + poff + dst,
                                       sizeof(*_port), _port);
         break;
     }
     }
 
-    return addr_fold(skb->sk);
+    return port;
 }
 
-static u32 flow_get_proto_dst(struct sk_buff *skb)
+static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
 {
-    switch (skb->protocol) {
-    case htons(ETH_P_IP): {
-        struct iphdr *iph;
-        int poff;
+    __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
 
-        if (!pskb_network_may_pull(skb, sizeof(*iph)))
-            break;
-        iph = ip_hdr(skb);
-        if (ip_is_fragment(iph))
-            break;
-        poff = proto_ports_offset(iph->protocol);
-        if (poff >= 0 &&
-            pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
-            iph = ip_hdr(skb);
-            return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
-                                     2 + poff));
-        }
-        break;
-    }
-    case htons(ETH_P_IPV6): {
-        struct ipv6hdr *iph;
-        int poff;
+    if (port)
+        return ntohs(*port);
 
-        if (!pskb_network_may_pull(skb, sizeof(*iph)))
-            break;
-        iph = ipv6_hdr(skb);
-        poff = proto_ports_offset(iph->nexthdr);
-        if (poff >= 0 &&
-            pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
-            iph = ipv6_hdr(skb);
-            return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
-                                     poff + 2));
-        }
-        break;
-    }
-    }
+    return addr_fold(skb->sk);
+}
+
+static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+{
+    __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
+
+    if (port)
+        return ntohs(*port);
 
     return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
@@ -223,7 +225,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 #define CTTUPLE(skb, member)                        \
 ({                                                  \
     enum ip_conntrack_info ctinfo;                  \
-    struct nf_conn *ct = nf_ct_get(skb, &ctinfo);   \
+    const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
     if (ct == NULL)                                 \
         goto fallback;                              \
     ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
@@ -236,7 +238,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 })
 #endif
 
-static u32 flow_get_nfct_src(struct sk_buff *skb)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
 {
     switch (skb->protocol) {
     case htons(ETH_P_IP):
@@ -245,10 +247,10 @@ static u32 flow_get_nfct_src(struct sk_buff *skb)
         return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
     }
 fallback:
-    return flow_get_src(skb);
+    return flow_get_src(skb, nhoff);
 }
 
-static u32 flow_get_nfct_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
 {
     switch (skb->protocol) {
     case htons(ETH_P_IP):
@@ -257,21 +259,21 @@ static u32 flow_get_nfct_dst(struct sk_buff *skb)
         return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
     }
 fallback:
-    return flow_get_dst(skb);
+    return flow_get_dst(skb, nhoff);
 }
 
-static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
 {
     return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
-    return flow_get_proto_src(skb);
+    return flow_get_proto_src(skb, nhoff);
 }
 
-static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
 {
     return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
-    return flow_get_proto_dst(skb);
+    return flow_get_proto_dst(skb, nhoff);
 }
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -313,17 +315,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
 
 static u32 flow_key_get(struct sk_buff *skb, int key)
 {
+    int nhoff = skb_network_offset(skb);
+
     switch (key) {
     case FLOW_KEY_SRC:
-        return flow_get_src(skb);
+        return flow_get_src(skb, nhoff);
     case FLOW_KEY_DST:
-        return flow_get_dst(skb);
+        return flow_get_dst(skb, nhoff);
     case FLOW_KEY_PROTO:
-        return flow_get_proto(skb);
+        return flow_get_proto(skb, nhoff);
     case FLOW_KEY_PROTO_SRC:
-        return flow_get_proto_src(skb);
+        return flow_get_proto_src(skb, nhoff);
     case FLOW_KEY_PROTO_DST:
-        return flow_get_proto_dst(skb);
+        return flow_get_proto_dst(skb, nhoff);
     case FLOW_KEY_IIF:
         return flow_get_iif(skb);
     case FLOW_KEY_PRIORITY:
@@ -333,13 +337,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
     case FLOW_KEY_NFCT:
         return flow_get_nfct(skb);
     case FLOW_KEY_NFCT_SRC:
-        return flow_get_nfct_src(skb);
+        return flow_get_nfct_src(skb, nhoff);
     case FLOW_KEY_NFCT_DST:
-        return flow_get_nfct_dst(skb);
+        return flow_get_nfct_dst(skb, nhoff);
     case FLOW_KEY_NFCT_PROTO_SRC:
-        return flow_get_nfct_proto_src(skb);
+        return flow_get_nfct_proto_src(skb, nhoff);
     case FLOW_KEY_NFCT_PROTO_DST:
-        return flow_get_nfct_proto_dst(skb);
+        return flow_get_nfct_proto_dst(skb, nhoff);
     case FLOW_KEY_RTCLASSID:
         return flow_get_rtclassid(skb);
     case FLOW_KEY_SKUID:
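
The cls_flow rewrite above replaces pskb_network_may_pull() with skb_header_pointer(skb, offset, len, buffer), which returns a pointer to len bytes at offset, copying them into the caller-supplied buffer only when the skb is paged or non-linear. Because it never modifies the skb, the classifier can take a const pointer and no longer risks invalidating previously cached header pointers. A minimal reader in the same style (an illustrative helper, not part of the patch):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>

/* Fetch the IPv4 protocol byte without pulling or writing to the skb. */
static u8 peek_ip_protocol(const struct sk_buff *skb, int nhoff)
{
    u8 _proto, *proto;

    proto = skb_header_pointer(skb,
                               nhoff + offsetof(struct iphdr, protocol),
                               sizeof(_proto), &_proto);
    return proto ? *proto : 0;
}
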
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0c1f61..e83c272c0325 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     u32 r, slot, salt, sfbhash;
     int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
+    if (unlikely(sch->q.qlen >= q->limit)) {
+        sch->qstats.overlimits++;
+        q->stats.queuedrop++;
+        goto drop;
+    }
+
     if (q->rehash_interval > 0) {
         unsigned long limit = q->rehash_time + q->rehash_interval;
 
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         slot ^= 1;
         sfb_skb_cb(skb)->hashes[slot] = 0;
 
-    if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+    if (unlikely(minqlen >= q->max)) {
         sch->qstats.overlimits++;
-        if (minqlen >= q->max)
-            q->stats.bucketdrop++;
-        else
-            q->stats.queuedrop++;
+        q->stats.bucketdrop++;
         goto drop;
     }
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dc16b90ddb6f..152b5b3c3fff 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -282,6 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
     asoc->peer.asconf_capable = 1;
     asoc->asconf_addr_del_pending = NULL;
     asoc->src_out_of_asoc_ok = 0;
+    asoc->new_transport = NULL;
 
     /* Create an input queue. */
     sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index aabaee41dd3e..810427833bcd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -243,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
     if (!(transport->param_flags & SPP_PMTUD_ENABLE))
         skb->local_df = 1;
 
-    return ip6_xmit(sk, skb, &fl6, np->opt);
+    return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a6d27bf563a5..14c2b06028ff 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -917,6 +917,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
      * current cwnd).
      */
     if (!list_empty(&q->retransmit)) {
+        if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+            goto sctp_flush_out;
         if (transport == asoc->peer.retran_path)
             goto retran;
 
@@ -989,6 +991,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                 ((new_transport->state == SCTP_INACTIVE) ||
                  (new_transport->state == SCTP_UNCONFIRMED)))
                 new_transport = asoc->peer.active_path;
+            if (new_transport->state == SCTP_UNCONFIRMED)
+                continue;
 
             /* Change packets if necessary. */
             if (new_transport != transport) {
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 91784f44a2e2..61b9fca5a173 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1299,7 +1299,7 @@ SCTP_STATIC __init int sctp_init(void)
     max_share = min(4UL*1024*1024, limit);
 
     sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
-    sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
+    sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
     sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
 
     sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
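
The sctp_rmem change fixes an arithmetic underestimate: 1500 * (sizeof(struct sk_buff) + 1) charges each of 1500 one-byte skbs only for the metadata struct plus one payload byte, while the real cost also includes the aligned data buffer and the trailing struct skb_shared_info. SKB_TRUESIZE(X), introduced alongside this series, captures all three terms; abridged from the <linux/skbuff.h> of this era:

#define SKB_TRUESIZE(X) ((X) +                                          \
                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

So SKB_TRUESIZE(1) comes to several hundred bytes on 64-bit builds, giving a default receive budget several times larger than the old formula produced.
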
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 81db4e385352..0121e0ab0351 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3015,6 +3015,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
         /* Start the heartbeat timer. */
         if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
             sctp_transport_hold(peer);
+        asoc->new_transport = peer;
         break;
     case SCTP_PARAM_DEL_IP:
         /* ADDIP 4.3 D7) If a request is received to delete the
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0f31e6c1c63..891f5db8cc31 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3618,6 +3618,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
      */
     asconf_ack->dest = chunk->source;
     sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
+    if (asoc->new_transport) {
+        sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
+                          commands);
+        ((struct sctp_association *)asoc)->new_transport = NULL;
+    }
 
     return SCTP_DISPOSITION_CONSUME;
 }
diff --git a/net/socket.c b/net/socket.c
index ffe92ca32f2a..2877647f347b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto_family *ops)
                   lockdep_is_held(&net_family_lock)))
         err = -EEXIST;
     else {
-        rcu_assign_pointer(net_families[ops->family], ops);
+        RCU_INIT_POINTER(net_families[ops->family], ops);
         err = 0;
     }
     spin_unlock(&net_family_lock);
@@ -2500,7 +2500,7 @@ void sock_unregister(int family)
     BUG_ON(family < 0 || family >= NPROTO);
 
     spin_lock(&net_family_lock);
-    rcu_assign_pointer(net_families[family], NULL);
+    RCU_INIT_POINTER(net_families[family], NULL);
     spin_unlock(&net_family_lock);
 
     synchronize_rcu();
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e9b76939268d..afb56553dfe7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
     if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
         return;
     gss_get_ctx(ctx);
-    rcu_assign_pointer(gss_cred->gc_ctx, ctx);
+    RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
     set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
     smp_mb__before_clear_bit();
     clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -950,7 +950,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
     struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
     struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
 
-    rcu_assign_pointer(gss_cred->gc_ctx, NULL);
+    RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
     call_rcu(&cred->cr_rcu, gss_free_cred_callback);
     if (ctx)
         gss_put_ctx(ctx);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 67dbc1884383..bfddd68b31d3 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1104,3 +1104,6 @@ void unregister_rpc_pipefs(void)
     kmem_cache_destroy(rpc_inode_cachep);
     unregister_filesystem(&rpc_pipe_fs_type);
 }
+
+/* Make 'mount -t rpc_pipefs ...' autoload this module. */
+MODULE_ALIAS("rpc_pipefs");
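
The new comment says it all: when mount(2) asks for a filesystem type that is not yet registered, the kernel calls request_module() with that name, and modprobe resolves it through the module's alias table. Any module can opt in the same way; a hypothetical skeleton:

#include <linux/module.h>

/*
 * 'mount -t demofs ...' triggers request_module("demofs"); the alias
 * below lets modprobe map that name to this module, whose file name
 * need not be demofs.ko.
 */
MODULE_ALIAS("demofs");
MODULE_LICENSE("GPL");
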
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 252552a685dc..6e038884ae0c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -295,6 +295,18 @@ svc_pool_map_put(void)
 }
 
 
+static int svc_pool_map_get_node(unsigned int pidx)
+{
+    const struct svc_pool_map *m = &svc_pool_map;
+
+    if (m->count) {
+        if (m->mode == SVC_POOL_PERCPU)
+            return cpu_to_node(m->pool_to[pidx]);
+        if (m->mode == SVC_POOL_PERNODE)
+            return m->pool_to[pidx];
+    }
+    return NUMA_NO_NODE;
+}
 /*
  * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
@@ -541,7 +553,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
  * We allocate pages and place them in rq_argpages.
  */
 static int
-svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
+svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 {
     unsigned int pages, arghi;
 
@@ -555,7 +567,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
     arghi = 0;
     BUG_ON(pages > RPCSVC_MAXPAGES);
     while (pages) {
-        struct page *p = alloc_page(GFP_KERNEL);
+        struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
         if (!p)
             break;
         rqstp->rq_pages[arghi++] = p;
@@ -578,11 +590,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
 }
 
 struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
     struct svc_rqst *rqstp;
 
-    rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
+    rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
     if (!rqstp)
         goto out_enomem;
 
@@ -596,15 +608,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
     rqstp->rq_server = serv;
     rqstp->rq_pool = pool;
 
-    rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+    rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
     if (!rqstp->rq_argp)
         goto out_thread;
 
-    rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+    rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
     if (!rqstp->rq_resp)
         goto out_thread;
 
-    if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
+    if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
         goto out_thread;
 
     return rqstp;
@@ -689,6 +701,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
     struct svc_pool *chosen_pool;
     int error = 0;
     unsigned int state = serv->sv_nrthreads-1;
+    int node;
 
     if (pool == NULL) {
         /* The -1 assumes caller has done a svc_get() */
@@ -704,14 +717,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
         nrservs--;
         chosen_pool = choose_pool(serv, pool, &state);
 
-        rqstp = svc_prepare_thread(serv, chosen_pool);
+        node = svc_pool_map_get_node(chosen_pool->sp_id);
+        rqstp = svc_prepare_thread(serv, chosen_pool, node);
         if (IS_ERR(rqstp)) {
             error = PTR_ERR(rqstp);
             break;
         }
 
         __module_get(serv->sv_module);
-        task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+        task = kthread_create_on_node(serv->sv_function, rqstp,
+                                      node, serv->sv_name);
         if (IS_ERR(task)) {
             error = PTR_ERR(task);
             module_put(serv->sv_module);
@@ -998,9 +1013,8 @@ static void svc_unregister(const struct svc_serv *serv)
 /*
  * Printk the given error with the address of the client that caused it.
  */
-static int
-__attribute__ ((format (printf, 2, 3)))
-svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
+static __printf(2, 3)
+int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 {
     va_list args;
     int r;
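
Taken together, the svc.c hunks make the RPC server NUMA-aware: svc_pool_map_get_node() translates a pool index into a node, and every per-thread allocation (task struct and stack via kthread_create_on_node(), control block via kzalloc_node(), XDR buffers via kmalloc_node() and alloc_pages_node()) then lands on that node. The same pattern in a hedged, self-contained sketch (struct worker and start_worker() are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct worker { int id; };

/* Place the worker's control block on the node it will run on. */
static struct task_struct *start_worker(int (*fn)(void *), int node)
{
    struct worker *w = kzalloc_node(sizeof(*w), GFP_KERNEL, node);
    struct task_struct *task;

    if (!w)
        return ERR_PTR(-ENOMEM);

    task = kthread_create_on_node(fn, w, node, "worker/%d", node);
    if (IS_ERR(task))
        kfree(w);  /* thread never ran; reclaim the control block */
    return task;
}
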
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bd31208bbb61..d86bb673e1f6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -254,8 +254,6 @@ EXPORT_SYMBOL_GPL(svc_create_xprt);
  */
 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 {
-    struct sockaddr *sin;
-
     memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
     rqstp->rq_addrlen = xprt->xpt_remotelen;
 
@@ -263,15 +261,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
      * Destination address in request is needed for binding the
      * source address in RPC replies/callbacks later.
      */
-    sin = (struct sockaddr *)&xprt->xpt_local;
-    switch (sin->sa_family) {
-    case AF_INET:
-        rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
-        break;
-    case AF_INET6:
-        rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
-        break;
-    }
+    memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
+    rqstp->rq_daddrlen = xprt->xpt_locallen;
 }
 EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
 
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 767d494de7a2..dfd686eb0b7f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -143,19 +143,20 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
             cmh->cmsg_level = SOL_IP;
             cmh->cmsg_type = IP_PKTINFO;
             pki->ipi_ifindex = 0;
-            pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
+            pki->ipi_spec_dst.s_addr =
+                svc_daddr_in(rqstp)->sin_addr.s_addr;
             cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
         }
         break;
 
     case AF_INET6: {
             struct in6_pktinfo *pki = CMSG_DATA(cmh);
+            struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
 
             cmh->cmsg_level = SOL_IPV6;
             cmh->cmsg_type = IPV6_PKTINFO;
-            pki->ipi6_ifindex = 0;
-            ipv6_addr_copy(&pki->ipi6_addr,
-                           &rqstp->rq_daddr.addr6);
+            pki->ipi6_ifindex = daddr->sin6_scope_id;
+            ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
             cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
         }
         break;
@@ -498,9 +499,13 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
                                      struct cmsghdr *cmh)
 {
     struct in_pktinfo *pki = CMSG_DATA(cmh);
+    struct sockaddr_in *daddr = svc_daddr_in(rqstp);
+
     if (cmh->cmsg_type != IP_PKTINFO)
         return 0;
-    rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
+
+    daddr->sin_family = AF_INET;
+    daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
     return 1;
 }
 
@@ -511,9 +516,14 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                                      struct cmsghdr *cmh)
 {
     struct in6_pktinfo *pki = CMSG_DATA(cmh);
+    struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
+
     if (cmh->cmsg_type != IPV6_PKTINFO)
         return 0;
-    ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
+
+    daddr->sin6_family = AF_INET6;
+    ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+    daddr->sin6_scope_id = pki->ipi6_ifindex;
     return 1;
 }
 
@@ -614,6 +624,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
         skb_free_datagram_locked(svsk->sk_sk, skb);
         return 0;
     }
+    rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
 
     if (skb_is_nonlinear(skb)) {
         /* we have to copy */
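
The svcsock.c hunks stop squeezing the destination into a bare address union and keep a full sockaddr, so sin6_scope_id can carry ipi6_ifindex from the IPV6_PKTINFO cmsg; without it, replies bound to a link-local destination could leave through the wrong interface. The same cmsg parsing in portable userspace C (runnable and illustrative; the socket needs the IPV6_RECVPKTINFO option enabled):

#define _GNU_SOURCE
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Recover a datagram's destination address and arrival interface. */
static int get_dest6(struct msghdr *msg, struct sockaddr_in6 *daddr)
{
    struct cmsghdr *cmh;

    for (cmh = CMSG_FIRSTHDR(msg); cmh; cmh = CMSG_NXTHDR(msg, cmh)) {
        if (cmh->cmsg_level == IPPROTO_IPV6 &&
            cmh->cmsg_type == IPV6_PKTINFO) {
            struct in6_pktinfo *pki = (struct in6_pktinfo *)CMSG_DATA(cmh);

            memset(daddr, 0, sizeof(*daddr));
            daddr->sin6_family = AF_INET6;
            daddr->sin6_addr = pki->ipi6_addr;
            daddr->sin6_scope_id = pki->ipi6_ifindex;
            return 1;
        }
    }
    return 0;
}
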
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 759b318b5ffb..28908f54459e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -39,6 +39,7 @@
39#include "link.h" 39#include "link.h"
40#include "port.h" 40#include "port.h"
41#include "bcast.h" 41#include "bcast.h"
42#include "name_distr.h"
42 43
43#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
44 45
@@ -298,14 +299,9 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
298 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 299 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
299 msg_set_bcast_tag(msg, tipc_own_tag); 300 msg_set_bcast_tag(msg, tipc_own_tag);
300 301
301 if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) { 302 tipc_bearer_send(&bcbearer->bearer, buf, NULL);
302 bcl->stats.sent_nacks++; 303 bcl->stats.sent_nacks++;
303 buf_discard(buf); 304 buf_discard(buf);
304 } else {
305 tipc_bearer_schedule(bcl->b_ptr, bcl);
306 bcl->proto_msg_queue = buf;
307 bcl->stats.bearer_congs++;
308 }
309 305
310 /* 306 /*
311 * Ensure we doesn't send another NACK msg to the node 307 * Ensure we doesn't send another NACK msg to the node
@@ -426,20 +422,28 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
426void tipc_bclink_recv_pkt(struct sk_buff *buf) 422void tipc_bclink_recv_pkt(struct sk_buff *buf)
427{ 423{
428 struct tipc_msg *msg = buf_msg(buf); 424 struct tipc_msg *msg = buf_msg(buf);
429 struct tipc_node *node = tipc_node_find(msg_prevnode(msg)); 425 struct tipc_node *node;
430 u32 next_in; 426 u32 next_in;
431 u32 seqno; 427 u32 seqno;
432 struct sk_buff *deferred; 428 struct sk_buff *deferred;
433 429
434 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || 430 /* Screen out unwanted broadcast messages */
435 (msg_mc_netid(msg) != tipc_net_id))) { 431
436 buf_discard(buf); 432 if (msg_mc_netid(msg) != tipc_net_id)
437 return; 433 goto exit;
438 } 434
435 node = tipc_node_find(msg_prevnode(msg));
436 if (unlikely(!node))
437 goto exit;
438
439 tipc_node_lock(node);
440 if (unlikely(!node->bclink.supported))
441 goto unlock;
439 442
440 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 443 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
444 if (msg_type(msg) != STATE_MSG)
445 goto unlock;
441 if (msg_destnode(msg) == tipc_own_addr) { 446 if (msg_destnode(msg) == tipc_own_addr) {
442 tipc_node_lock(node);
443 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 447 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
444 tipc_node_unlock(node); 448 tipc_node_unlock(node);
445 spin_lock_bh(&bc_lock); 449 spin_lock_bh(&bc_lock);
@@ -449,18 +453,18 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
449 msg_bcgap_to(msg)); 453 msg_bcgap_to(msg));
450 spin_unlock_bh(&bc_lock); 454 spin_unlock_bh(&bc_lock);
451 } else { 455 } else {
456 tipc_node_unlock(node);
452 tipc_bclink_peek_nack(msg_destnode(msg), 457 tipc_bclink_peek_nack(msg_destnode(msg),
453 msg_bcast_tag(msg), 458 msg_bcast_tag(msg),
454 msg_bcgap_after(msg), 459 msg_bcgap_after(msg),
455 msg_bcgap_to(msg)); 460 msg_bcgap_to(msg));
456 } 461 }
457 buf_discard(buf); 462 goto exit;
458 return;
459 } 463 }
460 464
461 tipc_node_lock(node); 465 /* Handle in-sequence broadcast message */
466
462receive: 467receive:
463 deferred = node->bclink.deferred_head;
464 next_in = mod(node->bclink.last_in + 1); 468 next_in = mod(node->bclink.last_in + 1);
465 seqno = msg_seqno(msg); 469 seqno = msg_seqno(msg);
466 470
@@ -474,7 +478,10 @@ receive:
474 } 478 }
475 if (likely(msg_isdata(msg))) { 479 if (likely(msg_isdata(msg))) {
476 tipc_node_unlock(node); 480 tipc_node_unlock(node);
477 tipc_port_recv_mcast(buf, NULL); 481 if (likely(msg_mcast(msg)))
482 tipc_port_recv_mcast(buf, NULL);
483 else
484 buf_discard(buf);
478 } else if (msg_user(msg) == MSG_BUNDLER) { 485 } else if (msg_user(msg) == MSG_BUNDLER) {
479 bcl->stats.recv_bundles++; 486 bcl->stats.recv_bundles++;
480 bcl->stats.recv_bundled += msg_msgcnt(msg); 487 bcl->stats.recv_bundled += msg_msgcnt(msg);
@@ -487,18 +494,22 @@ receive:
487 bcl->stats.recv_fragmented++; 494 bcl->stats.recv_fragmented++;
488 tipc_node_unlock(node); 495 tipc_node_unlock(node);
489 tipc_net_route_msg(buf); 496 tipc_net_route_msg(buf);
497 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
498 tipc_node_unlock(node);
499 tipc_named_recv(buf);
490 } else { 500 } else {
491 tipc_node_unlock(node); 501 tipc_node_unlock(node);
492 tipc_net_route_msg(buf); 502 buf_discard(buf);
493 } 503 }
504 buf = NULL;
505 tipc_node_lock(node);
506 deferred = node->bclink.deferred_head;
494 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 507 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
495 tipc_node_lock(node);
496 buf = deferred; 508 buf = deferred;
497 msg = buf_msg(buf); 509 msg = buf_msg(buf);
498 node->bclink.deferred_head = deferred->next; 510 node->bclink.deferred_head = deferred->next;
499 goto receive; 511 goto receive;
500 } 512 }
501 return;
502 } else if (less(next_in, seqno)) { 513 } else if (less(next_in, seqno)) {
503 u32 gap_after = node->bclink.gap_after; 514 u32 gap_after = node->bclink.gap_after;
504 u32 gap_to = node->bclink.gap_to; 515 u32 gap_to = node->bclink.gap_to;
@@ -513,6 +524,7 @@ receive:
513 else if (less(gap_after, seqno) && less(seqno, gap_to)) 524 else if (less(gap_after, seqno) && less(seqno, gap_to))
514 node->bclink.gap_to = seqno; 525 node->bclink.gap_to = seqno;
515 } 526 }
527 buf = NULL;
516 if (bclink_ack_allowed(node->bclink.nack_sync)) { 528 if (bclink_ack_allowed(node->bclink.nack_sync)) {
517 if (gap_to != gap_after) 529 if (gap_to != gap_after)
518 bclink_send_nack(node); 530 bclink_send_nack(node);
@@ -520,9 +532,11 @@ receive:
520 } 532 }
521 } else { 533 } else {
522 bcl->stats.duplicates++; 534 bcl->stats.duplicates++;
523 buf_discard(buf);
524 } 535 }
536unlock:
525 tipc_node_unlock(node); 537 tipc_node_unlock(node);
538exit:
539 buf_discard(buf);
526} 540}
527 541
528u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 542u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
@@ -535,10 +549,11 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
535/** 549/**
536 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer 550 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
537 * 551 *
538 * Send through as many bearers as necessary to reach all nodes 552 * Send packet over as many bearers as necessary to reach all nodes
539 * that support TIPC multicasting. 553 * that have joined the broadcast link.
540 * 554 *
541 * Returns 0 if packet sent successfully, non-zero if not 555 * Returns 0 (packet sent successfully) under all circumstances,
556 * since the broadcast link's pseudo-bearer never blocks
542 */ 557 */
543 558
544static int tipc_bcbearer_send(struct sk_buff *buf, 559static int tipc_bcbearer_send(struct sk_buff *buf,
@@ -547,7 +562,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
547{ 562{
548 int bp_index; 563 int bp_index;
549 564
550 /* Prepare buffer for broadcasting (if first time trying to send it) */ 565 /*
566 * Prepare broadcast link message for reliable transmission,
567 * if first time trying to send it;
568 * preparation is skipped for broadcast link protocol messages
569 * since they are sent in an unreliable manner and don't need it
570 */
551 571
552 if (likely(!msg_non_seq(buf_msg(buf)))) { 572 if (likely(!msg_non_seq(buf_msg(buf)))) {
553 struct tipc_msg *msg; 573 struct tipc_msg *msg;
@@ -596,18 +616,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
596 } 616 }
597 617
598 if (bcbearer->remains_new.count == 0) 618 if (bcbearer->remains_new.count == 0)
599 return 0; 619 break; /* all targets reached */
600 620
601 bcbearer->remains = bcbearer->remains_new; 621 bcbearer->remains = bcbearer->remains_new;
602 } 622 }
603 623
604 /* 624 return 0;
605 * Unable to reach all targets (indicate success, since currently
606 * there isn't code in place to properly block & unblock the
607 * pseudo-bearer used by the broadcast link)
608 */
609
610 return TIPC_OK;
611} 625}
612 626
613/** 627/**
@@ -667,27 +681,6 @@ void tipc_bcbearer_sort(void)
667 spin_unlock_bh(&bc_lock); 681 spin_unlock_bh(&bc_lock);
668} 682}
669 683
670/**
671 * tipc_bcbearer_push - resolve bearer congestion
672 *
673 * Forces bclink to push out any unsent packets, until all packets are gone
674 * or congestion reoccurs.
675 * No locks set when function called
676 */
677
678void tipc_bcbearer_push(void)
679{
680 struct tipc_bearer *b_ptr;
681
682 spin_lock_bh(&bc_lock);
683 b_ptr = &bcbearer->bearer;
684 if (b_ptr->blocked) {
685 b_ptr->blocked = 0;
686 tipc_bearer_lock_push(b_ptr);
687 }
688 spin_unlock_bh(&bc_lock);
689}
690
691 684
692int tipc_bclink_stats(char *buf, const u32 buf_size) 685int tipc_bclink_stats(char *buf, const u32 buf_size)
693{ 686{
@@ -764,7 +757,7 @@ int tipc_bclink_init(void)
764 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); 757 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
765 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC); 758 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
766 if (!bcbearer || !bclink) { 759 if (!bcbearer || !bclink) {
767 warn("Multicast link creation failed, no memory\n"); 760 warn("Broadcast link creation failed, no memory\n");
768 kfree(bcbearer); 761 kfree(bcbearer);
769 bcbearer = NULL; 762 bcbearer = NULL;
770 kfree(bclink); 763 kfree(bclink);
@@ -775,7 +768,7 @@ int tipc_bclink_init(void)
775 INIT_LIST_HEAD(&bcbearer->bearer.cong_links); 768 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
776 bcbearer->bearer.media = &bcbearer->media; 769 bcbearer->bearer.media = &bcbearer->media;
777 bcbearer->media.send_msg = tipc_bcbearer_send; 770 bcbearer->media.send_msg = tipc_bcbearer_send;
778 sprintf(bcbearer->media.name, "tipc-multicast"); 771 sprintf(bcbearer->media.name, "tipc-broadcast");
779 772
780 bcl = &bclink->link; 773 bcl = &bclink->link;
781 INIT_LIST_HEAD(&bcl->waiting_ports); 774 INIT_LIST_HEAD(&bcl->waiting_ports);
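A condensed sketch of the tipc_bcbearer_send() loop changed above (simplified: the bearer-pair array and node-map helpers are the ones from bcast.c, but the primary/secondary failover bookkeeping is omitted). It shows why the function can now return 0 unconditionally: the loop either covers every target node or simply runs out of bearer pairs, and the pseudo-bearer never blocks.

    for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
        struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
        struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

        if (!p)
            break;    /* no more usable bearer pairs */

        /* try the primary; fall back to the secondary on failure */
        if (p->blocked || p->media->send_msg(buf, p, &p->media->bcast_addr))
            if (s)
                s->media->send_msg(buf, s, &s->media->bcast_addr);

        /* drop the nodes this pair reaches from the remaining set */
        tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
        if (bcbearer->remains_new.count == 0)
            break;    /* all targets reached */

        bcbearer->remains = bcbearer->remains_new;
    }
    return 0;    /* the pseudo-bearer never blocks */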
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 500c97f1c859..06740da5ae61 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -101,6 +101,5 @@ int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
101int tipc_bclink_reset_stats(void); 101int tipc_bclink_reset_stats(void);
102int tipc_bclink_set_queue_limits(u32 limit); 102int tipc_bclink_set_queue_limits(u32 limit);
103void tipc_bcbearer_sort(void); 103void tipc_bcbearer_sort(void);
104void tipc_bcbearer_push(void);
105 104
106#endif 105#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 85eba9c08ee9..e2202de3d93e 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -385,13 +385,9 @@ static int bearer_push(struct tipc_bearer *b_ptr)
385 385
386void tipc_bearer_lock_push(struct tipc_bearer *b_ptr) 386void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
387{ 387{
388 int res;
389
390 spin_lock_bh(&b_ptr->lock); 388 spin_lock_bh(&b_ptr->lock);
391 res = bearer_push(b_ptr); 389 bearer_push(b_ptr);
392 spin_unlock_bh(&b_ptr->lock); 390 spin_unlock_bh(&b_ptr->lock);
393 if (res)
394 tipc_bcbearer_push();
395} 391}
396 392
397 393
@@ -608,6 +604,7 @@ int tipc_block_bearer(const char *name)
608 info("Blocking bearer <%s>\n", name); 604 info("Blocking bearer <%s>\n", name);
609 spin_lock_bh(&b_ptr->lock); 605 spin_lock_bh(&b_ptr->lock);
610 b_ptr->blocked = 1; 606 b_ptr->blocked = 1;
607 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
611 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 608 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
612 struct tipc_node *n_ptr = l_ptr->owner; 609 struct tipc_node *n_ptr = l_ptr->owner;
613 610
@@ -635,6 +632,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
635 spin_lock_bh(&b_ptr->lock); 632 spin_lock_bh(&b_ptr->lock);
636 b_ptr->blocked = 1; 633 b_ptr->blocked = 1;
637 b_ptr->media->disable_bearer(b_ptr); 634 b_ptr->media->disable_bearer(b_ptr);
635 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
638 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 636 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
639 tipc_link_delete(l_ptr); 637 tipc_link_delete(l_ptr);
640 } 638 }
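The list_splice_init() calls added to tipc_block_bearer() and bearer_disable() make sure links parked on the bearer's congestion queue are handled too: the helper moves every entry of the source list onto the destination list and re-initializes the source, so a later list_empty(&b_ptr->cong_links) test still behaves. A minimal sketch of the idiom:

    /* fold the congested links back into the main list, then walk once */
    list_splice_init(&b_ptr->cong_links, &b_ptr->links);
    list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list)
        tipc_link_reset(l_ptr);    /* or tipc_link_delete() on disable */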
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5ad70eff1ebf..d696f9e414e3 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,8 +39,8 @@
39 39
40#include "bcast.h" 40#include "bcast.h"
41 41
42#define MAX_BEARERS 8 42#define MAX_BEARERS 2
43#define MAX_MEDIA 4 43#define MAX_MEDIA 2
44 44
45/* 45/*
46 * Identifiers of supported TIPC media types 46 * Identifiers of supported TIPC media types
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 443159a166fd..80da6ebc2785 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -65,7 +65,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
65 const void *req_tlv_area, int req_tlv_space, 65 const void *req_tlv_area, int req_tlv_space,
66 int headroom); 66 int headroom);
67 67
68void tipc_cfg_link_event(u32 addr, char *name, int up);
69int tipc_cfg_init(void); 68int tipc_cfg_init(void);
70void tipc_cfg_stop(void); 69void tipc_cfg_stop(void);
71 70
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 0987933155b9..f2fb96e86ee8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -159,12 +159,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
159 } 159 }
160 tipc_node_lock(n_ptr); 160 tipc_node_lock(n_ptr);
161 161
162 /* Don't talk to neighbor during cleanup after last session */
163 if (n_ptr->cleanup_required) {
164 tipc_node_unlock(n_ptr);
165 return;
166 }
167
168 link = n_ptr->links[b_ptr->identity]; 162 link = n_ptr->links[b_ptr->identity];
169 163
170 /* Create a link endpoint for this bearer, if necessary */ 164 /* Create a link endpoint for this bearer, if necessary */
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index b69092eb95d8..e728d4ce2a1b 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2007, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2008, 2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
37#include "core.h" 37#include "core.h"
38#include "bearer.h" 38#include "bearer.h"
39 39
40#define MAX_ETH_BEARERS 2 40#define MAX_ETH_BEARERS MAX_BEARERS
41#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI 41#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
42#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL 42#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
43#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN 43#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
@@ -144,31 +144,27 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
144 144
145 /* Find device with specified name */ 145 /* Find device with specified name */
146 146
147 read_lock(&dev_base_lock);
147 for_each_netdev(&init_net, pdev) { 148 for_each_netdev(&init_net, pdev) {
148 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { 149 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
149 dev = pdev; 150 dev = pdev;
151 dev_hold(dev);
150 break; 152 break;
151 } 153 }
152 } 154 }
155 read_unlock(&dev_base_lock);
153 if (!dev) 156 if (!dev)
154 return -ENODEV; 157 return -ENODEV;
155 158
156 /* Find Ethernet bearer for device (or create one) */ 159 /* Create Ethernet bearer for device */
157 160
158 while ((eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev)) 161 eb_ptr->dev = dev;
159 eb_ptr++; 162 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
160 if (eb_ptr == stop) 163 eb_ptr->tipc_packet_type.dev = dev;
161 return -EDQUOT; 164 eb_ptr->tipc_packet_type.func = recv_msg;
162 if (!eb_ptr->dev) { 165 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
163 eb_ptr->dev = dev; 166 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
164 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); 167 dev_add_pack(&eb_ptr->tipc_packet_type);
165 eb_ptr->tipc_packet_type.dev = dev;
166 eb_ptr->tipc_packet_type.func = recv_msg;
167 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
168 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
169 dev_hold(dev);
170 dev_add_pack(&eb_ptr->tipc_packet_type);
171 }
172 168
173 /* Associate TIPC bearer with Ethernet bearer */ 169 /* Associate TIPC bearer with Ethernet bearer */
174 170
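The rewritten enable_bearer() now holds dev_base_lock across the name scan and takes its device reference inside the locked region. For comparison, a sketch of the equivalent lookup using the stock helper (an assumption for illustration, not what the patch uses: dev_get_by_name() performs the same scan plus dev_hold() internally):

    struct net_device *dev = dev_get_by_name(&init_net, driver_name);

    if (!dev)
        return -ENODEV;
    /* ... bind the bearer to dev ... */
    dev_put(dev);    /* when the bearer is later torn down */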
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f89570c54f54..ae98a72da11a 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -332,15 +332,16 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
332 332
333 l_ptr->addr = peer; 333 l_ptr->addr = peer;
334 if_name = strchr(b_ptr->name, ':') + 1; 334 if_name = strchr(b_ptr->name, ':') + 1;
335 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:", 335 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
336 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 336 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
337 tipc_node(tipc_own_addr), 337 tipc_node(tipc_own_addr),
338 if_name, 338 if_name,
339 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 339 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
340 /* note: peer i/f is appended to link name by reset/activate */ 340 /* note: peer i/f name is updated by reset/activate message */
341 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr)); 341 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
342 l_ptr->owner = n_ptr; 342 l_ptr->owner = n_ptr;
343 l_ptr->checkpoint = 1; 343 l_ptr->checkpoint = 1;
344 l_ptr->peer_session = INVALID_SESSION;
344 l_ptr->b_ptr = b_ptr; 345 l_ptr->b_ptr = b_ptr;
345 link_set_supervision_props(l_ptr, b_ptr->media->tolerance); 346 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
346 l_ptr->state = RESET_UNKNOWN; 347 l_ptr->state = RESET_UNKNOWN;
@@ -536,9 +537,6 @@ void tipc_link_stop(struct link *l_ptr)
536 l_ptr->proto_msg_queue = NULL; 537 l_ptr->proto_msg_queue = NULL;
537} 538}
538 539
539/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
540#define link_send_event(fcn, l_ptr, up) do { } while (0)
541
542void tipc_link_reset(struct link *l_ptr) 540void tipc_link_reset(struct link *l_ptr)
543{ 541{
544 struct sk_buff *buf; 542 struct sk_buff *buf;
@@ -596,10 +594,6 @@ void tipc_link_reset(struct link *l_ptr)
596 l_ptr->fsm_msg_cnt = 0; 594 l_ptr->fsm_msg_cnt = 0;
597 l_ptr->stale_count = 0; 595 l_ptr->stale_count = 0;
598 link_reset_statistics(l_ptr); 596 link_reset_statistics(l_ptr);
599
600 link_send_event(tipc_cfg_link_event, l_ptr, 0);
601 if (!in_own_cluster(l_ptr->addr))
602 link_send_event(tipc_disc_link_event, l_ptr, 0);
603} 597}
604 598
605 599
@@ -608,9 +602,6 @@ static void link_activate(struct link *l_ptr)
608 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 602 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
609 tipc_node_link_up(l_ptr->owner, l_ptr); 603 tipc_node_link_up(l_ptr->owner, l_ptr);
610 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 604 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
611 link_send_event(tipc_cfg_link_event, l_ptr, 1);
612 if (!in_own_cluster(l_ptr->addr))
613 link_send_event(tipc_disc_link_event, l_ptr, 1);
614} 605}
615 606
616/** 607/**
@@ -985,6 +976,51 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
985} 976}
986 977
987/* 978/*
979 * tipc_link_send_names - send name table entries to new neighbor
980 *
981 * Send routine for bulk delivery of name table messages when contact
982 * with a new neighbor occurs. No link congestion checking is performed
983 * because name table messages *must* be delivered. The messages must be
984 * small enough not to require fragmentation.
985 * Called without any locks held.
986 */
987
988void tipc_link_send_names(struct list_head *message_list, u32 dest)
989{
990 struct tipc_node *n_ptr;
991 struct link *l_ptr;
992 struct sk_buff *buf;
993 struct sk_buff *temp_buf;
994
995 if (list_empty(message_list))
996 return;
997
998 read_lock_bh(&tipc_net_lock);
999 n_ptr = tipc_node_find(dest);
1000 if (n_ptr) {
1001 tipc_node_lock(n_ptr);
1002 l_ptr = n_ptr->active_links[0];
1003 if (l_ptr) {
1004 /* convert circular list to linear list */
1005 ((struct sk_buff *)message_list->prev)->next = NULL;
1006 link_add_chain_to_outqueue(l_ptr,
1007 (struct sk_buff *)message_list->next, 0);
1008 tipc_link_push_queue(l_ptr);
1009 INIT_LIST_HEAD(message_list);
1010 }
1011 tipc_node_unlock(n_ptr);
1012 }
1013 read_unlock_bh(&tipc_net_lock);
1014
1015 /* discard the messages if they couldn't be sent */
1016
1017 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1018 list_del((struct list_head *)buf);
1019 buf_discard(buf);
1020 }
1021}
1022
1023/*
988 * link_send_buf_fast: Entry for data messages where the 1024 * link_send_buf_fast: Entry for data messages where the
989 * destination link is known and the header is complete, 1025 * destination link is known and the header is complete,
990 * including total message length. Very time critical. 1026 * including total message length. Very time critical.
@@ -1031,9 +1067,6 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1031 u32 selector = msg_origport(buf_msg(buf)) & 1; 1067 u32 selector = msg_origport(buf_msg(buf)) & 1;
1032 u32 dummy; 1068 u32 dummy;
1033 1069
1034 if (destnode == tipc_own_addr)
1035 return tipc_port_recv_msg(buf);
1036
1037 read_lock_bh(&tipc_net_lock); 1070 read_lock_bh(&tipc_net_lock);
1038 n_ptr = tipc_node_find(destnode); 1071 n_ptr = tipc_node_find(destnode);
1039 if (likely(n_ptr)) { 1072 if (likely(n_ptr)) {
@@ -1658,19 +1691,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1658 continue; 1691 continue;
1659 } 1692 }
1660 1693
1694 /* Discard unicast link messages destined for another node */
1695
1661 if (unlikely(!msg_short(msg) && 1696 if (unlikely(!msg_short(msg) &&
1662 (msg_destnode(msg) != tipc_own_addr))) 1697 (msg_destnode(msg) != tipc_own_addr)))
1663 goto cont; 1698 goto cont;
1664 1699
1665 /* Discard non-routeable messages destined for another node */
1666
1667 if (unlikely(!msg_isdata(msg) &&
1668 (msg_destnode(msg) != tipc_own_addr))) {
1669 if ((msg_user(msg) != CONN_MANAGER) &&
1670 (msg_user(msg) != MSG_FRAGMENTER))
1671 goto cont;
1672 }
1673
1674 /* Locate neighboring node that sent message */ 1700 /* Locate neighboring node that sent message */
1675 1701
1676 n_ptr = tipc_node_find(msg_prevnode(msg)); 1702 n_ptr = tipc_node_find(msg_prevnode(msg));
@@ -1678,17 +1704,24 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1678 goto cont; 1704 goto cont;
1679 tipc_node_lock(n_ptr); 1705 tipc_node_lock(n_ptr);
1680 1706
1681 /* Don't talk to neighbor during cleanup after last session */ 1707 /* Locate unicast link endpoint that should handle message */
1682 1708
1683 if (n_ptr->cleanup_required) { 1709 l_ptr = n_ptr->links[b_ptr->identity];
1710 if (unlikely(!l_ptr)) {
1684 tipc_node_unlock(n_ptr); 1711 tipc_node_unlock(n_ptr);
1685 goto cont; 1712 goto cont;
1686 } 1713 }
1687 1714
1688 /* Locate unicast link endpoint that should handle message */ 1715 /* Verify that communication with node is currently allowed */
1689 1716
1690 l_ptr = n_ptr->links[b_ptr->identity]; 1717 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1691 if (unlikely(!l_ptr)) { 1718 msg_user(msg) == LINK_PROTOCOL &&
1719 (msg_type(msg) == RESET_MSG ||
1720 msg_type(msg) == ACTIVATE_MSG) &&
1721 !msg_redundant_link(msg))
1722 n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1723
1724 if (n_ptr->block_setup) {
1692 tipc_node_unlock(n_ptr); 1725 tipc_node_unlock(n_ptr);
1693 goto cont; 1726 goto cont;
1694 } 1727 }
@@ -1923,6 +1956,12 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1923 1956
1924 if (link_blocked(l_ptr)) 1957 if (link_blocked(l_ptr))
1925 return; 1958 return;
1959
1960 /* Abort non-RESET send if communication with node is prohibited */
1961
1962 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1963 return;
1964
1926 msg_set_type(msg, msg_typ); 1965 msg_set_type(msg, msg_typ);
1927 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1966 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1928 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1967 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
@@ -2051,9 +2090,19 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2051 case RESET_MSG: 2090 case RESET_MSG:
2052 if (!link_working_unknown(l_ptr) && 2091 if (!link_working_unknown(l_ptr) &&
2053 (l_ptr->peer_session != INVALID_SESSION)) { 2092 (l_ptr->peer_session != INVALID_SESSION)) {
2054 if (msg_session(msg) == l_ptr->peer_session) 2093 if (less_eq(msg_session(msg), l_ptr->peer_session))
2055 break; /* duplicate: ignore */ 2094 break; /* duplicate or old reset: ignore */
2095 }
2096
2097 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2098 link_working_unknown(l_ptr))) {
2099 /*
2100 * peer has lost contact -- don't allow peer's links
2101 * to reactivate before we recognize loss & clean up
2102 */
2103 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2056 } 2104 }
2105
2057 /* fall thru' */ 2106 /* fall thru' */
2058 case ACTIVATE_MSG: 2107 case ACTIVATE_MSG:
2059 /* Update link settings according to other endpoint's values */ 2108 /* Update link settings according to other endpoint's values */
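The RESET_MSG path above now discards any reset whose session number is less than or equal to the recorded peer session, not only exact duplicates. TIPC session numbers are 16 bits wide and compared modulo 2^16; a sketch of a wraparound-safe "less than or equal", matching what less_eq() is assumed to compute:

    /* "a <= b" when b is at most half the sequence space ahead of a */
    static inline int seq_less_eq(u32 a, u32 b)
    {
        return ((b - a) & 0xffff) < 0x8000;
    }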
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 74fbecab1ea0..e56cb532913e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -223,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
223struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space); 223struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
224void tipc_link_reset(struct link *l_ptr); 224void tipc_link_reset(struct link *l_ptr);
225int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector); 225int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
226void tipc_link_send_names(struct list_head *message_list, u32 dest);
226int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf); 227int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
227u32 tipc_link_get_max_pkt(u32 dest, u32 selector); 228u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
228int tipc_link_send_sections_fast(struct tipc_port *sender, 229int tipc_link_send_sections_fast(struct tipc_port *sender,
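tipc_link_send_names(), added above and declared here, queues the accumulated buffers in one shot by reinterpreting the circular list_head chain as a linear sk_buff chain. This relies on struct sk_buff starting with its own next/prev pointers, mirroring list_head's layout in this kernel; a sketch of the conversion:

    /* terminate the last buffer, detach the chain, empty the list head */
    ((struct sk_buff *)message_list->prev)->next = NULL;
    link_add_chain_to_outqueue(l_ptr,
                               (struct sk_buff *)message_list->next, 0);
    INIT_LIST_HEAD(message_list);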
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index cd356e504332..b7ca1bd7b151 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -173,18 +173,40 @@ void tipc_named_withdraw(struct publication *publ)
173 * tipc_named_node_up - tell specified node about all publications by this node 173 * tipc_named_node_up - tell specified node about all publications by this node
174 */ 174 */
175 175
176void tipc_named_node_up(unsigned long node) 176void tipc_named_node_up(unsigned long nodearg)
177{ 177{
178 struct tipc_node *n_ptr;
179 struct link *l_ptr;
178 struct publication *publ; 180 struct publication *publ;
179 struct distr_item *item = NULL; 181 struct distr_item *item = NULL;
180 struct sk_buff *buf = NULL; 182 struct sk_buff *buf = NULL;
183 struct list_head message_list;
184 u32 node = (u32)nodearg;
181 u32 left = 0; 185 u32 left = 0;
182 u32 rest; 186 u32 rest;
183 u32 max_item_buf; 187 u32 max_item_buf = 0;
188
189 /* compute maximum amount of publication data to send per message */
190
191 read_lock_bh(&tipc_net_lock);
192 n_ptr = tipc_node_find(node);
193 if (n_ptr) {
194 tipc_node_lock(n_ptr);
195 l_ptr = n_ptr->active_links[0];
196 if (l_ptr)
197 max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
198 ITEM_SIZE) * ITEM_SIZE;
199 tipc_node_unlock(n_ptr);
200 }
201 read_unlock_bh(&tipc_net_lock);
202 if (!max_item_buf)
203 return;
204
205 /* create list of publication messages, then send them as a unit */
206
207 INIT_LIST_HEAD(&message_list);
184 208
185 read_lock_bh(&tipc_nametbl_lock); 209 read_lock_bh(&tipc_nametbl_lock);
186 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
187 max_item_buf *= ITEM_SIZE;
188 rest = publ_cnt * ITEM_SIZE; 210 rest = publ_cnt * ITEM_SIZE;
189 211
190 list_for_each_entry(publ, &publ_root, local_list) { 212 list_for_each_entry(publ, &publ_root, local_list) {
@@ -202,13 +224,14 @@ void tipc_named_node_up(unsigned long node)
202 item++; 224 item++;
203 left -= ITEM_SIZE; 225 left -= ITEM_SIZE;
204 if (!left) { 226 if (!left) {
205 msg_set_link_selector(buf_msg(buf), node); 227 list_add_tail((struct list_head *)buf, &message_list);
206 tipc_link_send(buf, node, node);
207 buf = NULL; 228 buf = NULL;
208 } 229 }
209 } 230 }
210exit: 231exit:
211 read_unlock_bh(&tipc_nametbl_lock); 232 read_unlock_bh(&tipc_nametbl_lock);
233
234 tipc_link_send_names(&message_list, (u32)node);
212} 235}
213 236
214/** 237/**
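The bulk-message size is now derived from the first active link's MTU rather than the fixed TIPC_MAX_USER_MSG_SIZE; the divide-then-multiply rounds the usable payload down to a whole number of publication items. A worked example with illustrative values (assuming max_pkt = 1500, INT_H_SIZE = 40 and ITEM_SIZE = 20):

    /* usable payload: 1500 - 40 = 1460 bytes
     * whole items:    1460 / 20 = 73
     * max_item_buf:   73 * 20   = 1460 bytes, never a partial item */
    max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;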
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 68b3dd637291..fafef6c3c0f6 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -141,17 +141,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
141 return; 141 return;
142 msg = buf_msg(buf); 142 msg = buf_msg(buf);
143 143
144 msg_incr_reroute_cnt(msg);
145 if (msg_reroute_cnt(msg) > 6) {
146 if (msg_errcode(msg)) {
147 buf_discard(buf);
148 } else {
149 tipc_reject_msg(buf, msg_destport(msg) ?
150 TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
151 }
152 return;
153 }
154
155 /* Handle message for this node */ 144 /* Handle message for this node */
156 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); 145 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
157 if (tipc_in_scope(dnode, tipc_own_addr)) { 146 if (tipc_in_scope(dnode, tipc_own_addr)) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2d106ef4fa4c..27b4bb0cca6c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -112,6 +112,7 @@ struct tipc_node *tipc_node_create(u32 addr)
112 break; 112 break;
113 } 113 }
114 list_add_tail(&n_ptr->list, &temp_node->list); 114 list_add_tail(&n_ptr->list, &temp_node->list);
115 n_ptr->block_setup = WAIT_PEER_DOWN;
115 116
116 tipc_num_nodes++; 117 tipc_num_nodes++;
117 118
@@ -312,7 +313,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
312 } 313 }
313} 314}
314 315
315static void node_cleanup_finished(unsigned long node_addr) 316static void node_name_purge_complete(unsigned long node_addr)
316{ 317{
317 struct tipc_node *n_ptr; 318 struct tipc_node *n_ptr;
318 319
@@ -320,7 +321,7 @@ static void node_cleanup_finished(unsigned long node_addr)
320 n_ptr = tipc_node_find(node_addr); 321 n_ptr = tipc_node_find(node_addr);
321 if (n_ptr) { 322 if (n_ptr) {
322 tipc_node_lock(n_ptr); 323 tipc_node_lock(n_ptr);
323 n_ptr->cleanup_required = 0; 324 n_ptr->block_setup &= ~WAIT_NAMES_GONE;
324 tipc_node_unlock(n_ptr); 325 tipc_node_unlock(n_ptr);
325 } 326 }
326 read_unlock_bh(&tipc_net_lock); 327 read_unlock_bh(&tipc_net_lock);
@@ -331,28 +332,32 @@ static void node_lost_contact(struct tipc_node *n_ptr)
331 char addr_string[16]; 332 char addr_string[16];
332 u32 i; 333 u32 i;
333 334
334 /* Clean up broadcast reception remains */ 335 info("Lost contact with %s\n",
335 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0; 336 tipc_addr_string_fill(addr_string, n_ptr->addr));
336 while (n_ptr->bclink.deferred_head) { 337
337 struct sk_buff *buf = n_ptr->bclink.deferred_head; 338 /* Flush broadcast link info associated with lost node */
338 n_ptr->bclink.deferred_head = buf->next;
339 buf_discard(buf);
340 }
341 if (n_ptr->bclink.defragm) {
342 buf_discard(n_ptr->bclink.defragm);
343 n_ptr->bclink.defragm = NULL;
344 }
345 339
346 if (n_ptr->bclink.supported) { 340 if (n_ptr->bclink.supported) {
341 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
342 while (n_ptr->bclink.deferred_head) {
343 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 n_ptr->bclink.deferred_head = buf->next;
345 buf_discard(buf);
346 }
347
348 if (n_ptr->bclink.defragm) {
349 buf_discard(n_ptr->bclink.defragm);
350 n_ptr->bclink.defragm = NULL;
351 }
352
353 tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
347 tipc_bclink_acknowledge(n_ptr, 354 tipc_bclink_acknowledge(n_ptr,
348 mod(n_ptr->bclink.acked + 10000)); 355 mod(n_ptr->bclink.acked + 10000));
349 tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
350 if (n_ptr->addr < tipc_own_addr) 356 if (n_ptr->addr < tipc_own_addr)
351 tipc_own_tag--; 357 tipc_own_tag--;
352 }
353 358
354 info("Lost contact with %s\n", 359 n_ptr->bclink.supported = 0;
355 tipc_addr_string_fill(addr_string, n_ptr->addr)); 360 }
356 361
357 /* Abort link changeover */ 362 /* Abort link changeover */
358 for (i = 0; i < MAX_BEARERS; i++) { 363 for (i = 0; i < MAX_BEARERS; i++) {
@@ -367,10 +372,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
367 /* Notify subscribers */ 372 /* Notify subscribers */
368 tipc_nodesub_notify(n_ptr); 373 tipc_nodesub_notify(n_ptr);
369 374
370 /* Prevent re-contact with node until all cleanup is done */ 375 /* Prevent re-contact with node until cleanup is done */
371 376
372 n_ptr->cleanup_required = 1; 377 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
373 tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr); 378 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
374} 379}
375 380
376struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 381struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5c61afc7a0b9..4f15cb40aaa4 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,6 +42,12 @@
42#include "net.h" 42#include "net.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* Flags used to block (re)establishment of contact with a neighboring node */
46
47#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
48#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
49#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
50
45/** 51/**
46 * struct tipc_node - TIPC node structure 52 * struct tipc_node - TIPC node structure
47 * @addr: network address of node 53 * @addr: network address of node
@@ -52,7 +58,7 @@
52 * @active_links: pointers to active links to node 58 * @active_links: pointers to active links to node
53 * @links: pointers to all links to node 59 * @links: pointers to all links to node
54 * @working_links: number of working links to node (both active and standby) 60 * @working_links: number of working links to node (both active and standby)
55 * @cleanup_required: non-zero if cleaning up after a prior loss of contact 61 * @block_setup: bit mask of conditions preventing link establishment to node
56 * @link_cnt: number of links to node 62 * @link_cnt: number of links to node
57 * @permit_changeover: non-zero if node has redundant links to this system 63 * @permit_changeover: non-zero if node has redundant links to this system
58 * @bclink: broadcast-related info 64 * @bclink: broadcast-related info
@@ -77,7 +83,7 @@ struct tipc_node {
77 struct link *links[MAX_BEARERS]; 83 struct link *links[MAX_BEARERS];
78 int link_cnt; 84 int link_cnt;
79 int working_links; 85 int working_links;
80 int cleanup_required; 86 int block_setup;
81 int permit_changeover; 87 int permit_changeover;
82 struct { 88 struct {
83 int supported; 89 int supported;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adb2eff4a102..9440a3d48ca0 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -49,7 +49,7 @@ struct tipc_sock {
49 struct sock sk; 49 struct sock sk;
50 struct tipc_port *p; 50 struct tipc_port *p;
51 struct tipc_portid peer_name; 51 struct tipc_portid peer_name;
52 long conn_timeout; 52 unsigned int conn_timeout;
53}; 53};
54 54
55#define tipc_sk(sk) ((struct tipc_sock *)(sk)) 55#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -231,7 +231,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
231 sock_init_data(sock, sk); 231 sock_init_data(sock, sk);
232 sk->sk_backlog_rcv = backlog_rcv; 232 sk->sk_backlog_rcv = backlog_rcv;
233 tipc_sk(sk)->p = tp_ptr; 233 tipc_sk(sk)->p = tp_ptr;
234 tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); 234 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
235 235
236 spin_unlock_bh(tp_ptr->lock); 236 spin_unlock_bh(tp_ptr->lock);
237 237
@@ -525,6 +525,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
525 struct tipc_port *tport = tipc_sk_port(sk); 525 struct tipc_port *tport = tipc_sk_port(sk);
526 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 526 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
527 int needs_conn; 527 int needs_conn;
528 long timeout_val;
528 int res = -EINVAL; 529 int res = -EINVAL;
529 530
530 if (unlikely(!dest)) 531 if (unlikely(!dest))
@@ -564,6 +565,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
564 reject_rx_queue(sk); 565 reject_rx_queue(sk);
565 } 566 }
566 567
568 timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
569
567 do { 570 do {
568 if (dest->addrtype == TIPC_ADDR_NAME) { 571 if (dest->addrtype == TIPC_ADDR_NAME) {
569 res = dest_name_check(dest, m); 572 res = dest_name_check(dest, m);
@@ -600,16 +603,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
600 sock->state = SS_CONNECTING; 603 sock->state = SS_CONNECTING;
601 break; 604 break;
602 } 605 }
603 if (m->msg_flags & MSG_DONTWAIT) { 606 if (timeout_val <= 0L) {
604 res = -EWOULDBLOCK; 607 res = timeout_val ? timeout_val : -EWOULDBLOCK;
605 break; 608 break;
606 } 609 }
607 release_sock(sk); 610 release_sock(sk);
608 res = wait_event_interruptible(*sk_sleep(sk), 611 timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
609 !tport->congested); 612 !tport->congested, timeout_val);
610 lock_sock(sk); 613 lock_sock(sk);
611 if (res)
612 break;
613 } while (1); 614 } while (1);
614 615
615exit: 616exit:
@@ -636,6 +637,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
636 struct sock *sk = sock->sk; 637 struct sock *sk = sock->sk;
637 struct tipc_port *tport = tipc_sk_port(sk); 638 struct tipc_port *tport = tipc_sk_port(sk);
638 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 639 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
640 long timeout_val;
639 int res; 641 int res;
640 642
641 /* Handle implied connection establishment */ 643 /* Handle implied connection establishment */
@@ -650,6 +652,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
650 if (iocb) 652 if (iocb)
651 lock_sock(sk); 653 lock_sock(sk);
652 654
655 timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
656
653 do { 657 do {
654 if (unlikely(sock->state != SS_CONNECTED)) { 658 if (unlikely(sock->state != SS_CONNECTED)) {
655 if (sock->state == SS_DISCONNECTING) 659 if (sock->state == SS_DISCONNECTING)
@@ -663,16 +667,14 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
663 total_len); 667 total_len);
664 if (likely(res != -ELINKCONG)) 668 if (likely(res != -ELINKCONG))
665 break; 669 break;
666 if (m->msg_flags & MSG_DONTWAIT) { 670 if (timeout_val <= 0L) {
667 res = -EWOULDBLOCK; 671 res = timeout_val ? timeout_val : -EWOULDBLOCK;
668 break; 672 break;
669 } 673 }
670 release_sock(sk); 674 release_sock(sk);
671 res = wait_event_interruptible(*sk_sleep(sk), 675 timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
672 (!tport->congested || !tport->connected)); 676 (!tport->congested || !tport->connected), timeout_val);
673 lock_sock(sk); 677 lock_sock(sk);
674 if (res)
675 break;
676 } while (1); 678 } while (1);
677 679
678 if (iocb) 680 if (iocb)
@@ -1369,7 +1371,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1369 struct msghdr m = {NULL,}; 1371 struct msghdr m = {NULL,};
1370 struct sk_buff *buf; 1372 struct sk_buff *buf;
1371 struct tipc_msg *msg; 1373 struct tipc_msg *msg;
1372 long timeout; 1374 unsigned int timeout;
1373 int res; 1375 int res;
1374 1376
1375 lock_sock(sk); 1377 lock_sock(sk);
@@ -1434,7 +1436,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1434 res = wait_event_interruptible_timeout(*sk_sleep(sk), 1436 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1435 (!skb_queue_empty(&sk->sk_receive_queue) || 1437 (!skb_queue_empty(&sk->sk_receive_queue) ||
1436 (sock->state != SS_CONNECTING)), 1438 (sock->state != SS_CONNECTING)),
1437 timeout ? timeout : MAX_SCHEDULE_TIMEOUT); 1439 timeout ? (long)msecs_to_jiffies(timeout)
1440 : MAX_SCHEDULE_TIMEOUT);
1438 lock_sock(sk); 1441 lock_sock(sk);
1439 1442
1440 if (res > 0) { 1443 if (res > 0) {
@@ -1480,9 +1483,7 @@ static int listen(struct socket *sock, int len)
1480 1483
1481 lock_sock(sk); 1484 lock_sock(sk);
1482 1485
1483 if (sock->state == SS_READY) 1486 if (sock->state != SS_UNCONNECTED)
1484 res = -EOPNOTSUPP;
1485 else if (sock->state != SS_UNCONNECTED)
1486 res = -EINVAL; 1487 res = -EINVAL;
1487 else { 1488 else {
1488 sock->state = SS_LISTENING; 1489 sock->state = SS_LISTENING;
@@ -1510,10 +1511,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1510 1511
1511 lock_sock(sk); 1512 lock_sock(sk);
1512 1513
1513 if (sock->state == SS_READY) {
1514 res = -EOPNOTSUPP;
1515 goto exit;
1516 }
1517 if (sock->state != SS_LISTENING) { 1514 if (sock->state != SS_LISTENING) {
1518 res = -EINVAL; 1515 res = -EINVAL;
1519 goto exit; 1516 goto exit;
@@ -1696,7 +1693,7 @@ static int setsockopt(struct socket *sock,
1696 res = tipc_set_portunreturnable(tport->ref, value); 1693 res = tipc_set_portunreturnable(tport->ref, value);
1697 break; 1694 break;
1698 case TIPC_CONN_TIMEOUT: 1695 case TIPC_CONN_TIMEOUT:
1699 tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value); 1696 tipc_sk(sk)->conn_timeout = value;
1700 /* no need to set "res", since already 0 at this point */ 1697 /* no need to set "res", since already 0 at this point */
1701 break; 1698 break;
1702 default: 1699 default:
@@ -1752,7 +1749,7 @@ static int getsockopt(struct socket *sock,
1752 res = tipc_portunreturnable(tport->ref, &value); 1749 res = tipc_portunreturnable(tport->ref, &value);
1753 break; 1750 break;
1754 case TIPC_CONN_TIMEOUT: 1751 case TIPC_CONN_TIMEOUT:
1755 value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout); 1752 value = tipc_sk(sk)->conn_timeout;
1756 /* no need to set "res", since already 0 at this point */ 1753 /* no need to set "res", since already 0 at this point */
1757 break; 1754 break;
1758 case TIPC_NODE_RECVQ_DEPTH: 1755 case TIPC_NODE_RECVQ_DEPTH:
@@ -1790,11 +1787,11 @@ static const struct proto_ops msg_ops = {
1790 .bind = bind, 1787 .bind = bind,
1791 .connect = connect, 1788 .connect = connect,
1792 .socketpair = sock_no_socketpair, 1789 .socketpair = sock_no_socketpair,
1793 .accept = accept, 1790 .accept = sock_no_accept,
1794 .getname = get_name, 1791 .getname = get_name,
1795 .poll = poll, 1792 .poll = poll,
1796 .ioctl = sock_no_ioctl, 1793 .ioctl = sock_no_ioctl,
1797 .listen = listen, 1794 .listen = sock_no_listen,
1798 .shutdown = shutdown, 1795 .shutdown = shutdown,
1799 .setsockopt = setsockopt, 1796 .setsockopt = setsockopt,
1800 .getsockopt = getsockopt, 1797 .getsockopt = getsockopt,
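The send paths now honor SO_SNDTIMEO instead of only distinguishing blocking from MSG_DONTWAIT: sock_sndtimeo() returns 0 for non-blocking calls, so a single "timeout_val <= 0" test covers -EWOULDBLOCK, an expired timeout and an interrupted sleep. A sketch of the loop shape (try_send() is a hypothetical stand-in for the actual tipc_send*() call):

    timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
    do {
        res = try_send(tport, m);
        if (res != -ELINKCONG)
            break;                  /* sent, or a hard error */
        if (timeout_val <= 0L) {
            res = timeout_val ? timeout_val : -EWOULDBLOCK;
            break;
        }
        release_sock(sk);
        timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
                        !tport->congested, timeout_val);
        lock_sock(sk);
    } while (1);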
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 6cf726863485..198371723b41 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -151,7 +151,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
151 if (!must && !(sub->filter & TIPC_SUB_PORTS)) 151 if (!must && !(sub->filter & TIPC_SUB_PORTS))
152 return; 152 return;
153 153
154 sub->event_cb(sub, found_lower, found_upper, event, port_ref, node); 154 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
155} 155}
156 156
157/** 157/**
@@ -365,7 +365,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
365 subscr_terminate(subscriber); 365 subscr_terminate(subscriber);
366 return NULL; 366 return NULL;
367 } 367 }
368 sub->event_cb = subscr_send_event;
369 INIT_LIST_HEAD(&sub->nameseq_list); 368 INIT_LIST_HEAD(&sub->nameseq_list);
370 list_add(&sub->subscription_list, &subscriber->subscription_list); 369 list_add(&sub->subscription_list, &subscriber->subscription_list);
371 sub->server_ref = subscriber->port_ref; 370 sub->server_ref = subscriber->port_ref;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d202..4b06ef6f8401 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,16 +39,11 @@
39 39
40struct subscription; 40struct subscription;
41 41
42typedef void (*tipc_subscr_event) (struct subscription *sub,
43 u32 found_lower, u32 found_upper,
44 u32 event, u32 port_ref, u32 node);
45
46/** 42/**
47 * struct subscription - TIPC network topology subscription object 43 * struct subscription - TIPC network topology subscription object
48 * @seq: name sequence associated with subscription 44 * @seq: name sequence associated with subscription
49 * @timeout: duration of subscription (in ms) 45 * @timeout: duration of subscription (in ms)
50 * @filter: event filtering to be done for subscription 46 * @filter: event filtering to be done for subscription
51 * @event_cb: routine invoked when a subscription event is detected
52 * @timer: timer governing subscription duration (optional) 47 * @timer: timer governing subscription duration (optional)
53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 48 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
54 * @subscription_list: adjacent subscriptions in subscriber's subscription list 49 * @subscription_list: adjacent subscriptions in subscriber's subscription list
@@ -61,7 +56,6 @@ struct subscription {
61 struct tipc_name_seq seq; 56 struct tipc_name_seq seq;
62 u32 timeout; 57 u32 timeout;
63 u32 filter; 58 u32 filter;
64 tipc_subscr_event event_cb;
65 struct timer_list timer; 59 struct timer_list timer;
66 struct list_head nameseq_list; 60 struct list_head nameseq_list;
67 struct list_head subscription_list; 61 struct list_head subscription_list;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ec68e1c05b85..466fbcc5cf77 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1381,8 +1381,10 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) 1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1382{ 1382{
1383 int err = 0; 1383 int err = 0;
1384
1384 UNIXCB(skb).pid = get_pid(scm->pid); 1385 UNIXCB(skb).pid = get_pid(scm->pid);
1385 UNIXCB(skb).cred = get_cred(scm->cred); 1386 if (scm->cred)
1387 UNIXCB(skb).cred = get_cred(scm->cred);
1386 UNIXCB(skb).fp = NULL; 1388 UNIXCB(skb).fp = NULL;
1387 if (scm->fp && send_fds) 1389 if (scm->fp && send_fds)
1388 err = unix_attach_fds(scm, skb); 1390 err = unix_attach_fds(scm, skb);
@@ -1392,6 +1394,24 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
1392} 1394}
1393 1395
1394/* 1396/*
1397 * Some apps rely on write() giving SCM_CREDENTIALS.
1398 * We include credentials if source or destination socket
1399 * asserted SOCK_PASSCRED.
1400 */
1401static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1402 const struct sock *other)
1403{
1404 if (UNIXCB(skb).cred)
1405 return;
1406 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1407 !other->sk_socket ||
1408 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1409 UNIXCB(skb).pid = get_pid(task_tgid(current));
1410 UNIXCB(skb).cred = get_current_cred();
1411 }
1412}
1413
1414/*
1395 * Send AF_UNIX data. 1415 * Send AF_UNIX data.
1396 */ 1416 */
1397 1417
@@ -1538,6 +1558,7 @@ restart:
1538 1558
1539 if (sock_flag(other, SOCK_RCVTSTAMP)) 1559 if (sock_flag(other, SOCK_RCVTSTAMP))
1540 __net_timestamp(skb); 1560 __net_timestamp(skb);
1561 maybe_add_creds(skb, sock, other);
1541 skb_queue_tail(&other->sk_receive_queue, skb); 1562 skb_queue_tail(&other->sk_receive_queue, skb);
1542 if (max_level > unix_sk(other)->recursion_level) 1563 if (max_level > unix_sk(other)->recursion_level)
1543 unix_sk(other)->recursion_level = max_level; 1564 unix_sk(other)->recursion_level = max_level;
@@ -1652,6 +1673,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1652 (other->sk_shutdown & RCV_SHUTDOWN)) 1673 (other->sk_shutdown & RCV_SHUTDOWN))
1653 goto pipe_err_free; 1674 goto pipe_err_free;
1654 1675
1676 maybe_add_creds(skb, sock, other);
1655 skb_queue_tail(&other->sk_receive_queue, skb); 1677 skb_queue_tail(&other->sk_receive_queue, skb);
1656 if (max_level > unix_sk(other)->recursion_level) 1678 if (max_level > unix_sk(other)->recursion_level)
1657 unix_sk(other)->recursion_level = max_level; 1679 unix_sk(other)->recursion_level = max_level;
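maybe_add_creds() attaches credentials when either endpoint asserted SOCK_PASSCRED; from user space the flag is set with the SO_PASSCRED socket option, after which each received message carries SCM_CREDENTIALS ancillary data. A minimal, self-contained user-space sketch (Linux only):

    #define _GNU_SOURCE
    #include <sys/socket.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int sv[2], on = 1;
        char data[1], cbuf[CMSG_SPACE(sizeof(struct ucred))];
        struct iovec iov = { .iov_base = data, .iov_len = 1 };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };
        struct cmsghdr *c;

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
        setsockopt(sv[0], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
        write(sv[1], "x", 1);    /* sender does nothing special */

        recvmsg(sv[0], &msg, 0);
        for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
            if (c->cmsg_level == SOL_SOCKET &&
                c->cmsg_type == SCM_CREDENTIALS) {
                struct ucred u;
                memcpy(&u, CMSG_DATA(c), sizeof(u));
                printf("pid=%d uid=%d gid=%d\n",
                       (int)u.pid, (int)u.uid, (int)u.gid);
            }
        return 0;
    }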
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c14865172da7..220f3bd176f8 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -582,7 +582,7 @@ int wiphy_register(struct wiphy *wiphy)
582 } 582 }
583 583
584 /* set up regulatory info */ 584 /* set up regulatory info */
585 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 585 regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
586 586
587 list_add_rcu(&rdev->list, &cfg80211_rdev_list); 587 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
588 cfg80211_rdev_list_generation++; 588 cfg80211_rdev_list_generation++;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8672e028022f..b9ec3061ed72 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -279,8 +279,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
279 char *newname); 279 char *newname);
280 280
281void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 281void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
282void wiphy_update_regulatory(struct wiphy *wiphy,
283 enum nl80211_reg_initiator setby);
284 282
285void cfg80211_bss_expire(struct cfg80211_registered_device *dev); 283void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
286void cfg80211_bss_age(struct cfg80211_registered_device *dev, 284void cfg80211_bss_age(struct cfg80211_registered_device *dev,
@@ -377,7 +375,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
377 struct ieee80211_channel *chan, bool offchan, 375 struct ieee80211_channel *chan, bool offchan,
378 enum nl80211_channel_type channel_type, 376 enum nl80211_channel_type channel_type,
379 bool channel_type_valid, unsigned int wait, 377 bool channel_type_valid, unsigned int wait,
380 const u8 *buf, size_t len, u64 *cookie); 378 const u8 *buf, size_t len, bool no_cck,
379 u64 *cookie);
381 380
382/* SME */ 381/* SME */
383int __cfg80211_connect(struct cfg80211_registered_device *rdev, 382int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -408,6 +407,7 @@ void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
408bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); 407bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
409 408
410/* internal helpers */ 409/* internal helpers */
410bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
411int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, 411int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
412 struct key_params *params, int key_idx, 412 struct key_params *params, int key_idx,
413 bool pairwise, const u8 *mac_addr); 413 bool pairwise, const u8 *mac_addr);
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 3268fac5ab22..a55c27b75ee5 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -41,6 +41,11 @@ struct lib80211_crypto_alg {
41static LIST_HEAD(lib80211_crypto_algs); 41static LIST_HEAD(lib80211_crypto_algs);
42static DEFINE_SPINLOCK(lib80211_crypto_lock); 42static DEFINE_SPINLOCK(lib80211_crypto_lock);
43 43
44static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
45 int force);
46static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
47static void lib80211_crypt_deinit_handler(unsigned long data);
48
44const char *print_ssid(char *buf, const char *ssid, u8 ssid_len) 49const char *print_ssid(char *buf, const char *ssid, u8 ssid_len)
45{ 50{
46 const char *s = ssid; 51 const char *s = ssid;
@@ -111,7 +116,8 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
111} 116}
112EXPORT_SYMBOL(lib80211_crypt_info_free); 117EXPORT_SYMBOL(lib80211_crypt_info_free);
113 118
114void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) 119static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
120 int force)
115{ 121{
116 struct lib80211_crypt_data *entry, *next; 122 struct lib80211_crypt_data *entry, *next;
117 unsigned long flags; 123 unsigned long flags;
@@ -131,10 +137,9 @@ void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
131 } 137 }
132 spin_unlock_irqrestore(info->lock, flags); 138 spin_unlock_irqrestore(info->lock, flags);
133} 139}
134EXPORT_SYMBOL(lib80211_crypt_deinit_entries);
135 140
136/* After this, crypt_deinit_list won't accept new members */ 141/* After this, crypt_deinit_list won't accept new members */
137void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) 142static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
138{ 143{
139 unsigned long flags; 144 unsigned long flags;
140 145
@@ -142,9 +147,8 @@ void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
142 info->crypt_quiesced = 1; 147 info->crypt_quiesced = 1;
143 spin_unlock_irqrestore(info->lock, flags); 148 spin_unlock_irqrestore(info->lock, flags);
144} 149}
145EXPORT_SYMBOL(lib80211_crypt_quiescing);
146 150
147void lib80211_crypt_deinit_handler(unsigned long data) 151static void lib80211_crypt_deinit_handler(unsigned long data)
148{ 152{
149 struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data; 153 struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
150 unsigned long flags; 154 unsigned long flags;
@@ -160,7 +164,6 @@ void lib80211_crypt_deinit_handler(unsigned long data)
160 } 164 }
161 spin_unlock_irqrestore(info->lock, flags); 165 spin_unlock_irqrestore(info->lock, flags);
162} 166}
163EXPORT_SYMBOL(lib80211_crypt_deinit_handler);
164 167
165void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, 168void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
166 struct lib80211_crypt_data **crypt) 169 struct lib80211_crypt_data **crypt)
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index dacb3b4b1bdb..755738d26bb4 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -77,8 +77,6 @@ static void *lib80211_ccmp_init(int key_idx)
77 77
78 priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 78 priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
79 if (IS_ERR(priv->tfm)) { 79 if (IS_ERR(priv->tfm)) {
80 printk(KERN_DEBUG "lib80211_crypt_ccmp: could not allocate "
81 "crypto API aes\n");
82 priv->tfm = NULL; 80 priv->tfm = NULL;
83 goto fail; 81 goto fail;
84 } 82 }
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 7ea4f2b0770e..38734846c19e 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -101,7 +101,6 @@ static void *lib80211_tkip_init(int key_idx)
101 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 101 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
102 CRYPTO_ALG_ASYNC); 102 CRYPTO_ALG_ASYNC);
103 if (IS_ERR(priv->tx_tfm_arc4)) { 103 if (IS_ERR(priv->tx_tfm_arc4)) {
104 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
105 priv->tx_tfm_arc4 = NULL; 104 priv->tx_tfm_arc4 = NULL;
106 goto fail; 105 goto fail;
107 } 106 }
@@ -109,7 +108,6 @@ static void *lib80211_tkip_init(int key_idx)
109 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 108 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
110 CRYPTO_ALG_ASYNC); 109 CRYPTO_ALG_ASYNC);
111 if (IS_ERR(priv->tx_tfm_michael)) { 110 if (IS_ERR(priv->tx_tfm_michael)) {
112 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
113 priv->tx_tfm_michael = NULL; 111 priv->tx_tfm_michael = NULL;
114 goto fail; 112 goto fail;
115 } 113 }
@@ -117,7 +115,6 @@ static void *lib80211_tkip_init(int key_idx)
117 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 115 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
118 CRYPTO_ALG_ASYNC); 116 CRYPTO_ALG_ASYNC);
119 if (IS_ERR(priv->rx_tfm_arc4)) { 117 if (IS_ERR(priv->rx_tfm_arc4)) {
120 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
121 priv->rx_tfm_arc4 = NULL; 118 priv->rx_tfm_arc4 = NULL;
122 goto fail; 119 goto fail;
123 } 120 }
@@ -125,7 +122,6 @@ static void *lib80211_tkip_init(int key_idx)
125 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 122 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
126 CRYPTO_ALG_ASYNC); 123 CRYPTO_ALG_ASYNC);
127 if (IS_ERR(priv->rx_tfm_michael)) { 124 if (IS_ERR(priv->rx_tfm_michael)) {
128 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
129 priv->rx_tfm_michael = NULL; 125 priv->rx_tfm_michael = NULL;
130 goto fail; 126 goto fail;
131 } 127 }
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 2f265e033ae2..c1304018fc1c 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -50,16 +50,12 @@ static void *lib80211_wep_init(int keyidx)
50 50
51 priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 51 priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
52 if (IS_ERR(priv->tx_tfm)) { 52 if (IS_ERR(priv->tx_tfm)) {
53 printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
54 "crypto API arc4\n");
55 priv->tx_tfm = NULL; 53 priv->tx_tfm = NULL;
56 goto fail; 54 goto fail;
57 } 55 }
58 56
59 priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 57 priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
60 if (IS_ERR(priv->rx_tfm)) { 58 if (IS_ERR(priv->rx_tfm)) {
61 printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
62 "crypto API arc4\n");
63 priv->rx_tfm = NULL; 59 priv->rx_tfm = NULL;
64 goto fail; 60 goto fail;
65 } 61 }
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5c116083eeca..4423e64c7d98 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -12,6 +12,7 @@
12#define MESH_HOLD_T 100 12#define MESH_HOLD_T 100
13 13
14#define MESH_PATH_TIMEOUT 5000 14#define MESH_PATH_TIMEOUT 5000
15#define MESH_RANN_INTERVAL 5000
15 16
16/* 17/*
17 * Minimum interval between two consecutive PREQs originated by the same 18 * Minimum interval between two consecutive PREQs originated by the same
@@ -49,6 +50,8 @@ const struct mesh_config default_mesh_config = {
49 .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES, 50 .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
50 .path_refresh_time = MESH_PATH_REFRESH_TIME, 51 .path_refresh_time = MESH_PATH_REFRESH_TIME,
51 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, 52 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
53 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
54 .dot11MeshGateAnnouncementProtocol = false,
52}; 55};
53 56
54const struct mesh_setup default_mesh_setup = { 57const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 832f6574e4ed..21fc9702f81c 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -900,7 +900,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
900 struct ieee80211_channel *chan, bool offchan, 900 struct ieee80211_channel *chan, bool offchan,
901 enum nl80211_channel_type channel_type, 901 enum nl80211_channel_type channel_type,
902 bool channel_type_valid, unsigned int wait, 902 bool channel_type_valid, unsigned int wait,
903 const u8 *buf, size_t len, u64 *cookie) 903 const u8 *buf, size_t len, bool no_cck,
904 u64 *cookie)
904{ 905{
905 struct wireless_dev *wdev = dev->ieee80211_ptr; 906 struct wireless_dev *wdev = dev->ieee80211_ptr;
906 const struct ieee80211_mgmt *mgmt; 907 const struct ieee80211_mgmt *mgmt;
@@ -991,7 +992,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
991 /* Transmit the Action frame as requested by user space */ 992 /* Transmit the Action frame as requested by user space */
992 return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan, 993 return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
993 channel_type, channel_type_valid, 994 channel_type, channel_type_valid,
994 wait, buf, len, cookie); 995 wait, buf, len, no_cck, cookie);
995} 996}
996 997
997bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, 998bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1095,3 +1096,14 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
1095 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); 1096 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
1096} 1097}
1097EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); 1098EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
1099
1100void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
1101 const u8 *bssid, bool preauth, gfp_t gfp)
1102{
1103 struct wireless_dev *wdev = dev->ieee80211_ptr;
1104 struct wiphy *wiphy = wdev->wiphy;
1105 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1106
1107 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
1108}
1109EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
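cfg80211_mlme_mgmt_tx() gains a no_cck flag that is passed straight through to the driver's mgmt_tx operation. A sketch of a driver callback under the widened signature (drv_mgmt_tx and its body are illustrative, not from this patch):

    static int drv_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
                           struct ieee80211_channel *chan, bool offchan,
                           enum nl80211_channel_type channel_type,
                           bool channel_type_valid, unsigned int wait,
                           const u8 *buf, size_t len, bool no_cck,
                           u64 *cookie)
    {
        /* when no_cck is set, choose a rate mask without CCK rates
         * (e.g. for P2P frames) before handing the frame to hardware */
        return 0;
    }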
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ea40d540a990..48260c2d092a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -23,6 +23,12 @@
23#include "nl80211.h" 23#include "nl80211.h"
24#include "reg.h" 24#include "reg.h"
25 25
26static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
27static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
28 struct genl_info *info,
29 struct cfg80211_crypto_settings *settings,
30 int cipher_limit);
31
26static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, 32static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
27 struct genl_info *info); 33 struct genl_info *info);
28static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, 34static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
@@ -178,6 +184,19 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
178 [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, 184 [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
179 [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, 185 [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
180 [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED }, 186 [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
187 [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
188 [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
189 .len = IEEE80211_MAX_DATA_LEN },
190 [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
191 .len = IEEE80211_MAX_DATA_LEN },
192 [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
193 [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
194 [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
195 [NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 },
196 [NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 },
197 [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
198 [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
199 [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
181}; 200};
182 201
183/* policy for the key attributes */ 202/* policy for the key attributes */
@@ -220,6 +239,12 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
220 [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, 239 [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
221}; 240};
222 241
242static const struct nla_policy
243nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
244 [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
245 .len = IEEE80211_MAX_SSID_LEN },
246};
247
223/* ifidx get helper */ 248/* ifidx get helper */
224static int nl80211_get_ifidx(struct netlink_callback *cb) 249static int nl80211_get_ifidx(struct netlink_callback *cb)
225{ 250{
@@ -703,11 +728,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		    dev->wiphy.max_scan_ie_len);
 	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
 		    dev->wiphy.max_sched_scan_ie_len);
+	NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+		   dev->wiphy.max_match_sets);
 
 	if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
 	if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
+	if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
+	if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
 
 	NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
 		sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -850,6 +885,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	}
 	CMD(set_channel, SET_CHANNEL);
 	CMD(set_wds_peer, SET_WDS_PEER);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+		CMD(tdls_mgmt, TDLS_MGMT);
+		CMD(tdls_oper, TDLS_OPER);
+	}
 	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
 		CMD(sched_scan_start, START_SCHED_SCAN);
 
@@ -871,8 +910,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
 		    dev->wiphy.max_remain_on_channel_duration);
 
-	/* for now at least assume all drivers have it */
-	if (dev->ops->mgmt_tx)
+	if (dev->ops->mgmt_tx_cancel_wait)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
 
 	if (mgmt_stypes) {
@@ -1210,6 +1248,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 			goto bad_res;
 		}
 
+		if (!netdev) {
+			result = -EINVAL;
+			goto bad_res;
+		}
+
 		nla_for_each_nested(nl_txq_params,
 				    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
 				    rem_txq_params) {
@@ -1222,6 +1265,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 				goto bad_res;
 
 			result = rdev->ops->set_txq_params(&rdev->wiphy,
+							   netdev,
 							   &txq_params);
 			if (result)
 				goto bad_res;
@@ -1985,7 +2029,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	struct beacon_parameters params;
 	int haveinfo = 0, err;
 
-	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
+	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
 		return -EINVAL;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
@@ -2011,6 +2058,49 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 		if (err)
 			return err;
 
+		/*
+		 * In theory, some of these attributes could be required for
+		 * NEW_BEACON, but since they were not used when the command was
+		 * originally added, keep them optional for old user space
+		 * programs to work with drivers that do not need the additional
+		 * information.
+		 */
+		if (info->attrs[NL80211_ATTR_SSID]) {
+			params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+			params.ssid_len =
+				nla_len(info->attrs[NL80211_ATTR_SSID]);
+			if (params.ssid_len == 0 ||
+			    params.ssid_len > IEEE80211_MAX_SSID_LEN)
+				return -EINVAL;
+		}
+
+		if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+			params.hidden_ssid = nla_get_u32(
+				info->attrs[NL80211_ATTR_HIDDEN_SSID]);
+			if (params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_NOT_IN_USE &&
+			    params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_ZERO_LEN &&
+			    params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_ZERO_CONTENTS)
+				return -EINVAL;
+		}
+
+		params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
+
+		if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+			params.auth_type = nla_get_u32(
+				info->attrs[NL80211_ATTR_AUTH_TYPE]);
+			if (!nl80211_valid_auth_type(params.auth_type))
+				return -EINVAL;
+		} else
+			params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+		err = nl80211_crypto_settings(rdev, info, &params.crypto,
+					      NL80211_MAX_NR_CIPHER_SUITES);
+		if (err)
+			return err;
+
 		call = rdev->ops->add_beacon;
 		break;
 	case NL80211_CMD_SET_BEACON:
@@ -2041,6 +2131,25 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	if (!haveinfo)
 		return -EINVAL;
 
+	if (info->attrs[NL80211_ATTR_IE]) {
+		params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
+		params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+	}
+
+	if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
+		params.proberesp_ies =
+			nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+		params.proberesp_ies_len =
+			nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+	}
+
+	if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
+		params.assocresp_ies =
+			nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+		params.assocresp_ies_len =
+			nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+	}
+
 	err = call(&rdev->wiphy, dev, &params);
 	if (!err && params.interval)
 		wdev->beacon_interval = params.interval;
@@ -2235,8 +2344,16 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 
 		nla_nest_end(msg, bss_param);
 	}
+	if (sinfo->filled & STATION_INFO_STA_FLAGS)
+		NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
+			sizeof(struct nl80211_sta_flag_update),
+			&sinfo->sta_flags);
 	nla_nest_end(msg, sinfoattr);
 
+	if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+			sinfo->assoc_req_ies);
+
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
@@ -2264,6 +2381,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
 	}
 
 	while (1) {
+		memset(&sinfo, 0, sizeof(sinfo));
 		err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
 					     mac_addr, &sinfo);
 		if (err == -ENOENT)
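The one-line memset() above fixes a subtle bug: sinfo is a single stack variable reused for every station in the dump, and drivers typically OR bits into sinfo->filled rather than assigning it. A hypothetical driver callback shows the leak the fix prevents (illustrative only, not part of this patch):

	static int drv_dump_station(struct wiphy *wiphy, struct net_device *dev,
				    int idx, u8 *mac, struct station_info *sinfo)
	{
		/* ORs into whatever a previous iteration left behind, so
		 * stale flags (and pointers such as assoc_req_ies) would be
		 * reported for this station without the memset() */
		sinfo->filled |= STATION_INFO_SIGNAL;
		sinfo->signal = -42;	/* made-up value */
		return 0;
	}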
@@ -2416,18 +2534,25 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		/* disallow everything but AUTHORIZED flag */
+		/* disallow things sta doesn't support */
 		if (params.plink_action)
 			err = -EINVAL;
 		if (params.vlan)
 			err = -EINVAL;
-		if (params.supported_rates)
+		if (params.supported_rates &&
+		    !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
 			err = -EINVAL;
 		if (params.ht_capa)
 			err = -EINVAL;
 		if (params.listen_interval >= 0)
 			err = -EINVAL;
-		if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+		if (params.sta_flags_mask &
+		    ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+		      BIT(NL80211_STA_FLAG_TDLS_PEER)))
+			err = -EINVAL;
+		/* can't change the TDLS bit */
+		if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+		    (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
 			err = -EINVAL;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
@@ -2465,6 +2590,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
+static struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+	[NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
+	[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+};
+
 static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2510,10 +2641,50 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	if (parse_station_flags(info, &params))
 		return -EINVAL;
 
+	/* parse WME attributes if sta is WME capable */
+	if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+	    (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+	    info->attrs[NL80211_ATTR_STA_WME]) {
+		struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+		struct nlattr *nla;
+
+		nla = info->attrs[NL80211_ATTR_STA_WME];
+		err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+				       nl80211_sta_wme_policy);
+		if (err)
+			return err;
+
+		if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+			params.uapsd_queues =
+			     nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+		if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+			return -EINVAL;
+
+		if (tb[NL80211_STA_WME_MAX_SP])
+			params.max_sp =
+			     nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+		if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+			return -EINVAL;
+
+		params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+	}
+
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+	/*
+	 * Only managed stations can add TDLS peers, and only when the
+	 * wiphy supports external TDLS setup.
+	 */
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+	    !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+	      (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+	      (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
 		return -EINVAL;
 
 	err = get_vlan(info, rdev, &params.vlan);
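Restating the managed-interface gate above as a single predicate (a sketch, equivalent to the condition in the hunk): NEW_STATION on an NL80211_IFTYPE_STATION interface is accepted only when the peer is flagged as a TDLS peer and the wiphy advertises both TDLS support and external TDLS setup.

	static bool tdls_peer_add_allowed(struct wiphy *wiphy, u32 sta_flags_set)
	{
		return (sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
		       (wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
		       (wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP);
	}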
@@ -2955,6 +3126,10 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
 			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
 	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
 			cur_params.dot11MeshHWMPRootMode);
+	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+			cur_params.dot11MeshHWMPRannInterval);
+	NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+			cur_params.dot11MeshGateAnnouncementProtocol);
 	nla_nest_end(msg, pinfoattr);
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -2982,6 +3157,9 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
 	[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
 	[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
+	[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
 };
 
 static const struct nla_policy
@@ -3060,6 +3238,14 @@ do {\
 				dot11MeshHWMPRootMode, mask,
 				NL80211_MESHCONF_HWMP_ROOTMODE,
 				nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				dot11MeshHWMPRannInterval, mask,
+				NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+				nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				dot11MeshGateAnnouncementProtocol, mask,
+				NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+				nla_get_u8);
 	if (mask_out)
 		*mask_out = mask;
 
@@ -3477,6 +3663,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
+	request->no_cck =
+		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
 	request->dev = dev;
 	request->wiphy = &rdev->wiphy;
 
@@ -3503,10 +3692,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	struct net_device *dev = info->user_ptr[1];
 	struct nlattr *attr;
 	struct wiphy *wiphy;
-	int err, tmp, n_ssids = 0, n_channels, i;
+	int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
 	u32 interval;
 	enum ieee80211_band band;
 	size_t ie_len;
+	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_start)
@@ -3545,6 +3735,15 @@
 	if (n_ssids > wiphy->max_sched_scan_ssids)
 		return -EINVAL;
 
+	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+		nla_for_each_nested(attr,
+				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+				    tmp)
+			n_match_sets++;
+
+	if (n_match_sets > wiphy->max_match_sets)
+		return -EINVAL;
+
 	if (info->attrs[NL80211_ATTR_IE])
 		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	else
@@ -3562,6 +3761,7 @@
 
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
+			+ sizeof(*request->match_sets) * n_match_sets
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
 	if (!request) {
@@ -3579,6 +3779,18 @@
 		request->ie = (void *)(request->channels + n_channels);
 	}
 
+	if (n_match_sets) {
+		if (request->ie)
+			request->match_sets = (void *)(request->ie + ie_len);
+		else if (request->ssids)
+			request->match_sets =
+				(void *)(request->ssids + n_ssids);
+		else
+			request->match_sets =
+				(void *)(request->channels + n_channels);
+	}
+	request->n_match_sets = n_match_sets;
+
 	i = 0;
 	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
 		/* user specified, bail out if channel not found */
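The pointer arithmetic above is the usual single-allocation layout: the request struct and all of its variable-length arrays live in one kzalloc() block, so one kfree() releases everything, and match_sets simply starts wherever the last optional region ends. The generic form of the trick, with hypothetical names (not the cfg80211 structs):

	struct hdr *h;
	struct item_a *a;
	struct item_b *b;

	h = kzalloc(sizeof(*h) + sizeof(*a) * n_a + sizeof(*b) * n_b,
		    GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	a = (void *)(h + 1);	/* first tail region, right after the struct */
	b = (void *)(a + n_a);	/* each region starts where the last ends;
				 * a single kfree(h) later frees all of it */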
@@ -3643,6 +3855,31 @@
 		}
 	}
 
+	i = 0;
+	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+		nla_for_each_nested(attr,
+				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+				    tmp) {
+			struct nlattr *ssid;
+
+			nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+				  nla_data(attr), nla_len(attr),
+				  nl80211_match_policy);
+			ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+			if (ssid) {
+				if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
+					err = -EINVAL;
+					goto out_free;
+				}
+				memcpy(request->match_sets[i].ssid.ssid,
+				       nla_data(ssid), nla_len(ssid));
+				request->match_sets[i].ssid.ssid_len =
+					nla_len(ssid);
+			}
+			i++;
+		}
+	}
+
 	if (info->attrs[NL80211_ATTR_IE]) {
 		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 		memcpy((void *)request->ie,
@@ -3935,22 +4172,6 @@ static bool nl80211_valid_wpa_versions(u32 wpa_versions)
 			  NL80211_WPA_VERSION_2));
 }
 
-static bool nl80211_valid_akm_suite(u32 akm)
-{
-	return akm == WLAN_AKM_SUITE_8021X ||
-	       akm == WLAN_AKM_SUITE_PSK;
-}
-
-static bool nl80211_valid_cipher_suite(u32 cipher)
-{
-	return cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	       cipher == WLAN_CIPHER_SUITE_WEP104 ||
-	       cipher == WLAN_CIPHER_SUITE_TKIP ||
-	       cipher == WLAN_CIPHER_SUITE_CCMP ||
-	       cipher == WLAN_CIPHER_SUITE_AES_CMAC;
-}
-
-
 static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4083,7 +4304,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 		memcpy(settings->ciphers_pairwise, data, len);
 
 		for (i = 0; i < settings->n_ciphers_pairwise; i++)
-			if (!nl80211_valid_cipher_suite(
+			if (!cfg80211_supported_cipher_suite(
+					&rdev->wiphy,
 					settings->ciphers_pairwise[i]))
 				return -EINVAL;
 	}
@@ -4091,7 +4313,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 	if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
 		settings->cipher_group =
 			nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
-		if (!nl80211_valid_cipher_suite(settings->cipher_group))
+		if (!cfg80211_supported_cipher_suite(&rdev->wiphy,
+						     settings->cipher_group))
 			return -EINVAL;
 	}
 
@@ -4104,7 +4327,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 
 	if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
 		void *data;
-		int len, i;
+		int len;
 
 		data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
 		len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
@@ -4117,10 +4340,6 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 			return -EINVAL;
 
 		memcpy(settings->akm_suites, data, len);
-
-		for (i = 0; i < settings->n_akm_suites; i++)
-			if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
-				return -EINVAL;
 	}
 
 	return 0;
@@ -4339,8 +4558,12 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 
 	wiphy = &rdev->wiphy;
 
-	if (info->attrs[NL80211_ATTR_MAC])
+	if (info->attrs[NL80211_ATTR_MAC]) {
 		ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+		if (!is_valid_ether_addr(ibss.bssid))
+			return -EINVAL;
+	}
 	ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
 	ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
 
@@ -4777,6 +5000,57 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
 	return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
 }
 
+static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	u8 action_code, dialog_token;
+	u16 status_code;
+	u8 *peer;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+	    !rdev->ops->tdls_mgmt)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_TDLS_ACTION] ||
+	    !info->attrs[NL80211_ATTR_STATUS_CODE] ||
+	    !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] ||
+	    !info->attrs[NL80211_ATTR_IE] ||
+	    !info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+	action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
+	status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+	dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+
+	return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+				    dialog_token, status_code,
+				    nla_data(info->attrs[NL80211_ATTR_IE]),
+				    nla_len(info->attrs[NL80211_ATTR_IE]));
+}
+
+static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	enum nl80211_tdls_operation operation;
+	u8 *peer;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+	    !rdev->ops->tdls_oper)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] ||
+	    !info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
+	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation);
+}
+
 static int nl80211_remain_on_channel(struct sk_buff *skb,
 				     struct genl_info *info)
 {
@@ -4997,6 +5271,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *msg;
 	unsigned int wait = 0;
 	bool offchan;
+	bool no_cck;
 
 	if (!info->attrs[NL80211_ATTR_FRAME] ||
 	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5033,6 +5308,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
 	offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
 
+	no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 	chan = rdev_freq_to_chan(rdev, freq, channel_type);
 	if (chan == NULL)
@@ -5053,7 +5330,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 				       channel_type_valid, wait,
 				       nla_data(info->attrs[NL80211_ATTR_FRAME]),
 				       nla_len(info->attrs[NL80211_ATTR_FRAME]),
-				       &cookie);
+				       no_cck, &cookie);
 	if (err)
 		goto free_msg;
 
@@ -6089,6 +6366,22 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_TDLS_MGMT,
+		.doit = nl80211_tdls_mgmt,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_TDLS_OPER,
+		.doit = nl80211_tdls_oper,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7078,6 +7371,52 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
 	nlmsg_free(msg);
}
 
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+				    struct net_device *netdev, int index,
+				    const u8 *bssid, bool preauth, gfp_t gfp)
+{
+	struct sk_buff *msg;
+	struct nlattr *attr;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+	attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
+	if (!attr)
+		goto nla_put_failure;
+
+	NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
+	NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
+	if (preauth)
+		NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+
+	nla_nest_end(msg, attr);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
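For readers following the new event: the function above emits a multicast message on the mlme group with the attribute layout below (a schematic derived from the NLA_PUT calls, not literal wire bytes):

	/*
	 * NL80211_CMD_PMKSA_CANDIDATE
	 *   NL80211_ATTR_WIPHY               (u32)    wiphy index
	 *   NL80211_ATTR_IFINDEX             (u32)    netdev ifindex
	 *   NL80211_ATTR_PMKSA_CANDIDATE     (nested)
	 *     NL80211_PMKSA_CANDIDATE_INDEX    (u32)
	 *     NL80211_PMKSA_CANDIDATE_BSSID    (ETH_ALEN bytes)
	 *     NL80211_PMKSA_CANDIDATE_PREAUTH  (flag, only when preauth)
	 */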
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 5d69c56400ae..f24a1fbeaf19 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -113,4 +113,8 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
 			      struct net_device *netdev, const u8 *bssid,
 			      const u8 *replay_ctr, gfp_t gfp);
 
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+				    struct net_device *netdev, int index,
+				    const u8 *bssid, bool preauth, gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 68a471ba193f..2520a1b7e7db 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -49,10 +49,8 @@
 #include "nl80211.h"
 
 #ifdef CONFIG_CFG80211_REG_DEBUG
 #define REG_DBG_PRINT(format, args...) \
-	do { \
-		printk(KERN_DEBUG pr_fmt(format), ##args); \
-	} while (0)
+	printk(KERN_DEBUG pr_fmt(format), ##args)
 #else
 #define REG_DBG_PRINT(args...)
 #endif
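The simplified macro now expands to a bare printk(KERN_DEBUG ...), which appends nothing to the format string; accordingly, the later hunks in this file add the trailing newline that several messages were missing, e.g.:

	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
		      "restoring regulatory settings\n");	/* '\n' explicit */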
@@ -753,9 +751,10 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
 		      chan->center_freq,
 		      KHZ_TO_MHZ(desired_bw_khz));
 
-	REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
+	REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
 		      freq_range->start_freq_khz,
 		      freq_range->end_freq_khz,
+		      freq_range->max_bandwidth_khz,
 		      max_antenna_gain,
 		      power_rule->max_eirp);
 }
@@ -891,7 +890,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
 		REG_DBG_PRINT("Ignoring regulatory request %s "
 			      "since the driver uses its own custom "
-			      "regulatory domain ",
+			      "regulatory domain\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -905,7 +904,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	    !is_world_regdom(last_request->alpha2)) {
 		REG_DBG_PRINT("Ignoring regulatory request %s "
 			      "since the driver requires its own regulatory "
-			      "domain to be set first",
+			      "domain to be set first\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -913,14 +912,6 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	return false;
 }
 
-static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
-{
-	struct cfg80211_registered_device *rdev;
-
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
-		wiphy_update_regulatory(&rdev->wiphy, initiator);
-}
-
 static void handle_reg_beacon(struct wiphy *wiphy,
 			      unsigned int chan_idx,
 			      struct reg_beacon *reg_beacon)
@@ -1120,11 +1111,13 @@ static void reg_process_ht_flags(struct wiphy *wiphy)
 
 }
 
-void wiphy_update_regulatory(struct wiphy *wiphy,
-			     enum nl80211_reg_initiator initiator)
+static void wiphy_update_regulatory(struct wiphy *wiphy,
+				    enum nl80211_reg_initiator initiator)
 {
 	enum ieee80211_band band;
 
+	assert_reg_lock();
+
 	if (ignore_reg_update(wiphy, initiator))
 		return;
 
@@ -1139,6 +1132,22 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
 		wiphy->reg_notifier(wiphy, last_request);
 }
 
+void regulatory_update(struct wiphy *wiphy,
+		       enum nl80211_reg_initiator setby)
+{
+	mutex_lock(&reg_mutex);
+	wiphy_update_regulatory(wiphy, setby);
+	mutex_unlock(&reg_mutex);
+}
+
+static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
+{
+	struct cfg80211_registered_device *rdev;
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+		wiphy_update_regulatory(&rdev->wiphy, initiator);
+}
+
 static void handle_channel_custom(struct wiphy *wiphy,
 				  enum ieee80211_band band,
 				  unsigned int chan_idx,
@@ -1475,7 +1484,7 @@ static void reg_process_pending_hints(void)
 	/* When last_request->processed becomes true this will be rescheduled */
 	if (last_request && !last_request->processed) {
 		REG_DBG_PRINT("Pending regulatory request, waiting "
-			      "for it to be processed...");
+			      "for it to be processed...\n");
 		goto out;
 	}
 
@@ -2188,7 +2197,7 @@ out:
 static void reg_timeout_work(struct work_struct *work)
 {
 	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
-		      "restoring regulatory settings");
+		      "restoring regulatory settings\n");
 	restore_regulatory_settings(true);
 }
 
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b67d1c3a2fb9..4a56799d868d 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,6 +16,8 @@ void regulatory_exit(void);
 
 int set_regdom(const struct ieee80211_regdomain *rd);
 
+void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+
 /**
  * regulatory_hint_found_beacon - hints a beacon was found on a channel
  * @wiphy: the wireless device where the beacon was found on
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2936cb809152..0fb142410404 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -12,6 +12,7 @@
 #include <linux/etherdevice.h>
 #include <net/arp.h>
 #include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
 #include <net/iw_handler.h>
 #include "core.h"
 #include "nl80211.h"
@@ -227,6 +228,33 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
 }
 EXPORT_SYMBOL(cfg80211_find_ie);
 
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+				  const u8 *ies, int len)
+{
+	struct ieee80211_vendor_ie *ie;
+	const u8 *pos = ies, *end = ies + len;
+	int ie_oui;
+
+	while (pos < end) {
+		pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
+				       end - pos);
+		if (!pos)
+			return NULL;
+
+		if (end - pos < sizeof(*ie))
+			return NULL;
+
+		ie = (struct ieee80211_vendor_ie *)pos;
+		ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
+		if (ie_oui == oui && ie->oui_type == oui_type)
+			return pos;
+
+		pos += 2 + ie->len;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(cfg80211_find_vendor_ie);
+
 static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
 {
 	const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
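The oui parameter packs the three OUI bytes big-endian into an int, mirroring the ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2] line above. A usage sketch locating the Microsoft WPA vendor IE (OUI 00:50:F2, OUI type 1) in a caller-supplied element buffer (the 'ies'/'ies_len' names are the caller's, not from this patch):

	const u8 *wpa_ie;

	/* 0x0050f2 == bytes 00:50:F2 packed as in the helper above */
	wpa_ie = cfg80211_find_vendor_ie(0x0050f2, 1, ies, ies_len);
	if (wpa_ie)
		/* points at the element header: id, len, then payload */ ;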
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dec0fa28372e..6e86d5acf145 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -110,17 +110,22 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 	else {
 		int i = 0, j;
 		enum ieee80211_band band;
+		struct ieee80211_supported_band *bands;
+		struct ieee80211_channel *channel;
 
 		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-			if (!wdev->wiphy->bands[band])
+			bands = wdev->wiphy->bands[band];
+			if (!bands)
 				continue;
-			for (j = 0; j < wdev->wiphy->bands[band]->n_channels;
-			     i++, j++)
-				request->channels[i] =
-					&wdev->wiphy->bands[band]->channels[j];
-			request->rates[band] =
-				(1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
+			for (j = 0; j < bands->n_channels; j++) {
+				channel = &bands->channels[j];
+				if (channel->flags & IEEE80211_CHAN_DISABLED)
+					continue;
+				request->channels[i++] = channel;
+			}
+			request->rates[band] = (1 << bands->n_bitrates) - 1;
 		}
+		n_channels = i;
 	}
 	request->n_channels = n_channels;
 	request->ssids = (void *)&request->channels[n_channels];
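A side note on the rates line kept by this rewrite: (1 << n_bitrates) - 1 builds a mask with one set bit per bitrate of the band, meaning "any supported rate may be used":

	/* e.g. n_bitrates == 12: (1 << 12) - 1 == 0x0fff, bits 0..11 set */
	request->rates[band] = (1 << bands->n_bitrates) - 1;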
diff --git a/net/wireless/util.c b/net/wireless/util.c
index be75a3a0424e..2f178f73943f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -6,6 +6,7 @@
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
+#include <linux/crc32.h>
 #include <net/cfg80211.h>
 #include <net/ip.h>
 #include "core.h"
@@ -150,12 +151,19 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
 		set_mandatory_flags_band(wiphy->bands[band], band);
 }
 
+bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
+{
+	int i;
+	for (i = 0; i < wiphy->n_cipher_suites; i++)
+		if (cipher == wiphy->cipher_suites[i])
+			return true;
+	return false;
+}
+
 int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 				   struct key_params *params, int key_idx,
 				   bool pairwise, const u8 *mac_addr)
 {
-	int i;
-
 	if (key_idx > 5)
 		return -EINVAL;
 
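This helper replaces the hardcoded nl80211_valid_cipher_suite() list deleted from nl80211.c earlier in the patch: a cipher is now valid exactly when the wiphy advertised it at registration time. Typical call, as in the crypto-settings hunks above (sketch):

	/* reject anything the hardware did not register */
	if (!cfg80211_supported_cipher_suite(&rdev->wiphy,
					     WLAN_CIPHER_SUITE_CCMP))
		return -EINVAL;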
@@ -225,10 +233,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 		}
 	}
 
-	for (i = 0; i < rdev->wiphy.n_cipher_suites; i++)
-		if (params->cipher == rdev->wiphy.cipher_suites[i])
-			break;
-	if (i == rdev->wiphy.n_cipher_suites)
+	if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher))
 		return -EINVAL;
 
 	return 0;
@@ -391,8 +396,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 		}
 		break;
 	case cpu_to_le16(0):
-		if (iftype != NL80211_IFTYPE_ADHOC)
-			return -1;
+		if (iftype != NL80211_IFTYPE_ADHOC &&
+		    iftype != NL80211_IFTYPE_STATION)
+			return -1;
 		break;
 	}
 
@@ -512,10 +518,9 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 		if (head_need)
 			skb_orphan(skb);
 
-		if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
-			pr_err("failed to reallocate Tx buffer\n");
+		if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
 			return -ENOMEM;
-		}
+
 		skb->truesize += head_need;
 	}
 
@@ -1044,3 +1049,170 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
 
 	return 0;
 }
+
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+			       struct ieee802_11_elems *elems,
+			       u64 filter, u32 crc)
+{
+	size_t left = len;
+	u8 *pos = start;
+	bool calc_crc = filter != 0;
+
+	memset(elems, 0, sizeof(*elems));
+	elems->ie_start = start;
+	elems->total_len = len;
+
+	while (left >= 2) {
+		u8 id, elen;
+
+		id = *pos++;
+		elen = *pos++;
+		left -= 2;
+
+		if (elen > left)
+			break;
+
+		if (calc_crc && id < 64 && (filter & (1ULL << id)))
+			crc = crc32_be(crc, pos - 2, elen + 2);
+
+		switch (id) {
+		case WLAN_EID_SSID:
+			elems->ssid = pos;
+			elems->ssid_len = elen;
+			break;
+		case WLAN_EID_SUPP_RATES:
+			elems->supp_rates = pos;
+			elems->supp_rates_len = elen;
+			break;
+		case WLAN_EID_FH_PARAMS:
+			elems->fh_params = pos;
+			elems->fh_params_len = elen;
+			break;
+		case WLAN_EID_DS_PARAMS:
+			elems->ds_params = pos;
+			elems->ds_params_len = elen;
+			break;
+		case WLAN_EID_CF_PARAMS:
+			elems->cf_params = pos;
+			elems->cf_params_len = elen;
+			break;
+		case WLAN_EID_TIM:
+			if (elen >= sizeof(struct ieee80211_tim_ie)) {
+				elems->tim = (void *)pos;
+				elems->tim_len = elen;
+			}
+			break;
+		case WLAN_EID_IBSS_PARAMS:
+			elems->ibss_params = pos;
+			elems->ibss_params_len = elen;
+			break;
+		case WLAN_EID_CHALLENGE:
+			elems->challenge = pos;
+			elems->challenge_len = elen;
+			break;
+		case WLAN_EID_VENDOR_SPECIFIC:
+			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+			    pos[2] == 0xf2) {
+				/* Microsoft OUI (00:50:F2) */
+
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+
+				if (pos[3] == 1) {
+					/* OUI Type 1 - WPA IE */
+					elems->wpa = pos;
+					elems->wpa_len = elen;
+				} else if (elen >= 5 && pos[3] == 2) {
+					/* OUI Type 2 - WMM IE */
+					if (pos[4] == 0) {
+						elems->wmm_info = pos;
+						elems->wmm_info_len = elen;
+					} else if (pos[4] == 1) {
+						elems->wmm_param = pos;
+						elems->wmm_param_len = elen;
+					}
+				}
+			}
+			break;
+		case WLAN_EID_RSN:
+			elems->rsn = pos;
+			elems->rsn_len = elen;
+			break;
+		case WLAN_EID_ERP_INFO:
+			elems->erp_info = pos;
+			elems->erp_info_len = elen;
+			break;
+		case WLAN_EID_EXT_SUPP_RATES:
+			elems->ext_supp_rates = pos;
+			elems->ext_supp_rates_len = elen;
+			break;
+		case WLAN_EID_HT_CAPABILITY:
+			if (elen >= sizeof(struct ieee80211_ht_cap))
+				elems->ht_cap_elem = (void *)pos;
+			break;
+		case WLAN_EID_HT_INFORMATION:
+			if (elen >= sizeof(struct ieee80211_ht_info))
+				elems->ht_info_elem = (void *)pos;
+			break;
+		case WLAN_EID_MESH_ID:
+			elems->mesh_id = pos;
+			elems->mesh_id_len = elen;
+			break;
+		case WLAN_EID_MESH_CONFIG:
+			if (elen >= sizeof(struct ieee80211_meshconf_ie))
+				elems->mesh_config = (void *)pos;
+			break;
+		case WLAN_EID_PEER_MGMT:
+			elems->peering = pos;
+			elems->peering_len = elen;
+			break;
+		case WLAN_EID_PREQ:
+			elems->preq = pos;
+			elems->preq_len = elen;
+			break;
+		case WLAN_EID_PREP:
+			elems->prep = pos;
+			elems->prep_len = elen;
+			break;
+		case WLAN_EID_PERR:
+			elems->perr = pos;
+			elems->perr_len = elen;
+			break;
+		case WLAN_EID_RANN:
+			if (elen >= sizeof(struct ieee80211_rann_ie))
+				elems->rann = (void *)pos;
+			break;
+		case WLAN_EID_CHANNEL_SWITCH:
+			elems->ch_switch_elem = pos;
+			elems->ch_switch_elem_len = elen;
+			break;
+		case WLAN_EID_QUIET:
+			if (!elems->quiet_elem) {
+				elems->quiet_elem = pos;
+				elems->quiet_elem_len = elen;
+			}
+			elems->num_of_quiet_elem++;
+			break;
+		case WLAN_EID_COUNTRY:
+			elems->country_elem = pos;
+			elems->country_elem_len = elen;
+			break;
+		case WLAN_EID_PWR_CONSTRAINT:
+			elems->pwr_constr_elem = pos;
+			elems->pwr_constr_elem_len = elen;
+			break;
+		case WLAN_EID_TIMEOUT_INTERVAL:
+			elems->timeout_int = pos;
+			elems->timeout_int_len = elen;
+			break;
+		default:
+			break;
+		}
+
+		left -= elen;
+		pos += elen;
+	}
+
+	return crc;
+}
+EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
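The filter argument limits which elements feed the returned CRC: only IDs below 64 are filterable via the (1ULL << id) test above, and Microsoft-OUI vendor IEs are always hashed once a filter is active. A usage sketch follows; the initial seed value and buffer names are assumptions for illustration, not taken from this patch:

	struct ieee802_11_elems elems;
	u64 filter = (1ULL << WLAN_EID_SSID) | (1ULL << WLAN_EID_TIM);
	u32 crc;

	crc = ieee802_11_parse_elems_crc(ies, ies_len, &elems, filter, 0);
	/* a later frame producing a different crc means some filtered
	 * element (or a hashed vendor IE) changed */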
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0bf169bb770e..62f121d1d9cb 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -15,6 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <net/iw_handler.h> 16#include <net/iw_handler.h>
17#include <net/cfg80211.h> 17#include <net/cfg80211.h>
18#include <net/cfg80211-wext.h>
18#include "wext-compat.h" 19#include "wext-compat.h"
19#include "core.h" 20#include "core.h"
20 21
@@ -363,9 +364,9 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
363} 364}
364EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag); 365EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
365 366
366int cfg80211_wext_siwretry(struct net_device *dev, 367static int cfg80211_wext_siwretry(struct net_device *dev,
367 struct iw_request_info *info, 368 struct iw_request_info *info,
368 struct iw_param *retry, char *extra) 369 struct iw_param *retry, char *extra)
369{ 370{
370 struct wireless_dev *wdev = dev->ieee80211_ptr; 371 struct wireless_dev *wdev = dev->ieee80211_ptr;
371 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 372 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -402,7 +403,6 @@ int cfg80211_wext_siwretry(struct net_device *dev,
402 403
403 return err; 404 return err;
404} 405}
405EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);
406 406
407int cfg80211_wext_giwretry(struct net_device *dev, 407int cfg80211_wext_giwretry(struct net_device *dev,
408 struct iw_request_info *info, 408 struct iw_request_info *info,
@@ -593,9 +593,9 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
593 return err; 593 return err;
594} 594}
595 595
596int cfg80211_wext_siwencode(struct net_device *dev, 596static int cfg80211_wext_siwencode(struct net_device *dev,
597 struct iw_request_info *info, 597 struct iw_request_info *info,
598 struct iw_point *erq, char *keybuf) 598 struct iw_point *erq, char *keybuf)
599{ 599{
600 struct wireless_dev *wdev = dev->ieee80211_ptr; 600 struct wireless_dev *wdev = dev->ieee80211_ptr;
601 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 601 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -652,11 +652,10 @@ int cfg80211_wext_siwencode(struct net_device *dev,
652 wdev->wext.default_key == -1, 652 wdev->wext.default_key == -1,
653 idx, &params); 653 idx, &params);
654} 654}
655EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);
656 655
657int cfg80211_wext_siwencodeext(struct net_device *dev, 656static int cfg80211_wext_siwencodeext(struct net_device *dev,
658 struct iw_request_info *info, 657 struct iw_request_info *info,
659 struct iw_point *erq, char *extra) 658 struct iw_point *erq, char *extra)
660{ 659{
661 struct wireless_dev *wdev = dev->ieee80211_ptr; 660 struct wireless_dev *wdev = dev->ieee80211_ptr;
662 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 661 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -744,11 +743,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
744 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, 743 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
745 idx, &params); 744 idx, &params);
746} 745}
747EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
748 746
749int cfg80211_wext_giwencode(struct net_device *dev, 747static int cfg80211_wext_giwencode(struct net_device *dev,
750 struct iw_request_info *info, 748 struct iw_request_info *info,
751 struct iw_point *erq, char *keybuf) 749 struct iw_point *erq, char *keybuf)
752{ 750{
753 struct wireless_dev *wdev = dev->ieee80211_ptr; 751 struct wireless_dev *wdev = dev->ieee80211_ptr;
754 int idx; 752 int idx;
@@ -782,11 +780,10 @@ int cfg80211_wext_giwencode(struct net_device *dev,
782 780
783 return 0; 781 return 0;
784} 782}
785EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
786 783
787int cfg80211_wext_siwfreq(struct net_device *dev, 784static int cfg80211_wext_siwfreq(struct net_device *dev,
788 struct iw_request_info *info, 785 struct iw_request_info *info,
789 struct iw_freq *wextfreq, char *extra) 786 struct iw_freq *wextfreq, char *extra)
790{ 787{
791 struct wireless_dev *wdev = dev->ieee80211_ptr; 788 struct wireless_dev *wdev = dev->ieee80211_ptr;
792 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 789 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -815,11 +812,10 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
815 return -EOPNOTSUPP; 812 return -EOPNOTSUPP;
816 } 813 }
817} 814}
818EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
819 815
820int cfg80211_wext_giwfreq(struct net_device *dev, 816static int cfg80211_wext_giwfreq(struct net_device *dev,
821 struct iw_request_info *info, 817 struct iw_request_info *info,
822 struct iw_freq *freq, char *extra) 818 struct iw_freq *freq, char *extra)
823{ 819{
824 struct wireless_dev *wdev = dev->ieee80211_ptr; 820 struct wireless_dev *wdev = dev->ieee80211_ptr;
825 821
@@ -836,11 +832,10 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
836 return 0; 832 return 0;
837 } 833 }
838} 834}
839EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq);
840 835
841int cfg80211_wext_siwtxpower(struct net_device *dev, 836static int cfg80211_wext_siwtxpower(struct net_device *dev,
842 struct iw_request_info *info, 837 struct iw_request_info *info,
843 union iwreq_data *data, char *extra) 838 union iwreq_data *data, char *extra)
844{ 839{
845 struct wireless_dev *wdev = dev->ieee80211_ptr; 840 struct wireless_dev *wdev = dev->ieee80211_ptr;
846 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 841 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -889,11 +884,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
889 884
890 return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm)); 885 return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
891} 886}
892EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
893 887
894int cfg80211_wext_giwtxpower(struct net_device *dev, 888static int cfg80211_wext_giwtxpower(struct net_device *dev,
895 struct iw_request_info *info, 889 struct iw_request_info *info,
896 union iwreq_data *data, char *extra) 890 union iwreq_data *data, char *extra)
897{ 891{
898 struct wireless_dev *wdev = dev->ieee80211_ptr; 892 struct wireless_dev *wdev = dev->ieee80211_ptr;
899 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 893 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -919,7 +913,6 @@ int cfg80211_wext_giwtxpower(struct net_device *dev,
919 913
920 return 0; 914 return 0;
921} 915}
922EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);
923 916
924static int cfg80211_set_auth_alg(struct wireless_dev *wdev, 917static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
925 s32 auth_alg) 918 s32 auth_alg)
@@ -1070,9 +1063,9 @@ static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
1070 return 0; 1063 return 0;
1071} 1064}
1072 1065
1073int cfg80211_wext_siwauth(struct net_device *dev, 1066static int cfg80211_wext_siwauth(struct net_device *dev,
1074 struct iw_request_info *info, 1067 struct iw_request_info *info,
1075 struct iw_param *data, char *extra) 1068 struct iw_param *data, char *extra)
1076{ 1069{
1077 struct wireless_dev *wdev = dev->ieee80211_ptr; 1070 struct wireless_dev *wdev = dev->ieee80211_ptr;
1078 1071
@@ -1102,21 +1095,19 @@ int cfg80211_wext_siwauth(struct net_device *dev,
1102 return -EOPNOTSUPP; 1095 return -EOPNOTSUPP;
1103 } 1096 }
1104} 1097}
1105EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth);
1106 1098
1107int cfg80211_wext_giwauth(struct net_device *dev, 1099static int cfg80211_wext_giwauth(struct net_device *dev,
1108 struct iw_request_info *info, 1100 struct iw_request_info *info,
1109 struct iw_param *data, char *extra) 1101 struct iw_param *data, char *extra)
1110{ 1102{
1111 /* XXX: what do we need? */ 1103 /* XXX: what do we need? */
1112 1104
1113 return -EOPNOTSUPP; 1105 return -EOPNOTSUPP;
1114} 1106}
1115EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth);
1116 1107
1117int cfg80211_wext_siwpower(struct net_device *dev, 1108static int cfg80211_wext_siwpower(struct net_device *dev,
1118 struct iw_request_info *info, 1109 struct iw_request_info *info,
1119 struct iw_param *wrq, char *extra) 1110 struct iw_param *wrq, char *extra)
1120{ 1111{
1121 struct wireless_dev *wdev = dev->ieee80211_ptr; 1112 struct wireless_dev *wdev = dev->ieee80211_ptr;
1122 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1113 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1160,11 +1151,10 @@ int cfg80211_wext_siwpower(struct net_device *dev,
1160 return 0; 1151 return 0;
1161 1152
1162} 1153}
1163EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower);
1164 1154
1165int cfg80211_wext_giwpower(struct net_device *dev, 1155static int cfg80211_wext_giwpower(struct net_device *dev,
1166 struct iw_request_info *info, 1156 struct iw_request_info *info,
1167 struct iw_param *wrq, char *extra) 1157 struct iw_param *wrq, char *extra)
1168{ 1158{
1169 struct wireless_dev *wdev = dev->ieee80211_ptr; 1159 struct wireless_dev *wdev = dev->ieee80211_ptr;
1170 1160
@@ -1172,7 +1162,6 @@ int cfg80211_wext_giwpower(struct net_device *dev,
1172 1162
1173 return 0; 1163 return 0;
1174} 1164}
1175EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower);
1176 1165
1177static int cfg80211_wds_wext_siwap(struct net_device *dev, 1166static int cfg80211_wds_wext_siwap(struct net_device *dev,
1178 struct iw_request_info *info, 1167 struct iw_request_info *info,
@@ -1218,9 +1207,9 @@ static int cfg80211_wds_wext_giwap(struct net_device *dev,
1218 return 0; 1207 return 0;
1219} 1208}
1220 1209
1221int cfg80211_wext_siwrate(struct net_device *dev, 1210static int cfg80211_wext_siwrate(struct net_device *dev,
1222 struct iw_request_info *info, 1211 struct iw_request_info *info,
1223 struct iw_param *rate, char *extra) 1212 struct iw_param *rate, char *extra)
1224{ 1213{
1225 struct wireless_dev *wdev = dev->ieee80211_ptr; 1214 struct wireless_dev *wdev = dev->ieee80211_ptr;
1226 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1215 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1268,11 +1257,10 @@ int cfg80211_wext_siwrate(struct net_device *dev,
1268 1257
1269 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1258 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
1270} 1259}
1271EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
1272 1260
1273int cfg80211_wext_giwrate(struct net_device *dev, 1261static int cfg80211_wext_giwrate(struct net_device *dev,
1274 struct iw_request_info *info, 1262 struct iw_request_info *info,
1275 struct iw_param *rate, char *extra) 1263 struct iw_param *rate, char *extra)
1276{ 1264{
1277 struct wireless_dev *wdev = dev->ieee80211_ptr; 1265 struct wireless_dev *wdev = dev->ieee80211_ptr;
1278 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1266 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1308,10 +1296,9 @@ int cfg80211_wext_giwrate(struct net_device *dev,
1308 1296
1309 return 0; 1297 return 0;
1310} 1298}
1311EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate);
1312 1299
1313/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */ 1300/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
1314struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) 1301static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1315{ 1302{
1316 struct wireless_dev *wdev = dev->ieee80211_ptr; 1303 struct wireless_dev *wdev = dev->ieee80211_ptr;
1317 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1304 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1376,11 +1363,10 @@ struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1376 1363
1377 return &wstats; 1364 return &wstats;
1378} 1365}
1379EXPORT_SYMBOL_GPL(cfg80211_wireless_stats);
1380 1366
1381int cfg80211_wext_siwap(struct net_device *dev, 1367static int cfg80211_wext_siwap(struct net_device *dev,
1382 struct iw_request_info *info, 1368 struct iw_request_info *info,
1383 struct sockaddr *ap_addr, char *extra) 1369 struct sockaddr *ap_addr, char *extra)
1384{ 1370{
1385 struct wireless_dev *wdev = dev->ieee80211_ptr; 1371 struct wireless_dev *wdev = dev->ieee80211_ptr;
1386 1372
@@ -1395,11 +1381,10 @@ int cfg80211_wext_siwap(struct net_device *dev,
1395 return -EOPNOTSUPP; 1381 return -EOPNOTSUPP;
1396 } 1382 }
1397} 1383}
1398EXPORT_SYMBOL_GPL(cfg80211_wext_siwap);
1399 1384
1400int cfg80211_wext_giwap(struct net_device *dev, 1385static int cfg80211_wext_giwap(struct net_device *dev,
1401 struct iw_request_info *info, 1386 struct iw_request_info *info,
1402 struct sockaddr *ap_addr, char *extra) 1387 struct sockaddr *ap_addr, char *extra)
1403{ 1388{
1404 struct wireless_dev *wdev = dev->ieee80211_ptr; 1389 struct wireless_dev *wdev = dev->ieee80211_ptr;
1405 1390
@@ -1414,11 +1399,10 @@ int cfg80211_wext_giwap(struct net_device *dev,
1414 return -EOPNOTSUPP; 1399 return -EOPNOTSUPP;
1415 } 1400 }
1416} 1401}
1417EXPORT_SYMBOL_GPL(cfg80211_wext_giwap);
1418 1402
1419int cfg80211_wext_siwessid(struct net_device *dev, 1403static int cfg80211_wext_siwessid(struct net_device *dev,
1420 struct iw_request_info *info, 1404 struct iw_request_info *info,
1421 struct iw_point *data, char *ssid) 1405 struct iw_point *data, char *ssid)
1422{ 1406{
1423 struct wireless_dev *wdev = dev->ieee80211_ptr; 1407 struct wireless_dev *wdev = dev->ieee80211_ptr;
1424 1408
@@ -1431,11 +1415,10 @@ int cfg80211_wext_siwessid(struct net_device *dev,
1431 return -EOPNOTSUPP; 1415 return -EOPNOTSUPP;
1432 } 1416 }
1433} 1417}
1434EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid);
1435 1418
1436int cfg80211_wext_giwessid(struct net_device *dev, 1419static int cfg80211_wext_giwessid(struct net_device *dev,
1437 struct iw_request_info *info, 1420 struct iw_request_info *info,
1438 struct iw_point *data, char *ssid) 1421 struct iw_point *data, char *ssid)
1439{ 1422{
1440 struct wireless_dev *wdev = dev->ieee80211_ptr; 1423 struct wireless_dev *wdev = dev->ieee80211_ptr;
1441 1424
@@ -1451,11 +1434,10 @@ int cfg80211_wext_giwessid(struct net_device *dev,
1451 return -EOPNOTSUPP; 1434 return -EOPNOTSUPP;
1452 } 1435 }
1453} 1436}
1454EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
1455 1437
1456int cfg80211_wext_siwpmksa(struct net_device *dev, 1438static int cfg80211_wext_siwpmksa(struct net_device *dev,
1457 struct iw_request_info *info, 1439 struct iw_request_info *info,
1458 struct iw_point *data, char *extra) 1440 struct iw_point *data, char *extra)
1459{ 1441{
1460 struct wireless_dev *wdev = dev->ieee80211_ptr; 1442 struct wireless_dev *wdev = dev->ieee80211_ptr;
1461 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1443 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1493,7 +1475,6 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
1493 return -EOPNOTSUPP; 1475 return -EOPNOTSUPP;
1494 } 1476 }
1495} 1477}
1496EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa);
1497 1478
1498static const iw_handler cfg80211_handlers[] = { 1479static const iw_handler cfg80211_handlers[] = {
1499 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, 1480 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
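
[Editor's note] The wext-compat.c hunks above demote the cfg80211 wext handlers from exported symbols to file-local functions: each EXPORT_SYMBOL_GPL() line is dropped and the definitions gain static, because the only remaining way in is the cfg80211_handlers[] dispatch table at the bottom of the file. A minimal user-space sketch of that pattern; the names (demo_handler, my_siwrate, my_handlers) are hypothetical, not the kernel's:

        #include <stdio.h>

        /* sketch of an ioctl-style handler signature; the real iw_handler
         * takes struct net_device, struct iw_request_info, etc. */
        typedef int (*demo_handler)(void *dev, char *extra);

        /* static: nothing outside this file links against it by name,
         * so no export macro is needed */
        static int my_siwrate(void *dev, char *extra)
        {
                (void)dev;
                (void)extra;
                return 0;
        }

        /* callers reach the handler only through the dispatch table,
         * mirroring cfg80211_handlers[] above */
        static const demo_handler my_handlers[] = {
                my_siwrate,
        };

        int main(void)
        {
                return my_handlers[0](NULL, NULL);
        }

Keeping the table as the sole entry point shrinks the module's exported surface without changing behavior for wext ioctl users.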
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 20b3daef6964..5d766b0118e8 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -42,6 +42,14 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
42 struct iw_request_info *info, 42 struct iw_request_info *info,
43 struct iw_point *data, char *ssid); 43 struct iw_point *data, char *ssid);
44 44
45int cfg80211_wext_siwmlme(struct net_device *dev,
46 struct iw_request_info *info,
47 struct iw_point *data, char *extra);
48int cfg80211_wext_siwgenie(struct net_device *dev,
49 struct iw_request_info *info,
50 struct iw_point *data, char *extra);
51
52
45int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq); 53int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
46 54
47 55
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 6fffe62d7c25..0d4b8c3033ff 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -9,6 +9,7 @@
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <net/cfg80211.h> 11#include <net/cfg80211.h>
12#include <net/cfg80211-wext.h>
12#include "wext-compat.h" 13#include "wext-compat.h"
13#include "nl80211.h" 14#include "nl80211.h"
14 15
@@ -365,7 +366,6 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
365 wdev_unlock(wdev); 366 wdev_unlock(wdev);
366 return err; 367 return err;
367} 368}
368EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie);
369 369
370int cfg80211_wext_siwmlme(struct net_device *dev, 370int cfg80211_wext_siwmlme(struct net_device *dev,
371 struct iw_request_info *info, 371 struct iw_request_info *info,
@@ -402,4 +402,3 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
402 402
403 return err; 403 return err;
404} 404}
405EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d30615419b4d..5f03e4ea65bf 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
91 int needed; 91 int needed;
92 int rc; 92 int rc;
93 93
94 if (skb->len < 1) { 94 if (!pskb_may_pull(skb, 1)) {
95 /* packet has no address block */ 95 /* packet has no address block */
96 rc = 0; 96 rc = 0;
97 goto empty; 97 goto empty;
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
100 len = *skb->data; 100 len = *skb->data;
101 needed = 1 + (len >> 4) + (len & 0x0f); 101 needed = 1 + (len >> 4) + (len & 0x0f);
102 102
103 if (skb->len < needed) { 103 if (!pskb_may_pull(skb, needed)) {
104 /* packet is too short to hold the addresses it claims 104 /* packet is too short to hold the addresses it claims
105 to hold */ 105 to hold */
106 rc = -1; 106 rc = -1;
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
295 * Found a listening socket, now check the incoming 295 * Found a listening socket, now check the incoming
296 * call user data vs this socket's call user data 296 * call user data vs this socket's call user data
297 */ 297 */
298 if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { 298 if (x25_sk(s)->cudmatchlength > 0 &&
299 skb->len >= x25_sk(s)->cudmatchlength) {
299 if((memcmp(x25_sk(s)->calluserdata.cuddata, 300 if((memcmp(x25_sk(s)->calluserdata.cuddata,
300 skb->data, 301 skb->data,
301 x25_sk(s)->cudmatchlength)) == 0) { 302 x25_sk(s)->cudmatchlength)) == 0) {
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
951 * 952 *
952 * Facilities length is mandatory in call request packets 953 * Facilities length is mandatory in call request packets
953 */ 954 */
954 if (skb->len < 1) 955 if (!pskb_may_pull(skb, 1))
955 goto out_clear_request; 956 goto out_clear_request;
956 len = skb->data[0] + 1; 957 len = skb->data[0] + 1;
957 if (skb->len < len) 958 if (!pskb_may_pull(skb, len))
958 goto out_clear_request; 959 goto out_clear_request;
959 skb_pull(skb,len); 960 skb_pull(skb,len);
960 961
961 /* 962 /*
963 * Ensure that the amount of call user data is valid.
964 */
965 if (skb->len > X25_MAX_CUD_LEN)
966 goto out_clear_request;
967
968 /*
969 * Get all the call user data so it can be used in
970 * x25_find_listener and skb_copy_from_linear_data up ahead.
971 */
972 if (!pskb_may_pull(skb, skb->len))
973 goto out_clear_request;
974
975 /*
962 * Find a listener for the particular address/cud pair. 976 * Find a listener for the particular address/cud pair.
963 */ 977 */
964 sk = x25_find_listener(&source_addr,skb); 978 sk = x25_find_listener(&source_addr,skb);
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1166 * byte of the user data is the logical value of the Q Bit. 1180 * byte of the user data is the logical value of the Q Bit.
1167 */ 1181 */
1168 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { 1182 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
1183 if (!pskb_may_pull(skb, 1))
1184 goto out_kfree_skb;
1185
1169 qbit = skb->data[0]; 1186 qbit = skb->data[0];
1170 skb_pull(skb, 1); 1187 skb_pull(skb, 1);
1171 } 1188 }
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1244 struct x25_sock *x25 = x25_sk(sk); 1261 struct x25_sock *x25 = x25_sk(sk);
1245 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; 1262 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
1246 size_t copied; 1263 size_t copied;
1247 int qbit; 1264 int qbit, header_len = x25->neighbour->extended ?
1265 X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
1266
1248 struct sk_buff *skb; 1267 struct sk_buff *skb;
1249 unsigned char *asmptr; 1268 unsigned char *asmptr;
1250 int rc = -ENOTCONN; 1269 int rc = -ENOTCONN;
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1265 1284
1266 skb = skb_dequeue(&x25->interrupt_in_queue); 1285 skb = skb_dequeue(&x25->interrupt_in_queue);
1267 1286
1287 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
1288 goto out_free_dgram;
1289
1268 skb_pull(skb, X25_STD_MIN_LEN); 1290 skb_pull(skb, X25_STD_MIN_LEN);
1269 1291
1270 /* 1292 /*
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1285 if (!skb) 1307 if (!skb)
1286 goto out; 1308 goto out;
1287 1309
1310 if (!pskb_may_pull(skb, header_len))
1311 goto out_free_dgram;
1312
1288 qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; 1313 qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
1289 1314
1290 skb_pull(skb, x25->neighbour->extended ? 1315 skb_pull(skb, header_len);
1291 X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
1292 1316
1293 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { 1317 if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
1294 asmptr = skb_push(skb, 1); 1318 asmptr = skb_push(skb, 1);
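
[Editor's note] A pattern repeated throughout the af_x25.c hunks: plain `skb->len < n` comparisons become pskb_may_pull() calls. The helper both bounds-checks n against the full packet length and guarantees the first n bytes sit in the skb's linear area, so the skb->data indexing that follows is safe even when the payload arrived in page fragments. A kernel-context sketch of the idiom (demo_parse_hdr is a hypothetical parser, not from this patch):

        #include <linux/errno.h>
        #include <linux/skbuff.h>

        /* hypothetical: reads a 2-byte big-endian field from the skb head */
        static int demo_parse_hdr(struct sk_buff *skb)
        {
                /* fails if the packet is shorter than 2 bytes; on success
                 * the bytes are linearized, so the indexing below cannot
                 * run past the linear head */
                if (!pskb_may_pull(skb, 2))
                        return -EINVAL;

                return (skb->data[0] << 8) | skb->data[1];
        }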
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index e547ca1578c3..fa2b41888bd9 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
32 unsigned short frametype; 32 unsigned short frametype;
33 unsigned int lci; 33 unsigned int lci;
34 34
35 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
36 return 0;
37
35 frametype = skb->data[2]; 38 frametype = skb->data[2];
36 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 39 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
37 40
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
115 goto drop; 118 goto drop;
116 } 119 }
117 120
121 if (!pskb_may_pull(skb, 1))
122 return 0;
123
118 switch (skb->data[0]) { 124 switch (skb->data[0]) {
119 125
120 case X25_IFACE_DATA: 126 case X25_IFACE_DATA:
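
[Editor's note] For reference, the lci line that the new x25_receive_data() check guards packs a 12-bit logical channel identifier from the low nibble of header byte 0 and all of byte 1. A standalone check of the arithmetic with sample bytes:

        #include <assert.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned char hdr[2] = { 0x1A, 0xBC }; /* sample X.25 header bytes */
                unsigned int lci = ((hdr[0] << 8) & 0xF00) + ((hdr[1] << 0) & 0x0FF);

                assert(lci == 0xABC);   /* low nibble of byte 0, all of byte 1 */
                printf("lci = %#x\n", lci);
                return 0;
        }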
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index f77e4e75f914..36384a1fa9f2 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -44,7 +44,7 @@
44int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, 44int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
45 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 45 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
46{ 46{
47 unsigned char *p = skb->data; 47 unsigned char *p;
48 unsigned int len; 48 unsigned int len;
49 49
50 *vc_fac_mask = 0; 50 *vc_fac_mask = 0;
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
60 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 60 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
61 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 61 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
62 62
63 if (skb->len < 1) 63 if (!pskb_may_pull(skb, 1))
64 return 0; 64 return 0;
65 65
66 len = *p++; 66 len = skb->data[0];
67 67
68 if (len >= skb->len) 68 if (!pskb_may_pull(skb, 1 + len))
69 return -1; 69 return -1;
70 70
71 p = skb->data + 1;
72
71 while (len > 0) { 73 while (len > 0) {
72 switch (*p & X25_FAC_CLASS_MASK) { 74 switch (*p & X25_FAC_CLASS_MASK) {
73 case X25_FAC_CLASS_A: 75 case X25_FAC_CLASS_A:
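
[Editor's note] Note why x25_parse_facilities() now derives p only after the second pskb_may_pull(): the pull may reallocate the skb's linear head, so a pointer cached from skb->data before the call can dangle. The fix reads the length byte, validates 1 + len, and only then takes the pointer. A kernel-context sketch of that read-validate-rederive order (demo_read_lv is hypothetical):

        #include <linux/skbuff.h>

        /* hypothetical: reads a length-prefixed value from the skb head */
        static int demo_read_lv(struct sk_buff *skb)
        {
                unsigned char *p;
                unsigned int len;

                if (!pskb_may_pull(skb, 1))
                        return 0;
                len = skb->data[0];

                /* may reallocate the linear head: any pointer taken from
                 * skb->data before this call is stale afterwards */
                if (!pskb_may_pull(skb, 1 + len))
                        return -1;

                p = skb->data + 1;      /* re-derive the pointer, as above */
                return len ? p[0] : 0;
        }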
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 0b073b51b183..a49cd4ec551a 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
107 /* 107 /*
108 * Parse the data in the frame. 108 * Parse the data in the frame.
109 */ 109 */
110 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
111 goto out_clear;
110 skb_pull(skb, X25_STD_MIN_LEN); 112 skb_pull(skb, X25_STD_MIN_LEN);
111 113
112 len = x25_parse_address_block(skb, &source_addr, 114 len = x25_parse_address_block(skb, &source_addr,
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
127 * Copy any Call User Data. 129 * Copy any Call User Data.
128 */ 130 */
129 if (skb->len > 0) { 131 if (skb->len > 0) {
130 skb_copy_from_linear_data(skb, 132 if (skb->len > X25_MAX_CUD_LEN)
131 x25->calluserdata.cuddata, 133 goto out_clear;
132 skb->len); 134
135 skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
136 skb->len);
133 x25->calluserdata.cudlength = skb->len; 137 x25->calluserdata.cudlength = skb->len;
134 } 138 }
135 if (!sock_flag(sk, SOCK_DEAD)) 139 if (!sock_flag(sk, SOCK_DEAD))
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
137 break; 141 break;
138 } 142 }
139 case X25_CLEAR_REQUEST: 143 case X25_CLEAR_REQUEST:
144 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
145 goto out_clear;
146
140 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 147 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
141 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); 148 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
142 break; 149 break;
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
164 switch (frametype) { 171 switch (frametype) {
165 172
166 case X25_CLEAR_REQUEST: 173 case X25_CLEAR_REQUEST:
174 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
175 goto out_clear;
176
167 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 177 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
168 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 178 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
169 break; 179 break;
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
177 } 187 }
178 188
179 return 0; 189 return 0;
190
191out_clear:
192 x25_write_internal(sk, X25_CLEAR_REQUEST);
193 x25_start_t23timer(sk);
194 return 0;
180} 195}
181 196
182/* 197/*
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
206 break; 221 break;
207 222
208 case X25_CLEAR_REQUEST: 223 case X25_CLEAR_REQUEST:
224 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
225 goto out_clear;
226
209 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 227 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
210 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 228 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
211 break; 229 break;
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
304 } 322 }
305 323
306 return queued; 324 return queued;
325
326out_clear:
327 x25_write_internal(sk, X25_CLEAR_REQUEST);
328 x25->state = X25_STATE_2;
329 x25_start_t23timer(sk);
330 return 0;
307} 331}
308 332
309/* 333/*
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
313 */ 337 */
314static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) 338static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
315{ 339{
340 struct x25_sock *x25 = x25_sk(sk);
341
316 switch (frametype) { 342 switch (frametype) {
317 343
318 case X25_RESET_REQUEST: 344 case X25_RESET_REQUEST:
319 x25_write_internal(sk, X25_RESET_CONFIRMATION); 345 x25_write_internal(sk, X25_RESET_CONFIRMATION);
320 case X25_RESET_CONFIRMATION: { 346 case X25_RESET_CONFIRMATION: {
321 struct x25_sock *x25 = x25_sk(sk);
322
323 x25_stop_timer(sk); 347 x25_stop_timer(sk);
324 x25->condition = 0x00; 348 x25->condition = 0x00;
325 x25->va = 0; 349 x25->va = 0;
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
331 break; 355 break;
332 } 356 }
333 case X25_CLEAR_REQUEST: 357 case X25_CLEAR_REQUEST:
358 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
359 goto out_clear;
360
334 x25_write_internal(sk, X25_CLEAR_CONFIRMATION); 361 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
335 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); 362 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
336 break; 363 break;
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
340 } 367 }
341 368
342 return 0; 369 return 0;
370
371out_clear:
372 x25_write_internal(sk, X25_CLEAR_REQUEST);
373 x25->state = X25_STATE_2;
374 x25_start_t23timer(sk);
375 return 0;
343} 376}
344 377
345/* Higher level upcall for a LAPB frame */ 378/* Higher level upcall for a LAPB frame */
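
[Editor's note] Two fixes travel together in the x25_in.c hunks: call user data longer than X25_MAX_CUD_LEN is now rejected before it can overflow the fixed-size cuddata buffer, and skb_copy_from_linear_data() becomes skb_copy_bits(), which also copies correctly when the data sits in page fragments. A plain-C sketch of the clamp-before-copy rule; the sizes and names (DEMO_MAX_CUD_LEN, demo_store_cud) are illustrative:

        #include <stdio.h>
        #include <string.h>

        #define DEMO_MAX_CUD_LEN 128    /* stands in for X25_MAX_CUD_LEN */

        struct demo_cud {
                unsigned char cuddata[DEMO_MAX_CUD_LEN];
                unsigned int cudlength;
        };

        /* reject oversized input outright rather than truncating it */
        static int demo_store_cud(struct demo_cud *cud,
                                  const unsigned char *data, size_t len)
        {
                if (len > sizeof(cud->cuddata))
                        return -1;
                memcpy(cud->cuddata, data, len);
                cud->cudlength = len;
                return 0;
        }

        int main(void)
        {
                struct demo_cud cud;
                unsigned char big[DEMO_MAX_CUD_LEN + 1] = { 0 };

                printf("%d\n", demo_store_cud(&cud, big, sizeof(big))); /* -1 */
                printf("%d\n", demo_store_cud(&cud, big, 16));          /* 0  */
                return 0;
        }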
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 037958ff8eed..4acacf3c6617 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
90 break; 90 break;
91 91
92 case X25_DIAGNOSTIC: 92 case X25_DIAGNOSTIC:
93 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
94 break;
95
93 printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", 96 printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
94 skb->data[3], skb->data[4], 97 skb->data[3], skb->data[4],
95 skb->data[5], skb->data[6]); 98 skb->data[5], skb->data[6]);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 24a342ebc7f5..5170d52bfd96 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
269 int *d, int *m) 269 int *d, int *m)
270{ 270{
271 struct x25_sock *x25 = x25_sk(sk); 271 struct x25_sock *x25 = x25_sk(sk);
272 unsigned char *frame = skb->data; 272 unsigned char *frame;
273
274 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
275 return X25_ILLEGAL;
276 frame = skb->data;
273 277
274 *ns = *nr = *q = *d = *m = 0; 278 *ns = *nr = *q = *d = *m = 0;
275 279
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
294 if (frame[2] == X25_RR || 298 if (frame[2] == X25_RR ||
295 frame[2] == X25_RNR || 299 frame[2] == X25_RNR ||
296 frame[2] == X25_REJ) { 300 frame[2] == X25_REJ) {
301 if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
302 return X25_ILLEGAL;
303 frame = skb->data;
304
297 *nr = (frame[3] >> 1) & 0x7F; 305 *nr = (frame[3] >> 1) & 0x7F;
298 return frame[2]; 306 return frame[2];
299 } 307 }
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
308 316
309 if (x25->neighbour->extended) { 317 if (x25->neighbour->extended) {
310 if ((frame[2] & 0x01) == X25_DATA) { 318 if ((frame[2] & 0x01) == X25_DATA) {
319 if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
320 return X25_ILLEGAL;
321 frame = skb->data;
322
311 *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; 323 *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
312 *d = (frame[0] & X25_D_BIT) == X25_D_BIT; 324 *d = (frame[0] & X25_D_BIT) == X25_D_BIT;
313 *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; 325 *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index fc91ad7ee26e..e5246fbe36c4 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -70,26 +70,29 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
70 70
71 while ((scratch += len, dlen -= len) > 0) { 71 while ((scratch += len, dlen -= len) > 0) {
72 skb_frag_t *frag; 72 skb_frag_t *frag;
73 struct page *page;
73 74
74 err = -EMSGSIZE; 75 err = -EMSGSIZE;
75 if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) 76 if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
76 goto out; 77 goto out;
77 78
78 frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; 79 frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
79 frag->page = alloc_page(GFP_ATOMIC); 80 page = alloc_page(GFP_ATOMIC);
80 81
81 err = -ENOMEM; 82 err = -ENOMEM;
82 if (!frag->page) 83 if (!page)
83 goto out; 84 goto out;
84 85
86 __skb_frag_set_page(frag, page);
87
85 len = PAGE_SIZE; 88 len = PAGE_SIZE;
86 if (dlen < len) 89 if (dlen < len)
87 len = dlen; 90 len = dlen;
88 91
89 memcpy(page_address(frag->page), scratch, len);
90
91 frag->page_offset = 0; 92 frag->page_offset = 0;
92 frag->size = len; 93 skb_frag_size_set(frag, len);
94 memcpy(skb_frag_address(frag), scratch, len);
95
93 skb->truesize += len; 96 skb->truesize += len;
94 skb->data_len += len; 97 skb->data_len += len;
95 skb->len += len; 98 skb->len += len;
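
[Editor's note] The ipcomp hunk converts direct frag field pokes (frag->page = ..., frag->size = ...) to the accessor API: __skb_frag_set_page(), skb_frag_size_set() and skb_frag_address(). It also attaches the page to the frag only after alloc_page() is known to have succeeded, so the error path never leaves a half-initialized frag behind. A kernel-context sketch of appending a freshly allocated page with those accessors (demo_append_page is hypothetical):

        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/skbuff.h>
        #include <linux/string.h>

        static int demo_append_page(struct sk_buff *skb, const void *src,
                                    unsigned int len)
        {
                skb_frag_t *frag;
                struct page *page;

                if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
                        return -EMSGSIZE;

                page = alloc_page(GFP_ATOMIC);
                if (!page)
                        return -ENOMEM;

                frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
                __skb_frag_set_page(frag, page); /* accessor, not frag->page = */
                frag->page_offset = 0;
                skb_frag_size_set(frag, len);
                memcpy(skb_frag_address(frag), src, len);

                skb->truesize += len;
                skb->data_len += len;
                skb->len      += len;
                skb_shinfo(skb)->nr_frags++;
                return 0;
        }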
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index b11ea692bd7d..6ca357406ea8 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -203,8 +203,6 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
203 if (!replay_esn->replay_window) 203 if (!replay_esn->replay_window)
204 return 0; 204 return 0;
205 205
206 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
207
208 if (unlikely(seq == 0)) 206 if (unlikely(seq == 0))
209 goto err; 207 goto err;
210 208
@@ -216,19 +214,18 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
216 goto err; 214 goto err;
217 } 215 }
218 216
219 if (pos >= diff) { 217 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
218
219 if (pos >= diff)
220 bitnr = (pos - diff) % replay_esn->replay_window; 220 bitnr = (pos - diff) % replay_esn->replay_window;
221 nr = bitnr >> 5; 221 else
222 bitnr = bitnr & 0x1F;
223 if (replay_esn->bmp[nr] & (1U << bitnr))
224 goto err_replay;
225 } else {
226 bitnr = replay_esn->replay_window - (diff - pos); 222 bitnr = replay_esn->replay_window - (diff - pos);
227 nr = bitnr >> 5; 223
228 bitnr = bitnr & 0x1F; 224 nr = bitnr >> 5;
229 if (replay_esn->bmp[nr] & (1U << bitnr)) 225 bitnr = bitnr & 0x1F;
230 goto err_replay; 226 if (replay_esn->bmp[nr] & (1U << bitnr))
231 } 227 goto err_replay;
228
232 return 0; 229 return 0;
233 230
234err_replay: 231err_replay:
@@ -259,39 +256,27 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
259 bitnr = bitnr & 0x1F; 256 bitnr = bitnr & 0x1F;
260 replay_esn->bmp[nr] &= ~(1U << bitnr); 257 replay_esn->bmp[nr] &= ~(1U << bitnr);
261 } 258 }
262
263 bitnr = (pos + diff) % replay_esn->replay_window;
264 nr = bitnr >> 5;
265 bitnr = bitnr & 0x1F;
266 replay_esn->bmp[nr] |= (1U << bitnr);
267 } else { 259 } else {
268 nr = (replay_esn->replay_window - 1) >> 5; 260 nr = (replay_esn->replay_window - 1) >> 5;
269 for (i = 0; i <= nr; i++) 261 for (i = 0; i <= nr; i++)
270 replay_esn->bmp[i] = 0; 262 replay_esn->bmp[i] = 0;
271
272 bitnr = (pos + diff) % replay_esn->replay_window;
273 nr = bitnr >> 5;
274 bitnr = bitnr & 0x1F;
275 replay_esn->bmp[nr] |= (1U << bitnr);
276 } 263 }
277 264
265 bitnr = (pos + diff) % replay_esn->replay_window;
278 replay_esn->seq = seq; 266 replay_esn->seq = seq;
279 } else { 267 } else {
280 diff = replay_esn->seq - seq; 268 diff = replay_esn->seq - seq;
281 269
282 if (pos >= diff) { 270 if (pos >= diff)
283 bitnr = (pos - diff) % replay_esn->replay_window; 271 bitnr = (pos - diff) % replay_esn->replay_window;
284 nr = bitnr >> 5; 272 else
285 bitnr = bitnr & 0x1F;
286 replay_esn->bmp[nr] |= (1U << bitnr);
287 } else {
288 bitnr = replay_esn->replay_window - (diff - pos); 273 bitnr = replay_esn->replay_window - (diff - pos);
289 nr = bitnr >> 5;
290 bitnr = bitnr & 0x1F;
291 replay_esn->bmp[nr] |= (1U << bitnr);
292 }
293 } 274 }
294 275
276 nr = bitnr >> 5;
277 bitnr = bitnr & 0x1F;
278 replay_esn->bmp[nr] |= (1U << bitnr);
279
295 if (xfrm_aevent_is_on(xs_net(x))) 280 if (xfrm_aevent_is_on(xs_net(x)))
296 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 281 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
297} 282}
@@ -390,8 +375,6 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
390 if (!wsize) 375 if (!wsize)
391 return 0; 376 return 0;
392 377
393 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
394
395 if (unlikely(seq == 0 && replay_esn->seq_hi == 0 && 378 if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
396 (replay_esn->seq < replay_esn->replay_window - 1))) 379 (replay_esn->seq < replay_esn->replay_window - 1)))
397 goto err; 380 goto err;
@@ -415,19 +398,18 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
415 goto err; 398 goto err;
416 } 399 }
417 400
418 if (pos >= diff) { 401 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
402
403 if (pos >= diff)
419 bitnr = (pos - diff) % replay_esn->replay_window; 404 bitnr = (pos - diff) % replay_esn->replay_window;
420 nr = bitnr >> 5; 405 else
421 bitnr = bitnr & 0x1F;
422 if (replay_esn->bmp[nr] & (1U << bitnr))
423 goto err_replay;
424 } else {
425 bitnr = replay_esn->replay_window - (diff - pos); 406 bitnr = replay_esn->replay_window - (diff - pos);
426 nr = bitnr >> 5; 407
427 bitnr = bitnr & 0x1F; 408 nr = bitnr >> 5;
428 if (replay_esn->bmp[nr] & (1U << bitnr)) 409 bitnr = bitnr & 0x1F;
429 goto err_replay; 410 if (replay_esn->bmp[nr] & (1U << bitnr))
430 } 411 goto err_replay;
412
431 return 0; 413 return 0;
432 414
433err_replay: 415err_replay:
@@ -465,22 +447,13 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
465 bitnr = bitnr & 0x1F; 447 bitnr = bitnr & 0x1F;
466 replay_esn->bmp[nr] &= ~(1U << bitnr); 448 replay_esn->bmp[nr] &= ~(1U << bitnr);
467 } 449 }
468
469 bitnr = (pos + diff) % replay_esn->replay_window;
470 nr = bitnr >> 5;
471 bitnr = bitnr & 0x1F;
472 replay_esn->bmp[nr] |= (1U << bitnr);
473 } else { 450 } else {
474 nr = (replay_esn->replay_window - 1) >> 5; 451 nr = (replay_esn->replay_window - 1) >> 5;
475 for (i = 0; i <= nr; i++) 452 for (i = 0; i <= nr; i++)
476 replay_esn->bmp[i] = 0; 453 replay_esn->bmp[i] = 0;
477
478 bitnr = (pos + diff) % replay_esn->replay_window;
479 nr = bitnr >> 5;
480 bitnr = bitnr & 0x1F;
481 replay_esn->bmp[nr] |= (1U << bitnr);
482 } 454 }
483 455
456 bitnr = (pos + diff) % replay_esn->replay_window;
484 replay_esn->seq = seq; 457 replay_esn->seq = seq;
485 458
486 if (unlikely(wrap > 0)) 459 if (unlikely(wrap > 0))
@@ -488,19 +461,16 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
488 } else { 461 } else {
489 diff = replay_esn->seq - seq; 462 diff = replay_esn->seq - seq;
490 463
491 if (pos >= diff) { 464 if (pos >= diff)
492 bitnr = (pos - diff) % replay_esn->replay_window; 465 bitnr = (pos - diff) % replay_esn->replay_window;
493 nr = bitnr >> 5; 466 else
494 bitnr = bitnr & 0x1F;
495 replay_esn->bmp[nr] |= (1U << bitnr);
496 } else {
497 bitnr = replay_esn->replay_window - (diff - pos); 467 bitnr = replay_esn->replay_window - (diff - pos);
498 nr = bitnr >> 5;
499 bitnr = bitnr & 0x1F;
500 replay_esn->bmp[nr] |= (1U << bitnr);
501 }
502 } 468 }
503 469
470 nr = bitnr >> 5;
471 bitnr = bitnr & 0x1F;
472 replay_esn->bmp[nr] |= (1U << bitnr);
473
504 if (xfrm_aevent_is_on(xs_net(x))) 474 if (xfrm_aevent_is_on(xs_net(x)))
505 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 475 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
506} 476}
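
[Editor's note] The xfrm_replay.c hunks are a pure deduplication: in each check/advance function both branches now compute only bitnr, and the shared word/bit arithmetic plus bitmap update run once afterwards. The indexing itself, on 32-bit bitmap words, in a standalone check:

        #include <stdio.h>

        /* bit i of the replay window lives in word (i >> 5),
         * bit (i & 0x1F) of that word */
        static void demo_set_bit(unsigned int *bmp, unsigned int bitnr)
        {
                unsigned int nr = bitnr >> 5;   /* which 32-bit word */
                bitnr &= 0x1F;                  /* which bit in word */
                bmp[nr] |= 1U << bitnr;
        }

        int main(void)
        {
                unsigned int bmp[4] = { 0 };

                demo_set_bit(bmp, 37);          /* word 1, bit 5 */
                printf("%#x\n", bmp[1]);        /* prints 0x20   */
                return 0;
        }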
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0256b8a0a7cf..d0a42df5160e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
2927 if (nlsk == NULL) 2927 if (nlsk == NULL)
2928 return -ENOMEM; 2928 return -ENOMEM;
2929 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 2929 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2930 rcu_assign_pointer(net->xfrm.nlsk, nlsk); 2930 RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
2931 return 0; 2931 return 0;
2932} 2932}
2933 2933
@@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2935{ 2935{
2936 struct net *net; 2936 struct net *net;
2937 list_for_each_entry(net, net_exit_list, exit_list) 2937 list_for_each_entry(net, net_exit_list, exit_list)
2938 rcu_assign_pointer(net->xfrm.nlsk, NULL); 2938 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
2939 synchronize_net(); 2939 synchronize_net();
2940 list_for_each_entry(net, net_exit_list, exit_list) 2940 list_for_each_entry(net, net_exit_list, exit_list)
2941 netlink_kernel_release(net->xfrm.nlsk_stash); 2941 netlink_kernel_release(net->xfrm.nlsk_stash);
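
[Editor's note] The xfrm_user.c change swaps rcu_assign_pointer() for RCU_INIT_POINTER() in the two places where the write barrier the former implies buys nothing: publishing nlsk while the net namespace is still being initialized and no reader can reach it, and writing NULL on exit, which carries no pointed-to data to order against. A kernel-context sketch contrasting the two cases; the demo_* names are hypothetical:

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct demo_cfg { int value; };
        static struct demo_cfg __rcu *demo_ptr;

        static int demo_publish(int value)
        {
                struct demo_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);

                if (!cfg)
                        return -ENOMEM;
                cfg->value = value;
                /* readers may already dereference demo_ptr, so the
                 * ordering guarantee of rcu_assign_pointer() is needed */
                rcu_assign_pointer(demo_ptr, cfg);
                return 0;
        }

        static void demo_retract(void)
        {
                struct demo_cfg *old = rcu_dereference_protected(demo_ptr, 1);

                /* NULL points at nothing a reader could observe
                 * half-initialized, so the barrier-free form suffices */
                RCU_INIT_POINTER(demo_ptr, NULL);
                synchronize_rcu();
                kfree(old);
        }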