author    Jiri Kosina <jkosina@suse.cz>    2011-11-13 14:55:35 -0500
committer Jiri Kosina <jkosina@suse.cz>    2011-11-13 14:55:53 -0500
commit    2290c0d06d82faee87b1ab2d9d4f7bf81ef64379 (patch)
tree      e075e4d5534193f28e6059904f61e5ca03958d3c /net
parent    4da669a2e3e5bc70b30a0465f3641528681b5f77 (diff)
parent    52e4c2a05256cb83cda12f3c2137ab1533344edb (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to have 157550ff ("mtd: add GPMI-NAND driver in the config and Makefile") in it, as I have a patch depending on that one.
Diffstat (limited to 'net')
-rw-r--r--  net/802/fc.c | 1
-rw-r--r--  net/802/garp.c | 5
-rw-r--r--  net/802/stp.c | 5
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/8021q/vlan_core.c | 8
-rw-r--r--  net/8021q/vlan_dev.c | 14
-rw-r--r--  net/8021q/vlan_netlink.c | 1
-rw-r--r--  net/9p/client.c | 469
-rw-r--r--  net/9p/protocol.c | 99
-rw-r--r--  net/9p/protocol.h | 4
-rw-r--r--  net/9p/trans_common.c | 53
-rw-r--r--  net/9p/trans_common.h | 21
-rw-r--r--  net/9p/trans_virtio.c | 319
-rw-r--r--  net/appletalk/aarp.c | 1
-rw-r--r--  net/appletalk/atalk_proc.c | 1
-rw-r--r--  net/appletalk/ddp.c | 5
-rw-r--r--  net/atm/lec.c | 2
-rw-r--r--  net/atm/pvc.c | 1
-rw-r--r--  net/atm/svc.c | 1
-rw-r--r--  net/ax25/ax25_route.c | 1
-rw-r--r--  net/ax25/ax25_uid.c | 1
-rw-r--r--  net/batman-adv/Makefile | 2
-rw-r--r--  net/batman-adv/aggregation.c | 293
-rw-r--r--  net/batman-adv/aggregation.h | 46
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 1170
-rw-r--r--  net/batman-adv/bat_ogm.h | 35
-rw-r--r--  net/batman-adv/bat_sysfs.c | 2
-rw-r--r--  net/batman-adv/bitarray.c | 6
-rw-r--r--  net/batman-adv/gateway_client.c | 10
-rw-r--r--  net/batman-adv/hard-interface.c | 88
-rw-r--r--  net/batman-adv/hard-interface.h | 1
-rw-r--r--  net/batman-adv/hash.h | 25
-rw-r--r--  net/batman-adv/main.c | 4
-rw-r--r--  net/batman-adv/main.h | 8
-rw-r--r--  net/batman-adv/originator.c | 21
-rw-r--r--  net/batman-adv/packet.h | 19
-rw-r--r--  net/batman-adv/routing.c | 669
-rw-r--r--  net/batman-adv/routing.h | 17
-rw-r--r--  net/batman-adv/send.c | 313
-rw-r--r--  net/batman-adv/send.h | 9
-rw-r--r--  net/batman-adv/soft-interface.c | 36
-rw-r--r--  net/batman-adv/translation-table.c | 223
-rw-r--r--  net/batman-adv/translation-table.h | 21
-rw-r--r--  net/batman-adv/types.h | 9
-rw-r--r--  net/batman-adv/unicast.c | 6
-rw-r--r--  net/batman-adv/unicast.h | 2
-rw-r--r--  net/batman-adv/vis.c | 10
-rw-r--r--  net/bluetooth/af_bluetooth.c | 30
-rw-r--r--  net/bluetooth/bnep/core.c | 5
-rw-r--r--  net/bluetooth/bnep/netdev.c | 2
-rw-r--r--  net/bluetooth/cmtp/core.c | 5
-rw-r--r--  net/bluetooth/hci_conn.c | 16
-rw-r--r--  net/bluetooth/hci_core.c | 59
-rw-r--r--  net/bluetooth/hci_event.c | 36
-rw-r--r--  net/bluetooth/hci_sock.c | 18
-rw-r--r--  net/bluetooth/hci_sysfs.c | 3
-rw-r--r--  net/bluetooth/hidp/core.c | 13
-rw-r--r--  net/bluetooth/l2cap_core.c | 273
-rw-r--r--  net/bluetooth/l2cap_sock.c | 1
-rw-r--r--  net/bluetooth/mgmt.c | 215
-rw-r--r--  net/bluetooth/rfcomm/core.c | 14
-rw-r--r--  net/bluetooth/smp.c | 421
-rw-r--r--  net/bridge/br_device.c | 4
-rw-r--r--  net/bridge/br_fdb.c | 23
-rw-r--r--  net/bridge/br_if.c | 41
-rw-r--r--  net/bridge/br_input.c | 34
-rw-r--r--  net/bridge/br_private.h | 7
-rw-r--r--  net/bridge/br_stp_if.c | 1
-rw-r--r--  net/bridge/br_sysfs_br.c | 34
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 7
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c | 4
-rw-r--r--  net/caif/caif_dev.c | 6
-rw-r--r--  net/caif/cfcnfg.c | 38
-rw-r--r--  net/caif/cfctrl.c | 23
-rw-r--r--  net/caif/cfdbgl.c | 7
-rw-r--r--  net/caif/cfdgml.c | 7
-rw-r--r--  net/caif/cffrml.c | 7
-rw-r--r--  net/caif/cfmuxl.c | 6
-rw-r--r--  net/caif/cfpkt_skbuff.c | 1
-rw-r--r--  net/caif/cfrfml.c | 7
-rw-r--r--  net/caif/cfserl.c | 7
-rw-r--r--  net/caif/cfsrvl.c | 8
-rw-r--r--  net/caif/cfutill.c | 7
-rw-r--r--  net/caif/cfveil.c | 7
-rw-r--r--  net/caif/cfvidl.c | 7
-rw-r--r--  net/can/Kconfig | 11
-rw-r--r--  net/can/Makefile | 3
-rw-r--r--  net/can/af_can.c | 6
-rw-r--r--  net/can/af_can.h | 2
-rw-r--r--  net/can/bcm.c | 2
-rw-r--r--  net/can/gw.c | 957
-rw-r--r--  net/can/proc.c | 2
-rw-r--r--  net/can/raw.c | 2
-rw-r--r--  net/ceph/Kconfig | 14
-rw-r--r--  net/ceph/ceph_common.c | 47
-rw-r--r--  net/ceph/messenger.c | 131
-rw-r--r--  net/ceph/mon_client.c | 79
-rw-r--r--  net/ceph/msgpool.c | 4
-rw-r--r--  net/ceph/osd_client.c | 34
-rw-r--r--  net/compat.c | 1
-rw-r--r--  net/core/datagram.c | 24
-rw-r--r--  net/core/dev.c | 339
-rw-r--r--  net/core/dev_addr_lists.c | 5
-rw-r--r--  net/core/dst.c | 15
-rw-r--r--  net/core/ethtool.c | 20
-rw-r--r--  net/core/fib_rules.c | 5
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/flow.c | 14
-rw-r--r--  net/core/kmap_skb.h | 2
-rw-r--r--  net/core/link_watch.c | 9
-rw-r--r--  net/core/neighbour.c | 50
-rw-r--r--  net/core/net-sysfs.c | 13
-rw-r--r--  net/core/net-traces.c | 1
-rw-r--r--  net/core/net_namespace.c | 1
-rw-r--r--  net/core/netevent.c | 1
-rw-r--r--  net/core/netpoll.c | 5
-rw-r--r--  net/core/pktgen.c | 25
-rw-r--r--  net/core/rtnetlink.c | 34
-rw-r--r--  net/core/scm.c | 10
-rw-r--r--  net/core/secure_seq.c | 2
-rw-r--r--  net/core/skbuff.c | 173
-rw-r--r--  net/core/sock.c | 24
-rw-r--r--  net/core/timestamping.c | 13
-rw-r--r--  net/core/user_dma.c | 7
-rw-r--r--  net/dcb/dcbevent.c | 1
-rw-r--r--  net/dcb/dcbnl.c | 31
-rw-r--r--  net/dccp/ackvec.c | 1
-rw-r--r--  net/dccp/ccids/ccid2.c | 84
-rw-r--r--  net/dccp/ccids/ccid2.h | 6
-rw-r--r--  net/dccp/ccids/lib/tfrc.c | 1
-rw-r--r--  net/dccp/dccp.h | 1
-rw-r--r--  net/dccp/feat.c | 202
-rw-r--r--  net/dccp/feat.h | 1
-rw-r--r--  net/dccp/ipv4.c | 1
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/proto.c | 1
-rw-r--r--  net/dccp/timer.c | 1
-rw-r--r--  net/decnet/dn_dev.c | 6
-rw-r--r--  net/decnet/dn_route.c | 1
-rw-r--r--  net/decnet/dn_rules.c | 1
-rw-r--r--  net/dsa/dsa.c | 1
-rw-r--r--  net/dsa/slave.c | 3
-rw-r--r--  net/ieee802154/6lowpan.c | 891
-rw-r--r--  net/ieee802154/6lowpan.h | 212
-rw-r--r--  net/ieee802154/Kconfig | 6
-rw-r--r--  net/ieee802154/Makefile | 8
-rw-r--r--  net/ieee802154/nl-mac.c | 1
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/cipso_ipv4.c | 2
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/fib_rules.c | 1
-rw-r--r--  net/ipv4/fib_trie.c | 13
-rw-r--r--  net/ipv4/gre.c | 4
-rw-r--r--  net/ipv4/icmp.c | 5
-rw-r--r--  net/ipv4/igmp.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 5
-rw-r--r--  net/ipv4/inet_lro.c | 10
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 40
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 17
-rw-r--r--  net/ipv4/ip_sockglue.c | 7
-rw-r--r--  net/ipv4/ipconfig.c | 1
-rw-r--r--  net/ipv4/ipip.c | 10
-rw-r--r--  net/ipv4/ipmr.c | 9
-rw-r--r--  net/ipv4/netfilter.c | 1
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 1
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_amanda.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 24
-rw-r--r--  net/ipv4/netfilter/nf_nat_ftp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 36
-rw-r--r--  net/ipv4/netfilter/nf_nat_irc.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 16
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_icmp.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_sctp.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udplite.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 28
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 26
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_nat_tftp.c | 4
-rw-r--r--  net/ipv4/ping.c | 1
-rw-r--r--  net/ipv4/proc.c | 1
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv4/route.c | 53
-rw-r--r--  net/ipv4/syncookies.c | 3
-rw-r--r--  net/ipv4/tcp.c | 97
-rw-r--r--  net/ipv4/tcp_input.c | 244
-rw-r--r--  net/ipv4/tcp_ipv4.c | 80
-rw-r--r--  net/ipv4/tcp_minisocks.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 152
-rw-r--r--  net/ipv4/tcp_timer.c | 2
-rw-r--r--  net/ipv4/udp.c | 41
-rw-r--r--  net/ipv4/udplite.c | 14
-rw-r--r--  net/ipv4/xfrm4_policy.c | 14
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 125
-rw-r--r--  net/ipv6/addrconf_core.c | 1
-rw-r--r--  net/ipv6/datagram.c | 4
-rw-r--r--  net/ipv6/exthdrs.c | 8
-rw-r--r--  net/ipv6/exthdrs_core.c | 1
-rw-r--r--  net/ipv6/fib6_rules.c | 1
-rw-r--r--  net/ipv6/icmp.c | 28
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 9
-rw-r--r--  net/ipv6/ip6_fib.c | 4
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 37
-rw-r--r--  net/ipv6/ip6_tunnel.c | 54
-rw-r--r--  net/ipv6/ip6mr.c | 1
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 42
-rw-r--r--  net/ipv6/netfilter.c | 14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 11
-rw-r--r--  net/ipv6/proc.c | 1
-rw-r--r--  net/ipv6/raw.c | 12
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/sit.c | 12
-rw-r--r--  net/ipv6/syncookies.c | 6
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 77
-rw-r--r--  net/ipv6/udp.c | 16
-rw-r--r--  net/ipv6/udplite.c | 14
-rw-r--r--  net/ipv6/xfrm6_output.c | 56
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/ipx/ipx_proc.c | 1
-rw-r--r--  net/irda/discovery.c | 1
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/irda/irda_device.c | 1
-rw-r--r--  net/irda/irlan/irlan_eth.c | 2
-rw-r--r--  net/irda/irttp.c | 1
-rw-r--r--  net/irda/qos.c | 2
-rw-r--r--  net/iucv/Kconfig | 14
-rw-r--r--  net/iucv/af_iucv.c | 870
-rw-r--r--  net/iucv/iucv.c | 23
-rw-r--r--  net/l2tp/l2tp_core.c | 3
-rw-r--r--  net/l2tp/l2tp_ppp.c | 9
-rw-r--r--  net/lapb/lapb_iface.c | 29
-rw-r--r--  net/llc/llc_input.c | 1
-rw-r--r--  net/llc/llc_output.c | 1
-rw-r--r--  net/llc/llc_proc.c | 1
-rw-r--r--  net/mac80211/Kconfig | 25
-rw-r--r--  net/mac80211/agg-rx.c | 26
-rw-r--r--  net/mac80211/agg-tx.c | 65
-rw-r--r--  net/mac80211/cfg.c | 497
-rw-r--r--  net/mac80211/debugfs.c | 71
-rw-r--r--  net/mac80211/debugfs_netdev.c | 59
-rw-r--r--  net/mac80211/debugfs_sta.c | 37
-rw-r--r--  net/mac80211/driver-ops.h | 91
-rw-r--r--  net/mac80211/driver-trace.h | 117
-rw-r--r--  net/mac80211/ht.c | 9
-rw-r--r--  net/mac80211/ibss.c | 16
-rw-r--r--  net/mac80211/ieee80211_i.h | 105
-rw-r--r--  net/mac80211/iface.c | 26
-rw-r--r--  net/mac80211/key.c | 5
-rw-r--r--  net/mac80211/led.c | 1
-rw-r--r--  net/mac80211/main.c | 27
-rw-r--r--  net/mac80211/mesh.c | 213
-rw-r--r--  net/mac80211/mesh.h | 38
-rw-r--r--  net/mac80211/mesh_hwmp.c | 177
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 481
-rw-r--r--  net/mac80211/mesh_plink.c | 257
-rw-r--r--  net/mac80211/mlme.c | 160
-rw-r--r--  net/mac80211/offchannel.c | 1
-rw-r--r--  net/mac80211/pm.c | 2
-rw-r--r--  net/mac80211/rate.c | 38
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c | 1
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 13
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c | 1
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c | 1
-rw-r--r--  net/mac80211/rx.c | 200
-rw-r--r--  net/mac80211/scan.c | 7
-rw-r--r--  net/mac80211/spectmgmt.c | 6
-rw-r--r--  net/mac80211/sta_info.c | 977
-rw-r--r--  net/mac80211/sta_info.h | 171
-rw-r--r--  net/mac80211/status.c | 252
-rw-r--r--  net/mac80211/tkip.c | 1
-rw-r--r--  net/mac80211/tx.c | 558
-rw-r--r--  net/mac80211/util.c | 281
-rw-r--r--  net/mac80211/wme.c | 20
-rw-r--r--  net/mac80211/wme.h | 3
-rw-r--r--  net/mac80211/work.c | 17
-rw-r--r--  net/mac80211/wpa.c | 3
-rw-r--r--  net/netfilter/core.c | 15
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_getport.c | 1
-rw-r--r--  net/netfilter/ipset/pfxlen.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 20
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 22
-rw-r--r--  net/netfilter/ipvs/ip_vs_dh.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 9
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_nfct.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_acct.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 6
-rw-r--r--  net/netfilter/nf_log.c | 10
-rw-r--r--  net/netfilter/nf_queue.c | 6
-rw-r--r--  net/netfilter/nfnetlink.c | 6
-rw-r--r--  net/netfilter/nfnetlink_log.c | 7
-rw-r--r--  net/netfilter/x_tables.c | 1
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 5
-rw-r--r--  net/netfilter/xt_quota.c | 1
-rw-r--r--  net/netfilter/xt_statistic.c | 1
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 6
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 7
-rw-r--r--  net/netrom/nr_route.c | 1
-rw-r--r--  net/nfc/Kconfig | 2
-rw-r--r--  net/nfc/Makefile | 1
-rw-r--r--  net/nfc/af_nfc.c | 1
-rw-r--r--  net/nfc/core.c | 83
-rw-r--r--  net/nfc/nci/Kconfig | 10
-rw-r--r--  net/nfc/nci/Makefile | 7
-rw-r--r--  net/nfc/nci/core.c | 798
-rw-r--r--  net/nfc/nci/data.c | 247
-rw-r--r--  net/nfc/nci/lib.c | 94
-rw-r--r--  net/nfc/nci/ntf.c | 258
-rw-r--r--  net/nfc/nci/rsp.c | 226
-rw-r--r--  net/nfc/netlink.c | 56
-rw-r--r--  net/nfc/nfc.h | 8
-rw-r--r--  net/nfc/rawsock.c | 14
-rw-r--r--  net/packet/af_packet.c | 1007
-rw-r--r--  net/phonet/af_phonet.c | 4
-rw-r--r--  net/phonet/datagram.c | 1
-rw-r--r--  net/phonet/pep.c | 1
-rw-r--r--  net/phonet/pn_dev.c | 6
-rw-r--r--  net/phonet/socket.c | 7
-rw-r--r--  net/rds/Kconfig | 1
-rw-r--r--  net/rds/cong.c | 1
-rw-r--r--  net/rds/connection.c | 1
-rw-r--r--  net/rds/ib.c | 1
-rw-r--r--  net/rds/ib_rdma.c | 112
-rw-r--r--  net/rds/info.c | 1
-rw-r--r--  net/rds/iw.c | 1
-rw-r--r--  net/rds/message.c | 1
-rw-r--r--  net/rds/page.c | 1
-rw-r--r--  net/rds/rdma_transport.c | 1
-rw-r--r--  net/rds/rds.h | 8
-rw-r--r--  net/rds/recv.c | 1
-rw-r--r--  net/rds/send.c | 2
-rw-r--r--  net/rds/stats.c | 1
-rw-r--r--  net/rds/tcp.c | 1
-rw-r--r--  net/rds/threads.c | 1
-rw-r--r--  net/rds/xlist.h | 80
-rw-r--r--  net/rfkill/core.c | 2
-rw-r--r--  net/rfkill/input.c | 1
-rw-r--r--  net/rfkill/rfkill-gpio.c | 11
-rw-r--r--  net/rfkill/rfkill-regulator.c | 1
-rw-r--r--  net/rose/rose_route.c | 1
-rw-r--r--  net/rxrpc/ar-output.c | 1
-rw-r--r--  net/rxrpc/ar-recvmsg.c | 1
-rw-r--r--  net/sched/act_api.c | 1
-rw-r--r--  net/sched/cls_flow.c | 189
-rw-r--r--  net/sched/sch_mq.c | 1
-rw-r--r--  net/sched/sch_mqprio.c | 1
-rw-r--r--  net/sched/sch_sfb.c | 13
-rw-r--r--  net/sctp/associola.c | 1
-rw-r--r--  net/sctp/ipv6.c | 2
-rw-r--r--  net/sctp/outqueue.c | 4
-rw-r--r--  net/sctp/proc.c | 1
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 1
-rw-r--r--  net/sctp/sm_statefuns.c | 5
-rw-r--r--  net/sctp/socket.c | 1
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/sunrpc/addr.c | 7
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 28
-rw-r--r--  net/sunrpc/auth_unix.c | 3
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 1
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/rpc_pipe.c | 23
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 94
-rw-r--r--  net/sunrpc/socklib.c | 1
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 3
-rw-r--r--  net/sunrpc/svc.c | 86
-rw-r--r--  net/sunrpc/svc_xprt.c | 14
-rw-r--r--  net/sunrpc/svcsock.c | 24
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 1
-rw-r--r--  net/sysctl_net.c | 1
-rw-r--r--  net/tipc/bcast.c | 111
-rw-r--r--  net/tipc/bcast.h | 1
-rw-r--r--  net/tipc/bearer.c | 8
-rw-r--r--  net/tipc/bearer.h | 4
-rw-r--r--  net/tipc/config.h | 1
-rw-r--r--  net/tipc/core.c | 2
-rw-r--r--  net/tipc/discover.c | 6
-rw-r--r--  net/tipc/eth_media.c | 32
-rw-r--r--  net/tipc/link.c | 111
-rw-r--r--  net/tipc/link.h | 1
-rw-r--r--  net/tipc/name_distr.c | 35
-rw-r--r--  net/tipc/net.c | 11
-rw-r--r--  net/tipc/node.c | 45
-rw-r--r--  net/tipc/node.h | 10
-rw-r--r--  net/tipc/socket.c | 52
-rw-r--r--  net/tipc/subscr.c | 3
-rw-r--r--  net/tipc/subscr.h | 6
-rw-r--r--  net/unix/af_unix.c | 24
-rw-r--r--  net/wanrouter/wanproc.c | 2
-rw-r--r--  net/wimax/op-msg.c | 1
-rw-r--r--  net/wimax/op-reset.c | 1
-rw-r--r--  net/wimax/op-rfkill.c | 1
-rw-r--r--  net/wimax/stack.c | 1
-rw-r--r--  net/wireless/core.c | 2
-rw-r--r--  net/wireless/core.h | 6
-rw-r--r--  net/wireless/ibss.c | 1
-rw-r--r--  net/wireless/lib80211.c | 15
-rw-r--r--  net/wireless/lib80211_crypt_ccmp.c | 2
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 4
-rw-r--r--  net/wireless/lib80211_crypt_wep.c | 4
-rw-r--r--  net/wireless/mesh.c | 4
-rw-r--r--  net/wireless/mlme.c | 16
-rw-r--r--  net/wireless/nl80211.c | 405
-rw-r--r--  net/wireless/nl80211.h | 4
-rw-r--r--  net/wireless/radiotap.c | 1
-rw-r--r--  net/wireless/reg.c | 49
-rw-r--r--  net/wireless/reg.h | 2
-rw-r--r--  net/wireless/scan.c | 28
-rw-r--r--  net/wireless/sme.c | 20
-rw-r--r--  net/wireless/util.c | 195
-rw-r--r--  net/wireless/wext-compat.c | 138
-rw-r--r--  net/wireless/wext-compat.h | 8
-rw-r--r--  net/wireless/wext-core.c | 1
-rw-r--r--  net/wireless/wext-sme.c | 4
-rw-r--r--  net/wireless/wext-spy.c | 1
-rw-r--r--  net/x25/af_x25.c | 11
-rw-r--r--  net/x25/x25_proc.c | 1
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 13
-rw-r--r--  net/xfrm/xfrm_proc.c | 1
-rw-r--r--  net/xfrm/xfrm_replay.c | 99
-rw-r--r--  net/xfrm/xfrm_user.c | 4
448 files changed, 15558 insertions, 6379 deletions
diff --git a/net/802/fc.c b/net/802/fc.c
index 1e49f2d4ea96..bd345f3d29f8 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -27,6 +27,7 @@
 #include <linux/net.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/export.h>
 #include <net/arp.h>
 
 /*
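A note on the include-only hunks in this file and in garp.c, stp.c, and vlan_netlink.c below: they look like fallout from the module.h include cleanup, under which a file that uses EXPORT_SYMBOL() or the module macros must include <linux/export.h> or <linux/module.h> itself instead of inheriting it through other headers. A minimal sketch of the rule (illustrative only; my_helper is an invented name):

	#include <linux/export.h>	/* now required for EXPORT_SYMBOL */

	int my_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(my_helper);	/* would no longer build without the include above */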
diff --git a/net/802/garp.c b/net/802/garp.c
index 16102951d36a..8e21b6db3981 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -15,6 +15,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/llc.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <net/llc.h>
 #include <net/llc_pdu.h>
 #include <net/garp.h>
@@ -553,7 +554,7 @@ static void garp_release_port(struct net_device *dev)
 		if (rtnl_dereference(port->applicants[i]))
 			return;
 	}
-	rcu_assign_pointer(dev->garp_port, NULL);
+	RCU_INIT_POINTER(dev->garp_port, NULL);
 	kfree_rcu(port, rcu);
 }
 
@@ -605,7 +606,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
 	ASSERT_RTNL();
 
-	rcu_assign_pointer(port->applicants[appl->type], NULL);
+	RCU_INIT_POINTER(port->applicants[appl->type], NULL);
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b1b36b..15540b7323cd 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -12,6 +12,7 @@
 #include <linux/etherdevice.h>
 #include <linux/llc.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <net/llc.h>
 #include <net/llc_pdu.h>
 #include <net/stp.h>
@@ -88,9 +89,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
 {
 	mutex_lock(&stp_proto_mutex);
 	if (is_zero_ether_addr(proto->group_address))
-		rcu_assign_pointer(stp_proto, NULL);
+		RCU_INIT_POINTER(stp_proto, NULL);
 	else
-		rcu_assign_pointer(garp_protos[proto->group_address[5] -
-				   GARP_ADDR_MIN], NULL);
+		RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
+				 GARP_ADDR_MIN], NULL);
 	synchronize_rcu();
 
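The rcu_assign_pointer(..., NULL) to RCU_INIT_POINTER(..., NULL) conversions in the hunks above (and in vlan.c below) rely on the fact that storing NULL publishes no structure a reader could dereference, so the write memory barrier in rcu_assign_pointer() is pure overhead there. A minimal sketch of the distinction, with invented names (gp, struct foo):

	struct foo __rcu *gp;

	static void publish(struct foo *p)
	{
		/* readers must observe p's fields initialized: barrier needed */
		rcu_assign_pointer(gp, p);
	}

	static void retract(void)
	{
		/* NULL carries no payload, so a plain store is safe and cheaper */
		RCU_INIT_POINTER(gp, NULL);
	}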
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8970ba139d73..5471628d3ffe 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (grp->nr_vlans == 0) {
 		vlan_gvrp_uninit_applicant(real_dev);
 
-		rcu_assign_pointer(real_dev->vlgrp, NULL);
+		RCU_INIT_POINTER(real_dev->vlgrp, NULL);
 
 		/* Free the group, after all cpu's are done. */
 		call_rcu(&grp->rcu, vlan_rcu_free);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f1f2f7bb6661..f5ffc02729d6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -2,9 +2,10 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/netpoll.h>
+#include <linux/export.h>
 #include "vlan.h"
 
-bool vlan_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,7 +14,10 @@ bool vlan_do_receive(struct sk_buff **skbp)
 
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
 	if (!vlan_dev) {
-		if (vlan_id)
+		/* Only the last call to vlan_do_receive() should change
+		 * pkt_type to PACKET_OTHERHOST
+		 */
+		if (vlan_id && last_handler)
 			skb->pkt_type = PACKET_OTHERHOST;
 		return false;
 	}
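The new last_handler argument changes the contract of vlan_do_receive(): when a frame carries an unknown VLAN tag, only the caller that knows it is the final handler in the receive chain may downgrade the packet to PACKET_OTHERHOST, since an earlier handler doing so would hide the frame from handlers still to run. A hypothetical call-site sketch (the real caller lives in net/core/dev.c, outside this diff; another_pass_follows is invented):

	bool last = !another_pass_follows;	/* true only on the final delivery pass */
	if (vlan_do_receive(&skb, last)) {
		/* skb was retargeted to the VLAN device; redo delivery */
	}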
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d40a071d038..bc2528624583 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,10 +470,12 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
 
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
-	if (change & IFF_PROMISC)
-		dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+		if (change & IFF_PROMISC)
+			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+	}
 }
 
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
@@ -610,7 +612,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
 					    struct ethtool_cmd *cmd)
 {
 	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+	return __ethtool_get_settings(vlan->real_dev, cmd);
 }
 
 static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -674,7 +677,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= vlan_dev_set_mac_address,
 	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
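Two separate fixes above: vlan_dev_change_rx_flags() now touches the real device only while the VLAN device is IFF_UP, and the ethtool call switches to __ethtool_get_settings(). The IFF_UP guard matters because dev_set_allmulti() and dev_set_promiscuity() are reference counts on the underlying device, not flags; a sketch of the invariant being protected (illustrative only, not commit code):

	dev_set_allmulti(real_dev, 1);	/* flag set while up: count++ */
	dev_set_allmulti(real_dev, -1);	/* flag cleared (or device going down): count-- */
	/* adjusting the count while the VLAN device is down would never be
	 * undone symmetrically, leaving the real device's counter unbalanced */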
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index be9a5c19a775..235c2197dbb6 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/module.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
 #include <net/rtnetlink.h>
diff --git a/net/9p/client.c b/net/9p/client.c
index 0505a03c374c..854ca7a911c4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -38,6 +38,9 @@
 #include <net/9p/transport.h>
 #include "protocol.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
 /*
  * Client Option Parsing (code inspired by NFS code)
  *  - a little lazy - parse all client options
@@ -123,21 +126,19 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
-		int token;
+		int token, r;
 		if (!*p)
 			continue;
 		token = match_token(p, tokens, args);
-		if (token < Opt_trans) {
-			int r = match_int(&args[0], &option);
+		switch (token) {
+		case Opt_msize:
+			r = match_int(&args[0], &option);
 			if (r < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					"integer field, but no integer?\n");
 				ret = r;
 				continue;
 			}
-		}
-		switch (token) {
-		case Opt_msize:
 			clnt->msize = option;
 			break;
 		case Opt_trans:
@@ -203,11 +204,13 @@ free_and_return:
  *
  */
 
-static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+static struct p9_req_t *
+p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
 {
 	unsigned long flags;
 	int row, col;
 	struct p9_req_t *req;
+	int alloc_msize = min(c->msize, max_size);
 
 	/* This looks up the original request by tag so we know which
 	 * buffer to read the data into */
@@ -245,23 +248,10 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 			return ERR_PTR(-ENOMEM);
 		}
 		init_waitqueue_head(req->wq);
-		if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-				P9_TRANS_PREF_PAYLOAD_SEP) {
-			int alloc_msize = min(c->msize, 4096);
-			req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
-					  GFP_NOFS);
-			req->tc->capacity = alloc_msize;
-			req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
-					  GFP_NOFS);
-			req->rc->capacity = alloc_msize;
-		} else {
-			req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
-					  GFP_NOFS);
-			req->tc->capacity = c->msize;
-			req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
-					  GFP_NOFS);
-			req->rc->capacity = c->msize;
-		}
+		req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+				  GFP_NOFS);
+		req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+				  GFP_NOFS);
 		if ((!req->tc) || (!req->rc)) {
 			printk(KERN_ERR "Couldn't grow tag array\n");
 			kfree(req->tc);
@@ -271,6 +261,8 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 			req->wq = NULL;
 			return ERR_PTR(-ENOMEM);
 		}
+		req->tc->capacity = alloc_msize;
+		req->rc->capacity = alloc_msize;
 		req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
 		req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
 	}
@@ -475,37 +467,22 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 	int ecode;
 
 	err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+	/*
+	 * dump the response from server
+	 * This should be after check errors which populate pdu_fcall.
+	 */
+	trace_9p_protocol_dump(c, req->rc);
 	if (err) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
 		return err;
 	}
-
 	if (type != P9_RERROR && type != P9_RLERROR)
 		return 0;
 
 	if (!p9_is_proto_dotl(c)) {
 		char *ename;
-
-		if (req->tc->pbuf_size) {
-			/* Handle user buffers */
-			size_t len = req->rc->size - req->rc->offset;
-			if (req->tc->pubuf) {
-				/* User Buffer */
-				err = copy_from_user(
-					&req->rc->sdata[req->rc->offset],
-					req->tc->pubuf, len);
-				if (err) {
-					err = -EFAULT;
-					goto out_err;
-				}
-			} else {
-				/* Kernel Buffer */
-				memmove(&req->rc->sdata[req->rc->offset],
-					req->tc->pkbuf, len);
-			}
-		}
 		err = p9pdu_readf(req->rc, c->proto_version, "s?d",
 				  &ename, &ecode);
 		if (err)
 			goto out_err;
 
@@ -515,11 +492,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 		if (!err || !IS_ERR_VALUE(err)) {
 			err = p9_errstr2errno(ename, strlen(ename));
 
-			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
-				ename);
-
-			kfree(ename);
+			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+				   -ecode, ename);
 		}
+		kfree(ename);
 	} else {
 		err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
 		err = -ecode;
@@ -527,7 +503,6 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 		P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
 	}
 
-
 	return err;
 
 out_err:
@@ -536,6 +511,115 @@ out_err:
 	return err;
 }
 
+/**
+ * p9_check_zc_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ * @in_hdrlen: Size of response protocol buffer.
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
+			      char *uidata, int in_hdrlen, int kern_buf)
+{
+	int err;
+	int ecode;
+	int8_t type;
+	char *ename = NULL;
+
+	err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+	/*
+	 * dump the response from server
+	 * This should be after parse_header which populates pdu_fcall.
+	 */
+	trace_9p_protocol_dump(c, req->rc);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+		return err;
+	}
+
+	if (type != P9_RERROR && type != P9_RLERROR)
+		return 0;
+
+	if (!p9_is_proto_dotl(c)) {
+		/* Error is reported in string format */
+		uint16_t len;
+		/* 7 = header size for RERROR, 2 is the size of string len; */
+		int inline_len = in_hdrlen - (7 + 2);
+
+		/* Read the size of error string */
+		err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
+		if (err)
+			goto out_err;
+
+		ename = kmalloc(len + 1, GFP_NOFS);
+		if (!ename) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+		if (len <= inline_len) {
+			/* We have error in protocol buffer itself */
+			if (pdu_read(req->rc, ename, len)) {
+				err = -EFAULT;
+				goto out_free;
+
+			}
+		} else {
+			/*
+			 * Part of the data is in user space buffer.
+			 */
+			if (pdu_read(req->rc, ename, inline_len)) {
+				err = -EFAULT;
+				goto out_free;
+
+			}
+			if (kern_buf) {
+				memcpy(ename + inline_len, uidata,
+				       len - inline_len);
+			} else {
+				err = copy_from_user(ename + inline_len,
+						     uidata, len - inline_len);
+				if (err) {
+					err = -EFAULT;
+					goto out_free;
+				}
+			}
+		}
+		ename[len] = 0;
+		if (p9_is_proto_dotu(c)) {
+			/* For dotu we also have error code */
+			err = p9pdu_readf(req->rc,
+					  c->proto_version, "d", &ecode);
+			if (err)
+				goto out_free;
+			err = -ecode;
+		}
+		if (!err || !IS_ERR_VALUE(err)) {
+			err = p9_errstr2errno(ename, strlen(ename));
+
+			P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+				   -ecode, ename);
+		}
+		kfree(ename);
+	} else {
+		err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+		err = -ecode;
+
+		P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+	}
+	return err;
+
+out_free:
+	kfree(ename);
+out_err:
+	P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+	return err;
+}
+
 static struct p9_req_t *
 p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
@@ -579,23 +663,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 	return 0;
 }
 
-/**
- * p9_client_rpc - issue a request and wait for a response
- * @c: client session
- * @type: type of request
- * @fmt: protocol format string (see protocol.c)
- *
- * Returns request structure (which client must free using p9_free_req)
- */
-
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
+					      int8_t type, int req_size,
+					      const char *fmt, va_list ap)
 {
-	va_list ap;
 	int tag, err;
 	struct p9_req_t *req;
-	unsigned long flags;
-	int sigpending;
 
 	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -607,12 +680,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
 		return ERR_PTR(-EIO);
 
-	if (signal_pending(current)) {
-		sigpending = 1;
-		clear_thread_flag(TIF_SIGPENDING);
-	} else
-		sigpending = 0;
-
 	tag = P9_NOTAG;
 	if (type != P9_TVERSION) {
 		tag = p9_idpool_get(c->tagpool);
@@ -620,18 +687,51 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 			return ERR_PTR(-ENOMEM);
 	}
 
-	req = p9_tag_alloc(c, tag);
+	req = p9_tag_alloc(c, tag, req_size);
 	if (IS_ERR(req))
 		return req;
 
 	/* marshall the data */
 	p9pdu_prepare(req->tc, tag, type);
-	va_start(ap, fmt);
 	err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
-	va_end(ap);
 	if (err)
 		goto reterr;
-	p9pdu_finalize(req->tc);
+	p9pdu_finalize(c, req->tc);
+	trace_9p_client_req(c, type, tag);
+	return req;
+reterr:
+	p9_free_req(c, req);
+	return ERR_PTR(err);
+}
+
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+{
+	va_list ap;
+	int sigpending, err;
+	unsigned long flags;
+	struct p9_req_t *req;
+
+	va_start(ap, fmt);
+	req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+	va_end(ap);
+	if (IS_ERR(req))
+		return req;
+
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	} else
+		sigpending = 0;
 
 	err = c->trans_mod->request(c, req);
 	if (err < 0) {
@@ -639,18 +739,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 			c->status = Disconnected;
 		goto reterr;
 	}
-
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+	/* Wait for the response */
 	err = wait_event_interruptible(*req->wq,
 				       req->status >= REQ_STATUS_RCVD);
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
-		   req->wq, tag, err);
 
 	if (req->status == REQ_STATUS_ERROR) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
 		err = req->t_err;
 	}
-
 	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
 		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
 		sigpending = 1;
@@ -663,25 +759,102 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 		if (req->status == REQ_STATUS_RCVD)
 			err = 0;
 	}
-
 	if (sigpending) {
 		spin_lock_irqsave(&current->sighand->siglock, flags);
 		recalc_sigpending();
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
-
 	if (err < 0)
 		goto reterr;
 
 	err = p9_check_errors(c, req);
-	if (!err) {
-		P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+	trace_9p_client_res(c, type, req->rc->tag, err);
+	if (!err)
+		return req;
+reterr:
+	p9_free_req(c, req);
+	return ERR_PTR(err);
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @hdrlen: read header size; this is the size of the response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+					 char *uidata, char *uodata,
+					 int inlen, int olen, int in_hdrlen,
+					 int kern_buf, const char *fmt, ...)
+{
+	va_list ap;
+	int sigpending, err;
+	unsigned long flags;
+	struct p9_req_t *req;
+
+	va_start(ap, fmt);
+	/*
+	 * We allocate an inline protocol buffer of only 4k bytes.
+	 * The actual content is passed in zero-copy fashion.
+	 */
+	req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+	va_end(ap);
+	if (IS_ERR(req))
 		return req;
+
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	} else
+		sigpending = 0;
+
+	/* If we are called with KERNEL_DS force kern_buf */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		kern_buf = 1;
+
+	err = c->trans_mod->zc_request(c, req, uidata, uodata,
+				       inlen, olen, in_hdrlen, kern_buf);
+	if (err < 0) {
+		if (err == -EIO)
+			c->status = Disconnected;
+		goto reterr;
+	}
+	if (req->status == REQ_STATUS_ERROR) {
+		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+		err = req->t_err;
+	}
+	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+
+		if (c->trans_mod->cancel(c, req))
+			p9_client_flush(c, req);
+
+		/* if we received the response anyway, don't signal error */
+		if (req->status == REQ_STATUS_RCVD)
+			err = 0;
+	}
+	if (sigpending) {
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
+	if (err < 0)
+		goto reterr;
 
+	err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+	trace_9p_client_res(c, type, req->rc->tag, err);
+	if (!err)
+		return req;
 reterr:
-	P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
-			err);
 	p9_free_req(c, req);
 	return ERR_PTR(err);
 }
@@ -769,7 +942,7 @@ static int p9_client_version(struct p9_client *c)
 	err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
 	if (err) {
 		P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(c, req->rc);
 		goto error;
 	}
 
@@ -906,15 +1079,14 @@ EXPORT_SYMBOL(p9_client_begin_disconnect);
 struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 	char *uname, u32 n_uname, char *aname)
 {
-	int err;
+	int err = 0;
 	struct p9_req_t *req;
 	struct p9_fid *fid;
 	struct p9_qid qid;
 
-	P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
-		afid ? afid->fid : -1, uname, aname);
-	err = 0;
 
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+		   afid ? afid->fid : -1, uname, aname);
 	fid = p9_fid_create(clnt);
 	if (IS_ERR(fid)) {
 		err = PTR_ERR(fid);
@@ -931,7 +1103,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -991,7 +1163,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1058,7 +1230,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1101,7 +1273,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1146,7 +1318,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1185,7 +1357,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1330,13 +1502,15 @@ int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 	       u32 count)
 {
-	int err, rsize;
-	struct p9_client *clnt;
-	struct p9_req_t *req;
 	char *dataptr;
+	int kernel_buf = 0;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+	int err, rsize, non_zc = 0;
+
 
-	P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
-		   (long long unsigned) offset, count);
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+		   fid->fid, (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -1348,13 +1522,24 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		rsize = count;
 
 	/* Don't bother zerocopy for small IO (< 1024) */
-	if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
-		req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
-				    rsize, data, udata);
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		char *indata;
+		if (data) {
+			kernel_buf = 1;
+			indata = data;
+		} else
+			indata = (char *)udata;
+		/*
+		 * response header len is 11
+		 * PDU Header(7) + IO Size (4)
+		 */
+		req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
+				       11, kernel_buf, "dqd", fid->fid,
+				       offset, rsize);
 	} else {
+		non_zc = 1;
 		req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
 				    rsize);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -1363,14 +1548,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
-	P9_DUMP_PKT(1, req->rc);
 
-	if (!req->tc->pbuf_size) {
+	if (non_zc) {
 		if (data) {
 			memmove(data, dataptr, count);
 		} else {
@@ -1396,6 +1580,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		u64 offset, u32 count)
 {
 	int err, rsize;
+	int kernel_buf = 0;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 
@@ -1411,19 +1596,24 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 	if (count < rsize)
 		rsize = count;
 
-	/* Don't bother zerocopy form small IO (< 1024) */
-	if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-			P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
-		req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
-				    rsize, data, udata);
+	/* Don't bother zerocopy for small IO (< 1024) */
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		char *odata;
+		if (data) {
+			kernel_buf = 1;
+			odata = data;
+		} else
+			odata = (char *)udata;
+		req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
+				       P9_ZC_HDR_SZ, kernel_buf, "dqd",
+				       fid->fid, offset, rsize);
 	} else {
-
 		if (data)
 			req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
 					    offset, rsize, data);
 		else
 			req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
 					    offset, rsize, udata);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -1432,7 +1622,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
@@ -1472,7 +1662,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1523,7 +1713,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1671,7 +1861,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
 			  &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
 			  &sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1778,7 +1968,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
 	}
 	err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1824,7 +2014,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
 
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 {
-	int err, rsize;
+	int err, rsize, non_zc = 0;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1842,13 +2032,18 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 	if (count < rsize)
 		rsize = count;
 
-	if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
-	    P9_TRANS_PREF_PAYLOAD_SEP) {
-		req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
-				    offset, rsize, data);
+	/* Don't bother zerocopy for small IO (< 1024) */
+	if (clnt->trans_mod->zc_request && rsize > 1024) {
+		/*
+		 * response header len is 11
+		 * PDU Header(7) + IO Size (4)
+		 */
+		req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
+				       11, 1, "dqd", fid->fid, offset, rsize);
 	} else {
+		non_zc = 1;
 		req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
 				    offset, rsize);
 	}
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -1857,13 +2052,13 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
-	if (!req->tc->pbuf_size && data)
+	if (non_zc)
 		memmove(data, dataptr, count);
 
 	p9_free_req(clnt, req);
@@ -1894,7 +2089,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1925,7 +2120,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1960,7 +2155,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1993,7 +2188,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
 			  &glock->start, &glock->length, &glock->proc_id,
 			  &glock->client_id);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -2021,7 +2216,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
 	if (err) {
-		P9_DUMP_PKT(1, req->rc);
+		trace_9p_protocol_dump(clnt, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
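The literal 11 passed to p9_client_zc_rpc() in the read and readdir paths above, and the 7 + 2 in p9_check_zc_errors(), both fall out of the 9P wire format; worked out:

	size[4] + type[1] + tag[2] = 7 bytes	(header common to every PDU)
	7 + count[4]               = 11 bytes	(RREAD/RREADDIR, before the payload)
	7 + strlen[2]              = 9 bytes	(RERROR, before the message text)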
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index df58375ea6b3..55e10a96c902 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -37,40 +37,11 @@
 #include <net/9p/client.h>
 #include "protocol.h"
 
+#include <trace/events/9p.h>
+
 static int
 p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 
-#ifdef CONFIG_NET_9P_DEBUG
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-	int len = pdu->size;
-
-	if ((p9_debug_level & P9_DEBUG_VPKT) != P9_DEBUG_VPKT) {
-		if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) {
-			if (len > 32)
-				len = 32;
-		} else {
-			/* shouldn't happen */
-			return;
-		}
-	}
-
-	if (way)
-		print_hex_dump_bytes("[9P] ", DUMP_PREFIX_OFFSET, pdu->sdata,
-				     len);
-	else
-		print_hex_dump_bytes("]9P[ ", DUMP_PREFIX_OFFSET, pdu->sdata,
-				     len);
-}
-#else
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-}
-#endif
-EXPORT_SYMBOL(p9pdu_dump);
-
 void p9stat_free(struct p9_wstat *stbuf)
 {
 	kfree(stbuf->name);
@@ -81,7 +52,7 @@ void p9stat_free(struct p9_wstat *stbuf)
81} 52}
82EXPORT_SYMBOL(p9stat_free); 53EXPORT_SYMBOL(p9stat_free);
83 54
84static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) 55size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
85{ 56{
86 size_t len = min(pdu->size - pdu->offset, size); 57 size_t len = min(pdu->size - pdu->offset, size);
87 memcpy(data, &pdu->sdata[pdu->offset], len); 58 memcpy(data, &pdu->sdata[pdu->offset], len);
@@ -108,26 +79,6 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
108 return size - len; 79 return size - len;
109} 80}
110 81
111static size_t
112pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
113 size_t size)
114{
115 BUG_ON(pdu->size > P9_IOHDRSZ);
116 pdu->pubuf = (char __user *)udata;
117 pdu->pkbuf = (char *)kdata;
118 pdu->pbuf_size = size;
119 return 0;
120}
121
122static size_t
123pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
124{
125 BUG_ON(pdu->size > P9_READDIRHDRSZ);
126 pdu->pkbuf = (char *)kdata;
127 pdu->pbuf_size = size;
128 return 0;
129}
130
131/* 82/*
132 b - int8_t 83 b - int8_t
133 w - int16_t 84 w - int16_t
@@ -459,26 +410,6 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
459 errcode = -EFAULT; 410 errcode = -EFAULT;
460 } 411 }
461 break; 412 break;
462 case 'E':{
463 int32_t cnt = va_arg(ap, int32_t);
464 const char *k = va_arg(ap, const void *);
465 const char __user *u = va_arg(ap,
466 const void __user *);
467 errcode = p9pdu_writef(pdu, proto_version, "d",
468 cnt);
469 if (!errcode && pdu_write_urw(pdu, k, u, cnt))
470 errcode = -EFAULT;
471 }
472 break;
473 case 'F':{
474 int32_t cnt = va_arg(ap, int32_t);
475 const char *k = va_arg(ap, const void *);
476 errcode = p9pdu_writef(pdu, proto_version, "d",
477 cnt);
478 if (!errcode && pdu_write_readdir(pdu, k, cnt))
479 errcode = -EFAULT;
480 }
481 break;
482 case 'U':{ 413 case 'U':{
483 int32_t count = va_arg(ap, int32_t); 414 int32_t count = va_arg(ap, int32_t);
484 const char __user *udata = 415 const char __user *udata =
@@ -591,7 +522,7 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
591 return ret; 522 return ret;
592} 523}
593 524
594int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version) 525int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
595{ 526{
596 struct p9_fcall fake_pdu; 527 struct p9_fcall fake_pdu;
597 int ret; 528 int ret;
@@ -601,10 +532,10 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
601 fake_pdu.sdata = buf; 532 fake_pdu.sdata = buf;
602 fake_pdu.offset = 0; 533 fake_pdu.offset = 0;
603 534
604 ret = p9pdu_readf(&fake_pdu, proto_version, "S", st); 535 ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
605 if (ret) { 536 if (ret) {
606 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); 537 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
607 P9_DUMP_PKT(0, &fake_pdu); 538 trace_9p_protocol_dump(clnt, &fake_pdu);
608 } 539 }
609 540
610 return ret; 541 return ret;
@@ -617,7 +548,7 @@ int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
617 return p9pdu_writef(pdu, 0, "dbw", 0, type, tag); 548 return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
618} 549}
619 550
620int p9pdu_finalize(struct p9_fcall *pdu) 551int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
621{ 552{
622 int size = pdu->size; 553 int size = pdu->size;
623 int err; 554 int err;
@@ -626,7 +557,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
626 err = p9pdu_writef(pdu, 0, "d", size); 557 err = p9pdu_writef(pdu, 0, "d", size);
627 pdu->size = size; 558 pdu->size = size;
628 559
629 P9_DUMP_PKT(0, pdu); 560 trace_9p_protocol_dump(clnt, pdu);
630 P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size, 561 P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
631 pdu->id, pdu->tag); 562 pdu->id, pdu->tag);
632 563
@@ -637,14 +568,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
637{ 568{
638 pdu->offset = 0; 569 pdu->offset = 0;
639 pdu->size = 0; 570 pdu->size = 0;
640 pdu->private = NULL;
641 pdu->pubuf = NULL;
642 pdu->pkbuf = NULL;
643 pdu->pbuf_size = 0;
644} 571}
645 572
646int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, 573int p9dirent_read(struct p9_client *clnt, char *buf, int len,
647 int proto_version) 574 struct p9_dirent *dirent)
648{ 575{
649 struct p9_fcall fake_pdu; 576 struct p9_fcall fake_pdu;
650 int ret; 577 int ret;
@@ -655,11 +582,11 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
655 fake_pdu.sdata = buf; 582 fake_pdu.sdata = buf;
656 fake_pdu.offset = 0; 583 fake_pdu.offset = 0;
657 584
658 ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid, 585 ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
659 &dirent->d_off, &dirent->d_type, &nameptr); 586 &dirent->d_off, &dirent->d_type, &nameptr);
660 if (ret) { 587 if (ret) {
661 P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret); 588 P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
662 P9_DUMP_PKT(1, &fake_pdu); 589 trace_9p_protocol_dump(clnt, &fake_pdu);
663 goto out; 590 goto out;
664 } 591 }
665 592
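p9stat_read() and p9dirent_read() keep using the "fake PDU" technique: a flat buffer is wrapped in a p9_fcall whose sdata/size/offset describe the buffer, so the normal p9pdu_readf() machinery can parse it in place. The only change here is that the client is passed in, so proto_version comes from it and failures can be traced. A minimal user-space analogue of that cursor-over-buffer technique, with all names invented for the sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A pdu-like cursor over a flat buffer, mirroring pdu_read() above. */
struct fake_pdu {
	size_t size, offset;
	const unsigned char *sdata;
};

static size_t pdu_read(struct fake_pdu *pdu, void *data, size_t size)
{
	size_t avail = pdu->size - pdu->offset;
	size_t len = avail < size ? avail : size;

	memcpy(data, pdu->sdata + pdu->offset, len);
	pdu->offset += len;
	return size - len;	/* non-zero return means a short read */
}

int main(void)
{
	unsigned char buf[] = { 0x2a, 0x00, 0x01, 0x00, 0x00, 0x00 };
	struct fake_pdu pdu = { sizeof(buf), 0, buf };
	uint16_t tag;
	uint32_t val;

	/* 9p wire data is little-endian; a little-endian host is assumed */
	pdu_read(&pdu, &tag, sizeof(tag));
	pdu_read(&pdu, &val, sizeof(val));
	printf("tag=%u val=%u\n", tag, val);	/* tag=42 val=1 */
	return 0;
}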
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index 2431c0f38d56..2cc525fa49fa 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -29,6 +29,6 @@ int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
29 va_list ap); 29 va_list ap);
30int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); 30int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); 31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
32int p9pdu_finalize(struct p9_fcall *pdu); 32int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
33void p9pdu_dump(int, struct p9_fcall *);
34void p9pdu_reset(struct p9_fcall *pdu); 33void p9pdu_reset(struct p9_fcall *pdu);
34size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9a70ebdec56e..de8df957867d 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -21,30 +21,25 @@
21 21
22/** 22/**
23 * p9_release_req_pages - Release pages after the transaction. 23 * p9_release_req_pages - Release pages after the transaction.
24 * @*private: PDU's private page of struct trans_rpage_info
25 */ 24 */
26void 25void p9_release_pages(struct page **pages, int nr_pages)
27p9_release_req_pages(struct trans_rpage_info *rpinfo)
28{ 26{
29 int i = 0; 27 int i = 0;
30 28 while (pages[i] && nr_pages--) {
31 while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) { 29 put_page(pages[i]);
32 put_page(rpinfo->rp_data[i]);
33 i++; 30 i++;
34 } 31 }
35} 32}
36EXPORT_SYMBOL(p9_release_req_pages); 33EXPORT_SYMBOL(p9_release_pages);
37 34
38/** 35/**
39 * p9_nr_pages - Return number of pages needed to accommodate the payload. 36 * p9_nr_pages - Return number of pages needed to accommodate the payload.
40 */ 37 */
41int 38int p9_nr_pages(char *data, int len)
42p9_nr_pages(struct p9_req_t *req)
43{ 39{
44 unsigned long start_page, end_page; 40 unsigned long start_page, end_page;
45 start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT; 41 start_page = (unsigned long)data >> PAGE_SHIFT;
46 end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size + 42 end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
47 PAGE_SIZE - 1) >> PAGE_SHIFT;
48 return end_page - start_page; 43 return end_page - start_page;
49} 44}
50EXPORT_SYMBOL(p9_nr_pages); 45EXPORT_SYMBOL(p9_nr_pages);
@@ -58,35 +53,17 @@ EXPORT_SYMBOL(p9_nr_pages);
58 * @nr_pages: number of pages to accommodate the payload 53 * @nr_pages: number of pages to accommodate the payload
59 * @rw: Indicates if the pages are for read or write. 54 * @rw: Indicates if the pages are for read or write.
60 */ 55 */
61int
62p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
63 int nr_pages, u8 rw)
64{
65 uint32_t first_page_bytes = 0;
66 int32_t pdata_mapped_pages;
67 struct trans_rpage_info *rpinfo;
68
69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
70 56
71 if (*pdata_off) 57int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), 58{
73 req->tc->pbuf_size); 59 int nr_mapped_pages;
74 60
75 rpinfo = req->tc->private; 61 nr_mapped_pages = get_user_pages_fast((unsigned long)data,
76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, 62 *nr_pages, write, pages);
77 nr_pages, rw, &rpinfo->rp_data[0]); 63 if (nr_mapped_pages <= 0)
78 if (pdata_mapped_pages <= 0) 64 return nr_mapped_pages;
79 return pdata_mapped_pages;
80 65
81 rpinfo->rp_nr_pages = pdata_mapped_pages; 66 *nr_pages = nr_mapped_pages;
82 if (*pdata_off) {
83 *pdata_len = first_page_bytes;
84 *pdata_len += min((req->tc->pbuf_size - *pdata_len),
85 ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
86 } else {
87 *pdata_len = min(req->tc->pbuf_size,
88 (size_t)pdata_mapped_pages << PAGE_SHIFT);
89 }
90 return 0; 67 return 0;
91} 68}
92EXPORT_SYMBOL(p9_payload_gup); 69EXPORT_SYMBOL(p9_payload_gup);
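p9_nr_pages() now takes a plain (buffer, length) pair and returns how many pages the buffer touches, using the usual round-down/round-up page arithmetic. A small stand-alone sketch of that calculation, with a PAGE_SHIFT of 12 assumed purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* User-space restatement of the new p9_nr_pages(): count the pages
 * the range [data, data + len) touches. */
static int nr_pages(unsigned long data, int len)
{
	unsigned long start_page = data >> PAGE_SHIFT;
	unsigned long end_page = (data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return end_page - start_page;
}

int main(void)
{
	/* a 100-byte buffer straddling a page boundary touches 2 pages */
	printf("%d\n", nr_pages(PAGE_SIZE - 50, 100));	/* prints 2 */
	return 0;
}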
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 76309223bb02..173bb550a9eb 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,21 +12,6 @@
12 * 12 *
13 */ 13 */
14 14
15/* TRUE if it is user context */ 15void p9_release_pages(struct page **, int);
16#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS)) 16int p9_payload_gup(char *, int *, struct page **, int);
17 17int p9_nr_pages(char *, int);
18/**
19 * struct trans_rpage_info - To store mapped page information in PDU.
20 * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
21 * @rp_nr_pages: Number of mapped pages
22 * @rp_data: Array of page pointers
23 */
24struct trans_rpage_info {
25 u8 rp_alloc;
26 int rp_nr_pages;
27 struct page *rp_data[0];
28};
29
30void p9_release_req_pages(struct trans_rpage_info *);
31int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
32int p9_nr_pages(struct p9_req_t *);
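With this change p9_payload_gup() is reduced to a thin wrapper around get_user_pages_fast(); the budget of pinned pages (vp_pinned versus chan->p9_max_pages) is enforced by the caller in trans_virtio.c, which sleeps until earlier zero-copy requests release their pages. A rough pthreads analogue of that counter-plus-waitqueue pattern, with every name invented for the sketch (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define MAX_PINNED 8	/* stand-in for chan->p9_max_pages */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pinned;	/* stand-in for the vp_pinned atomic */

/* wait until we are below the cap, then take nr pages; like the kernel
 * check, one request may overshoot the cap once admitted */
static void pin_pages(int nr)
{
	pthread_mutex_lock(&lock);
	while (pinned >= MAX_PINNED)
		pthread_cond_wait(&cond, &lock);
	pinned += nr;
	pthread_mutex_unlock(&lock);
}

/* release pages and wake any waiter, like the err_out path */
static void unpin_pages(int nr)
{
	pthread_mutex_lock(&lock);
	pinned -= nr;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pin_pages(4);
	unpin_pages(4);
	printf("pinned=%d\n", pinned);	/* prints 0 */
	return 0;
}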
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e317583fcc73..32aa9834229c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -150,12 +150,10 @@ static void req_done(struct virtqueue *vq)
150 while (1) { 150 while (1) {
151 spin_lock_irqsave(&chan->lock, flags); 151 spin_lock_irqsave(&chan->lock, flags);
152 rc = virtqueue_get_buf(chan->vq, &len); 152 rc = virtqueue_get_buf(chan->vq, &len);
153
154 if (rc == NULL) { 153 if (rc == NULL) {
155 spin_unlock_irqrestore(&chan->lock, flags); 154 spin_unlock_irqrestore(&chan->lock, flags);
156 break; 155 break;
157 } 156 }
158
159 chan->ring_bufs_avail = 1; 157 chan->ring_bufs_avail = 1;
160 spin_unlock_irqrestore(&chan->lock, flags); 158 spin_unlock_irqrestore(&chan->lock, flags);
161 /* Wakeup if anyone waiting for VirtIO ring space. */ 159 /* Wakeup if anyone waiting for VirtIO ring space. */
@@ -163,17 +161,6 @@ static void req_done(struct virtqueue *vq)
163 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); 161 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
164 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); 162 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
165 req = p9_tag_lookup(chan->client, rc->tag); 163 req = p9_tag_lookup(chan->client, rc->tag);
166 if (req->tc->private) {
167 struct trans_rpage_info *rp = req->tc->private;
168 int p = rp->rp_nr_pages;
169 /*Release pages */
170 p9_release_req_pages(rp);
171 atomic_sub(p, &vp_pinned);
172 wake_up(&vp_wq);
173 if (rp->rp_alloc)
174 kfree(rp);
175 req->tc->private = NULL;
176 }
177 req->status = REQ_STATUS_RCVD; 164 req->status = REQ_STATUS_RCVD;
178 p9_client_cb(chan->client, req); 165 p9_client_cb(chan->client, req);
179 } 166 }
@@ -193,9 +180,8 @@ static void req_done(struct virtqueue *vq)
193 * 180 *
194 */ 181 */
195 182
196static int 183static int pack_sg_list(struct scatterlist *sg, int start,
197pack_sg_list(struct scatterlist *sg, int start, int limit, char *data, 184 int limit, char *data, int count)
198 int count)
199{ 185{
200 int s; 186 int s;
201 int index = start; 187 int index = start;
@@ -224,31 +210,36 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
224 * this takes a list of pages. 210 * this takes a list of pages.
225 * @sg: scatter/gather list to pack into 211 * @sg: scatter/gather list to pack into
226 * @start: which segment of the sg_list to start at 212 * @start: which segment of the sg_list to start at
227 * @pdata_off: Offset into the first page
228 * @**pdata: a list of pages to add into sg. 213 * @**pdata: a list of pages to add into sg.
214 * @nr_pages: number of pages to pack into the scatter/gather list
215 * @data: data to pack into scatter/gather list
229 * @count: amount of data to pack into the scatter/gather list 216 * @count: amount of data to pack into the scatter/gather list
230 */ 217 */
231static int 218static int
232pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off, 219pack_sg_list_p(struct scatterlist *sg, int start, int limit,
233 struct page **pdata, int count) 220 struct page **pdata, int nr_pages, char *data, int count)
234{ 221{
235 int s; 222 int i = 0, s;
236 int i = 0; 223 int data_off;
237 int index = start; 224 int index = start;
238 225
239 if (pdata_off) { 226 BUG_ON(nr_pages > (limit - start));
240 s = min((int)(PAGE_SIZE - pdata_off), count); 227 /*
241 sg_set_page(&sg[index++], pdata[i++], s, pdata_off); 228 * if the first page doesn't start at a
242 count -= s; 229 * page boundary, find the offset
243 } 230 */
244 231 data_off = offset_in_page(data);
245 while (count) { 232 while (nr_pages) {
246 BUG_ON(index > limit); 233 s = rest_of_page(data);
247 s = min((int)PAGE_SIZE, count); 234 if (s > count)
248 sg_set_page(&sg[index++], pdata[i++], s, 0); 235 s = count;
236 sg_set_page(&sg[index++], pdata[i++], s, data_off);
237 data_off = 0;
238 data += s;
249 count -= s; 239 count -= s;
240 nr_pages--;
250 } 241 }
251 return index-start; 242 return index - start;
252} 243}
253 244
254/** 245/**
@@ -261,114 +252,166 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
261static int 252static int
262p9_virtio_request(struct p9_client *client, struct p9_req_t *req) 253p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
263{ 254{
264 int in, out, inp, outp; 255 int err;
265 struct virtio_chan *chan = client->trans; 256 int in, out;
266 unsigned long flags; 257 unsigned long flags;
267 size_t pdata_off = 0; 258 struct virtio_chan *chan = client->trans;
268 struct trans_rpage_info *rpinfo = NULL;
269 int err, pdata_len = 0;
270 259
271 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n"); 260 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
272 261
273 req->status = REQ_STATUS_SENT; 262 req->status = REQ_STATUS_SENT;
263req_retry:
264 spin_lock_irqsave(&chan->lock, flags);
265
266 /* Handle out VirtIO ring buffers */
267 out = pack_sg_list(chan->sg, 0,
268 VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
274 269
275 if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) { 270 in = pack_sg_list(chan->sg, out,
276 int nr_pages = p9_nr_pages(req); 271 VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
277 int rpinfo_size = sizeof(struct trans_rpage_info) +
278 sizeof(struct page *) * nr_pages;
279 272
280 if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { 273 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
281 err = wait_event_interruptible(vp_wq, 274 if (err < 0) {
282 atomic_read(&vp_pinned) < chan->p9_max_pages); 275 if (err == -ENOSPC) {
276 chan->ring_bufs_avail = 0;
277 spin_unlock_irqrestore(&chan->lock, flags);
278 err = wait_event_interruptible(*chan->vc_wq,
279 chan->ring_bufs_avail);
283 if (err == -ERESTARTSYS) 280 if (err == -ERESTARTSYS)
284 return err; 281 return err;
285 P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
286 }
287 282
288 if (rpinfo_size <= (req->tc->capacity - req->tc->size)) { 283 P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
289 /* We can use sdata */ 284 goto req_retry;
290 req->tc->private = req->tc->sdata + req->tc->size;
291 rpinfo = (struct trans_rpage_info *)req->tc->private;
292 rpinfo->rp_alloc = 0;
293 } else { 285 } else {
294 req->tc->private = kmalloc(rpinfo_size, GFP_NOFS); 286 spin_unlock_irqrestore(&chan->lock, flags);
295 if (!req->tc->private) { 287 P9_DPRINTK(P9_DEBUG_TRANS,
296 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: " 288 "9p debug: "
297 "private kmalloc returned NULL"); 289 "virtio rpc add_buf returned failure");
298 return -ENOMEM; 290 return -EIO;
299 }
300 rpinfo = (struct trans_rpage_info *)req->tc->private;
301 rpinfo->rp_alloc = 1;
302 } 291 }
292 }
293 virtqueue_kick(chan->vq);
294 spin_unlock_irqrestore(&chan->lock, flags);
303 295
304 err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages, 296 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
305 req->tc->id == P9_TREAD ? 1 : 0); 297 return 0;
306 if (err < 0) { 298}
307 if (rpinfo->rp_alloc) 299
308 kfree(rpinfo); 300static int p9_get_mapped_pages(struct virtio_chan *chan,
301 struct page **pages, char *data,
302 int nr_pages, int write, int kern_buf)
303{
304 int err;
305 if (!kern_buf) {
306 /*
307 * We allow only p9_max_pages pinned. We wait here for
308 * other zc requests to finish.
309 */
310 if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
311 err = wait_event_interruptible(vp_wq,
312 (atomic_read(&vp_pinned) < chan->p9_max_pages));
313 if (err == -ERESTARTSYS)
314 return err;
315 }
316 err = p9_payload_gup(data, &nr_pages, pages, write);
317 if (err < 0)
309 return err; 318 return err;
310 } else { 319 atomic_add(nr_pages, &vp_pinned);
311 atomic_add(rpinfo->rp_nr_pages, &vp_pinned); 320 } else {
321 /* kernel buffer, no need to pin pages */
322 int s, index = 0;
323 int count = nr_pages;
324 while (nr_pages) {
325 s = rest_of_page(data);
326 pages[index++] = virt_to_page(data);
327 data += s;
328 nr_pages--;
312 } 329 }
330 nr_pages = count;
313 } 331 }
332 return nr_pages;
333}
314 334
315req_retry_pinned: 335/**
316 spin_lock_irqsave(&chan->lock, flags); 336 * p9_virtio_zc_request - issue a zero copy request
337 * @client: client instance issuing the request
338 * @req: request to be issued
339 * @uidata: user buffer that should be used for zero copy read
340 * @uodata: user buffer that should be used for zero copy write
341 * @inlen: read buffer size
342 * @outlen: write buffer size
343 * @in_hdr_len: read header size. This is the size of the response protocol data
344 *
345 */
346static int
347p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
348 char *uidata, char *uodata, int inlen,
349 int outlen, int in_hdr_len, int kern_buf)
350{
351 int in, out, err;
352 unsigned long flags;
353 int in_nr_pages = 0, out_nr_pages = 0;
354 struct page **in_pages = NULL, **out_pages = NULL;
355 struct virtio_chan *chan = client->trans;
317 356
318 /* Handle out VirtIO ring buffers */ 357 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
319 out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata, 358
320 req->tc->size); 359 if (uodata) {
321 360 out_nr_pages = p9_nr_pages(uodata, outlen);
322 if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) { 361 out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
323 /* We have additional write payload buffer to take care */ 362 GFP_NOFS);
324 if (req->tc->pubuf && P9_IS_USER_CONTEXT) { 363 if (!out_pages) {
325 outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, 364 err = -ENOMEM;
326 pdata_off, rpinfo->rp_data, pdata_len); 365 goto err_out;
327 } else { 366 }
328 char *pbuf; 367 out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
329 if (req->tc->pubuf) 368 out_nr_pages, 0, kern_buf);
330 pbuf = (__force char *) req->tc->pubuf; 369 if (out_nr_pages < 0) {
331 else 370 err = out_nr_pages;
332 pbuf = req->tc->pkbuf; 371 kfree(out_pages);
333 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, 372 out_pages = NULL;
334 req->tc->pbuf_size); 373 goto err_out;
335 } 374 }
336 out += outp;
337 } 375 }
338 376 if (uidata) {
339 /* Handle in VirtIO ring buffers */ 377 in_nr_pages = p9_nr_pages(uidata, inlen);
340 if (req->tc->pbuf_size && 378 in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
341 ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) { 379 GFP_NOFS);
342 /* 380 if (!in_pages) {
343 * Take care of additional Read payload. 381 err = -ENOMEM;
344 * 11 is the read/write header = PDU Header(7) + IO Size (4). 382 goto err_out;
345 * Arrange in such a way that server places header in the 383 }
346 * alloced memory and payload onto the user buffer. 384 in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
347 */ 385 in_nr_pages, 1, kern_buf);
348 inp = pack_sg_list(chan->sg, out, 386 if (in_nr_pages < 0) {
349 VIRTQUEUE_NUM, req->rc->sdata, 11); 387 err = in_nr_pages;
350 /* 388 kfree(in_pages);
351 * Running executables in the filesystem may result in 389 in_pages = NULL;
352 * a read request with kernel buffer as opposed to user buffer. 390 goto err_out;
353 */
354 if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
355 in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
356 pdata_off, rpinfo->rp_data, pdata_len);
357 } else {
358 char *pbuf;
359 if (req->tc->pubuf)
360 pbuf = (__force char *) req->tc->pubuf;
361 else
362 pbuf = req->tc->pkbuf;
363
364 in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
365 pbuf, req->tc->pbuf_size);
366 } 391 }
367 in += inp;
368 } else {
369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
370 req->rc->sdata, req->rc->capacity);
371 } 392 }
393 req->status = REQ_STATUS_SENT;
394req_retry_pinned:
395 spin_lock_irqsave(&chan->lock, flags);
396 /* out data */
397 out = pack_sg_list(chan->sg, 0,
398 VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
399
400 if (out_pages)
401 out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
402 out_pages, out_nr_pages, uodata, outlen);
403 /*
404 * Take care of in data.
405 * For example, TREAD has 11:
406 * 11 is the read/write header = PDU Header(7) + IO Size (4).
407 * Arrange in such a way that the server places the header in the
408 * alloced memory and the payload onto the user buffer.
409 */
410 in = pack_sg_list(chan->sg, out,
411 VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
412 if (in_pages)
413 in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
414 in_pages, in_nr_pages, uidata, inlen);
372 415
373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); 416 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
374 if (err < 0) { 417 if (err < 0) {
@@ -376,28 +419,45 @@ req_retry_pinned:
376 chan->ring_bufs_avail = 0; 419 chan->ring_bufs_avail = 0;
377 spin_unlock_irqrestore(&chan->lock, flags); 420 spin_unlock_irqrestore(&chan->lock, flags);
378 err = wait_event_interruptible(*chan->vc_wq, 421 err = wait_event_interruptible(*chan->vc_wq,
379 chan->ring_bufs_avail); 422 chan->ring_bufs_avail);
380 if (err == -ERESTARTSYS) 423 if (err == -ERESTARTSYS)
381 return err; 424 goto err_out;
382 425
383 P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n"); 426 P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
384 goto req_retry_pinned; 427 goto req_retry_pinned;
385 } else { 428 } else {
386 spin_unlock_irqrestore(&chan->lock, flags); 429 spin_unlock_irqrestore(&chan->lock, flags);
387 P9_DPRINTK(P9_DEBUG_TRANS, 430 P9_DPRINTK(P9_DEBUG_TRANS,
388 "9p debug: " 431 "9p debug: "
389 "virtio rpc add_buf returned failure"); 432 "virtio rpc add_buf returned failure");
390 if (rpinfo && rpinfo->rp_alloc) 433 err = -EIO;
391 kfree(rpinfo); 434 goto err_out;
392 return -EIO;
393 } 435 }
394 } 436 }
395
396 virtqueue_kick(chan->vq); 437 virtqueue_kick(chan->vq);
397 spin_unlock_irqrestore(&chan->lock, flags); 438 spin_unlock_irqrestore(&chan->lock, flags);
398
399 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); 439 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
400 return 0; 440 err = wait_event_interruptible(*req->wq,
441 req->status >= REQ_STATUS_RCVD);
442 /*
443 * Non-kernel buffers are pinned; unpin them.
444 */
445err_out:
446 if (!kern_buf) {
447 if (in_pages) {
448 p9_release_pages(in_pages, in_nr_pages);
449 atomic_sub(in_nr_pages, &vp_pinned);
450 }
451 if (out_pages) {
452 p9_release_pages(out_pages, out_nr_pages);
453 atomic_sub(out_nr_pages, &vp_pinned);
454 }
455 /* wakeup anybody waiting for slots to pin pages */
456 wake_up(&vp_wq);
457 }
458 kfree(in_pages);
459 kfree(out_pages);
460 return err;
401} 461}
402 462
403static ssize_t p9_mount_tag_show(struct device *dev, 463static ssize_t p9_mount_tag_show(struct device *dev,
@@ -591,8 +651,8 @@ static struct p9_trans_module p9_virtio_trans = {
591 .create = p9_virtio_create, 651 .create = p9_virtio_create,
592 .close = p9_virtio_close, 652 .close = p9_virtio_close,
593 .request = p9_virtio_request, 653 .request = p9_virtio_request,
654 .zc_request = p9_virtio_zc_request,
594 .cancel = p9_virtio_cancel, 655 .cancel = p9_virtio_cancel,
595
596 /* 656 /*
597 * We leave one entry for input and one entry for response 657 * We leave one entry for input and one entry for response
598 * headers. We also skip one more entry to accommodate, address 658 * headers. We also skip one more entry to accommodate, address
@@ -600,7 +660,6 @@ static struct p9_trans_module p9_virtio_trans = {
600 * page in zero copy. 660 * page in zero copy.
601 */ 661 */
602 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), 662 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
603 .pref = P9_TRANS_PREF_PAYLOAD_SEP,
604 .def = 0, 663 .def = 0,
605 .owner = THIS_MODULE, 664 .owner = THIS_MODULE,
606}; 665};
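The new pack_sg_list_p() slices a possibly misaligned buffer into per-page scatter/gather segments: only the first segment may start at a non-zero offset (offset_in_page()), and each segment length is capped by rest_of_page() and the remaining byte count. The walk can be sketched in user space like this, where emit_segment() stands in for sg_set_page() and the loop counts bytes instead of pages, which is equivalent here:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))
#define rest_of_page(p)		(PAGE_SIZE - offset_in_page(p))

/* stand-in for sg_set_page(): just report the segment */
static void emit_segment(int idx, unsigned long off, int len)
{
	printf("sg[%d]: page offset %lu, length %d\n", idx, off, len);
}

int main(void)
{
	unsigned long data = 4000;	/* 96 bytes short of a page boundary */
	int count = 5000, index = 0;
	unsigned long data_off = offset_in_page(data);

	while (count) {
		int s = (int)rest_of_page(data);

		if (s > count)
			s = count;
		emit_segment(index++, data_off, s);
		data_off = 0;	/* only the first segment can be misaligned */
		data += s;
		count -= s;
	}
	/* prints three segments: 96 + 4096 + 808 = 5000 bytes */
	return 0;
}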
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 1acc69576df8..173a2e82f486 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -39,6 +39,7 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h> 41#include <linux/seq_file.h>
42#include <linux/export.h>
42 43
43int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME; 44int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
44int sysctl_aarp_tick_time = AARP_TICK_TIME; 45int sysctl_aarp_tick_time = AARP_TICK_TIME;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 6ef0e761e5de..b5b1a221c242 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -14,6 +14,7 @@
14#include <net/net_namespace.h> 14#include <net/net_namespace.h>
15#include <net/sock.h> 15#include <net/sock.h>
16#include <linux/atalk.h> 16#include <linux/atalk.h>
17#include <linux/export.h>
17 18
18 19
19static __inline__ struct atalk_iface *atalk_get_interface_idx(loff_t pos) 20static __inline__ struct atalk_iface *atalk_get_interface_idx(loff_t pos)
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1fe7c35e8d1..bfa9ab93eda5 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -951,13 +951,12 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
951 /* checksum stuff in frags */ 951 /* checksum stuff in frags */
952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
953 int end; 953 int end;
954 954 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
955 WARN_ON(start > offset + len); 955 WARN_ON(start > offset + len);
956 956
957 end = start + skb_shinfo(skb)->frags[i].size; 957 end = start + skb_frag_size(frag);
958 if ((copy = end - offset) > 0) { 958 if ((copy = end - offset) > 0) {
959 u8 *vaddr; 959 u8 *vaddr;
960 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
961 960
962 if (copy > len) 961 if (copy > len)
963 copy = len; 962 copy = len;
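The appletalk checksum loop above is converted to read fragment sizes through the skb_frag_size() accessor instead of poking frags[i].size directly, which insulates callers from future skb_frag_t layout changes. The pattern, reduced to a toy example; the struct and helper below are stand-ins for illustration, not the kernel types:

#include <stdio.h>

/* toy stand-in for skb_frag_t; not the kernel layout */
struct frag { unsigned int size; };

/* toy stand-in for skb_frag_size(): callers stop touching ->size */
static unsigned int frag_size(const struct frag *f)
{
	return f->size;
}

int main(void)
{
	struct frag frags[2] = { { 1500 }, { 600 } };
	unsigned int total = 0;
	int i;

	for (i = 0; i < 2; i++)
		total += frag_size(&frags[i]);
	printf("%u\n", total);	/* 2100 */
	return 0;
}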
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 215c9fad7cdf..f1964caa0f83 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
643 .ndo_start_xmit = lec_start_xmit, 643 .ndo_start_xmit = lec_start_xmit,
644 .ndo_change_mtu = lec_change_mtu, 644 .ndo_change_mtu = lec_change_mtu,
645 .ndo_tx_timeout = lec_tx_timeout, 645 .ndo_tx_timeout = lec_tx_timeout,
646 .ndo_set_multicast_list = lec_set_multicast_list, 646 .ndo_set_rx_mode = lec_set_multicast_list,
647}; 647};
648 648
649static const unsigned char lec_ctrl_magic[] = { 649static const unsigned char lec_ctrl_magic[] = {
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 437ee70c5e62..3a734919c36c 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/export.h>
14#include <net/sock.h> /* for sock_no_* */ 15#include <net/sock.h> /* for sock_no_* */
15 16
16#include "resources.h" /* devs and vccs */ 17#include "resources.h" /* devs and vccs */
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 754ee4791d96..1281049c135f 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -20,6 +20,7 @@
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <net/sock.h> /* for sock_no_* */ 21#include <net/sock.h> /* for sock_no_* */
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/export.h>
23 24
24#include "resources.h" 25#include "resources.h"
25#include "common.h" /* common for PVCs and SVCs */ 26#include "common.h" /* common for PVCs and SVCs */
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a1690845dc6e..87fddab22e0f 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -38,6 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/seq_file.h> 40#include <linux/seq_file.h>
41#include <linux/export.h>
41 42
42static ax25_route *ax25_route_list; 43static ax25_route *ax25_route_list;
43static DEFINE_RWLOCK(ax25_route_lock); 44static DEFINE_RWLOCK(ax25_route_lock);
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index d349be9578f5..4c83137b5954 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -37,6 +37,7 @@
37#include <linux/stat.h> 37#include <linux/stat.h>
38#include <linux/netfilter.h> 38#include <linux/netfilter.h>
39#include <linux/sysctl.h> 39#include <linux/sysctl.h>
40#include <linux/export.h>
40#include <net/ip.h> 41#include <net/ip.h>
41#include <net/arp.h> 42#include <net/arp.h>
42 43
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 2de93d00631b..ce6861166499 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,8 +19,8 @@
19# 19#
20 20
21obj-$(CONFIG_BATMAN_ADV) += batman-adv.o 21obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
22batman-adv-y += aggregation.o
23batman-adv-y += bat_debugfs.o 22batman-adv-y += bat_debugfs.o
23batman-adv-y += bat_iv_ogm.o
24batman-adv-y += bat_sysfs.o 24batman-adv-y += bat_sysfs.o
25batman-adv-y += bitarray.o 25batman-adv-y += bitarray.o
26batman-adv-y += gateway_client.o 26batman-adv-y += gateway_client.o
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
deleted file mode 100644
index 69467fe71ff2..000000000000
--- a/net/batman-adv/aggregation.c
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "translation-table.h"
24#include "aggregation.h"
25#include "send.h"
26#include "routing.h"
27#include "hard-interface.h"
28
29/* return true if new_packet can be aggregated with forw_packet */
30static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
31 struct bat_priv *bat_priv,
32 int packet_len,
33 unsigned long send_time,
34 bool directlink,
35 const struct hard_iface *if_incoming,
36 const struct forw_packet *forw_packet)
37{
38 struct batman_packet *batman_packet =
39 (struct batman_packet *)forw_packet->skb->data;
40 int aggregated_bytes = forw_packet->packet_len + packet_len;
41 struct hard_iface *primary_if = NULL;
42 bool res = false;
43
44 /**
45 * we can aggregate the current packet to this aggregated packet
46 * if:
47 *
48 * - the send time is within our MAX_AGGREGATION_MS time
49 * - the resulting packet wont be bigger than
50 * MAX_AGGREGATION_BYTES
51 */
52
53 if (time_before(send_time, forw_packet->send_time) &&
54 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
55 forw_packet->send_time) &&
56 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
57
58 /**
59 * check aggregation compatibility
60 * -> direct link packets are broadcasted on
61 * their interface only
62 * -> aggregate packet if the current packet is
63 * a "global" packet as well as the base
64 * packet
65 */
66
67 primary_if = primary_if_get_selected(bat_priv);
68 if (!primary_if)
69 goto out;
70
71 /* packets without direct link flag and high TTL
72 * are flooded through the net */
73 if ((!directlink) &&
74 (!(batman_packet->flags & DIRECTLINK)) &&
75 (batman_packet->ttl != 1) &&
76
77 /* own packets originating non-primary
78 * interfaces leave only that interface */
79 ((!forw_packet->own) ||
80 (forw_packet->if_incoming == primary_if))) {
81 res = true;
82 goto out;
83 }
84
85 /* if the incoming packet is sent via this one
86 * interface only - we still can aggregate */
87 if ((directlink) &&
88 (new_batman_packet->ttl == 1) &&
89 (forw_packet->if_incoming == if_incoming) &&
90
91 /* packets from direct neighbors or
92 * own secondary interface packets
93 * (= secondary interface packets in general) */
94 (batman_packet->flags & DIRECTLINK ||
95 (forw_packet->own &&
96 forw_packet->if_incoming != primary_if))) {
97 res = true;
98 goto out;
99 }
100 }
101
102out:
103 if (primary_if)
104 hardif_free_ref(primary_if);
105 return res;
106}
107
108/* create a new aggregated packet and add this packet to it */
109static void new_aggregated_packet(const unsigned char *packet_buff,
110 int packet_len, unsigned long send_time,
111 bool direct_link,
112 struct hard_iface *if_incoming,
113 int own_packet)
114{
115 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
116 struct forw_packet *forw_packet_aggr;
117 unsigned char *skb_buff;
118
119 if (!atomic_inc_not_zero(&if_incoming->refcount))
120 return;
121
122 /* own packet should always be scheduled */
123 if (!own_packet) {
124 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
125 bat_dbg(DBG_BATMAN, bat_priv,
126 "batman packet queue full\n");
127 goto out;
128 }
129 }
130
131 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
132 if (!forw_packet_aggr) {
133 if (!own_packet)
134 atomic_inc(&bat_priv->batman_queue_left);
135 goto out;
136 }
137
138 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
139 (packet_len < MAX_AGGREGATION_BYTES))
140 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
141 sizeof(struct ethhdr));
142 else
143 forw_packet_aggr->skb = dev_alloc_skb(packet_len +
144 sizeof(struct ethhdr));
145
146 if (!forw_packet_aggr->skb) {
147 if (!own_packet)
148 atomic_inc(&bat_priv->batman_queue_left);
149 kfree(forw_packet_aggr);
150 goto out;
151 }
152 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
153
154 INIT_HLIST_NODE(&forw_packet_aggr->list);
155
156 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
157 forw_packet_aggr->packet_len = packet_len;
158 memcpy(skb_buff, packet_buff, packet_len);
159
160 forw_packet_aggr->own = own_packet;
161 forw_packet_aggr->if_incoming = if_incoming;
162 forw_packet_aggr->num_packets = 0;
163 forw_packet_aggr->direct_link_flags = NO_FLAGS;
164 forw_packet_aggr->send_time = send_time;
165
166 /* save packet direct link flag status */
167 if (direct_link)
168 forw_packet_aggr->direct_link_flags |= 1;
169
170 /* add new packet to packet list */
171 spin_lock_bh(&bat_priv->forw_bat_list_lock);
172 hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
173 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
174
175 /* start timer for this packet */
176 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
177 send_outstanding_bat_packet);
178 queue_delayed_work(bat_event_workqueue,
179 &forw_packet_aggr->delayed_work,
180 send_time - jiffies);
181
182 return;
183out:
184 hardif_free_ref(if_incoming);
185}
186
187/* aggregate a new packet into the existing aggregation */
188static void aggregate(struct forw_packet *forw_packet_aggr,
189 const unsigned char *packet_buff, int packet_len,
190 bool direct_link)
191{
192 unsigned char *skb_buff;
193
194 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
195 memcpy(skb_buff, packet_buff, packet_len);
196 forw_packet_aggr->packet_len += packet_len;
197 forw_packet_aggr->num_packets++;
198
199 /* save packet direct link flag status */
200 if (direct_link)
201 forw_packet_aggr->direct_link_flags |=
202 (1 << forw_packet_aggr->num_packets);
203}
204
205void add_bat_packet_to_list(struct bat_priv *bat_priv,
206 unsigned char *packet_buff, int packet_len,
207 struct hard_iface *if_incoming, int own_packet,
208 unsigned long send_time)
209{
210 /**
211 * _aggr -> pointer to the packet we want to aggregate with
212 * _pos -> pointer to the position in the queue
213 */
214 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
215 struct hlist_node *tmp_node;
216 struct batman_packet *batman_packet =
217 (struct batman_packet *)packet_buff;
218 bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
219
220 /* find position for the packet in the forward queue */
221 spin_lock_bh(&bat_priv->forw_bat_list_lock);
222 /* own packets are not to be aggregated */
223 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
224 hlist_for_each_entry(forw_packet_pos, tmp_node,
225 &bat_priv->forw_bat_list, list) {
226 if (can_aggregate_with(batman_packet,
227 bat_priv,
228 packet_len,
229 send_time,
230 direct_link,
231 if_incoming,
232 forw_packet_pos)) {
233 forw_packet_aggr = forw_packet_pos;
234 break;
235 }
236 }
237 }
238
239 /* nothing to aggregate with - either aggregation disabled or no
240 * suitable aggregation packet found */
241 if (!forw_packet_aggr) {
242 /* the following section can run without the lock */
243 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
244
245 /**
246 * if we could not aggregate this packet with one of the others
247 * we hold it back for a while, so that it might be aggregated
248 * later on
249 */
250 if ((!own_packet) &&
251 (atomic_read(&bat_priv->aggregated_ogms)))
252 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
253
254 new_aggregated_packet(packet_buff, packet_len,
255 send_time, direct_link,
256 if_incoming, own_packet);
257 } else {
258 aggregate(forw_packet_aggr,
259 packet_buff, packet_len,
260 direct_link);
261 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
262 }
263}
264
265/* unpack the aggregated packets and process them one by one */
266void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
267 unsigned char *packet_buff, int packet_len,
268 struct hard_iface *if_incoming)
269{
270 struct batman_packet *batman_packet;
271 int buff_pos = 0;
272 unsigned char *tt_buff;
273
274 batman_packet = (struct batman_packet *)packet_buff;
275
276 do {
277 /* network to host order for our 32bit seqno and the
278 orig_interval */
279 batman_packet->seqno = ntohl(batman_packet->seqno);
280 batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
281
282 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
283
284 receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
285
286 buff_pos += BAT_PACKET_LEN +
287 tt_len(batman_packet->tt_num_changes);
288
289 batman_packet = (struct batman_packet *)
290 (packet_buff + buff_pos);
291 } while (aggregated_packet(buff_pos, packet_len,
292 batman_packet->tt_num_changes));
293}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
deleted file mode 100644
index 216337bb841f..000000000000
--- a/net/batman-adv/aggregation.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
23#define _NET_BATMAN_ADV_AGGREGATION_H_
24
25#include "main.h"
26
27/* is there another aggregated packet here? */
28static inline int aggregated_packet(int buff_pos, int packet_len,
29 int tt_num_changes)
30{
31 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
32 sizeof(struct tt_change));
33
34 return (next_buff_pos <= packet_len) &&
35 (next_buff_pos <= MAX_AGGREGATION_BYTES);
36}
37
38void add_bat_packet_to_list(struct bat_priv *bat_priv,
39 unsigned char *packet_buff, int packet_len,
40 struct hard_iface *if_incoming, int own_packet,
41 unsigned long send_time);
42void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
43 unsigned char *packet_buff, int packet_len,
44 struct hard_iface *if_incoming);
45
46#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
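aggregation.{c,h} are folded into the new bat_iv_ogm.c below; the inline aggregated_packet() boundary test reappears there as bat_ogm_aggr_packet(). The test, and the do/while walk that uses it to step through back-to-back OGMs in one receive buffer, behave like this sketch; the header and TT-record sizes are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define OGM_LEN 26		/* illustrative header size only */
#define MAX_AGGREGATION_BYTES 512

/* stand-in for tt_len(): bytes of TT change records per packet */
static int tt_len(int tt_num_changes)
{
	return tt_num_changes * 12;
}

/* mirrors the boundary test: does a whole packet starting at
 * buff_pos still fit in the buffer and under the aggregate cap? */
static bool aggr_packet(int buff_pos, int packet_len, int tt_num_changes)
{
	int next_buff_pos = buff_pos + OGM_LEN + tt_len(tt_num_changes);

	return next_buff_pos <= packet_len &&
	       next_buff_pos <= MAX_AGGREGATION_BYTES;
}

int main(void)
{
	int packet_len = 3 * OGM_LEN;	/* three aggregated OGMs, no TT data */
	int buff_pos = 0;

	do {
		printf("processing OGM at offset %d\n", buff_pos);
		buff_pos += OGM_LEN + tt_len(0);
	} while (aggr_packet(buff_pos, packet_len, 0));
	return 0;
}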
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
new file mode 100644
index 000000000000..3512e251545b
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -0,0 +1,1170 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "bat_ogm.h"
24#include "translation-table.h"
25#include "ring_buffer.h"
26#include "originator.h"
27#include "routing.h"
28#include "gateway_common.h"
29#include "gateway_client.h"
30#include "hard-interface.h"
31#include "send.h"
32
33void bat_ogm_init(struct hard_iface *hard_iface)
34{
35 struct batman_ogm_packet *batman_ogm_packet;
36
37 hard_iface->packet_len = BATMAN_OGM_LEN;
38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
39
40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
41 batman_ogm_packet->packet_type = BAT_OGM;
42 batman_ogm_packet->version = COMPAT_VERSION;
43 batman_ogm_packet->flags = NO_FLAGS;
44 batman_ogm_packet->ttl = 2;
45 batman_ogm_packet->tq = TQ_MAX_VALUE;
46 batman_ogm_packet->tt_num_changes = 0;
47 batman_ogm_packet->ttvn = 0;
48}
49
50void bat_ogm_init_primary(struct hard_iface *hard_iface)
51{
52 struct batman_ogm_packet *batman_ogm_packet;
53
54 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
55 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
56 batman_ogm_packet->ttl = TTL;
57}
58
59void bat_ogm_update_mac(struct hard_iface *hard_iface)
60{
61 struct batman_ogm_packet *batman_ogm_packet;
62
63 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
64 memcpy(batman_ogm_packet->orig,
65 hard_iface->net_dev->dev_addr, ETH_ALEN);
66 memcpy(batman_ogm_packet->prev_sender,
67 hard_iface->net_dev->dev_addr, ETH_ALEN);
68}
69
70/* when do we schedule our own ogm to be sent */
71static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
72{
73 return jiffies + msecs_to_jiffies(
74 atomic_read(&bat_priv->orig_interval) -
75 JITTER + (random32() % (2*JITTER)));
76}
77
78/* when do we schedule an ogm packet to be sent */
79static unsigned long bat_ogm_fwd_send_time(void)
80{
81 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
82}
83
84/* apply hop penalty for a normal link */
85static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
86{
87 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
88 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
89}
90
91/* is there another aggregated packet here? */
92static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
93 int tt_num_changes)
94{
95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
96
97 return (next_buff_pos <= packet_len) &&
98 (next_buff_pos <= MAX_AGGREGATION_BYTES);
99}
100
101/* send a batman ogm to a given interface */
102static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
103 struct hard_iface *hard_iface)
104{
105 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
106 char *fwd_str;
107 uint8_t packet_num;
108 int16_t buff_pos;
109 struct batman_ogm_packet *batman_ogm_packet;
110 struct sk_buff *skb;
111
112 if (hard_iface->if_status != IF_ACTIVE)
113 return;
114
115 packet_num = 0;
116 buff_pos = 0;
117 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
118
119 /* adjust all flags and log packets */
120 while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
121 batman_ogm_packet->tt_num_changes)) {
122
123 /* we might have aggregated direct link packets with an
124 * ordinary base packet */
125 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
126 (forw_packet->if_incoming == hard_iface))
127 batman_ogm_packet->flags |= DIRECTLINK;
128 else
129 batman_ogm_packet->flags &= ~DIRECTLINK;
130
131 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
132 "Sending own" :
133 "Forwarding"));
134 bat_dbg(DBG_BATMAN, bat_priv,
135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
136 " IDF %s, ttvn %d) on interface %s [%pM]\n",
137 fwd_str, (packet_num > 0 ? "aggregated " : ""),
138 batman_ogm_packet->orig,
139 ntohl(batman_ogm_packet->seqno),
140 batman_ogm_packet->tq, batman_ogm_packet->ttl,
141 (batman_ogm_packet->flags & DIRECTLINK ?
142 "on" : "off"),
143 batman_ogm_packet->ttvn, hard_iface->net_dev->name,
144 hard_iface->net_dev->dev_addr);
145
146 buff_pos += BATMAN_OGM_LEN +
147 tt_len(batman_ogm_packet->tt_num_changes);
148 packet_num++;
149 batman_ogm_packet = (struct batman_ogm_packet *)
150 (forw_packet->skb->data + buff_pos);
151 }
152
153 /* create clone because function is called more than once */
154 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
155 if (skb)
156 send_skb_packet(skb, hard_iface, broadcast_addr);
157}
158
159/* send a batman ogm packet */
160void bat_ogm_emit(struct forw_packet *forw_packet)
161{
162 struct hard_iface *hard_iface;
163 struct net_device *soft_iface;
164 struct bat_priv *bat_priv;
165 struct hard_iface *primary_if = NULL;
166 struct batman_ogm_packet *batman_ogm_packet;
167 unsigned char directlink;
168
169 batman_ogm_packet = (struct batman_ogm_packet *)
170 (forw_packet->skb->data);
171 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
172
173 if (!forw_packet->if_incoming) {
174 pr_err("Error - can't forward packet: incoming iface not "
175 "specified\n");
176 goto out;
177 }
178
179 soft_iface = forw_packet->if_incoming->soft_iface;
180 bat_priv = netdev_priv(soft_iface);
181
182 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
183 goto out;
184
185 primary_if = primary_if_get_selected(bat_priv);
186 if (!primary_if)
187 goto out;
188
189 /* multihomed peer assumed */
190 /* non-primary OGMs are only broadcasted on their interface */
191 if ((directlink && (batman_ogm_packet->ttl == 1)) ||
192 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
193
194 /* FIXME: what about aggregated packets ? */
195 bat_dbg(DBG_BATMAN, bat_priv,
196 "%s packet (originator %pM, seqno %d, TTL %d) "
197 "on interface %s [%pM]\n",
198 (forw_packet->own ? "Sending own" : "Forwarding"),
199 batman_ogm_packet->orig,
200 ntohl(batman_ogm_packet->seqno),
201 batman_ogm_packet->ttl,
202 forw_packet->if_incoming->net_dev->name,
203 forw_packet->if_incoming->net_dev->dev_addr);
204
205 /* skb is only used once and then forw_packet is freed */
206 send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
207 broadcast_addr);
208 forw_packet->skb = NULL;
209
210 goto out;
211 }
212
213 /* broadcast on every interface */
214 rcu_read_lock();
215 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
216 if (hard_iface->soft_iface != soft_iface)
217 continue;
218
219 bat_ogm_send_to_if(forw_packet, hard_iface);
220 }
221 rcu_read_unlock();
222
223out:
224 if (primary_if)
225 hardif_free_ref(primary_if);
226}
227
228/* return true if new_packet can be aggregated with forw_packet */
229static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
230 *new_batman_ogm_packet,
231 struct bat_priv *bat_priv,
232 int packet_len, unsigned long send_time,
233 bool directlink,
234 const struct hard_iface *if_incoming,
235 const struct forw_packet *forw_packet)
236{
237 struct batman_ogm_packet *batman_ogm_packet;
238 int aggregated_bytes = forw_packet->packet_len + packet_len;
239 struct hard_iface *primary_if = NULL;
240 bool res = false;
241
242 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
243
244 /**
245 * we can aggregate the current packet to this aggregated packet
246 * if:
247 *
248 * - the send time is within our MAX_AGGREGATION_MS time
249 * - the resulting packet won't be bigger than
250 * MAX_AGGREGATION_BYTES
251 */
252
253 if (time_before(send_time, forw_packet->send_time) &&
254 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
255 forw_packet->send_time) &&
256 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
257
258 /**
259 * check aggregation compatibility
260 * -> direct link packets are broadcasted on
261 * their interface only
262 * -> aggregate packet if the current packet is
263 * a "global" packet as well as the base
264 * packet
265 */
266
267 primary_if = primary_if_get_selected(bat_priv);
268 if (!primary_if)
269 goto out;
270
271 /* packets without direct link flag and high TTL
272 * are flooded through the net */
273 if ((!directlink) &&
274 (!(batman_ogm_packet->flags & DIRECTLINK)) &&
275 (batman_ogm_packet->ttl != 1) &&
276
277 /* own packets originating non-primary
278 * interfaces leave only that interface */
279 ((!forw_packet->own) ||
280 (forw_packet->if_incoming == primary_if))) {
281 res = true;
282 goto out;
283 }
284
285 /* if the incoming packet is sent via this one
286 * interface only - we still can aggregate */
287 if ((directlink) &&
288 (new_batman_ogm_packet->ttl == 1) &&
289 (forw_packet->if_incoming == if_incoming) &&
290
291 /* packets from direct neighbors or
292 * own secondary interface packets
293 * (= secondary interface packets in general) */
294 (batman_ogm_packet->flags & DIRECTLINK ||
295 (forw_packet->own &&
296 forw_packet->if_incoming != primary_if))) {
297 res = true;
298 goto out;
299 }
300 }
301
302out:
303 if (primary_if)
304 hardif_free_ref(primary_if);
305 return res;
306}
307
308/* create a new aggregated packet and add this packet to it */
309static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
310 int packet_len, unsigned long send_time,
311 bool direct_link,
312 struct hard_iface *if_incoming,
313 int own_packet)
314{
315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct forw_packet *forw_packet_aggr;
317 unsigned char *skb_buff;
318
319 if (!atomic_inc_not_zero(&if_incoming->refcount))
320 return;
321
322 /* own packet should always be scheduled */
323 if (!own_packet) {
324 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
325 bat_dbg(DBG_BATMAN, bat_priv,
326 "batman packet queue full\n");
327 goto out;
328 }
329 }
330
331 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
332 if (!forw_packet_aggr) {
333 if (!own_packet)
334 atomic_inc(&bat_priv->batman_queue_left);
335 goto out;
336 }
337
338 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
339 (packet_len < MAX_AGGREGATION_BYTES))
340 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
341 sizeof(struct ethhdr));
342 else
343 forw_packet_aggr->skb = dev_alloc_skb(packet_len +
344 sizeof(struct ethhdr));
345
346 if (!forw_packet_aggr->skb) {
347 if (!own_packet)
348 atomic_inc(&bat_priv->batman_queue_left);
349 kfree(forw_packet_aggr);
350 goto out;
351 }
352 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
353
354 INIT_HLIST_NODE(&forw_packet_aggr->list);
355
356 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
357 forw_packet_aggr->packet_len = packet_len;
358 memcpy(skb_buff, packet_buff, packet_len);
359
360 forw_packet_aggr->own = own_packet;
361 forw_packet_aggr->if_incoming = if_incoming;
362 forw_packet_aggr->num_packets = 0;
363 forw_packet_aggr->direct_link_flags = NO_FLAGS;
364 forw_packet_aggr->send_time = send_time;
365
366 /* save packet direct link flag status */
367 if (direct_link)
368 forw_packet_aggr->direct_link_flags |= 1;
369
370 /* add new packet to packet list */
371 spin_lock_bh(&bat_priv->forw_bat_list_lock);
372 hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
373 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
374
375 /* start timer for this packet */
376 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
377 send_outstanding_bat_ogm_packet);
378 queue_delayed_work(bat_event_workqueue,
379 &forw_packet_aggr->delayed_work,
380 send_time - jiffies);
381
382 return;
383out:
384 hardif_free_ref(if_incoming);
385}
386
387/* aggregate a new packet into the existing ogm packet */
388static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
389 const unsigned char *packet_buff,
390 int packet_len, bool direct_link)
391{
392 unsigned char *skb_buff;
393
394 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
395 memcpy(skb_buff, packet_buff, packet_len);
396 forw_packet_aggr->packet_len += packet_len;
397 forw_packet_aggr->num_packets++;
398
399 /* save packet direct link flag status */
400 if (direct_link)
401 forw_packet_aggr->direct_link_flags |=
402 (1 << forw_packet_aggr->num_packets);
403}
404
405static void bat_ogm_queue_add(struct bat_priv *bat_priv,
406 unsigned char *packet_buff,
407 int packet_len, struct hard_iface *if_incoming,
408 int own_packet, unsigned long send_time)
409{
410 /**
411 * _aggr -> pointer to the packet we want to aggregate with
412 * _pos -> pointer to the position in the queue
413 */
414 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
415 struct hlist_node *tmp_node;
416 struct batman_ogm_packet *batman_ogm_packet;
417 bool direct_link;
418
419 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
420 direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
421
422 /* find position for the packet in the forward queue */
423 spin_lock_bh(&bat_priv->forw_bat_list_lock);
424 /* own packets are not to be aggregated */
425 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
426 hlist_for_each_entry(forw_packet_pos, tmp_node,
427 &bat_priv->forw_bat_list, list) {
428 if (bat_ogm_can_aggregate(batman_ogm_packet,
429 bat_priv, packet_len,
430 send_time, direct_link,
431 if_incoming,
432 forw_packet_pos)) {
433 forw_packet_aggr = forw_packet_pos;
434 break;
435 }
436 }
437 }
438
439 /* nothing to aggregate with - either aggregation disabled or no
440 * suitable aggregation packet found */
441 if (!forw_packet_aggr) {
442 /* the following section can run without the lock */
443 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
444
445 /**
446 * if we could not aggregate this packet with one of the others
447 * we hold it back for a while, so that it might be aggregated
448 * later on
449 */
450 if ((!own_packet) &&
451 (atomic_read(&bat_priv->aggregated_ogms)))
452 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
453
454 bat_ogm_aggregate_new(packet_buff, packet_len,
455 send_time, direct_link,
456 if_incoming, own_packet);
457 } else {
458 bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
459 direct_link);
460 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
461 }
462}
463
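The interesting case above is the hold-back: a forwarded OGM that found no aggregation partner is not scheduled at its nominal time but up to MAX_AGGREGATION_MS later, so that OGMs arriving in the meantime can still be merged into the freshly created forw_packet. A compilable restatement of just that decision, using a plain millisecond clock instead of jiffies and an assumed MAX_AGGREGATION_MS value:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_AGGREGATION_MS 100	/* assumed value, for illustration */

	/* forwarded OGMs without an aggregation partner are delayed by
	 * one aggregation window; own packets keep their send time */
	unsigned long effective_send_time_ms(unsigned long send_time,
					     bool own_packet,
					     bool aggregation_enabled,
					     bool found_aggregate)
	{
		if (!found_aggregate && !own_packet && aggregation_enabled)
			return send_time + MAX_AGGREGATION_MS;
		return send_time;
	}

	int main(void)
	{
		/* forwarded OGM, aggregation on, nothing to merge with */
		printf("%lu\n", effective_send_time_ms(1000, false, true,
						       false)); /* 1100 */
		return 0;
	}
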
464static void bat_ogm_forward(struct orig_node *orig_node,
465 const struct ethhdr *ethhdr,
466 struct batman_ogm_packet *batman_ogm_packet,
467 int directlink, struct hard_iface *if_incoming)
468{
469 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
470 struct neigh_node *router;
471 uint8_t in_tq, in_ttl, tq_avg = 0;
472 uint8_t tt_num_changes;
473
474 if (batman_ogm_packet->ttl <= 1) {
475 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
476 return;
477 }
478
479 router = orig_node_get_router(orig_node);
480
481 in_tq = batman_ogm_packet->tq;
482 in_ttl = batman_ogm_packet->ttl;
483 tt_num_changes = batman_ogm_packet->tt_num_changes;
484
485 batman_ogm_packet->ttl--;
486 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
487
488	/* rebroadcast the tq of our best ranking neighbor to make sure the
489	 * rebroadcast carries our best tq value */
490 if (router && router->tq_avg != 0) {
491
492 /* rebroadcast ogm of best ranking neighbor as is */
493 if (!compare_eth(router->addr, ethhdr->h_source)) {
494 batman_ogm_packet->tq = router->tq_avg;
495
496 if (router->last_ttl)
497 batman_ogm_packet->ttl = router->last_ttl - 1;
498 }
499
500 tq_avg = router->tq_avg;
501 }
502
503 if (router)
504 neigh_node_free_ref(router);
505
506 /* apply hop penalty */
507 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
508
509 bat_dbg(DBG_BATMAN, bat_priv,
510 "Forwarding packet: tq_orig: %i, tq_avg: %i, "
511 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
512 in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
513 batman_ogm_packet->ttl);
514
515 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
516 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
517
518	/* switch off the primaries first hop flag when forwarding */
519 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
520 if (directlink)
521 batman_ogm_packet->flags |= DIRECTLINK;
522 else
523 batman_ogm_packet->flags &= ~DIRECTLINK;
524
525 bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
526 BATMAN_OGM_LEN + tt_len(tt_num_changes),
527 if_incoming, 0, bat_ogm_fwd_send_time());
528}
529
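hop_penalty() is not part of this file; in this version it lives in main.h and scales the TQ down by the per-hop penalty configured in bat_priv before the OGM is re-queued. A user-space sketch, assuming that formula:

	#include <stdint.h>
	#include <stdio.h>

	#define TQ_MAX_VALUE 255	/* as defined in packet.h */

	/* assumed shape of hop_penalty() from main.h: scale tq down by
	 * the penalty read from bat_priv->hop_penalty */
	int hop_penalty(uint8_t tq, int penalty)
	{
		return (tq * (TQ_MAX_VALUE - penalty)) / TQ_MAX_VALUE;
	}

	int main(void)
	{
		/* a penalty of 10, for example, turns a perfect 255
		 * into 245 on every hop */
		printf("%d\n", hop_penalty(255, 10));
		return 0;
	}
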
530void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
531{
532 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
533 struct batman_ogm_packet *batman_ogm_packet;
534 struct hard_iface *primary_if;
535 int vis_server;
536
537 vis_server = atomic_read(&bat_priv->vis_mode);
538 primary_if = primary_if_get_selected(bat_priv);
539
540 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
541
542 /* change sequence number to network order */
543 batman_ogm_packet->seqno =
544 htonl((uint32_t)atomic_read(&hard_iface->seqno));
545
546 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
547 batman_ogm_packet->tt_crc = htons((uint16_t)
548 atomic_read(&bat_priv->tt_crc));
549 if (tt_num_changes >= 0)
550 batman_ogm_packet->tt_num_changes = tt_num_changes;
551
552 if (vis_server == VIS_TYPE_SERVER_SYNC)
553 batman_ogm_packet->flags |= VIS_SERVER;
554 else
555 batman_ogm_packet->flags &= ~VIS_SERVER;
556
557 if ((hard_iface == primary_if) &&
558 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
559 batman_ogm_packet->gw_flags =
560 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
561 else
562 batman_ogm_packet->gw_flags = NO_FLAGS;
563
564 atomic_inc(&hard_iface->seqno);
565
566 slide_own_bcast_window(hard_iface);
567 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
568 hard_iface->packet_len, hard_iface, 1,
569 bat_ogm_emit_send_time(bat_priv));
570
571 if (primary_if)
572 hardif_free_ref(primary_if);
573}
574
575static void bat_ogm_orig_update(struct bat_priv *bat_priv,
576 struct orig_node *orig_node,
577 const struct ethhdr *ethhdr,
578 const struct batman_ogm_packet
579 *batman_ogm_packet,
580 struct hard_iface *if_incoming,
581 const unsigned char *tt_buff, int is_duplicate)
582{
583 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
584 struct neigh_node *router = NULL;
585 struct orig_node *orig_node_tmp;
586 struct hlist_node *node;
587 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
588
589 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
590 "Searching and updating originator entry of received packet\n");
591
592 rcu_read_lock();
593 hlist_for_each_entry_rcu(tmp_neigh_node, node,
594 &orig_node->neigh_list, list) {
595 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
596 (tmp_neigh_node->if_incoming == if_incoming) &&
597 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
598 if (neigh_node)
599 neigh_node_free_ref(neigh_node);
600 neigh_node = tmp_neigh_node;
601 continue;
602 }
603
604 if (is_duplicate)
605 continue;
606
607 spin_lock_bh(&tmp_neigh_node->tq_lock);
608 ring_buffer_set(tmp_neigh_node->tq_recv,
609 &tmp_neigh_node->tq_index, 0);
610 tmp_neigh_node->tq_avg =
611 ring_buffer_avg(tmp_neigh_node->tq_recv);
612 spin_unlock_bh(&tmp_neigh_node->tq_lock);
613 }
614
615 if (!neigh_node) {
616 struct orig_node *orig_tmp;
617
618 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
619 if (!orig_tmp)
620 goto unlock;
621
622 neigh_node = create_neighbor(orig_node, orig_tmp,
623 ethhdr->h_source, if_incoming);
624
625 orig_node_free_ref(orig_tmp);
626 if (!neigh_node)
627 goto unlock;
628 } else
629 bat_dbg(DBG_BATMAN, bat_priv,
630 "Updating existing last-hop neighbor of originator\n");
631
632 rcu_read_unlock();
633
634 orig_node->flags = batman_ogm_packet->flags;
635 neigh_node->last_valid = jiffies;
636
637 spin_lock_bh(&neigh_node->tq_lock);
638 ring_buffer_set(neigh_node->tq_recv,
639 &neigh_node->tq_index,
640 batman_ogm_packet->tq);
641 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
642 spin_unlock_bh(&neigh_node->tq_lock);
643
644 if (!is_duplicate) {
645 orig_node->last_ttl = batman_ogm_packet->ttl;
646 neigh_node->last_ttl = batman_ogm_packet->ttl;
647 }
648
649 bonding_candidate_add(orig_node, neigh_node);
650
651	/* if this neighbor is already our next hop there is nothing
652	 * to change */
653 router = orig_node_get_router(orig_node);
654 if (router == neigh_node)
655 goto update_tt;
656
657 /* if this neighbor does not offer a better TQ we won't consider it */
658 if (router && (router->tq_avg > neigh_node->tq_avg))
659 goto update_tt;
660
661	/* if the TQ is the same and the link is not more symmetric we
662	 * won't consider it either */
663 if (router && (neigh_node->tq_avg == router->tq_avg)) {
664 orig_node_tmp = router->orig_node;
665 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
666 bcast_own_sum_orig =
667 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
668 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
669
670 orig_node_tmp = neigh_node->orig_node;
671 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
672 bcast_own_sum_neigh =
673 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
674 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
675
676 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
677 goto update_tt;
678 }
679
680 update_route(bat_priv, orig_node, neigh_node);
681
682update_tt:
683	/* translation table changes only have to be checked if the OGM
684	 * has been sent through a primary interface */
685 if (((batman_ogm_packet->orig != ethhdr->h_source) &&
686 (batman_ogm_packet->ttl > 2)) ||
687 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
688 tt_update_orig(bat_priv, orig_node, tt_buff,
689 batman_ogm_packet->tt_num_changes,
690 batman_ogm_packet->ttvn,
691 batman_ogm_packet->tt_crc);
692
693 if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
694 gw_node_update(bat_priv, orig_node,
695 batman_ogm_packet->gw_flags);
696
697 orig_node->gw_flags = batman_ogm_packet->gw_flags;
698
699 /* restart gateway selection if fast or late switching was enabled */
700 if ((orig_node->gw_flags) &&
701 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
702 (atomic_read(&bat_priv->gw_sel_class) > 2))
703 gw_check_election(bat_priv, orig_node);
704
705 goto out;
706
707unlock:
708 rcu_read_unlock();
709out:
710 if (neigh_node)
711 neigh_node_free_ref(neigh_node);
712 if (router)
713 neigh_node_free_ref(router);
714}
715
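Condensed, bat_ogm_orig_update() only switches the route when the new neighbor wins on tq_avg, or ties on tq_avg but echoed more of our own broadcasts on the incoming interface (i.e. offers the more symmetric link). A compilable restatement of that decision, leaving out the no-current-router and router == neigh_node shortcuts:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* mirrors the route switch conditions of bat_ogm_orig_update() */
	bool should_switch_route(uint8_t router_tq_avg, uint8_t neigh_tq_avg,
				 uint8_t bcast_own_sum_router,
				 uint8_t bcast_own_sum_neigh)
	{
		if (router_tq_avg > neigh_tq_avg)
			return false;	/* current router has better TQ */

		if (router_tq_avg == neigh_tq_avg &&
		    bcast_own_sum_router >= bcast_own_sum_neigh)
			return false;	/* tie, link not more symmetric */

		return true;
	}

	int main(void)
	{
		printf("%d\n", should_switch_route(200, 200, 40, 48)); /* 1 */
		printf("%d\n", should_switch_route(200, 200, 48, 40)); /* 0 */
		return 0;
	}
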
716static int bat_ogm_calc_tq(struct orig_node *orig_node,
717 struct orig_node *orig_neigh_node,
718 struct batman_ogm_packet *batman_ogm_packet,
719 struct hard_iface *if_incoming)
720{
721 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
722 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
723 struct hlist_node *node;
724 uint8_t total_count;
725 uint8_t orig_eq_count, neigh_rq_count, tq_own;
726 int tq_asym_penalty, ret = 0;
727
728 /* find corresponding one hop neighbor */
729 rcu_read_lock();
730 hlist_for_each_entry_rcu(tmp_neigh_node, node,
731 &orig_neigh_node->neigh_list, list) {
732
733 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
734 continue;
735
736 if (tmp_neigh_node->if_incoming != if_incoming)
737 continue;
738
739 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
740 continue;
741
742 neigh_node = tmp_neigh_node;
743 break;
744 }
745 rcu_read_unlock();
746
747 if (!neigh_node)
748 neigh_node = create_neighbor(orig_neigh_node,
749 orig_neigh_node,
750 orig_neigh_node->orig,
751 if_incoming);
752
753 if (!neigh_node)
754 goto out;
755
756	/* if orig_node is a direct neighbor, update neigh_node->last_valid */
757 if (orig_node == orig_neigh_node)
758 neigh_node->last_valid = jiffies;
759
760 orig_node->last_valid = jiffies;
761
762 /* find packet count of corresponding one hop neighbor */
763 spin_lock_bh(&orig_node->ogm_cnt_lock);
764 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
765 neigh_rq_count = neigh_node->real_packet_count;
766 spin_unlock_bh(&orig_node->ogm_cnt_lock);
767
768	/* make sure we do not get a value bigger than 100 % */
769 total_count = (orig_eq_count > neigh_rq_count ?
770 neigh_rq_count : orig_eq_count);
771
772	/* if we have too few packets (too little data) we set tq_own to zero */
773	/* if we receive too few packets it is not considered bidirectional */
774 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
775 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
776 tq_own = 0;
777 else
778 /* neigh_node->real_packet_count is never zero as we
779 * only purge old information when getting new
780 * information */
781 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
782
783	/*
784	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE: this barely
785	 * affects nearly-symmetric links, but punishes asymmetric
786	 * links more heavily. The result is a value between 0 and
787	 * TQ_MAX_VALUE.
788	 */
789 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
790 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
791 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
792 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
793 (TQ_LOCAL_WINDOW_SIZE *
794 TQ_LOCAL_WINDOW_SIZE *
795 TQ_LOCAL_WINDOW_SIZE);
796
797 batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
798 * tq_asym_penalty) /
799 (TQ_MAX_VALUE * TQ_MAX_VALUE));
800
801 bat_dbg(DBG_BATMAN, bat_priv,
802 "bidirectional: "
803 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
804 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
805 "total tq: %3i\n",
806 orig_node->orig, orig_neigh_node->orig, total_count,
807 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
808
809 /* if link has the minimum required transmission quality
810 * consider it bidirectional */
811 if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
812 ret = 1;
813
814out:
815 if (neigh_node)
816 neigh_node_free_ref(neigh_node);
817 return ret;
818}
819
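For concreteness, a stand-alone restatement of the TQ arithmetic above, using the usual constants (TQ_MAX_VALUE = 255 from packet.h, TQ_LOCAL_WINDOW_SIZE = 64 from main.h) and leaving out the minimum-packet cutoff. With a perfect incoming tq of 255, 48 of our own OGMs echoed back, and 48 of 64 neighbor OGMs received, the integer math yields 188:

	#include <stdint.h>
	#include <stdio.h>

	#define TQ_MAX_VALUE 255
	#define TQ_LOCAL_WINDOW_SIZE 64

	/* mirrors the integer math of bat_ogm_calc_tq() for one sample */
	uint8_t calc_tq(uint8_t tq_in, uint8_t orig_eq_count,
			uint8_t neigh_rq_count)
	{
		uint8_t total_count = orig_eq_count > neigh_rq_count ?
				      neigh_rq_count : orig_eq_count;
		int tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
		int d = TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
		int tq_asym_penalty = TQ_MAX_VALUE -
			(TQ_MAX_VALUE * d * d * d) /
			(TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
			 TQ_LOCAL_WINDOW_SIZE);

		return (tq_in * tq_own * tq_asym_penalty) /
		       (TQ_MAX_VALUE * TQ_MAX_VALUE);
	}

	int main(void)
	{
		/* 48 own OGMs echoed back, 48 of 64 neighbor OGMs received */
		printf("tq = %u\n", calc_tq(255, 48, 48)); /* tq = 188 */
		return 0;
	}
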
820/* processes a batman packet for all interfaces, adjusts the sequence number and
821 * finds out whether it is a duplicate.
822 * returns:
823 * 1 the packet is a duplicate
824 * 0 the packet has not yet been received
825 * -1 the packet is old and has been received while the seqno window
826 * was protected. Caller should drop it.
827 */
828static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
829 const struct batman_ogm_packet
830 *batman_ogm_packet,
831 const struct hard_iface *if_incoming)
832{
833 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
834 struct orig_node *orig_node;
835 struct neigh_node *tmp_neigh_node;
836 struct hlist_node *node;
837 int is_duplicate = 0;
838 int32_t seq_diff;
839 int need_update = 0;
840 int set_mark, ret = -1;
841
842 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
843 if (!orig_node)
844 return 0;
845
846 spin_lock_bh(&orig_node->ogm_cnt_lock);
847 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
848
849	/* signal the caller that the packet is to be dropped. */
850 if (window_protected(bat_priv, seq_diff,
851 &orig_node->batman_seqno_reset))
852 goto out;
853
854 rcu_read_lock();
855 hlist_for_each_entry_rcu(tmp_neigh_node, node,
856 &orig_node->neigh_list, list) {
857
858 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
859 orig_node->last_real_seqno,
860 batman_ogm_packet->seqno);
861
862 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
863 (tmp_neigh_node->if_incoming == if_incoming))
864 set_mark = 1;
865 else
866 set_mark = 0;
867
868 /* if the window moved, set the update flag. */
869 need_update |= bit_get_packet(bat_priv,
870 tmp_neigh_node->real_bits,
871 seq_diff, set_mark);
872
873 tmp_neigh_node->real_packet_count =
874 bit_packet_count(tmp_neigh_node->real_bits);
875 }
876 rcu_read_unlock();
877
878 if (need_update) {
879 bat_dbg(DBG_BATMAN, bat_priv,
880 "updating last_seqno: old %d, new %d\n",
881 orig_node->last_real_seqno, batman_ogm_packet->seqno);
882 orig_node->last_real_seqno = batman_ogm_packet->seqno;
883 }
884
885 ret = is_duplicate;
886
887out:
888 spin_unlock_bh(&orig_node->ogm_cnt_lock);
889 orig_node_free_ref(orig_node);
890 return ret;
891}
892
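Note how seq_diff is computed above: both sequence numbers are 32 bit unsigned, and the difference is stored in an int32_t, so a small forward step across the 2^32 wrap point still yields a small positive difference (relying, as the kernel does, on two's complement conversion). A short demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t last_real_seqno = 0xfffffff0u; /* about to wrap */
		uint32_t seqno = 0x00000010u;		/* already wrapped */

		/* same expression as in bat_ogm_update_seqnos(): the
		 * unsigned subtraction wraps and the signed result is
		 * the small distance across the wrap point */
		int32_t seq_diff = seqno - last_real_seqno;

		printf("seq_diff = %d\n", seq_diff);	/* seq_diff = 32 */
		return 0;
	}
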
893static void bat_ogm_process(const struct ethhdr *ethhdr,
894 struct batman_ogm_packet *batman_ogm_packet,
895 const unsigned char *tt_buff,
896 struct hard_iface *if_incoming)
897{
898 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
899 struct hard_iface *hard_iface;
900 struct orig_node *orig_neigh_node, *orig_node;
901 struct neigh_node *router = NULL, *router_router = NULL;
902 struct neigh_node *orig_neigh_router = NULL;
903 int has_directlink_flag;
904 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
905 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
906 int is_duplicate;
907 uint32_t if_incoming_seqno;
908
909	/* Silently drop the packet when it is not a correct batman
910	 * packet.
911 *
912 * This might happen if a packet is padded (e.g. Ethernet has a
913	 * minimum frame length of 64 bytes) and the aggregation interprets
914 * it as an additional length.
915 *
916 * TODO: A more sane solution would be to have a bit in the
917 * batman_ogm_packet to detect whether the packet is the last
918	 * packet in an aggregation. Here we rely on the padding being
919	 * zero (or at least not 0x01, the BAT_OGM packet type)
920 */
921 if (batman_ogm_packet->packet_type != BAT_OGM)
922 return;
923
924 /* could be changed by schedule_own_packet() */
925 if_incoming_seqno = atomic_read(&if_incoming->seqno);
926
927 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
928
929 is_single_hop_neigh = (compare_eth(ethhdr->h_source,
930 batman_ogm_packet->orig) ? 1 : 0);
931
932 bat_dbg(DBG_BATMAN, bat_priv,
933 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
934 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
935 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
936 ethhdr->h_source, if_incoming->net_dev->name,
937 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
938 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
939 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
940 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
941 batman_ogm_packet->ttl, batman_ogm_packet->version,
942 has_directlink_flag);
943
944 rcu_read_lock();
945 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
946 if (hard_iface->if_status != IF_ACTIVE)
947 continue;
948
949 if (hard_iface->soft_iface != if_incoming->soft_iface)
950 continue;
951
952 if (compare_eth(ethhdr->h_source,
953 hard_iface->net_dev->dev_addr))
954 is_my_addr = 1;
955
956 if (compare_eth(batman_ogm_packet->orig,
957 hard_iface->net_dev->dev_addr))
958 is_my_orig = 1;
959
960 if (compare_eth(batman_ogm_packet->prev_sender,
961 hard_iface->net_dev->dev_addr))
962 is_my_oldorig = 1;
963
964 if (is_broadcast_ether_addr(ethhdr->h_source))
965 is_broadcast = 1;
966 }
967 rcu_read_unlock();
968
969 if (batman_ogm_packet->version != COMPAT_VERSION) {
970 bat_dbg(DBG_BATMAN, bat_priv,
971 "Drop packet: incompatible batman version (%i)\n",
972 batman_ogm_packet->version);
973 return;
974 }
975
976 if (is_my_addr) {
977 bat_dbg(DBG_BATMAN, bat_priv,
978 "Drop packet: received my own broadcast (sender: %pM"
979 ")\n",
980 ethhdr->h_source);
981 return;
982 }
983
984 if (is_broadcast) {
985 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
986 "ignoring all packets with broadcast source addr (sender: %pM"
987 ")\n", ethhdr->h_source);
988 return;
989 }
990
991 if (is_my_orig) {
992 unsigned long *word;
993 int offset;
994
995 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
996 if (!orig_neigh_node)
997 return;
998
999 /* neighbor has to indicate direct link and it has to
1000 * come via the corresponding interface */
1001 /* save packet seqno for bidirectional check */
1002 if (has_directlink_flag &&
1003 compare_eth(if_incoming->net_dev->dev_addr,
1004 batman_ogm_packet->orig)) {
1005 offset = if_incoming->if_num * NUM_WORDS;
1006
1007 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1008 word = &(orig_neigh_node->bcast_own[offset]);
1009 bit_mark(word,
1010 if_incoming_seqno -
1011 batman_ogm_packet->seqno - 2);
1012 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
1013 bit_packet_count(word);
1014 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1015 }
1016
1017 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
1018 "originator packet from myself (via neighbor)\n");
1019 orig_node_free_ref(orig_neigh_node);
1020 return;
1021 }
1022
1023 if (is_my_oldorig) {
1024 bat_dbg(DBG_BATMAN, bat_priv,
1025		"Drop packet: ignoring all rebroadcast echoes (sender: "
1026 "%pM)\n", ethhdr->h_source);
1027 return;
1028 }
1029
1030 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
1031 if (!orig_node)
1032 return;
1033
1034 is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
1035 if_incoming);
1036
1037 if (is_duplicate == -1) {
1038 bat_dbg(DBG_BATMAN, bat_priv,
1039 "Drop packet: packet within seqno protection time "
1040 "(sender: %pM)\n", ethhdr->h_source);
1041 goto out;
1042 }
1043
1044 if (batman_ogm_packet->tq == 0) {
1045 bat_dbg(DBG_BATMAN, bat_priv,
1046 "Drop packet: originator packet with tq equal 0\n");
1047 goto out;
1048 }
1049
1050 router = orig_node_get_router(orig_node);
1051 if (router)
1052 router_router = orig_node_get_router(router->orig_node);
1053
1054 /* avoid temporary routing loops */
1055 if (router && router_router &&
1056 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
1057 !(compare_eth(batman_ogm_packet->orig,
1058 batman_ogm_packet->prev_sender)) &&
1059 (compare_eth(router->addr, router_router->addr))) {
1060 bat_dbg(DBG_BATMAN, bat_priv,
1061 "Drop packet: ignoring all rebroadcast packets that "
1062 "may make me loop (sender: %pM)\n", ethhdr->h_source);
1063 goto out;
1064 }
1065
1066	/* if the sender is a direct neighbor the sender mac equals
1067	 * the originator mac */
1068 orig_neigh_node = (is_single_hop_neigh ?
1069 orig_node :
1070 get_orig_node(bat_priv, ethhdr->h_source));
1071 if (!orig_neigh_node)
1072 goto out;
1073
1074 orig_neigh_router = orig_node_get_router(orig_neigh_node);
1075
1076 /* drop packet if sender is not a direct neighbor and if we
1077 * don't route towards it */
1078 if (!is_single_hop_neigh && (!orig_neigh_router)) {
1079 bat_dbg(DBG_BATMAN, bat_priv,
1080 "Drop packet: OGM via unknown neighbor!\n");
1081 goto out_neigh;
1082 }
1083
1084 is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
1085 batman_ogm_packet, if_incoming);
1086
1087 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
1088
1089 /* update ranking if it is not a duplicate or has the same
1090 * seqno and similar ttl as the non-duplicate */
1091 if (is_bidirectional &&
1092 (!is_duplicate ||
1093 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
1094 (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
1095 bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
1096 batman_ogm_packet, if_incoming,
1097 tt_buff, is_duplicate);
1098
1099 /* is single hop (direct) neighbor */
1100 if (is_single_hop_neigh) {
1101
1102 /* mark direct link on incoming interface */
1103 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1104 1, if_incoming);
1105
1106 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
1107 "rebroadcast neighbor packet with direct link flag\n");
1108 goto out_neigh;
1109 }
1110
1111 /* multihop originator */
1112 if (!is_bidirectional) {
1113 bat_dbg(DBG_BATMAN, bat_priv,
1114 "Drop packet: not received via bidirectional link\n");
1115 goto out_neigh;
1116 }
1117
1118 if (is_duplicate) {
1119 bat_dbg(DBG_BATMAN, bat_priv,
1120 "Drop packet: duplicate packet received\n");
1121 goto out_neigh;
1122 }
1123
1124 bat_dbg(DBG_BATMAN, bat_priv,
1125 "Forwarding packet: rebroadcast originator packet\n");
1126 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
1127
1128out_neigh:
1129 if ((orig_neigh_node) && (!is_single_hop_neigh))
1130 orig_node_free_ref(orig_neigh_node);
1131out:
1132 if (router)
1133 neigh_node_free_ref(router);
1134 if (router_router)
1135 neigh_node_free_ref(router_router);
1136 if (orig_neigh_router)
1137 neigh_node_free_ref(orig_neigh_router);
1138
1139 orig_node_free_ref(orig_node);
1140}
1141
1142void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
1143 int packet_len, struct hard_iface *if_incoming)
1144{
1145 struct batman_ogm_packet *batman_ogm_packet;
1146 int buff_pos = 0;
1147 unsigned char *tt_buff;
1148
1149 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
1150
1151 /* unpack the aggregated packets and process them one by one */
1152 do {
1153		/* network to host order for our 32 bit seqno and the
1154		   tt_crc */
1155 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1156 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1157
1158 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
1159
1160 bat_ogm_process(ethhdr, batman_ogm_packet,
1161 tt_buff, if_incoming);
1162
1163 buff_pos += BATMAN_OGM_LEN +
1164 tt_len(batman_ogm_packet->tt_num_changes);
1165
1166 batman_ogm_packet = (struct batman_ogm_packet *)
1167 (packet_buff + buff_pos);
1168 } while (bat_ogm_aggr_packet(buff_pos, packet_len,
1169 batman_ogm_packet->tt_num_changes));
1170}
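The loop bound comes from bat_ogm_aggr_packet(), defined earlier in this file, which checks that another full OGM header (plus its tt change list) still fits inside packet_len. A user-space model of the walk, with sizes assumed purely for illustration (BATMAN_OGM_LEN is sizeof(struct batman_ogm_packet); tt_len() scales tt_num_changes by the size of one tt_change entry):

	#include <stdio.h>

	#define BATMAN_OGM_LEN 26	/* assumed header size */
	#define TT_CHANGE_LEN 8		/* assumed per-change size */

	int main(void)
	{
		/* an aggregate of three OGMs carrying 0, 2 and 1 tt changes */
		int tt_num_changes[] = { 0, 2, 1 };
		int packet_len = 3 * BATMAN_OGM_LEN + 3 * TT_CHANGE_LEN;
		int buff_pos = 0, i = 0;

		/* mirrors the walk in bat_ogm_receive(): every OGM header
		 * is followed by its own variable-length tt change list */
		while (buff_pos + BATMAN_OGM_LEN <= packet_len) {
			printf("OGM %d starts at offset %d\n", i, buff_pos);
			buff_pos += BATMAN_OGM_LEN +
				    tt_num_changes[i] * TT_CHANGE_LEN;
			i++;
		}
		return 0;
	}
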
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_ogm.h
new file mode 100644
index 000000000000..69329c107e28
--- /dev/null
+++ b/net/batman-adv/bat_ogm.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_OGM_H_
23#define _NET_BATMAN_ADV_OGM_H_
24
25#include "main.h"
26
27void bat_ogm_init(struct hard_iface *hard_iface);
28void bat_ogm_init_primary(struct hard_iface *hard_iface);
29void bat_ogm_update_mac(struct hard_iface *hard_iface);
30void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
31void bat_ogm_emit(struct forw_packet *forw_packet);
32void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
33 int packet_len, struct hard_iface *if_incoming);
34
35#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd15deba60a1..b8a7414c3571 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -380,6 +380,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
380BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); 380BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
381BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); 381BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); 382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
383BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
383static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); 384static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
384static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); 385static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
385BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); 386BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
@@ -396,6 +397,7 @@ static struct bat_attribute *mesh_attrs[] = {
396 &bat_attr_aggregated_ogms, 397 &bat_attr_aggregated_ogms,
397 &bat_attr_bonding, 398 &bat_attr_bonding,
398 &bat_attr_fragmentation, 399 &bat_attr_fragmentation,
400 &bat_attr_ap_isolation,
399 &bat_attr_vis_mode, 401 &bat_attr_vis_mode,
400 &bat_attr_gw_mode, 402 &bat_attr_gw_mode,
401 &bat_attr_orig_interval, 403 &bat_attr_orig_interval,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index c1f4bfc09cc3..0be9ff346fa0 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -97,12 +97,12 @@ static void bit_shift(unsigned long *seq_bits, int32_t n)
97 (seq_bits[i - word_num - 1] >> 97 (seq_bits[i - word_num - 1] >>
98 (WORD_BIT_SIZE-word_offset)); 98 (WORD_BIT_SIZE-word_offset));
99 /* and the upper part of the right half and shift it left to 99 /* and the upper part of the right half and shift it left to
100 * it's position */ 100 * its position */
101 /* for our example that would be: word[0] = 9800 + 0076 = 101 /* for our example that would be: word[0] = 9800 + 0076 =
102 * 9876 */ 102 * 9876 */
103 } 103 }
104 /* now for our last word, i==word_num, we only have the it's "left" 104 /* now for our last word, i==word_num, we only have its "left" half.
105 * half. that's the 1000 word in our example.*/ 105 * that's the 1000 word in our example.*/
106 106
107 seq_bits[i] = (seq_bits[i - word_num] << word_offset); 107 seq_bits[i] = (seq_bits[i - word_num] << word_offset);
108 108
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 056180ef9e1a..619fb73b3b76 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -532,14 +532,14 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; 532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
533 533
534 /* Access the dhcp option lists. Each entry is made up by: 534 /* Access the dhcp option lists. Each entry is made up by:
535 * - octect 1: option type 535 * - octet 1: option type
536 * - octect 2: option data len (only if type != 255 and 0) 536 * - octet 2: option data len (only if type != 255 and 0)
537 * - octect 3: option data */ 537 * - octet 3: option data */
538 while (*p != 255 && !ret) { 538 while (*p != 255 && !ret) {
539 /* p now points to the first octect: option type */ 539 /* p now points to the first octet: option type */
540 if (*p == 53) { 540 if (*p == 53) {
541 /* type 53 is the message type option. 541 /* type 53 is the message type option.
542 * Jump the len octect and go to the data octect */ 542 * Jump the len octet and go to the data octet */
543 if (pkt_len < 2) 543 if (pkt_len < 2)
544 goto out; 544 goto out;
545 p += 2; 545 p += 2;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index db7aacf1e095..7704df468e0b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
28#include "bat_sysfs.h" 28#include "bat_sysfs.h"
29#include "originator.h" 29#include "originator.h"
30#include "hash.h" 30#include "hash.h"
31#include "bat_ogm.h"
31 32
32#include <linux/if_arp.h> 33#include <linux/if_arp.h>
33 34
@@ -131,7 +132,6 @@ static void primary_if_select(struct bat_priv *bat_priv,
131 struct hard_iface *new_hard_iface) 132 struct hard_iface *new_hard_iface)
132{ 133{
133 struct hard_iface *curr_hard_iface; 134 struct hard_iface *curr_hard_iface;
134 struct batman_packet *batman_packet;
135 135
136 ASSERT_RTNL(); 136 ASSERT_RTNL();
137 137
@@ -147,10 +147,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
147 if (!new_hard_iface) 147 if (!new_hard_iface)
148 return; 148 return;
149 149
150 batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff); 150 bat_ogm_init_primary(new_hard_iface);
151 batman_packet->flags = PRIMARIES_FIRST_HOP;
152 batman_packet->ttl = TTL;
153
154 primary_if_update_addr(bat_priv); 151 primary_if_update_addr(bat_priv);
155} 152}
156 153
@@ -162,14 +159,6 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
162 return false; 159 return false;
163} 160}
164 161
165static void update_mac_addresses(struct hard_iface *hard_iface)
166{
167 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
168 hard_iface->net_dev->dev_addr, ETH_ALEN);
169 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
170 hard_iface->net_dev->dev_addr, ETH_ALEN);
171}
172
173static void check_known_mac_addr(const struct net_device *net_dev) 162static void check_known_mac_addr(const struct net_device *net_dev)
174{ 163{
175 const struct hard_iface *hard_iface; 164 const struct hard_iface *hard_iface;
@@ -244,12 +233,12 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
244 233
245 bat_priv = netdev_priv(hard_iface->soft_iface); 234 bat_priv = netdev_priv(hard_iface->soft_iface);
246 235
247 update_mac_addresses(hard_iface); 236 bat_ogm_update_mac(hard_iface);
248 hard_iface->if_status = IF_TO_BE_ACTIVATED; 237 hard_iface->if_status = IF_TO_BE_ACTIVATED;
249 238
250 /** 239 /**
251 * the first active interface becomes our primary interface or 240 * the first active interface becomes our primary interface or
252 * the next active interface after the old primay interface was removed 241 * the next active interface after the old primary interface was removed
253 */ 242 */
254 primary_if = primary_if_get_selected(bat_priv); 243 primary_if = primary_if_get_selected(bat_priv);
255 if (!primary_if) 244 if (!primary_if)
@@ -283,7 +272,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
283 const char *iface_name) 272 const char *iface_name)
284{ 273{
285 struct bat_priv *bat_priv; 274 struct bat_priv *bat_priv;
286 struct batman_packet *batman_packet;
287 struct net_device *soft_iface; 275 struct net_device *soft_iface;
288 int ret; 276 int ret;
289 277
@@ -318,8 +306,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
318 306
319 hard_iface->soft_iface = soft_iface; 307 hard_iface->soft_iface = soft_iface;
320 bat_priv = netdev_priv(hard_iface->soft_iface); 308 bat_priv = netdev_priv(hard_iface->soft_iface);
321 hard_iface->packet_len = BAT_PACKET_LEN; 309
322 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 310 bat_ogm_init(hard_iface);
323 311
324 if (!hard_iface->packet_buff) { 312 if (!hard_iface->packet_buff) {
325 bat_err(hard_iface->soft_iface, "Can't add interface packet " 313 bat_err(hard_iface->soft_iface, "Can't add interface packet "
@@ -328,15 +316,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
328 goto err; 316 goto err;
329 } 317 }
330 318
331 batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
332 batman_packet->packet_type = BAT_PACKET;
333 batman_packet->version = COMPAT_VERSION;
334 batman_packet->flags = NO_FLAGS;
335 batman_packet->ttl = 2;
336 batman_packet->tq = TQ_MAX_VALUE;
337 batman_packet->tt_num_changes = 0;
338 batman_packet->ttvn = 0;
339
340 hard_iface->if_num = bat_priv->num_ifaces; 319 hard_iface->if_num = bat_priv->num_ifaces;
341 bat_priv->num_ifaces++; 320 bat_priv->num_ifaces++;
342 hard_iface->if_status = IF_INACTIVE; 321 hard_iface->if_status = IF_INACTIVE;
@@ -381,7 +360,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
381 hard_iface->net_dev->name); 360 hard_iface->net_dev->name);
382 361
383 /* begin scheduling originator messages on that interface */ 362 /* begin scheduling originator messages on that interface */
384 schedule_own_packet(hard_iface); 363 schedule_bat_ogm(hard_iface);
385 364
386out: 365out:
387 return 0; 366 return 0;
@@ -455,11 +434,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
455 dev_hold(net_dev); 434 dev_hold(net_dev);
456 435
457 hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC); 436 hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
458 if (!hard_iface) { 437 if (!hard_iface)
459 pr_err("Can't add interface (%s): out of memory\n",
460 net_dev->name);
461 goto release_dev; 438 goto release_dev;
462 }
463 439
464 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); 440 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
465 if (ret) 441 if (ret)
@@ -551,7 +527,7 @@ static int hard_if_event(struct notifier_block *this,
551 goto hardif_put; 527 goto hardif_put;
552 528
553 check_known_mac_addr(hard_iface->net_dev); 529 check_known_mac_addr(hard_iface->net_dev);
554 update_mac_addresses(hard_iface); 530 bat_ogm_update_mac(hard_iface);
555 531
556 bat_priv = netdev_priv(hard_iface->soft_iface); 532 bat_priv = netdev_priv(hard_iface->soft_iface);
557 primary_if = primary_if_get_selected(bat_priv); 533 primary_if = primary_if_get_selected(bat_priv);
@@ -573,14 +549,14 @@ out:
573 return NOTIFY_DONE; 549 return NOTIFY_DONE;
574} 550}
575 551
576/* receive a packet with the batman ethertype coming on a hard 552/* incoming packets with the batman ethertype received on any active hard
577 * interface */ 553 * interface */
578static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 554static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
579 struct packet_type *ptype, 555 struct packet_type *ptype,
580 struct net_device *orig_dev) 556 struct net_device *orig_dev)
581{ 557{
582 struct bat_priv *bat_priv; 558 struct bat_priv *bat_priv;
583 struct batman_packet *batman_packet; 559 struct batman_ogm_packet *batman_ogm_packet;
584 struct hard_iface *hard_iface; 560 struct hard_iface *hard_iface;
585 int ret; 561 int ret;
586 562
@@ -612,22 +588,22 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
612 if (hard_iface->if_status != IF_ACTIVE) 588 if (hard_iface->if_status != IF_ACTIVE)
613 goto err_free; 589 goto err_free;
614 590
615 batman_packet = (struct batman_packet *)skb->data; 591 batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
616 592
617 if (batman_packet->version != COMPAT_VERSION) { 593 if (batman_ogm_packet->version != COMPAT_VERSION) {
618 bat_dbg(DBG_BATMAN, bat_priv, 594 bat_dbg(DBG_BATMAN, bat_priv,
619 "Drop packet: incompatible batman version (%i)\n", 595 "Drop packet: incompatible batman version (%i)\n",
620 batman_packet->version); 596 batman_ogm_packet->version);
621 goto err_free; 597 goto err_free;
622 } 598 }
623 599
624 /* all receive handlers return whether they received or reused 600 /* all receive handlers return whether they received or reused
625 * the supplied skb. if not, we have to free the skb. */ 601 * the supplied skb. if not, we have to free the skb. */
626 602
627 switch (batman_packet->packet_type) { 603 switch (batman_ogm_packet->packet_type) {
628 /* batman originator packet */ 604 /* batman originator packet */
629 case BAT_PACKET: 605 case BAT_OGM:
630 ret = recv_bat_packet(skb, hard_iface); 606 ret = recv_bat_ogm_packet(skb, hard_iface);
631 break; 607 break;
632 608
633 /* batman icmp packet */ 609 /* batman icmp packet */
@@ -681,6 +657,36 @@ err_out:
681 return NET_RX_DROP; 657 return NET_RX_DROP;
682} 658}
683 659
660/* This function returns true if the interface represented by ifindex is a
661 * 802.11 wireless device */
662bool is_wifi_iface(int ifindex)
663{
664 struct net_device *net_device = NULL;
665 bool ret = false;
666
667 if (ifindex == NULL_IFINDEX)
668 goto out;
669
670 net_device = dev_get_by_index(&init_net, ifindex);
671 if (!net_device)
672 goto out;
673
674#ifdef CONFIG_WIRELESS_EXT
675 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
676 * check for wireless_handlers != NULL */
677 if (net_device->wireless_handlers)
678 ret = true;
679 else
680#endif
681 /* cfg80211 drivers have to set ieee80211_ptr */
682 if (net_device->ieee80211_ptr)
683 ret = true;
684out:
685 if (net_device)
686 dev_put(net_device);
687 return ret;
688}
689
684struct notifier_block hard_if_notifier = { 690struct notifier_block hard_if_notifier = {
685 .notifier_call = hard_if_event, 691 .notifier_call = hard_if_event,
686}; 692};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 442eacbc9e3a..67f78d1a63b4 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -42,6 +42,7 @@ void hardif_remove_interfaces(void);
42int hardif_min_mtu(struct net_device *soft_iface); 42int hardif_min_mtu(struct net_device *soft_iface);
43void update_min_mtu(struct net_device *soft_iface); 43void update_min_mtu(struct net_device *soft_iface);
44void hardif_free_rcu(struct rcu_head *rcu); 44void hardif_free_rcu(struct rcu_head *rcu);
45bool is_wifi_iface(int ifindex);
45 46
46static inline void hardif_free_ref(struct hard_iface *hard_iface) 47static inline void hardif_free_ref(struct hard_iface *hard_iface)
47{ 48{
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index dd5c9fd7a905..d20aa71ba1e8 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -76,19 +76,30 @@ static inline void hash_delete(struct hashtable_t *hash,
76 hash_destroy(hash); 76 hash_destroy(hash);
77} 77}
78 78
79/* adds data to the hashtable. returns 0 on success, -1 on error */ 79/**
80 * hash_add - adds data to the hashtable
81 * @hash: storage hash table
82 * @compare: callback to determine if 2 hash elements are identical
83 * @choose: callback calculating the hash index
84 * @data: data passed to the aforementioned callbacks as argument
85 * @data_node: to be added element
86 *
87 * Returns 0 on success, 1 if the element already is in the hash
88 * and -1 on error.
89 */
90
80static inline int hash_add(struct hashtable_t *hash, 91static inline int hash_add(struct hashtable_t *hash,
81 hashdata_compare_cb compare, 92 hashdata_compare_cb compare,
82 hashdata_choose_cb choose, 93 hashdata_choose_cb choose,
83 const void *data, struct hlist_node *data_node) 94 const void *data, struct hlist_node *data_node)
84{ 95{
85 int index; 96 int index, ret = -1;
86 struct hlist_head *head; 97 struct hlist_head *head;
87 struct hlist_node *node; 98 struct hlist_node *node;
88 spinlock_t *list_lock; /* spinlock to protect write access */ 99 spinlock_t *list_lock; /* spinlock to protect write access */
89 100
90 if (!hash) 101 if (!hash)
91 goto err; 102 goto out;
92 103
93 index = choose(data, hash->size); 104 index = choose(data, hash->size);
94 head = &hash->table[index]; 105 head = &hash->table[index];
@@ -99,6 +110,7 @@ static inline int hash_add(struct hashtable_t *hash,
99 if (!compare(node, data)) 110 if (!compare(node, data))
100 continue; 111 continue;
101 112
113 ret = 1;
102 goto err_unlock; 114 goto err_unlock;
103 } 115 }
104 rcu_read_unlock(); 116 rcu_read_unlock();
@@ -108,12 +120,13 @@ static inline int hash_add(struct hashtable_t *hash,
108 hlist_add_head_rcu(data_node, head); 120 hlist_add_head_rcu(data_node, head);
109 spin_unlock_bh(list_lock); 121 spin_unlock_bh(list_lock);
110 122
111 return 0; 123 ret = 0;
124 goto out;
112 125
113err_unlock: 126err_unlock:
114 rcu_read_unlock(); 127 rcu_read_unlock();
115err: 128out:
116 return -1; 129 return ret;
117} 130}
118 131
119/* removes data from hash, if found. returns pointer to data on success, so you 132
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b0f9068ade57..fb87bdc2ce9b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -107,7 +107,7 @@ int mesh_init(struct net_device *soft_iface)
107 if (tt_init(bat_priv) < 1) 107 if (tt_init(bat_priv) < 1)
108 goto err; 108 goto err;
109 109
110 tt_local_add(soft_iface, soft_iface->dev_addr); 110 tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
111 111
112 if (vis_init(bat_priv) < 1) 112 if (vis_init(bat_priv) < 1)
113 goto err; 113 goto err;
@@ -117,8 +117,6 @@ int mesh_init(struct net_device *soft_iface)
117 goto end; 117 goto end;
118 118
119err: 119err:
120 pr_err("Unable to allocate memory for mesh information structures: "
121 "out of mem ?\n");
122 mesh_free(soft_iface); 120 mesh_free(soft_iface);
123 return -1; 121 return -1;
124 122
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a6df61a6933b..964ad4d8ba33 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
28#define DRIVER_DEVICE "batman-adv" 28#define DRIVER_DEVICE "batman-adv"
29 29
30#ifndef SOURCE_VERSION 30#ifndef SOURCE_VERSION
31#define SOURCE_VERSION "2011.3.0" 31#define SOURCE_VERSION "2011.4.0"
32#endif 32#endif
33 33
34/* B.A.T.M.A.N. parameters */ 34/* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
44#define PURGE_TIMEOUT 200 44#define PURGE_TIMEOUT 200
45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */ 45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600 46#define TT_CLIENT_ROAM_TIMEOUT 600
47/* sliding packet range of received originator messages in squence numbers 47/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 48 * (should be a multiple of our word size) */
49#define TQ_LOCAL_WINDOW_SIZE 64 49#define TQ_LOCAL_WINDOW_SIZE 64
50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */ 50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
@@ -62,6 +62,8 @@
62 62
63#define NO_FLAGS 0 63#define NO_FLAGS 0
64 64
65#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
66
65#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) 67#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
66 68
67#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 69#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -133,7 +135,7 @@ enum dbg_level {
133#include <linux/mutex.h> /* mutex */ 135#include <linux/mutex.h> /* mutex */
134#include <linux/module.h> /* needed by all modules */ 136#include <linux/module.h> /* needed by all modules */
135#include <linux/netdevice.h> /* netdevice */ 137#include <linux/netdevice.h> /* netdevice */
136#include <linux/etherdevice.h> /* ethernet address classifaction */ 138#include <linux/etherdevice.h> /* ethernet address classification */
137#include <linux/if_ether.h> /* ethernet header */ 139#include <linux/if_ether.h> /* ethernet header */
138#include <linux/poll.h> /* poll_table */ 140#include <linux/poll.h> /* poll_table */
139#include <linux/kthread.h> /* kernel threads */ 141#include <linux/kthread.h> /* kernel threads */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f3c3f620d195..0e5b77255d99 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -252,7 +252,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
252 252
253 hash_added = hash_add(bat_priv->orig_hash, compare_orig, 253 hash_added = hash_add(bat_priv->orig_hash, compare_orig,
254 choose_orig, orig_node, &orig_node->hash_entry); 254 choose_orig, orig_node, &orig_node->hash_entry);
255 if (hash_added < 0) 255 if (hash_added != 0)
256 goto free_bcast_own_sum; 256 goto free_bcast_own_sum;
257 257
258 return orig_node; 258 return orig_node;
@@ -336,8 +336,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
336 } else { 336 } else {
337 if (purge_orig_neighbors(bat_priv, orig_node, 337 if (purge_orig_neighbors(bat_priv, orig_node,
338 &best_neigh_node)) { 338 &best_neigh_node)) {
339 update_routes(bat_priv, orig_node, 339 update_route(bat_priv, orig_node, best_neigh_node);
340 best_neigh_node);
341 } 340 }
342 } 341 }
343 342
@@ -493,10 +492,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
493 492
494 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, 493 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
495 GFP_ATOMIC); 494 GFP_ATOMIC);
496 if (!data_ptr) { 495 if (!data_ptr)
497 pr_err("Can't resize orig: out of memory\n");
498 return -1; 496 return -1;
499 }
500 497
501 memcpy(data_ptr, orig_node->bcast_own, 498 memcpy(data_ptr, orig_node->bcast_own,
502 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS); 499 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
@@ -504,10 +501,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
504 orig_node->bcast_own = data_ptr; 501 orig_node->bcast_own = data_ptr;
505 502
506 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 503 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
507 if (!data_ptr) { 504 if (!data_ptr)
508 pr_err("Can't resize orig: out of memory\n");
509 return -1; 505 return -1;
510 }
511 506
512 memcpy(data_ptr, orig_node->bcast_own_sum, 507 memcpy(data_ptr, orig_node->bcast_own_sum,
513 (max_if_num - 1) * sizeof(uint8_t)); 508 (max_if_num - 1) * sizeof(uint8_t));
@@ -562,10 +557,8 @@ static int orig_node_del_if(struct orig_node *orig_node,
562 557
563 chunk_size = sizeof(unsigned long) * NUM_WORDS; 558 chunk_size = sizeof(unsigned long) * NUM_WORDS;
564 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); 559 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
565 if (!data_ptr) { 560 if (!data_ptr)
566 pr_err("Can't resize orig: out of memory\n");
567 return -1; 561 return -1;
568 }
569 562
570 /* copy first part */ 563 /* copy first part */
571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 564 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -583,10 +576,8 @@ free_bcast_own:
583 goto free_own_sum; 576 goto free_own_sum;
584 577
585 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 578 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
586 if (!data_ptr) { 579 if (!data_ptr)
587 pr_err("Can't resize orig: out of memory\n");
588 return -1; 580 return -1;
589 }
590 581
591 memcpy(data_ptr, orig_node->bcast_own_sum, 582 memcpy(data_ptr, orig_node->bcast_own_sum,
592 del_if_num * sizeof(uint8_t)); 583 del_if_num * sizeof(uint8_t));
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b76b4be10b92..4d9e54c57a36 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,14 +25,14 @@
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 26
27enum bat_packettype { 27enum bat_packettype {
28 BAT_PACKET = 0x01, 28 BAT_OGM = 0x01,
29 BAT_ICMP = 0x02, 29 BAT_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 30 BAT_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 31 BAT_BCAST = 0x04,
32 BAT_VIS = 0x05, 32 BAT_VIS = 0x05,
33 BAT_UNICAST_FRAG = 0x06, 33 BAT_UNICAST_FRAG = 0x06,
34 BAT_TT_QUERY = 0x07, 34 BAT_TT_QUERY = 0x07,
35 BAT_ROAM_ADV = 0x08 35 BAT_ROAM_ADV = 0x08
36}; 36};
37 37
38/* this file is included by batctl which needs these defines */ 38/* this file is included by batctl which needs these defines */
@@ -84,12 +84,13 @@ enum tt_query_flags {
84enum tt_client_flags { 84enum tt_client_flags {
85 TT_CLIENT_DEL = 1 << 0, 85 TT_CLIENT_DEL = 1 << 0,
86 TT_CLIENT_ROAM = 1 << 1, 86 TT_CLIENT_ROAM = 1 << 1,
87 TT_CLIENT_WIFI = 1 << 2,
87 TT_CLIENT_NOPURGE = 1 << 8, 88 TT_CLIENT_NOPURGE = 1 << 8,
88 TT_CLIENT_NEW = 1 << 9, 89 TT_CLIENT_NEW = 1 << 9,
89 TT_CLIENT_PENDING = 1 << 10 90 TT_CLIENT_PENDING = 1 << 10
90}; 91};
91 92
92struct batman_packet { 93struct batman_ogm_packet {
93 uint8_t packet_type; 94 uint8_t packet_type;
94 uint8_t version; /* batman version field */ 95 uint8_t version; /* batman version field */
95 uint8_t ttl; 96 uint8_t ttl;
@@ -104,7 +105,7 @@ struct batman_packet {
104 uint16_t tt_crc; 105 uint16_t tt_crc;
105} __packed; 106} __packed;
106 107
107#define BAT_PACKET_LEN sizeof(struct batman_packet) 108#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
108 109
109struct icmp_packet { 110struct icmp_packet {
110 uint8_t packet_type; 111 uint8_t packet_type;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0f32c818874d..f961cc5eade5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -22,18 +22,14 @@
22#include "main.h" 22#include "main.h"
23#include "routing.h" 23#include "routing.h"
24#include "send.h" 24#include "send.h"
25#include "hash.h"
26#include "soft-interface.h" 25#include "soft-interface.h"
27#include "hard-interface.h" 26#include "hard-interface.h"
28#include "icmp_socket.h" 27#include "icmp_socket.h"
29#include "translation-table.h" 28#include "translation-table.h"
30#include "originator.h" 29#include "originator.h"
31#include "ring_buffer.h"
32#include "vis.h" 30#include "vis.h"
33#include "aggregation.h"
34#include "gateway_common.h"
35#include "gateway_client.h"
36#include "unicast.h" 31#include "unicast.h"
32#include "bat_ogm.h"
37 33
38void slide_own_bcast_window(struct hard_iface *hard_iface) 34void slide_own_bcast_window(struct hard_iface *hard_iface)
39{ 35{
@@ -64,69 +60,9 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
64 } 60 }
65} 61}
66 62
67static void update_transtable(struct bat_priv *bat_priv, 63static void _update_route(struct bat_priv *bat_priv,
68 struct orig_node *orig_node, 64 struct orig_node *orig_node,
69 const unsigned char *tt_buff, 65 struct neigh_node *neigh_node)
70 uint8_t tt_num_changes, uint8_t ttvn,
71 uint16_t tt_crc)
72{
73 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
74 bool full_table = true;
75
76 /* the ttvn increased by one -> we can apply the attached changes */
77 if (ttvn - orig_ttvn == 1) {
78 /* the OGM could not contain the changes because they were too
79 * many to fit in one frame or because they have already been
80 * sent TT_OGM_APPEND_MAX times. In this case send a tt
81 * request */
82 if (!tt_num_changes) {
83 full_table = false;
84 goto request_table;
85 }
86
87 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
88 (struct tt_change *)tt_buff);
89
90 /* Even if we received the crc into the OGM, we prefer
91 * to recompute it to spot any possible inconsistency
92 * in the global table */
93 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
94
95 /* The ttvn alone is not enough to guarantee consistency
96 * because a single value could repesent different states
97 * (due to the wrap around). Thus a node has to check whether
98 * the resulting table (after applying the changes) is still
99 * consistent or not. E.g. a node could disconnect while its
100 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
101 * checking the CRC value is mandatory to detect the
102 * inconsistency */
103 if (orig_node->tt_crc != tt_crc)
104 goto request_table;
105
106 /* Roaming phase is over: tables are in sync again. I can
107 * unset the flag */
108 orig_node->tt_poss_change = false;
109 } else {
110 /* if we missed more than one change or our tables are not
111 * in sync anymore -> request fresh tt data */
112 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
113request_table:
114 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
115 "Need to retrieve the correct information "
116 "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
117 "%u num_changes: %u)\n", orig_node->orig, ttvn,
118 orig_ttvn, tt_crc, orig_node->tt_crc,
119 tt_num_changes);
120 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
121 full_table);
122 return;
123 }
124 }
125}
126
127static void update_route(struct bat_priv *bat_priv,
128 struct orig_node *orig_node,
129 struct neigh_node *neigh_node)
130{ 66{
131 struct neigh_node *curr_router; 67 struct neigh_node *curr_router;
132 68
@@ -170,8 +106,8 @@ static void update_route(struct bat_priv *bat_priv,
170 neigh_node_free_ref(curr_router); 106 neigh_node_free_ref(curr_router);
171} 107}
172 108
173void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 109void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
174 struct neigh_node *neigh_node) 110 struct neigh_node *neigh_node)
175{ 111{
176 struct neigh_node *router = NULL; 112 struct neigh_node *router = NULL;
177 113
@@ -181,116 +117,13 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
181 router = orig_node_get_router(orig_node); 117 router = orig_node_get_router(orig_node);
182 118
183 if (router != neigh_node) 119 if (router != neigh_node)
184 update_route(bat_priv, orig_node, neigh_node); 120 _update_route(bat_priv, orig_node, neigh_node);
185 121
186out: 122out:
187 if (router) 123 if (router)
188 neigh_node_free_ref(router); 124 neigh_node_free_ref(router);
189} 125}
190 126
191static int is_bidirectional_neigh(struct orig_node *orig_node,
192 struct orig_node *orig_neigh_node,
193 struct batman_packet *batman_packet,
194 struct hard_iface *if_incoming)
195{
196 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
197 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
198 struct hlist_node *node;
199 uint8_t total_count;
200 uint8_t orig_eq_count, neigh_rq_count, tq_own;
201 int tq_asym_penalty, ret = 0;
202
203 /* find corresponding one hop neighbor */
204 rcu_read_lock();
205 hlist_for_each_entry_rcu(tmp_neigh_node, node,
206 &orig_neigh_node->neigh_list, list) {
207
208 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
209 continue;
210
211 if (tmp_neigh_node->if_incoming != if_incoming)
212 continue;
213
214 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
215 continue;
216
217 neigh_node = tmp_neigh_node;
218 break;
219 }
220 rcu_read_unlock();
221
222 if (!neigh_node)
223 neigh_node = create_neighbor(orig_neigh_node,
224 orig_neigh_node,
225 orig_neigh_node->orig,
226 if_incoming);
227
228 if (!neigh_node)
229 goto out;
230
231 /* if orig_node is direct neighbour update neigh_node last_valid */
232 if (orig_node == orig_neigh_node)
233 neigh_node->last_valid = jiffies;
234
235 orig_node->last_valid = jiffies;
236
237 /* find packet count of corresponding one hop neighbor */
238 spin_lock_bh(&orig_node->ogm_cnt_lock);
239 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
240 neigh_rq_count = neigh_node->real_packet_count;
241 spin_unlock_bh(&orig_node->ogm_cnt_lock);
242
243 /* pay attention to not get a value bigger than 100 % */
244 total_count = (orig_eq_count > neigh_rq_count ?
245 neigh_rq_count : orig_eq_count);
246
247 /* if we have too few packets (too less data) we set tq_own to zero */
248 /* if we receive too few packets it is not considered bidirectional */
249 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
250 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
251 tq_own = 0;
252 else
253 /* neigh_node->real_packet_count is never zero as we
254 * only purge old information when getting new
255 * information */
256 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
257
258 /*
259	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE; this affects
260	 * the nearly-symmetric links only a little, but
261	 * punishes asymmetric links more heavily. This gives a value
262 * between 0 and TQ_MAX_VALUE
263 */
264 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
265 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
266 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
267 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
268 (TQ_LOCAL_WINDOW_SIZE *
269 TQ_LOCAL_WINDOW_SIZE *
270 TQ_LOCAL_WINDOW_SIZE);
271
272 batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
273 (TQ_MAX_VALUE * TQ_MAX_VALUE));
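
A worked example of the TQ arithmetic above, assuming the in-tree constants TQ_MAX_VALUE = 255 and TQ_LOCAL_WINDOW_SIZE = 64 (the numbers are illustrative only):

/* Suppose 48 of the last 64 OGMs from this neighbor were received
 * (neigh_rq_count = 48) and our own echoes match (orig_eq_count = 48):
 *   tq_own          = 255 * 48 / 48           = 255
 *   tq_asym_penalty = 255 - 255 * 16^3 / 64^3 = 255 - 3 = 252
 * so an incoming tq of 255 becomes 255 * 255 * 252 / (255 * 255) = 252.
 * A lossier reverse direction (neigh_rq_count = 16) is punished much
 * harder: tq_asym_penalty = 255 - 107 = 148, matching 1 - (1-x)^3.
 */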
274
275 bat_dbg(DBG_BATMAN, bat_priv,
276 "bidirectional: "
277 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
278 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
279 "total tq: %3i\n",
280 orig_node->orig, orig_neigh_node->orig, total_count,
281 neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
282
283 /* if link has the minimum required transmission quality
284 * consider it bidirectional */
285 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
286 ret = 1;
287
288out:
289 if (neigh_node)
290 neigh_node_free_ref(neigh_node);
291 return ret;
292}
293
294/* caller must hold the neigh_list_lock */ 127/* caller must hold the neigh_list_lock */
295void bonding_candidate_del(struct orig_node *orig_node, 128void bonding_candidate_del(struct orig_node *orig_node,
296 struct neigh_node *neigh_node) 129 struct neigh_node *neigh_node)
@@ -308,8 +141,8 @@ out:
308 return; 141 return;
309} 142}
310 143
311static void bonding_candidate_add(struct orig_node *orig_node, 144void bonding_candidate_add(struct orig_node *orig_node,
312 struct neigh_node *neigh_node) 145 struct neigh_node *neigh_node)
313{ 146{
314 struct hlist_node *node; 147 struct hlist_node *node;
315 struct neigh_node *tmp_neigh_node, *router = NULL; 148 struct neigh_node *tmp_neigh_node, *router = NULL;
@@ -379,162 +212,23 @@ out:
379} 212}
380 213
381/* copy primary address for bonding */ 214/* copy primary address for bonding */
382static void bonding_save_primary(const struct orig_node *orig_node, 215void bonding_save_primary(const struct orig_node *orig_node,
383 struct orig_node *orig_neigh_node, 216 struct orig_node *orig_neigh_node,
384 const struct batman_packet *batman_packet) 217 const struct batman_ogm_packet *batman_ogm_packet)
385{ 218{
386 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 219 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
387 return; 220 return;
388 221
389 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); 222 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
390} 223}
391 224
392static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
393 const struct ethhdr *ethhdr,
394 const struct batman_packet *batman_packet,
395 struct hard_iface *if_incoming,
396 const unsigned char *tt_buff, int is_duplicate)
397{
398 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
399 struct neigh_node *router = NULL;
400 struct orig_node *orig_node_tmp;
401 struct hlist_node *node;
402 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
403
404 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
405 "Searching and updating originator entry of received packet\n");
406
407 rcu_read_lock();
408 hlist_for_each_entry_rcu(tmp_neigh_node, node,
409 &orig_node->neigh_list, list) {
410 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
411 (tmp_neigh_node->if_incoming == if_incoming) &&
412 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
413 if (neigh_node)
414 neigh_node_free_ref(neigh_node);
415 neigh_node = tmp_neigh_node;
416 continue;
417 }
418
419 if (is_duplicate)
420 continue;
421
422 spin_lock_bh(&tmp_neigh_node->tq_lock);
423 ring_buffer_set(tmp_neigh_node->tq_recv,
424 &tmp_neigh_node->tq_index, 0);
425 tmp_neigh_node->tq_avg =
426 ring_buffer_avg(tmp_neigh_node->tq_recv);
427 spin_unlock_bh(&tmp_neigh_node->tq_lock);
428 }
429
430 if (!neigh_node) {
431 struct orig_node *orig_tmp;
432
433 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
434 if (!orig_tmp)
435 goto unlock;
436
437 neigh_node = create_neighbor(orig_node, orig_tmp,
438 ethhdr->h_source, if_incoming);
439
440 orig_node_free_ref(orig_tmp);
441 if (!neigh_node)
442 goto unlock;
443 } else
444 bat_dbg(DBG_BATMAN, bat_priv,
445 "Updating existing last-hop neighbor of originator\n");
446
447 rcu_read_unlock();
448
449 orig_node->flags = batman_packet->flags;
450 neigh_node->last_valid = jiffies;
451
452 spin_lock_bh(&neigh_node->tq_lock);
453 ring_buffer_set(neigh_node->tq_recv,
454 &neigh_node->tq_index,
455 batman_packet->tq);
456 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
457 spin_unlock_bh(&neigh_node->tq_lock);
458
459 if (!is_duplicate) {
460 orig_node->last_ttl = batman_packet->ttl;
461 neigh_node->last_ttl = batman_packet->ttl;
462 }
463
464 bonding_candidate_add(orig_node, neigh_node);
465
466 /* if this neighbor already is our next hop there is nothing
467 * to change */
468 router = orig_node_get_router(orig_node);
469 if (router == neigh_node)
470 goto update_tt;
471
472 /* if this neighbor does not offer a better TQ we won't consider it */
473 if (router && (router->tq_avg > neigh_node->tq_avg))
474 goto update_tt;
475
476	/* if the TQ is the same and the link is not more symmetric we
477 * won't consider it either */
478 if (router && (neigh_node->tq_avg == router->tq_avg)) {
479 orig_node_tmp = router->orig_node;
480 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
481 bcast_own_sum_orig =
482 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
483 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
484
485 orig_node_tmp = neigh_node->orig_node;
486 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
487 bcast_own_sum_neigh =
488 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
489 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
490
491 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
492 goto update_tt;
493 }
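
The tie-breaking rule above, restated (no new behavior implied):

/* When the candidate neighbor and the current router advertise the
 * same tq_avg, the route is only switched if the candidate's
 * originator echoed more of our own broadcasts on this interface
 * (bcast_own_sum_neigh > bcast_own_sum_orig); on a tie the current
 * router is kept, which avoids needless route flapping.
 */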
494
495 update_routes(bat_priv, orig_node, neigh_node);
496
497update_tt:
498 /* I have to check for transtable changes only if the OGM has been
499 * sent through a primary interface */
500 if (((batman_packet->orig != ethhdr->h_source) &&
501 (batman_packet->ttl > 2)) ||
502 (batman_packet->flags & PRIMARIES_FIRST_HOP))
503 update_transtable(bat_priv, orig_node, tt_buff,
504 batman_packet->tt_num_changes,
505 batman_packet->ttvn,
506 batman_packet->tt_crc);
507
508 if (orig_node->gw_flags != batman_packet->gw_flags)
509 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
510
511 orig_node->gw_flags = batman_packet->gw_flags;
512
513 /* restart gateway selection if fast or late switching was enabled */
514 if ((orig_node->gw_flags) &&
515 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
516 (atomic_read(&bat_priv->gw_sel_class) > 2))
517 gw_check_election(bat_priv, orig_node);
518
519 goto out;
520
521unlock:
522 rcu_read_unlock();
523out:
524 if (neigh_node)
525 neigh_node_free_ref(neigh_node);
526 if (router)
527 neigh_node_free_ref(router);
528}
529
530/* checks whether the host restarted and is in the protection time. 225/* checks whether the host restarted and is in the protection time.
531 * returns: 226 * returns:
532 * 0 if the packet is to be accepted 227 * 0 if the packet is to be accepted
533 * 1 if the packet is to be ignored. 228 * 1 if the packet is to be ignored.
534 */ 229 */
535static int window_protected(struct bat_priv *bat_priv, 230int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
536 int32_t seq_num_diff, 231 unsigned long *last_reset)
537 unsigned long *last_reset)
538{ 232{
539 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 233 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
540 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 234 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
@@ -552,330 +246,12 @@ static int window_protected(struct bat_priv *bat_priv,
552 return 0; 246 return 0;
553} 247}
554 248
555/* processes a batman packet for all interfaces, adjusts the sequence number and 249int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
556 * finds out whether it is a duplicate.
557 * returns:
558 * 1 the packet is a duplicate
559 * 0 the packet has not yet been received
560 * -1 the packet is old and has been received while the seqno window
561 * was protected. Caller should drop it.
562 */
563static int count_real_packets(const struct ethhdr *ethhdr,
564 const struct batman_packet *batman_packet,
565 const struct hard_iface *if_incoming)
566{
567 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
568 struct orig_node *orig_node;
569 struct neigh_node *tmp_neigh_node;
570 struct hlist_node *node;
571 int is_duplicate = 0;
572 int32_t seq_diff;
573 int need_update = 0;
574 int set_mark, ret = -1;
575
576 orig_node = get_orig_node(bat_priv, batman_packet->orig);
577 if (!orig_node)
578 return 0;
579
580 spin_lock_bh(&orig_node->ogm_cnt_lock);
581 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
582
583	/* signal to the caller that the packet is to be dropped. */
584 if (window_protected(bat_priv, seq_diff,
585 &orig_node->batman_seqno_reset))
586 goto out;
587
588 rcu_read_lock();
589 hlist_for_each_entry_rcu(tmp_neigh_node, node,
590 &orig_node->neigh_list, list) {
591
592 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
593 orig_node->last_real_seqno,
594 batman_packet->seqno);
595
596 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
597 (tmp_neigh_node->if_incoming == if_incoming))
598 set_mark = 1;
599 else
600 set_mark = 0;
601
602 /* if the window moved, set the update flag. */
603 need_update |= bit_get_packet(bat_priv,
604 tmp_neigh_node->real_bits,
605 seq_diff, set_mark);
606
607 tmp_neigh_node->real_packet_count =
608 bit_packet_count(tmp_neigh_node->real_bits);
609 }
610 rcu_read_unlock();
611
612 if (need_update) {
613 bat_dbg(DBG_BATMAN, bat_priv,
614 "updating last_seqno: old %d, new %d\n",
615 orig_node->last_real_seqno, batman_packet->seqno);
616 orig_node->last_real_seqno = batman_packet->seqno;
617 }
618
619 ret = is_duplicate;
620
621out:
622 spin_unlock_bh(&orig_node->ogm_cnt_lock);
623 orig_node_free_ref(orig_node);
624 return ret;
625}
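
To make the three return values concrete (assuming TQ_LOCAL_WINDOW_SIZE = 64 and EXPECTED_SEQNO_RANGE = 65536 from main.h):

/* A seqno already marked in a neighbor's real_bits window yields
 * is_duplicate = 1; a seqno that moves the window forward returns 0
 * and advances last_real_seqno; a jump of <= -64 or >= 65536 trips
 * window_protected() and the function reports -1 (caller drops).
 */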
626
627void receive_bat_packet(const struct ethhdr *ethhdr,
628 struct batman_packet *batman_packet,
629 const unsigned char *tt_buff,
630 struct hard_iface *if_incoming)
631{
632 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
633 struct hard_iface *hard_iface;
634 struct orig_node *orig_neigh_node, *orig_node;
635 struct neigh_node *router = NULL, *router_router = NULL;
636 struct neigh_node *orig_neigh_router = NULL;
637 int has_directlink_flag;
638 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
639 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
640 int is_duplicate;
641 uint32_t if_incoming_seqno;
642
643 /* Silently drop when the batman packet is actually not a
644 * correct packet.
645 *
646 * This might happen if a packet is padded (e.g. Ethernet has a
647 * minimum frame length of 64 byte) and the aggregation interprets
648 * it as an additional length.
649 *
650 * TODO: A more sane solution would be to have a bit in the
651 * batman_packet to detect whether the packet is the last
652 * packet in an aggregation. Here we expect that the padding
653 * is always zero (or not 0x01)
654 */
655 if (batman_packet->packet_type != BAT_PACKET)
656 return;
657
658 /* could be changed by schedule_own_packet() */
659 if_incoming_seqno = atomic_read(&if_incoming->seqno);
660
661 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
662
663 is_single_hop_neigh = (compare_eth(ethhdr->h_source,
664 batman_packet->orig) ? 1 : 0);
665
666 bat_dbg(DBG_BATMAN, bat_priv,
667 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
668 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
669 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
670 ethhdr->h_source, if_incoming->net_dev->name,
671 if_incoming->net_dev->dev_addr, batman_packet->orig,
672 batman_packet->prev_sender, batman_packet->seqno,
673 batman_packet->ttvn, batman_packet->tt_crc,
674 batman_packet->tt_num_changes, batman_packet->tq,
675 batman_packet->ttl, batman_packet->version,
676 has_directlink_flag);
677
678 rcu_read_lock();
679 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
680 if (hard_iface->if_status != IF_ACTIVE)
681 continue;
682
683 if (hard_iface->soft_iface != if_incoming->soft_iface)
684 continue;
685
686 if (compare_eth(ethhdr->h_source,
687 hard_iface->net_dev->dev_addr))
688 is_my_addr = 1;
689
690 if (compare_eth(batman_packet->orig,
691 hard_iface->net_dev->dev_addr))
692 is_my_orig = 1;
693
694 if (compare_eth(batman_packet->prev_sender,
695 hard_iface->net_dev->dev_addr))
696 is_my_oldorig = 1;
697
698 if (is_broadcast_ether_addr(ethhdr->h_source))
699 is_broadcast = 1;
700 }
701 rcu_read_unlock();
702
703 if (batman_packet->version != COMPAT_VERSION) {
704 bat_dbg(DBG_BATMAN, bat_priv,
705 "Drop packet: incompatible batman version (%i)\n",
706 batman_packet->version);
707 return;
708 }
709
710 if (is_my_addr) {
711 bat_dbg(DBG_BATMAN, bat_priv,
712 "Drop packet: received my own broadcast (sender: %pM"
713 ")\n",
714 ethhdr->h_source);
715 return;
716 }
717
718 if (is_broadcast) {
719 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
720 "ignoring all packets with broadcast source addr (sender: %pM"
721 ")\n", ethhdr->h_source);
722 return;
723 }
724
725 if (is_my_orig) {
726 unsigned long *word;
727 int offset;
728
729 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
730 if (!orig_neigh_node)
731 return;
732
733 /* neighbor has to indicate direct link and it has to
734 * come via the corresponding interface */
735 /* save packet seqno for bidirectional check */
736 if (has_directlink_flag &&
737 compare_eth(if_incoming->net_dev->dev_addr,
738 batman_packet->orig)) {
739 offset = if_incoming->if_num * NUM_WORDS;
740
741 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
742 word = &(orig_neigh_node->bcast_own[offset]);
743 bit_mark(word,
744 if_incoming_seqno - batman_packet->seqno - 2);
745 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
746 bit_packet_count(word);
747 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
748 }
749
750 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
751 "originator packet from myself (via neighbor)\n");
752 orig_node_free_ref(orig_neigh_node);
753 return;
754 }
755
756 if (is_my_oldorig) {
757 bat_dbg(DBG_BATMAN, bat_priv,
758 "Drop packet: ignoring all rebroadcast echos (sender: "
759 "%pM)\n", ethhdr->h_source);
760 return;
761 }
762
763 orig_node = get_orig_node(bat_priv, batman_packet->orig);
764 if (!orig_node)
765 return;
766
767 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
768
769 if (is_duplicate == -1) {
770 bat_dbg(DBG_BATMAN, bat_priv,
771 "Drop packet: packet within seqno protection time "
772 "(sender: %pM)\n", ethhdr->h_source);
773 goto out;
774 }
775
776 if (batman_packet->tq == 0) {
777 bat_dbg(DBG_BATMAN, bat_priv,
778 "Drop packet: originator packet with tq equal 0\n");
779 goto out;
780 }
781
782 router = orig_node_get_router(orig_node);
783 if (router)
784 router_router = orig_node_get_router(router->orig_node);
785
786 /* avoid temporary routing loops */
787 if (router && router_router &&
788 (compare_eth(router->addr, batman_packet->prev_sender)) &&
789 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
790 (compare_eth(router->addr, router_router->addr))) {
791 bat_dbg(DBG_BATMAN, bat_priv,
792 "Drop packet: ignoring all rebroadcast packets that "
793 "may make me loop (sender: %pM)\n", ethhdr->h_source);
794 goto out;
795 }
796
797 /* if sender is a direct neighbor the sender mac equals
798 * originator mac */
799 orig_neigh_node = (is_single_hop_neigh ?
800 orig_node :
801 get_orig_node(bat_priv, ethhdr->h_source));
802 if (!orig_neigh_node)
803 goto out;
804
805 orig_neigh_router = orig_node_get_router(orig_neigh_node);
806
807 /* drop packet if sender is not a direct neighbor and if we
808 * don't route towards it */
809 if (!is_single_hop_neigh && (!orig_neigh_router)) {
810 bat_dbg(DBG_BATMAN, bat_priv,
811 "Drop packet: OGM via unknown neighbor!\n");
812 goto out_neigh;
813 }
814
815 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
816 batman_packet, if_incoming);
817
818 bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
819
820 /* update ranking if it is not a duplicate or has the same
821 * seqno and similar ttl as the non-duplicate */
822 if (is_bidirectional &&
823 (!is_duplicate ||
824 ((orig_node->last_real_seqno == batman_packet->seqno) &&
825 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
826 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
827 if_incoming, tt_buff, is_duplicate);
828
829 /* is single hop (direct) neighbor */
830 if (is_single_hop_neigh) {
831
832 /* mark direct link on incoming interface */
833 schedule_forward_packet(orig_node, ethhdr, batman_packet,
834 1, if_incoming);
835
836 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
837 "rebroadcast neighbor packet with direct link flag\n");
838 goto out_neigh;
839 }
840
841 /* multihop originator */
842 if (!is_bidirectional) {
843 bat_dbg(DBG_BATMAN, bat_priv,
844 "Drop packet: not received via bidirectional link\n");
845 goto out_neigh;
846 }
847
848 if (is_duplicate) {
849 bat_dbg(DBG_BATMAN, bat_priv,
850 "Drop packet: duplicate packet received\n");
851 goto out_neigh;
852 }
853
854 bat_dbg(DBG_BATMAN, bat_priv,
855 "Forwarding packet: rebroadcast originator packet\n");
856 schedule_forward_packet(orig_node, ethhdr, batman_packet,
857 0, if_incoming);
858
859out_neigh:
860 if ((orig_neigh_node) && (!is_single_hop_neigh))
861 orig_node_free_ref(orig_neigh_node);
862out:
863 if (router)
864 neigh_node_free_ref(router);
865 if (router_router)
866 neigh_node_free_ref(router_router);
867 if (orig_neigh_router)
868 neigh_node_free_ref(orig_neigh_router);
869
870 orig_node_free_ref(orig_node);
871}
872
873int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
874{ 250{
875 struct ethhdr *ethhdr; 251 struct ethhdr *ethhdr;
876 252
877	/* drop packet if it does not have the necessary minimum size */ 253
878 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet)))) 254 if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
879 return NET_RX_DROP; 255 return NET_RX_DROP;
880 256
881 ethhdr = (struct ethhdr *)skb_mac_header(skb); 257 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -898,10 +274,7 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
898 274
899 ethhdr = (struct ethhdr *)skb_mac_header(skb); 275 ethhdr = (struct ethhdr *)skb_mac_header(skb);
900 276
901 receive_aggr_bat_packet(ethhdr, 277 bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
902 skb->data,
903 skb_headlen(skb),
904 hard_iface);
905 278
906 kfree_skb(skb); 279 kfree_skb(skb);
907 return NET_RX_SUCCESS; 280 return NET_RX_SUCCESS;
@@ -1243,7 +616,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
1243 } 616 }
1244 break; 617 break;
1245 case TT_RESPONSE: 618 case TT_RESPONSE:
1246 /* packet needs to be linearised to access the TT changes */ 619 /* packet needs to be linearized to access the TT changes */
1247 if (skb_linearize(skb) < 0) 620 if (skb_linearize(skb) < 0)
1248 goto out; 621 goto out;
1249 622
@@ -1300,7 +673,7 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
1300 roam_adv_packet->client); 673 roam_adv_packet->client);
1301 674
1302 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 675 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
1303 atomic_read(&orig_node->last_ttvn) + 1, true); 676 atomic_read(&orig_node->last_ttvn) + 1, true, false);
1304 677
1305 /* Roaming phase starts: I have new information but the ttvn has not 678 /* Roaming phase starts: I have new information but the ttvn has not
1306 * been incremented yet. This flag will make me check all the incoming 679 * been incremented yet. This flag will make me check all the incoming
@@ -1536,7 +909,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
1536 909
1537 ethhdr = (struct ethhdr *)(skb->data + 910 ethhdr = (struct ethhdr *)(skb->data +
1538 sizeof(struct unicast_packet)); 911 sizeof(struct unicast_packet));
1539 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 912 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
1540 913
1541 if (!orig_node) { 914 if (!orig_node) {
1542 if (!is_my_client(bat_priv, ethhdr->h_dest)) 915 if (!is_my_client(bat_priv, ethhdr->h_dest))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index fb14e9579b19..7aaee0fb0fdc 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,19 +23,15 @@
23#define _NET_BATMAN_ADV_ROUTING_H_ 23#define _NET_BATMAN_ADV_ROUTING_H_
24 24
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void receive_bat_packet(const struct ethhdr *ethhdr, 26void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
27 struct batman_packet *batman_packet, 27 struct neigh_node *neigh_node);
28 const unsigned char *tt_buff,
29 struct hard_iface *if_incoming);
30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
31 struct neigh_node *neigh_node);
32int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 28int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
33int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 29int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 30int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 31int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
36int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
37int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
38int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); 34int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
39int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); 35int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
40int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); 36int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
41struct neigh_node *find_router(struct bat_priv *bat_priv, 37struct neigh_node *find_router(struct bat_priv *bat_priv,
@@ -43,5 +39,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
43 const struct hard_iface *recv_if); 39 const struct hard_iface *recv_if);
44void bonding_candidate_del(struct orig_node *orig_node, 40void bonding_candidate_del(struct orig_node *orig_node,
45 struct neigh_node *neigh_node); 41 struct neigh_node *neigh_node);
42void bonding_candidate_add(struct orig_node *orig_node,
43 struct neigh_node *neigh_node);
44void bonding_save_primary(const struct orig_node *orig_node,
45 struct orig_node *orig_neigh_node,
46 const struct batman_ogm_packet *batman_ogm_packet);
47int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
48 unsigned long *last_reset);
46 49
47#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ 50#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 58d14472068c..8a684eb738ad 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -26,33 +26,12 @@
26#include "soft-interface.h" 26#include "soft-interface.h"
27#include "hard-interface.h" 27#include "hard-interface.h"
28#include "vis.h" 28#include "vis.h"
29#include "aggregation.h"
30#include "gateway_common.h" 29#include "gateway_common.h"
31#include "originator.h" 30#include "originator.h"
31#include "bat_ogm.h"
32 32
33static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
34 34
35/* apply hop penalty for a normal link */
36static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
37{
38 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
39 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
40}
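
A quick numeric check of hop_penalty(), assuming the default hop_penalty of 10 set at interface creation (illustrative only):

/* With hop_penalty = 10 and TQ_MAX_VALUE = 255:
 *   hop_penalty(255) = 255 * 245 / 255 = 245
 *   hop_penalty(200) = 200 * 245 / 255 = 192
 * i.e. every forwarded hop scales the advertised TQ down by ~4%.
 */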
41
42/* when do we schedule our own packet to be sent */
43static unsigned long own_send_time(const struct bat_priv *bat_priv)
44{
45 return jiffies + msecs_to_jiffies(
46 atomic_read(&bat_priv->orig_interval) -
47 JITTER + (random32() % 2*JITTER));
48}
49
50/* when do we schedule a forwarded packet to be sent */
51static unsigned long forward_send_time(void)
52{
53 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
54}
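
The intent of the two schedulers above, assuming the defaults of orig_interval = 1000 ms and JITTER = 20 ms used in this tree:

/* own OGMs:       scheduled roughly once per orig_interval, jittered
 *                 around it so neighboring nodes do not transmit in
 *                 lock-step;
 * forwarded OGMs: delayed by a small random amount below JITTER/2
 *                 (0..9 ms) so rebroadcasts are less likely to collide.
 */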
55
56/* send out an already prepared packet to the given address via the 35/* send out an already prepared packet to the given address via the
57 * specified batman interface */ 36 * specified batman interface */
58int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 37int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
@@ -99,141 +78,17 @@ send_skb_err:
99 return NET_XMIT_DROP; 78 return NET_XMIT_DROP;
100} 79}
101 80
102/* Send a packet to a given interface */
103static void send_packet_to_if(struct forw_packet *forw_packet,
104 struct hard_iface *hard_iface)
105{
106 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
107 char *fwd_str;
108 uint8_t packet_num;
109 int16_t buff_pos;
110 struct batman_packet *batman_packet;
111 struct sk_buff *skb;
112
113 if (hard_iface->if_status != IF_ACTIVE)
114 return;
115
116 packet_num = 0;
117 buff_pos = 0;
118 batman_packet = (struct batman_packet *)forw_packet->skb->data;
119
120 /* adjust all flags and log packets */
121 while (aggregated_packet(buff_pos,
122 forw_packet->packet_len,
123 batman_packet->tt_num_changes)) {
124
125 /* we might have aggregated direct link packets with an
126 * ordinary base packet */
127 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
128 (forw_packet->if_incoming == hard_iface))
129 batman_packet->flags |= DIRECTLINK;
130 else
131 batman_packet->flags &= ~DIRECTLINK;
132
133 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
134 "Sending own" :
135 "Forwarding"));
136 bat_dbg(DBG_BATMAN, bat_priv,
137 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
138 " IDF %s, hvn %d) on interface %s [%pM]\n",
139 fwd_str, (packet_num > 0 ? "aggregated " : ""),
140 batman_packet->orig, ntohl(batman_packet->seqno),
141 batman_packet->tq, batman_packet->ttl,
142 (batman_packet->flags & DIRECTLINK ?
143 "on" : "off"),
144 batman_packet->ttvn, hard_iface->net_dev->name,
145 hard_iface->net_dev->dev_addr);
146
147 buff_pos += sizeof(*batman_packet) +
148 tt_len(batman_packet->tt_num_changes);
149 packet_num++;
150 batman_packet = (struct batman_packet *)
151 (forw_packet->skb->data + buff_pos);
152 }
153
154 /* create clone because function is called more than once */
155 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
156 if (skb)
157 send_skb_packet(skb, hard_iface, broadcast_addr);
158}
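
The loop above parses an OGM aggregate in place; a sketch of the buffer layout it assumes:

/* forw_packet->skb->data:
 *   [batman_packet 0][tt changes 0][batman_packet 1][tt changes 1]...
 * Each OGM occupies sizeof(*batman_packet) + tt_len(tt_num_changes)
 * bytes, so buff_pos advances packet by packet, and the DIRECTLINK
 * flag is recomputed per packet because direct-link OGMs may be
 * aggregated with ordinary ones.
 */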
159
160/* send a batman packet */
161static void send_packet(struct forw_packet *forw_packet)
162{
163 struct hard_iface *hard_iface;
164 struct net_device *soft_iface;
165 struct bat_priv *bat_priv;
166 struct hard_iface *primary_if = NULL;
167 struct batman_packet *batman_packet =
168 (struct batman_packet *)(forw_packet->skb->data);
169 int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
170
171 if (!forw_packet->if_incoming) {
172 pr_err("Error - can't forward packet: incoming iface not "
173 "specified\n");
174 goto out;
175 }
176
177 soft_iface = forw_packet->if_incoming->soft_iface;
178 bat_priv = netdev_priv(soft_iface);
179
180 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
181 goto out;
182
183 primary_if = primary_if_get_selected(bat_priv);
184 if (!primary_if)
185 goto out;
186
187 /* multihomed peer assumed */
188	/* non-primary OGMs are only broadcast on their interface */
189 if ((directlink && (batman_packet->ttl == 1)) ||
190 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
191
192 /* FIXME: what about aggregated packets ? */
193 bat_dbg(DBG_BATMAN, bat_priv,
194 "%s packet (originator %pM, seqno %d, TTL %d) "
195 "on interface %s [%pM]\n",
196 (forw_packet->own ? "Sending own" : "Forwarding"),
197 batman_packet->orig, ntohl(batman_packet->seqno),
198 batman_packet->ttl,
199 forw_packet->if_incoming->net_dev->name,
200 forw_packet->if_incoming->net_dev->dev_addr);
201
202		/* skb is only used once and then forw_packet is freed */
203 send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
204 broadcast_addr);
205 forw_packet->skb = NULL;
206
207 goto out;
208 }
209
210 /* broadcast on every interface */
211 rcu_read_lock();
212 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
213 if (hard_iface->soft_iface != soft_iface)
214 continue;
215
216 send_packet_to_if(forw_packet, hard_iface);
217 }
218 rcu_read_unlock();
219
220out:
221 if (primary_if)
222 hardif_free_ref(primary_if);
223}
224
225static void realloc_packet_buffer(struct hard_iface *hard_iface, 81static void realloc_packet_buffer(struct hard_iface *hard_iface,
226 int new_len) 82 int new_len)
227{ 83{
228 unsigned char *new_buff; 84 unsigned char *new_buff;
229 struct batman_packet *batman_packet;
230 85
231 new_buff = kmalloc(new_len, GFP_ATOMIC); 86 new_buff = kmalloc(new_len, GFP_ATOMIC);
232 87
233 /* keep old buffer if kmalloc should fail */ 88 /* keep old buffer if kmalloc should fail */
234 if (new_buff) { 89 if (new_buff) {
235 memcpy(new_buff, hard_iface->packet_buff, 90 memcpy(new_buff, hard_iface->packet_buff,
236 sizeof(*batman_packet)); 91 BATMAN_OGM_LEN);
237 92
238 kfree(hard_iface->packet_buff); 93 kfree(hard_iface->packet_buff);
239 hard_iface->packet_buff = new_buff; 94 hard_iface->packet_buff = new_buff;
@@ -242,60 +97,48 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
242} 97}
243 98
244/* when calling this function (hard_iface == primary_if) has to be true */ 99/* when calling this function (hard_iface == primary_if) has to be true */
245static void prepare_packet_buffer(struct bat_priv *bat_priv, 100static int prepare_packet_buffer(struct bat_priv *bat_priv,
246 struct hard_iface *hard_iface) 101 struct hard_iface *hard_iface)
247{ 102{
248 int new_len; 103 int new_len;
249 struct batman_packet *batman_packet;
250 104
251 new_len = BAT_PACKET_LEN + 105 new_len = BATMAN_OGM_LEN +
252 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); 106 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
253 107
254 /* if we have too many changes for one packet don't send any 108 /* if we have too many changes for one packet don't send any
255 * and wait for the tt table request which will be fragmented */ 109 * and wait for the tt table request which will be fragmented */
256 if (new_len > hard_iface->soft_iface->mtu) 110 if (new_len > hard_iface->soft_iface->mtu)
257 new_len = BAT_PACKET_LEN; 111 new_len = BATMAN_OGM_LEN;
258 112
259 realloc_packet_buffer(hard_iface, new_len); 113 realloc_packet_buffer(hard_iface, new_len);
260 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
261 114
262 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv)); 115 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
263 116
264 /* reset the sending counter */ 117 /* reset the sending counter */
265 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); 118 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
266 119
267 batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv, 120 return tt_changes_fill_buffer(bat_priv,
268 hard_iface->packet_buff + BAT_PACKET_LEN, 121 hard_iface->packet_buff + BATMAN_OGM_LEN,
269 hard_iface->packet_len - BAT_PACKET_LEN); 122 hard_iface->packet_len - BATMAN_OGM_LEN);
270
271} 123}
272 124
273static void reset_packet_buffer(struct bat_priv *bat_priv, 125static int reset_packet_buffer(struct bat_priv *bat_priv,
274 struct hard_iface *hard_iface) 126 struct hard_iface *hard_iface)
275{ 127{
276 struct batman_packet *batman_packet; 128 realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
277 129 return 0;
278 realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
279
280 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
281 batman_packet->tt_num_changes = 0;
282} 130}
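
Both helpers now report a TT change count instead of patching the OGM header themselves; the contract, as read from this diff:

/* prepare_packet_buffer() returns the number of TT changes packed
 * behind the OGM header, reset_packet_buffer() returns 0, and
 * schedule_bat_ogm() keeps tt_num_changes at -1 when neither ran;
 * the value is handed to bat_ogm_schedule(), which now owns the
 * OGM header fields that used to be filled in here.
 */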
283 131
284void schedule_own_packet(struct hard_iface *hard_iface) 132void schedule_bat_ogm(struct hard_iface *hard_iface)
285{ 133{
286 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 134 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
287 struct hard_iface *primary_if; 135 struct hard_iface *primary_if;
288 unsigned long send_time; 136 int tt_num_changes = -1;
289 struct batman_packet *batman_packet;
290 int vis_server;
291 137
292 if ((hard_iface->if_status == IF_NOT_IN_USE) || 138 if ((hard_iface->if_status == IF_NOT_IN_USE) ||
293 (hard_iface->if_status == IF_TO_BE_REMOVED)) 139 (hard_iface->if_status == IF_TO_BE_REMOVED))
294 return; 140 return;
295 141
296 vis_server = atomic_read(&bat_priv->vis_mode);
297 primary_if = primary_if_get_selected(bat_priv);
298
299 /** 142 /**
300 * the interface gets activated here to avoid race conditions between 143 * the interface gets activated here to avoid race conditions between
301 * the moment of activating the interface in 144 * the moment of activating the interface in
@@ -306,124 +149,26 @@ void schedule_own_packet(struct hard_iface *hard_iface)
306 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 149 if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
307 hard_iface->if_status = IF_ACTIVE; 150 hard_iface->if_status = IF_ACTIVE;
308 151
152 primary_if = primary_if_get_selected(bat_priv);
153
309 if (hard_iface == primary_if) { 154 if (hard_iface == primary_if) {
310 /* if at least one change happened */ 155 /* if at least one change happened */
311 if (atomic_read(&bat_priv->tt_local_changes) > 0) { 156 if (atomic_read(&bat_priv->tt_local_changes) > 0) {
312 tt_commit_changes(bat_priv); 157 tt_commit_changes(bat_priv);
313 prepare_packet_buffer(bat_priv, hard_iface); 158 tt_num_changes = prepare_packet_buffer(bat_priv,
159 hard_iface);
314 } 160 }
315 161
316 /* if the changes have been sent enough times */ 162 /* if the changes have been sent often enough */
317 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt)) 163 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
318 reset_packet_buffer(bat_priv, hard_iface); 164 tt_num_changes = reset_packet_buffer(bat_priv,
165 hard_iface);
319 } 166 }
320 167
321 /**
322 * NOTE: packet_buff might just have been re-allocated in
323 * prepare_packet_buffer() or in reset_packet_buffer()
324 */
325 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
326
327 /* change sequence number to network order */
328 batman_packet->seqno =
329 htonl((uint32_t)atomic_read(&hard_iface->seqno));
330
331 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
332 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
333
334 if (vis_server == VIS_TYPE_SERVER_SYNC)
335 batman_packet->flags |= VIS_SERVER;
336 else
337 batman_packet->flags &= ~VIS_SERVER;
338
339 if ((hard_iface == primary_if) &&
340 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
341 batman_packet->gw_flags =
342 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
343 else
344 batman_packet->gw_flags = NO_FLAGS;
345
346 atomic_inc(&hard_iface->seqno);
347
348 slide_own_bcast_window(hard_iface);
349 send_time = own_send_time(bat_priv);
350 add_bat_packet_to_list(bat_priv,
351 hard_iface->packet_buff,
352 hard_iface->packet_len,
353 hard_iface, 1, send_time);
354
355 if (primary_if) 168 if (primary_if)
356 hardif_free_ref(primary_if); 169 hardif_free_ref(primary_if);
357}
358
359void schedule_forward_packet(struct orig_node *orig_node,
360 const struct ethhdr *ethhdr,
361 struct batman_packet *batman_packet,
362 int directlink,
363 struct hard_iface *if_incoming)
364{
365 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
366 struct neigh_node *router;
367 uint8_t in_tq, in_ttl, tq_avg = 0;
368 unsigned long send_time;
369 uint8_t tt_num_changes;
370
371 if (batman_packet->ttl <= 1) {
372 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
373 return;
374 }
375
376 router = orig_node_get_router(orig_node);
377
378 in_tq = batman_packet->tq;
379 in_ttl = batman_packet->ttl;
380 tt_num_changes = batman_packet->tt_num_changes;
381
382 batman_packet->ttl--;
383 memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
384
385 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
386 * of our best tq value */
387 if (router && router->tq_avg != 0) {
388
389 /* rebroadcast ogm of best ranking neighbor as is */
390 if (!compare_eth(router->addr, ethhdr->h_source)) {
391 batman_packet->tq = router->tq_avg;
392
393 if (router->last_ttl)
394 batman_packet->ttl = router->last_ttl - 1;
395 }
396
397 tq_avg = router->tq_avg;
398 }
399
400 if (router)
401 neigh_node_free_ref(router);
402
403 /* apply hop penalty */
404 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
405
406 bat_dbg(DBG_BATMAN, bat_priv,
407 "Forwarding packet: tq_orig: %i, tq_avg: %i, "
408 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
409 in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
410 batman_packet->ttl);
411
412 batman_packet->seqno = htonl(batman_packet->seqno);
413 batman_packet->tt_crc = htons(batman_packet->tt_crc);
414
415	/* switch off the primaries first hop flag when forwarding */
416 batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
417 if (directlink)
418 batman_packet->flags |= DIRECTLINK;
419 else
420 batman_packet->flags &= ~DIRECTLINK;
421 170
422 send_time = forward_send_time(); 171 bat_ogm_schedule(hard_iface, tt_num_changes);
423 add_bat_packet_to_list(bat_priv,
424 (unsigned char *)batman_packet,
425 sizeof(*batman_packet) + tt_len(tt_num_changes),
426 if_incoming, 0, send_time);
427} 172}
428 173
429static void forw_packet_free(struct forw_packet *forw_packet) 174static void forw_packet_free(struct forw_packet *forw_packet)
@@ -454,7 +199,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
454} 199}
455 200
456/* add a broadcast packet to the queue and setup timers. broadcast packets 201/* add a broadcast packet to the queue and setup timers. broadcast packets
457 * are sent multiple times to increase probability for beeing received. 202 * are sent multiple times to increase probability for being received.
458 * 203 *
459 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on 204 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
460 * errors. 205 * errors.
@@ -557,7 +302,7 @@ out:
557 atomic_inc(&bat_priv->bcast_queue_left); 302 atomic_inc(&bat_priv->bcast_queue_left);
558} 303}
559 304
560void send_outstanding_bat_packet(struct work_struct *work) 305void send_outstanding_bat_ogm_packet(struct work_struct *work)
561{ 306{
562 struct delayed_work *delayed_work = 307 struct delayed_work *delayed_work =
563 container_of(work, struct delayed_work, work); 308 container_of(work, struct delayed_work, work);
@@ -573,7 +318,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
573 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 318 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
574 goto out; 319 goto out;
575 320
576 send_packet(forw_packet); 321 bat_ogm_emit(forw_packet);
577 322
578 /** 323 /**
579 * we have to have at least one packet in the queue 324 * we have to have at least one packet in the queue
@@ -581,7 +326,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
581 * shutting down 326 * shutting down
582 */ 327 */
583 if (forw_packet->own) 328 if (forw_packet->own)
584 schedule_own_packet(forw_packet->if_incoming); 329 schedule_bat_ogm(forw_packet->if_incoming);
585 330
586out: 331out:
587 /* don't count own packet */ 332 /* don't count own packet */
@@ -612,7 +357,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
612 &bat_priv->forw_bcast_list, list) { 357 &bat_priv->forw_bcast_list, list) {
613 358
614 /** 359 /**
615 * if purge_outstanding_packets() was called with an argmument 360 * if purge_outstanding_packets() was called with an argument
616 * we delete only packets belonging to the given interface 361 * we delete only packets belonging to the given interface
617 */ 362 */
618 if ((hard_iface) && 363 if ((hard_iface) &&
@@ -641,7 +386,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
641 &bat_priv->forw_bat_list, list) { 386 &bat_priv->forw_bat_list, list) {
642 387
643 /** 388 /**
644 * if purge_outstanding_packets() was called with an argmument 389 * if purge_outstanding_packets() was called with an argument
645 * we delete only packets belonging to the given interface 390 * we delete only packets belonging to the given interface
646 */ 391 */
647 if ((hard_iface) && 392 if ((hard_iface) &&
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 1f2d1e877663..c8ca3ef7385b 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -24,15 +24,10 @@
24 24
25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
26 const uint8_t *dst_addr); 26 const uint8_t *dst_addr);
27void schedule_own_packet(struct hard_iface *hard_iface); 27void schedule_bat_ogm(struct hard_iface *hard_iface);
28void schedule_forward_packet(struct orig_node *orig_node,
29 const struct ethhdr *ethhdr,
30 struct batman_packet *batman_packet,
31 int directlink,
32 struct hard_iface *if_outgoing);
33int add_bcast_packet_to_list(struct bat_priv *bat_priv, 28int add_bcast_packet_to_list(struct bat_priv *bat_priv,
34 const struct sk_buff *skb, unsigned long delay); 29 const struct sk_buff *skb, unsigned long delay);
35void send_outstanding_bat_packet(struct work_struct *work); 30void send_outstanding_bat_ogm_packet(struct work_struct *work);
36void purge_outstanding_packets(struct bat_priv *bat_priv, 31void purge_outstanding_packets(struct bat_priv *bat_priv,
37 const struct hard_iface *hard_iface); 32 const struct hard_iface *hard_iface);
38 33
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 05dd35114a27..f9cc95728989 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -445,30 +445,31 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
445{ 445{
446 struct bat_priv *bat_priv = netdev_priv(dev); 446 struct bat_priv *bat_priv = netdev_priv(dev);
447 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 447 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
448 struct batman_packet *batman_packet; 448 struct batman_ogm_packet *batman_ogm_packet;
449 struct softif_neigh *softif_neigh = NULL; 449 struct softif_neigh *softif_neigh = NULL;
450 struct hard_iface *primary_if = NULL; 450 struct hard_iface *primary_if = NULL;
451 struct softif_neigh *curr_softif_neigh = NULL; 451 struct softif_neigh *curr_softif_neigh = NULL;
452 452
453 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 453 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
454 batman_packet = (struct batman_packet *) 454 batman_ogm_packet = (struct batman_ogm_packet *)
455 (skb->data + ETH_HLEN + VLAN_HLEN); 455 (skb->data + ETH_HLEN + VLAN_HLEN);
456 else 456 else
457 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN); 457 batman_ogm_packet = (struct batman_ogm_packet *)
458 (skb->data + ETH_HLEN);
458 459
459 if (batman_packet->version != COMPAT_VERSION) 460 if (batman_ogm_packet->version != COMPAT_VERSION)
460 goto out; 461 goto out;
461 462
462 if (batman_packet->packet_type != BAT_PACKET) 463 if (batman_ogm_packet->packet_type != BAT_OGM)
463 goto out; 464 goto out;
464 465
465 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 466 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
466 goto out; 467 goto out;
467 468
468 if (is_my_mac(batman_packet->orig)) 469 if (is_my_mac(batman_ogm_packet->orig))
469 goto out; 470 goto out;
470 471
471 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid); 472 softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
472 if (!softif_neigh) 473 if (!softif_neigh)
473 goto out; 474 goto out;
474 475
@@ -532,11 +533,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
532 if (!is_valid_ether_addr(addr->sa_data)) 533 if (!is_valid_ether_addr(addr->sa_data))
533 return -EADDRNOTAVAIL; 534 return -EADDRNOTAVAIL;
534 535
535 /* only modify transtable if it has been initialised before */ 536 /* only modify transtable if it has been initialized before */
536 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 537 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
537 tt_local_remove(bat_priv, dev->dev_addr, 538 tt_local_remove(bat_priv, dev->dev_addr,
538 "mac address changed", false); 539 "mac address changed", false);
539 tt_local_add(dev, addr->sa_data); 540 tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
540 } 541 }
541 542
542 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 543 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -595,11 +596,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
595 goto dropped; 596 goto dropped;
596 597
597 /* Register the client MAC in the transtable */ 598 /* Register the client MAC in the transtable */
598 tt_local_add(soft_iface, ethhdr->h_source); 599 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
599 600
600 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 601 orig_node = transtable_search(bat_priv, ethhdr->h_source,
602 ethhdr->h_dest);
601 do_bcast = is_multicast_ether_addr(ethhdr->h_dest); 603 do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
602 if (do_bcast || (orig_node && orig_node->gw_flags)) { 604 if (do_bcast || (orig_node && orig_node->gw_flags)) {
603 ret = gw_is_target(bat_priv, skb, orig_node); 605 ret = gw_is_target(bat_priv, skb, orig_node);
604 606
605 if (ret < 0) 607 if (ret < 0)
@@ -739,6 +741,9 @@ void interface_rx(struct net_device *soft_iface,
739 741
740 soft_iface->last_rx = jiffies; 742 soft_iface->last_rx = jiffies;
741 743
744 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
745 goto dropped;
746
742 netif_rx(skb); 747 netif_rx(skb);
743 goto out; 748 goto out;
744 749
@@ -796,10 +801,8 @@ struct net_device *softif_create(const char *name)
796 801
797 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup); 802 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
798 803
799 if (!soft_iface) { 804 if (!soft_iface)
800 pr_err("Unable to allocate the batman interface: %s\n", name);
801 goto out; 805 goto out;
802 }
803 806
804 ret = register_netdevice(soft_iface); 807 ret = register_netdevice(soft_iface);
805 if (ret < 0) { 808 if (ret < 0) {
@@ -812,6 +815,7 @@ struct net_device *softif_create(const char *name)
812 815
813 atomic_set(&bat_priv->aggregated_ogms, 1); 816 atomic_set(&bat_priv->aggregated_ogms, 1);
814 atomic_set(&bat_priv->bonding, 0); 817 atomic_set(&bat_priv->bonding, 0);
818 atomic_set(&bat_priv->ap_isolation, 0);
815 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); 819 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
816 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); 820 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
817 atomic_set(&bat_priv->gw_sel_class, 20); 821 atomic_set(&bat_priv->gw_sel_class, 20);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index fb6931d00cd7..c7aafc7c5ed4 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -137,10 +137,22 @@ static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
137 kfree_rcu(tt_local_entry, rcu); 137 kfree_rcu(tt_local_entry, rcu);
138} 138}
139 139
140static void tt_global_entry_free_rcu(struct rcu_head *rcu)
141{
142 struct tt_global_entry *tt_global_entry;
143
144 tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
145
146 if (tt_global_entry->orig_node)
147 orig_node_free_ref(tt_global_entry->orig_node);
148
149 kfree(tt_global_entry);
150}
151
140static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) 152static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
141{ 153{
142 if (atomic_dec_and_test(&tt_global_entry->refcount)) 154 if (atomic_dec_and_test(&tt_global_entry->refcount))
143 kfree_rcu(tt_global_entry, rcu); 155 call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
144} 156}
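
The switch from kfree_rcu() to call_rcu() above is not cosmetic; the design reason:

/* tt_global_entry_free_rcu() must drop the orig_node reference held
 * by the entry before the memory is released; kfree_rcu() can only
 * free the object itself, so a full call_rcu() callback is needed.
 */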
145 157
146static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, 158static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -183,7 +195,8 @@ static int tt_local_init(struct bat_priv *bat_priv)
183 return 1; 195 return 1;
184} 196}
185 197
186void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) 198void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
199 int ifindex)
187{ 200{
188 struct bat_priv *bat_priv = netdev_priv(soft_iface); 201 struct bat_priv *bat_priv = netdev_priv(soft_iface);
189 struct tt_local_entry *tt_local_entry = NULL; 202 struct tt_local_entry *tt_local_entry = NULL;
@@ -207,6 +220,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
207 memcpy(tt_local_entry->addr, addr, ETH_ALEN); 220 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
208 tt_local_entry->last_seen = jiffies; 221 tt_local_entry->last_seen = jiffies;
209 tt_local_entry->flags = NO_FLAGS; 222 tt_local_entry->flags = NO_FLAGS;
223 if (is_wifi_iface(ifindex))
224 tt_local_entry->flags |= TT_CLIENT_WIFI;
210 atomic_set(&tt_local_entry->refcount, 2); 225 atomic_set(&tt_local_entry->refcount, 2);
211 226
212 /* the batman interface mac address should never be purged */ 227 /* the batman interface mac address should never be purged */
@@ -329,7 +344,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
329 344
330 rcu_read_lock(); 345 rcu_read_lock();
331 __hlist_for_each_rcu(node, head) 346 __hlist_for_each_rcu(node, head)
332 buf_size += 21; 347 buf_size += 29;
333 rcu_read_unlock(); 348 rcu_read_unlock();
334 } 349 }
335 350
@@ -348,8 +363,19 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
348 rcu_read_lock(); 363 rcu_read_lock();
349 hlist_for_each_entry_rcu(tt_local_entry, node, 364 hlist_for_each_entry_rcu(tt_local_entry, node,
350 head, hash_entry) { 365 head, hash_entry) {
351 pos += snprintf(buff + pos, 22, " * %pM\n", 366 pos += snprintf(buff + pos, 30, " * %pM "
352 tt_local_entry->addr); 367 "[%c%c%c%c%c]\n",
368 tt_local_entry->addr,
369 (tt_local_entry->flags &
370 TT_CLIENT_ROAM ? 'R' : '.'),
371 (tt_local_entry->flags &
372 TT_CLIENT_NOPURGE ? 'P' : '.'),
373 (tt_local_entry->flags &
374 TT_CLIENT_NEW ? 'N' : '.'),
375 (tt_local_entry->flags &
376 TT_CLIENT_PENDING ? 'X' : '.'),
377 (tt_local_entry->flags &
378 TT_CLIENT_WIFI ? 'W' : '.'));
353 } 379 }
354 rcu_read_unlock(); 380 rcu_read_unlock();
355 } 381 }
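
The flag characters printed above, as defined by the conditionals in the code:

/* R = TT_CLIENT_ROAM (roaming), P = TT_CLIENT_NOPURGE (never purged),
 * N = TT_CLIENT_NEW, X = TT_CLIENT_PENDING (pending removal),
 * W = TT_CLIENT_WIFI (client learned on a wireless interface).
 */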
@@ -369,8 +395,8 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
369 tt_local_event(bat_priv, tt_local_entry->addr, 395 tt_local_event(bat_priv, tt_local_entry->addr,
370 tt_local_entry->flags | flags); 396 tt_local_entry->flags | flags);
371 397
372 /* The local client has to be merked as "pending to be removed" but has 398 /* The local client has to be marked as "pending to be removed" but has
373 * to be kept in the table in order to send it in an full tables 399 * to be kept in the table in order to send it in a full table
374 * response issued before the net ttvn increment (consistency check) */ 400 * response issued before the net ttvn increment (consistency check) */
375 tt_local_entry->flags |= TT_CLIENT_PENDING; 401 tt_local_entry->flags |= TT_CLIENT_PENDING;
376} 402}
@@ -495,7 +521,8 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
495 521
496/* caller must hold orig_node refcount */ 522/* caller must hold orig_node refcount */
497int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 523int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
498 const unsigned char *tt_addr, uint8_t ttvn, bool roaming) 524 const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
525 bool wifi)
499{ 526{
500 struct tt_global_entry *tt_global_entry; 527 struct tt_global_entry *tt_global_entry;
501 struct orig_node *orig_node_tmp; 528 struct orig_node *orig_node_tmp;
@@ -537,6 +564,9 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
537 tt_global_entry->roam_at = 0; 564 tt_global_entry->roam_at = 0;
538 } 565 }
539 566
567 if (wifi)
568 tt_global_entry->flags |= TT_CLIENT_WIFI;
569
540 bat_dbg(DBG_TT, bat_priv, 570 bat_dbg(DBG_TT, bat_priv,
541 "Creating new global tt entry: %pM (via %pM)\n", 571 "Creating new global tt entry: %pM (via %pM)\n",
542 tt_global_entry->addr, orig_node->orig); 572 tt_global_entry->addr, orig_node->orig);
@@ -582,8 +612,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
582 seq_printf(seq, 612 seq_printf(seq,
583 "Globally announced TT entries received via the mesh %s\n", 613 "Globally announced TT entries received via the mesh %s\n",
584 net_dev->name); 614 net_dev->name);
585 seq_printf(seq, " %-13s %s %-15s %s\n", 615 seq_printf(seq, " %-13s %s %-15s %s %s\n",
586 "Client", "(TTVN)", "Originator", "(Curr TTVN)"); 616 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
587 617
588 buf_size = 1; 618 buf_size = 1;
589 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via 619 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
@@ -593,7 +623,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
593 623
594 rcu_read_lock(); 624 rcu_read_lock();
595 __hlist_for_each_rcu(node, head) 625 __hlist_for_each_rcu(node, head)
596 buf_size += 59; 626 buf_size += 67;
597 rcu_read_unlock(); 627 rcu_read_unlock();
598 } 628 }
599 629
@@ -612,14 +642,20 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
612 rcu_read_lock(); 642 rcu_read_lock();
613 hlist_for_each_entry_rcu(tt_global_entry, node, 643 hlist_for_each_entry_rcu(tt_global_entry, node,
614 head, hash_entry) { 644 head, hash_entry) {
615 pos += snprintf(buff + pos, 61, 645 pos += snprintf(buff + pos, 69,
616 " * %pM (%3u) via %pM (%3u)\n", 646 " * %pM (%3u) via %pM (%3u) "
617 tt_global_entry->addr, 647 "[%c%c%c]\n", tt_global_entry->addr,
618 tt_global_entry->ttvn, 648 tt_global_entry->ttvn,
619 tt_global_entry->orig_node->orig, 649 tt_global_entry->orig_node->orig,
620 (uint8_t) atomic_read( 650 (uint8_t) atomic_read(
621 &tt_global_entry->orig_node-> 651 &tt_global_entry->orig_node->
622 last_ttvn)); 652 last_ttvn),
653 (tt_global_entry->flags &
654 TT_CLIENT_ROAM ? 'R' : '.'),
655 (tt_global_entry->flags &
656 TT_CLIENT_PENDING ? 'X' : '.'),
657 (tt_global_entry->flags &
658 TT_CLIENT_WIFI ? 'W' : '.'));
623 } 659 }
624 rcu_read_unlock(); 660 rcu_read_unlock();
625 } 661 }
@@ -686,6 +722,9 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
686 struct hlist_head *head; 722 struct hlist_head *head;
687 spinlock_t *list_lock; /* protects write access to the hash lists */ 723 spinlock_t *list_lock; /* protects write access to the hash lists */
688 724
725 if (!hash)
726 return;
727
689 for (i = 0; i < hash->size; i++) { 728 for (i = 0; i < hash->size; i++) {
690 head = &hash->table[i]; 729 head = &hash->table[i];
691 list_lock = &hash->list_locks[i]; 730 list_lock = &hash->list_locks[i];
@@ -774,30 +813,56 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
774 bat_priv->tt_global_hash = NULL; 813 bat_priv->tt_global_hash = NULL;
775} 814}
776 815
816static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
817 struct tt_global_entry *tt_global_entry)
818{
819 bool ret = false;
820
821 if (tt_local_entry->flags & TT_CLIENT_WIFI &&
822 tt_global_entry->flags & TT_CLIENT_WIFI)
823 ret = true;
824
825 return ret;
826}
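
How the helper above ties into the rest of this diff:

/* With ap_isolation enabled, two clients that both carry
 * TT_CLIENT_WIFI may not talk to each other: transtable_search()
 * returns NULL for such a source/destination pair, and interface_rx()
 * drops the frame via is_ap_isolated().
 */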
827
777struct orig_node *transtable_search(struct bat_priv *bat_priv, 828struct orig_node *transtable_search(struct bat_priv *bat_priv,
778 const uint8_t *addr) 829 const uint8_t *src, const uint8_t *addr)
779{ 830{
780 struct tt_global_entry *tt_global_entry; 831 struct tt_local_entry *tt_local_entry = NULL;
832 struct tt_global_entry *tt_global_entry = NULL;
781 struct orig_node *orig_node = NULL; 833 struct orig_node *orig_node = NULL;
782 834
783 tt_global_entry = tt_global_hash_find(bat_priv, addr); 835 if (src && atomic_read(&bat_priv->ap_isolation)) {
836 tt_local_entry = tt_local_hash_find(bat_priv, src);
837 if (!tt_local_entry)
838 goto out;
839 }
784 840
841 tt_global_entry = tt_global_hash_find(bat_priv, addr);
785 if (!tt_global_entry) 842 if (!tt_global_entry)
786 goto out; 843 goto out;
787 844
845 /* check whether the clients should not communicate due to AP
846 * isolation */
847 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
848 goto out;
849
788 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 850 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
789 goto free_tt; 851 goto out;
790 852
791 /* A global client marked as PENDING has already moved from that 853 /* A global client marked as PENDING has already moved from that
792 * originator */ 854 * originator */
793 if (tt_global_entry->flags & TT_CLIENT_PENDING) 855 if (tt_global_entry->flags & TT_CLIENT_PENDING)
794 goto free_tt; 856 goto out;
795 857
796 orig_node = tt_global_entry->orig_node; 858 orig_node = tt_global_entry->orig_node;
797 859
798free_tt:
799 tt_global_entry_free_ref(tt_global_entry);
800out: 860out:
861 if (tt_global_entry)
862 tt_global_entry_free_ref(tt_global_entry);
863 if (tt_local_entry)
864 tt_local_entry_free_ref(tt_local_entry);
865
801 return orig_node; 866 return orig_node;
802} 867}
803 868
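
The new src argument exists purely for the AP-isolation check: a frame is suppressed only when both the local source and the global destination were learned as wifi clients, so wired-to-wifi traffic still flows. A minimal sketch of the predicate with simplified structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLIENT_WIFI (1 << 4)	/* illustrative bit, not the kernel value */

struct tt_entry {
	uint16_t flags;
};

/* isolated only if *both* endpoints were learned over wifi */
static bool ap_isolated(const struct tt_entry *local_src,
			const struct tt_entry *global_dst)
{
	return (local_src->flags & CLIENT_WIFI) &&
	       (global_dst->flags & CLIENT_WIFI);
}

int main(void)
{
	struct tt_entry wifi = { .flags = CLIENT_WIFI };
	struct tt_entry wired = { .flags = 0 };

	printf("wifi  -> wifi: isolated=%d\n", ap_isolated(&wifi, &wifi));
	printf("wired -> wifi: isolated=%d\n", ap_isolated(&wired, &wifi));
	return 0;
}
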
@@ -999,7 +1064,6 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
999 tt_response = (struct tt_query_packet *)skb_put(skb, 1064 tt_response = (struct tt_query_packet *)skb_put(skb,
1000 tt_query_size + tt_len); 1065 tt_query_size + tt_len);
1001 tt_response->ttvn = ttvn; 1066 tt_response->ttvn = ttvn;
1002 tt_response->tt_data = htons(tt_tot);
1003 1067
1004 tt_change = (struct tt_change *)(skb->data + tt_query_size); 1068 tt_change = (struct tt_change *)(skb->data + tt_query_size);
1005 tt_count = 0; 1069 tt_count = 0;
@@ -1025,12 +1089,17 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1025 } 1089 }
1026 rcu_read_unlock(); 1090 rcu_read_unlock();
1027 1091
1092 /* store in the message the number of entries we have successfully
1093 * copied */
1094 tt_response->tt_data = htons(tt_count);
1095
1028out: 1096out:
1029 return skb; 1097 return skb;
1030} 1098}
1031 1099
1032int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node, 1100static int send_tt_request(struct bat_priv *bat_priv,
1033 uint8_t ttvn, uint16_t tt_crc, bool full_table) 1101 struct orig_node *dst_orig_node,
1102 uint8_t ttvn, uint16_t tt_crc, bool full_table)
1034{ 1103{
1035 struct sk_buff *skb = NULL; 1104 struct sk_buff *skb = NULL;
1036 struct tt_query_packet *tt_request; 1105 struct tt_query_packet *tt_request;
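
Moving the htons() below the copy loop changes what the header advertises: previously it carried the number of entries the sender intended to pack (tt_tot), now it carries the number actually serialized, so a response cut short by the available buffer stays self-describing. The pattern, sketched with an invented record layout:

#include <arpa/inet.h>	/* htons/ntohs */
#include <stdint.h>
#include <stdio.h>

struct response_hdr {
	uint16_t count_be;	/* number of records, network byte order */
};

/* copy what fits, then stamp the *actual* count into the header */
static size_t fill_response(struct response_hdr *hdr, uint8_t *payload,
			    const uint8_t *src, size_t want, size_t avail)
{
	size_t copied = 0;

	while (copied < want && copied < avail) {
		payload[copied] = src[copied];
		copied++;
	}

	hdr->count_be = htons((uint16_t)copied);
	return copied;
}

int main(void)
{
	struct response_hdr hdr;
	uint8_t out[4], in[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t n = fill_response(&hdr, out, in, 8, sizeof(out));

	printf("copied %zu, header says %u\n", n, ntohs(hdr.count_be));
	return 0;
}
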
@@ -1137,12 +1206,12 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1137 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1206 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1138 req_ttvn = tt_request->ttvn; 1207 req_ttvn = tt_request->ttvn;
1139 1208
1140 /* I have not the requested data */ 1209 /* I don't have the requested data */
1141 if (orig_ttvn != req_ttvn || 1210 if (orig_ttvn != req_ttvn ||
1142 tt_request->tt_data != req_dst_orig_node->tt_crc) 1211 tt_request->tt_data != req_dst_orig_node->tt_crc)
1143 goto out; 1212 goto out;
1144 1213
1145 /* If it has explicitly been requested the full table */ 1214 /* If the full table has been explicitly requested */
1146 if (tt_request->flags & TT_FULL_TABLE || 1215 if (tt_request->flags & TT_FULL_TABLE ||
1147 !req_dst_orig_node->tt_buff) 1216 !req_dst_orig_node->tt_buff)
1148 full_table = true; 1217 full_table = true;
@@ -1363,7 +1432,9 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
1363 (tt_change + i)->flags & TT_CLIENT_ROAM); 1432 (tt_change + i)->flags & TT_CLIENT_ROAM);
1364 else 1433 else
1365 if (!tt_global_add(bat_priv, orig_node, 1434 if (!tt_global_add(bat_priv, orig_node,
1366 (tt_change + i)->addr, ttvn, false)) 1435 (tt_change + i)->addr, ttvn, false,
1436 (tt_change + i)->flags &
1437 TT_CLIENT_WIFI))
1367 /* In case of problem while storing a 1438 /* In case of problem while storing a
1368 * global_entry, we stop the updating 1439 * global_entry, we stop the updating
1369 * procedure without committing the 1440 * procedure without committing the
@@ -1403,9 +1474,10 @@ out:
1403 orig_node_free_ref(orig_node); 1474 orig_node_free_ref(orig_node);
1404} 1475}
1405 1476
1406void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node, 1477static void tt_update_changes(struct bat_priv *bat_priv,
1407 uint16_t tt_num_changes, uint8_t ttvn, 1478 struct orig_node *orig_node,
1408 struct tt_change *tt_change) 1479 uint16_t tt_num_changes, uint8_t ttvn,
1480 struct tt_change *tt_change)
1409{ 1481{
1410 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, 1482 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1411 ttvn); 1483 ttvn);
@@ -1668,6 +1740,8 @@ static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
1668 rcu_read_lock(); 1740 rcu_read_lock();
1669 hlist_for_each_entry_rcu(tt_local_entry, node, 1741 hlist_for_each_entry_rcu(tt_local_entry, node,
1670 head, hash_entry) { 1742 head, hash_entry) {
1743 if (!(tt_local_entry->flags & flags))
1744 continue;
1671 tt_local_entry->flags &= ~flags; 1745 tt_local_entry->flags &= ~flags;
1672 atomic_inc(&bat_priv->num_local_tt); 1746 atomic_inc(&bat_priv->num_local_tt);
1673 } 1747 }
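
The added check matters for the counter, not for the flag clearing: clearing a bit that was never set is a no-op, but unconditionally bumping num_local_tt for such entries would inflate it. A toy reproduction:

#include <stdint.h>
#include <stdio.h>

#define FLAG_PENDING (1 << 10)	/* illustrative value */

struct entry { uint16_t flags; };

static int reset_flags(struct entry *e, int n, uint16_t flags)
{
	int counted = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* clearing a bit that is not set is a no-op and must not
		 * be counted */
		if (!(e[i].flags & flags))
			continue;
		e[i].flags &= ~flags;
		counted++;
	}
	return counted;
}

int main(void)
{
	struct entry tbl[3] = { { FLAG_PENDING }, { 0 }, { FLAG_PENDING } };

	/* only 2 of the 3 entries actually change */
	printf("reset %d entries\n", reset_flags(tbl, 3, FLAG_PENDING));
	return 0;
}
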
@@ -1720,3 +1794,90 @@ void tt_commit_changes(struct bat_priv *bat_priv)
1720 atomic_inc(&bat_priv->ttvn); 1794 atomic_inc(&bat_priv->ttvn);
1721 bat_priv->tt_poss_change = false; 1795 bat_priv->tt_poss_change = false;
1722} 1796}
1797
1798bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
1799{
1800 struct tt_local_entry *tt_local_entry = NULL;
1801 struct tt_global_entry *tt_global_entry = NULL;
1802 bool ret = true;
1803
1804 if (!atomic_read(&bat_priv->ap_isolation))
1805 return false;
1806
1807 tt_local_entry = tt_local_hash_find(bat_priv, dst);
1808 if (!tt_local_entry)
1809 goto out;
1810
1811 tt_global_entry = tt_global_hash_find(bat_priv, src);
1812 if (!tt_global_entry)
1813 goto out;
1814
1815 if (_is_ap_isolated(tt_local_entry, tt_global_entry))
1816 goto out;
1817
1818 ret = false;
1819
1820out:
1821 if (tt_global_entry)
1822 tt_global_entry_free_ref(tt_global_entry);
1823 if (tt_local_entry)
1824 tt_local_entry_free_ref(tt_local_entry);
1825 return ret;
1826}
1827
1828void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1829 const unsigned char *tt_buff, uint8_t tt_num_changes,
1830 uint8_t ttvn, uint16_t tt_crc)
1831{
1832 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1833 bool full_table = true;
1834
1835 /* the ttvn increased by one -> we can apply the attached changes */
1836 if (ttvn - orig_ttvn == 1) {
1837 /* the OGM could not contain the changes due to their size or
1838 * because they have already been sent TT_OGM_APPEND_MAX times.
1839 * In this case send a tt request */
1840 if (!tt_num_changes) {
1841 full_table = false;
1842 goto request_table;
1843 }
1844
1845 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
1846 (struct tt_change *)tt_buff);
1847
1848 /* Even if we received the precomputed crc with the OGM, we
1849 * prefer to recompute it to spot any possible inconsistency
1850 * in the global table */
1851 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1852
1853 /* The ttvn alone is not enough to guarantee consistency
1854 * because a single value could represent different states
1855 * (due to the wrap around). Thus a node has to check whether
1856 * the resulting table (after applying the changes) is still
1857 * consistent or not. E.g. a node could disconnect while its
1858 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
1859 * checking the CRC value is mandatory to detect the
1860 * inconsistency */
1861 if (orig_node->tt_crc != tt_crc)
1862 goto request_table;
1863
1864 /* Roaming phase is over: tables are in sync again. I can
1865 * unset the flag */
1866 orig_node->tt_poss_change = false;
1867 } else {
1868 /* if we missed more than one change or our tables are not
1869 * in sync anymore -> request fresh tt data */
1870 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
1871request_table:
1872 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
1873 "Need to retrieve the correct information "
1874 "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
1875 "%u num_changes: %u)\n", orig_node->orig, ttvn,
1876 orig_ttvn, tt_crc, orig_node->tt_crc,
1877 tt_num_changes);
1878 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
1879 full_table);
1880 return;
1881 }
1882 }
1883}
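
The wrap-around comment above deserves a concrete number: the version counter is 8 bits wide, so a node that misses exactly 256 table updates comes back seeing the same ttvn it left with, and only the CRC comparison exposes the stale table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ttvn = 200;

	/* a node disconnects, misses exactly 256 table updates and
	 * reconnects: the 8-bit counter has wrapped to the same value */
	uint8_t after_wrap = (uint8_t)(ttvn + 256);

	printf("ttvn before: %u, after 256 missed updates: %u\n",
	       ttvn, after_wrap);		/* prints 200 both times */

	/* equal ttvn but (in the real code) a different CRC: the only
	 * way to notice is the tt_crc comparison, which then triggers a
	 * full-table request */
	printf("ttvn alone detects the gap: %s\n",
	       after_wrap != ttvn ? "yes" : "no");
	return 0;
}
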
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index d4122cba53b8..30efd49881a3 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -26,15 +26,16 @@ int tt_len(int changes_num);
26int tt_changes_fill_buffer(struct bat_priv *bat_priv, 26int tt_changes_fill_buffer(struct bat_priv *bat_priv,
27 unsigned char *buff, int buff_len); 27 unsigned char *buff, int buff_len);
28int tt_init(struct bat_priv *bat_priv); 28int tt_init(struct bat_priv *bat_priv);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr); 29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
30 int ifindex);
30void tt_local_remove(struct bat_priv *bat_priv, 31void tt_local_remove(struct bat_priv *bat_priv,
31 const uint8_t *addr, const char *message, bool roaming); 32 const uint8_t *addr, const char *message, bool roaming);
32int tt_local_seq_print_text(struct seq_file *seq, void *offset); 33int tt_local_seq_print_text(struct seq_file *seq, void *offset);
33void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 34void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
34 const unsigned char *tt_buff, int tt_buff_len); 35 const unsigned char *tt_buff, int tt_buff_len);
35int tt_global_add(struct bat_priv *bat_priv, 36int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
36 struct orig_node *orig_node, const unsigned char *addr, 37 const unsigned char *addr, uint8_t ttvn, bool roaming,
37 uint8_t ttvn, bool roaming); 38 bool wifi);
38int tt_global_seq_print_text(struct seq_file *seq, void *offset); 39int tt_global_seq_print_text(struct seq_file *seq, void *offset);
39void tt_global_del_orig(struct bat_priv *bat_priv, 40void tt_global_del_orig(struct bat_priv *bat_priv,
40 struct orig_node *orig_node, const char *message); 41 struct orig_node *orig_node, const char *message);
@@ -42,25 +43,23 @@ void tt_global_del(struct bat_priv *bat_priv,
42 struct orig_node *orig_node, const unsigned char *addr, 43 struct orig_node *orig_node, const unsigned char *addr,
43 const char *message, bool roaming); 44 const char *message, bool roaming);
44struct orig_node *transtable_search(struct bat_priv *bat_priv, 45struct orig_node *transtable_search(struct bat_priv *bat_priv,
45 const uint8_t *addr); 46 const uint8_t *src, const uint8_t *addr);
46void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, 47void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
47 const unsigned char *tt_buff, uint8_t tt_num_changes); 48 const unsigned char *tt_buff, uint8_t tt_num_changes);
48uint16_t tt_local_crc(struct bat_priv *bat_priv); 49uint16_t tt_local_crc(struct bat_priv *bat_priv);
49uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); 50uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
50void tt_free(struct bat_priv *bat_priv); 51void tt_free(struct bat_priv *bat_priv);
51int send_tt_request(struct bat_priv *bat_priv,
52 struct orig_node *dst_orig_node, uint8_t hvn,
53 uint16_t tt_crc, bool full_table);
54bool send_tt_response(struct bat_priv *bat_priv, 52bool send_tt_response(struct bat_priv *bat_priv,
55 struct tt_query_packet *tt_request); 53 struct tt_query_packet *tt_request);
56void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
57 uint16_t tt_num_changes, uint8_t ttvn,
58 struct tt_change *tt_change);
59bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 54bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
60void handle_tt_response(struct bat_priv *bat_priv, 55void handle_tt_response(struct bat_priv *bat_priv,
61 struct tt_query_packet *tt_response); 56 struct tt_query_packet *tt_response);
62void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 57void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
63 struct orig_node *orig_node); 58 struct orig_node *orig_node);
64void tt_commit_changes(struct bat_priv *bat_priv); 59void tt_commit_changes(struct bat_priv *bat_priv);
60bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
61void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
62 const unsigned char *tt_buff, uint8_t tt_num_changes,
63 uint8_t ttvn, uint16_t tt_crc);
65 64
66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 65#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 25bd1db35370..ab8d0fe6df5a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -57,7 +57,7 @@ struct hard_iface {
57 * @batman_seqno_reset: time when the batman seqno window was reset 57 * @batman_seqno_reset: time when the batman seqno window was reset
58 * @gw_flags: flags related to gateway class 58 * @gw_flags: flags related to gateway class
59 * @flags: for now only VIS_SERVER flag 59 * @flags: for now only VIS_SERVER flag
60 * @last_real_seqno: last and best known squence number 60 * @last_real_seqno: last and best known sequence number
61 * @last_ttl: ttl of last received packet 61 * @last_ttl: ttl of last received packet
62 * @last_bcast_seqno: last broadcast sequence number received by this host 62 * @last_bcast_seqno: last broadcast sequence number received by this host
63 * 63 *
@@ -146,6 +146,7 @@ struct bat_priv {
146 atomic_t aggregated_ogms; /* boolean */ 146 atomic_t aggregated_ogms; /* boolean */
147 atomic_t bonding; /* boolean */ 147 atomic_t bonding; /* boolean */
148 atomic_t fragmentation; /* boolean */ 148 atomic_t fragmentation; /* boolean */
149 atomic_t ap_isolation; /* boolean */
149 atomic_t vis_mode; /* VIS_TYPE_* */ 150 atomic_t vis_mode; /* VIS_TYPE_* */
150 atomic_t gw_mode; /* GW_MODE_* */ 151 atomic_t gw_mode; /* GW_MODE_* */
151 atomic_t gw_sel_class; /* uint */ 152 atomic_t gw_sel_class; /* uint */
@@ -156,7 +157,7 @@ struct bat_priv {
156 atomic_t bcast_seqno; 157 atomic_t bcast_seqno;
157 atomic_t bcast_queue_left; 158 atomic_t bcast_queue_left;
158 atomic_t batman_queue_left; 159 atomic_t batman_queue_left;
159 atomic_t ttvn; /* tranlation table version number */ 160 atomic_t ttvn; /* translation table version number */
160 atomic_t tt_ogm_append_cnt; 161 atomic_t tt_ogm_append_cnt;
161 atomic_t tt_local_changes; /* changes registered in a OGM interval */ 162 atomic_t tt_local_changes; /* changes registered in a OGM interval */
162 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 163 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
@@ -223,22 +224,22 @@ struct socket_packet {
223 224
224struct tt_local_entry { 225struct tt_local_entry {
225 uint8_t addr[ETH_ALEN]; 226 uint8_t addr[ETH_ALEN];
227 struct hlist_node hash_entry;
226 unsigned long last_seen; 228 unsigned long last_seen;
227 uint16_t flags; 229 uint16_t flags;
228 atomic_t refcount; 230 atomic_t refcount;
229 struct rcu_head rcu; 231 struct rcu_head rcu;
230 struct hlist_node hash_entry;
231}; 232};
232 233
233struct tt_global_entry { 234struct tt_global_entry {
234 uint8_t addr[ETH_ALEN]; 235 uint8_t addr[ETH_ALEN];
236 struct hlist_node hash_entry; /* entry in the global table */
235 struct orig_node *orig_node; 237 struct orig_node *orig_node;
236 uint8_t ttvn; 238 uint8_t ttvn;
237 uint16_t flags; /* only TT_GLOBAL_ROAM is used */ 239 uint16_t flags; /* only TT_GLOBAL_ROAM is used */
238 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 240 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
239 atomic_t refcount; 241 atomic_t refcount;
240 struct rcu_head rcu; 242 struct rcu_head rcu;
241 struct hlist_node hash_entry; /* entry in the global table */
242}; 243};
243 244
244struct tt_change_node { 245struct tt_change_node {
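
In both structs the embedded hash node moves up next to the MAC address. The diff shown here does not state why; a plausible reading is cache locality, since a bucket walk touches exactly the list pointer and the compare key. A sketch of the intrusive-node layout and the container_of-style recovery it relies on (list type reduced to a bare pointer):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hnode { struct hnode *next; };	/* stand-in for hlist_node */

struct tt_entry {
	uint8_t addr[6];		/* the hash/compare key ... */
	struct hnode hash_entry;	/* ... kept adjacent to the node */
	unsigned long last_seen;
	uint16_t flags;
};

/* container_of-style recovery of the entry from its embedded node */
static struct tt_entry *entry_of(struct hnode *n)
{
	return (struct tt_entry *)((char *)n -
				   offsetof(struct tt_entry, hash_entry));
}

int main(void)
{
	struct tt_entry e = { .addr = { 0xaa, 0xbb } };

	printf("addr at %zu, hash_entry at %zu\n",
	       offsetof(struct tt_entry, addr),
	       offsetof(struct tt_entry, hash_entry));
	printf("recovered addr[0] = 0x%x\n",
	       entry_of(&e.hash_entry)->addr[0]);
	return 0;
}
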
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 32b125fb3d3b..07d1c1da89dd 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -299,8 +299,10 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
299 goto find_router; 299 goto find_router;
300 } 300 }
301 301
302 /* check for tt host - increases orig_node refcount */ 302 /* check for tt host - increases orig_node refcount.
303 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 303 * returns NULL in case of AP isolation */
304 orig_node = transtable_search(bat_priv, ethhdr->h_source,
305 ethhdr->h_dest);
304 306
305find_router: 307find_router:
306 /** 308 /**
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 62f54b954625..8fd5535544b9 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -24,7 +24,7 @@
24 24
25#include "packet.h" 25#include "packet.h"
26 26
27#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */ 27#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
28#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */ 28#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
29 29
30int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 30int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 8a1b98589d76..f81a6b668b0c 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -131,7 +131,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
131 return; 131 return;
132 } 132 }
133 133
134 /* its a new address, add it to the list */ 134 /* it's a new address, add it to the list */
135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
136 if (!entry) 136 if (!entry)
137 return; 137 return;
@@ -465,7 +465,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
465 /* try to add it */ 465 /* try to add it */
466 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 466 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
467 info, &info->hash_entry); 467 info, &info->hash_entry);
468 if (hash_added < 0) { 468 if (hash_added != 0) {
469 /* did not work (for some reason) */ 469 /* did not work (for some reason) */
470 kref_put(&info->refcount, free_info); 470 kref_put(&info->refcount, free_info);
471 info = NULL; 471 info = NULL;
@@ -887,10 +887,8 @@ int vis_init(struct bat_priv *bat_priv)
887 } 887 }
888 888
889 bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 889 bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
890 if (!bat_priv->my_vis_info) { 890 if (!bat_priv->my_vis_info)
891 pr_err("Can't initialize vis packet\n");
892 goto err; 891 goto err;
893 }
894 892
895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + 893 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
896 MAX_VIS_PACKET_SIZE + 894 MAX_VIS_PACKET_SIZE +
@@ -920,7 +918,7 @@ int vis_init(struct bat_priv *bat_priv)
920 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 918 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
921 bat_priv->my_vis_info, 919 bat_priv->my_vis_info,
922 &bat_priv->my_vis_info->hash_entry); 920 &bat_priv->my_vis_info->hash_entry);
923 if (hash_added < 0) { 921 if (hash_added != 0) {
924 pr_err("Can't add own vis packet into hash\n"); 922 pr_err("Can't add own vis packet into hash\n");
925 /* not in hash, need to remove it manually. */ 923 /* not in hash, need to remove it manually. */
926 kref_put(&bat_priv->my_vis_info->refcount, free_info); 924 kref_put(&bat_priv->my_vis_info->refcount, free_info);
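
Both vis.c call sites now treat any non-zero hash_add() return as failure. That only makes sense if hash_add() can also return a positive value, presumably for an element already present, which the old '< 0' check silently accepted; that convention is inferred from the change itself, not stated in it. Sketched:

#include <stdio.h>

/* assumed convention, inferred from the call sites:
 * 0 = added, >0 = already present, <0 = allocation/internal error */
static int demo_hash_add(int already_present, int alloc_fails)
{
	if (alloc_fails)
		return -1;
	if (already_present)
		return 1;
	return 0;
}

int main(void)
{
	int ret = demo_hash_add(1, 0);

	if (ret != 0)	/* new check: duplicates roll back too */
		printf("add failed (ret=%d), dropping reference\n", ret);

	if (ret < 0)	/* old check: a duplicate slipped through here */
		printf("never reached for duplicates\n");
	return 0;
}
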
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d161780..062124cd89cf 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -349,7 +349,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
349 } 349 }
350 350
351 chunk = min_t(unsigned int, skb->len, size); 351 chunk = min_t(unsigned int, skb->len, size);
352 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 352 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
353 skb_queue_head(&sk->sk_receive_queue, skb); 353 skb_queue_head(&sk->sk_receive_queue, skb);
354 if (!copied) 354 if (!copied)
355 copied = -EFAULT; 355 copied = -EFAULT;
@@ -361,7 +361,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
361 sock_recv_ts_and_drops(msg, sk, skb); 361 sock_recv_ts_and_drops(msg, sk, skb);
362 362
363 if (!(flags & MSG_PEEK)) { 363 if (!(flags & MSG_PEEK)) {
364 skb_pull(skb, chunk); 364 int skb_len = skb_headlen(skb);
365
366 if (chunk <= skb_len) {
367 __skb_pull(skb, chunk);
368 } else {
369 struct sk_buff *frag;
370
371 __skb_pull(skb, skb_len);
372 chunk -= skb_len;
373
374 skb_walk_frags(skb, frag) {
375 if (chunk <= frag->len) {
376 /* Pulling partial data */
377 skb->len -= chunk;
378 skb->data_len -= chunk;
379 __skb_pull(frag, chunk);
380 break;
381 } else if (frag->len) {
382 /* Pulling all frag data */
383 chunk -= frag->len;
384 skb->len -= frag->len;
385 skb->data_len -= frag->len;
386 __skb_pull(frag, frag->len);
387 }
388 }
389 }
390
365 if (skb->len) { 391 if (skb->len) {
366 skb_queue_head(&sk->sk_receive_queue, skb); 392 skb_queue_head(&sk->sk_receive_queue, skb);
367 break; 393 break;
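
Since the stream can now sit partly in a fragment list, a partial read has to keep three counters coherent: the parent buffer's total length, its data_len (the share living in fragments), and each fragment's own length. A userspace model of the consume path (structures invented for the sketch):

#include <stdio.h>

struct frag { int len; struct frag *next; };
struct pkt { int len, head_len, data_len; struct frag *frags; };

/* consume 'chunk' bytes: first from the linear head, then by walking
 * the fragment list, mirroring the accounting in the patch */
static void consume(struct pkt *p, int chunk)
{
	int from_head = chunk < p->head_len ? chunk : p->head_len;
	struct frag *f;

	p->head_len -= from_head;
	p->len -= from_head;
	chunk -= from_head;

	for (f = p->frags; f && chunk; f = f->next) {
		int take = chunk < f->len ? chunk : f->len;

		f->len -= take;
		p->len -= take;		/* total shrinks ... */
		p->data_len -= take;	/* ... and so does the frag share */
		chunk -= take;
	}
}

int main(void)
{
	struct frag f2 = { 30, NULL }, f1 = { 20, &f2 };
	struct pkt p = { .len = 60, .head_len = 10, .data_len = 50,
			 .frags = &f1 };

	consume(&p, 25);	/* 10 from the head, 15 from fragment 1 */
	printf("len=%d head=%d data=%d f1=%d f2=%d\n",
	       p.len, p.head_len, p.data_len, f1.len, f2.len);
	/* invariant holds: len == head_len + data_len (35 == 0 + 35) */
	return 0;
}
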
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index d9edfe8bf9d6..91bcd3a961ec 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -492,7 +492,10 @@ static int bnep_session(void *arg)
492 /* RX */ 492 /* RX */
493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
494 skb_orphan(skb); 494 skb_orphan(skb);
495 bnep_rx_frame(s, skb); 495 if (!skb_linearize(skb))
496 bnep_rx_frame(s, skb);
497 else
498 kfree_skb(skb);
496 } 499 }
497 500
498 if (sk->sk_state != BT_CONNECTED) 501 if (sk->sk_state != BT_CONNECTED)
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d4f5dff7c955..bc4086480d97 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = {
217 .ndo_stop = bnep_net_close, 217 .ndo_stop = bnep_net_close,
218 .ndo_start_xmit = bnep_net_xmit, 218 .ndo_start_xmit = bnep_net_xmit,
219 .ndo_validate_addr = eth_validate_addr, 219 .ndo_validate_addr = eth_validate_addr,
220 .ndo_set_multicast_list = bnep_net_set_mc_list, 220 .ndo_set_rx_mode = bnep_net_set_mc_list,
221 .ndo_set_mac_address = bnep_net_set_mac_addr, 221 .ndo_set_mac_address = bnep_net_set_mac_addr,
222 .ndo_tx_timeout = bnep_net_timeout, 222 .ndo_tx_timeout = bnep_net_timeout,
223 .ndo_change_mtu = eth_change_mtu, 223 .ndo_change_mtu = eth_change_mtu,
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 521baa4fe835..7d00ddf9e9dc 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -302,7 +302,10 @@ static int cmtp_session(void *arg)
302 302
303 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 303 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
304 skb_orphan(skb); 304 skb_orphan(skb);
305 cmtp_recv_frame(session, skb); 305 if (!skb_linearize(skb))
306 cmtp_recv_frame(session, skb);
307 else
308 kfree_skb(skb);
306 } 309 }
307 310
308 cmtp_process_transmit(session); 311 cmtp_process_transmit(session);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ea7f031f3b04..c1c597e3e198 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -56,15 +56,15 @@ static void hci_le_connect(struct hci_conn *conn)
56 conn->sec_level = BT_SECURITY_LOW; 56 conn->sec_level = BT_SECURITY_LOW;
57 57
58 memset(&cp, 0, sizeof(cp)); 58 memset(&cp, 0, sizeof(cp));
59 cp.scan_interval = cpu_to_le16(0x0004); 59 cp.scan_interval = cpu_to_le16(0x0060);
60 cp.scan_window = cpu_to_le16(0x0004); 60 cp.scan_window = cpu_to_le16(0x0030);
61 bacpy(&cp.peer_addr, &conn->dst); 61 bacpy(&cp.peer_addr, &conn->dst);
62 cp.peer_addr_type = conn->dst_type; 62 cp.peer_addr_type = conn->dst_type;
63 cp.conn_interval_min = cpu_to_le16(0x0008); 63 cp.conn_interval_min = cpu_to_le16(0x0028);
64 cp.conn_interval_max = cpu_to_le16(0x0100); 64 cp.conn_interval_max = cpu_to_le16(0x0038);
65 cp.supervision_timeout = cpu_to_le16(0x0064); 65 cp.supervision_timeout = cpu_to_le16(0x002a);
66 cp.min_ce_len = cpu_to_le16(0x0001); 66 cp.min_ce_len = cpu_to_le16(0x0000);
67 cp.max_ce_len = cpu_to_le16(0x0001); 67 cp.max_ce_len = cpu_to_le16(0x0000);
68 68
69 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 69 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
70} 70}
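
The replacement parameters are easier to judge once converted to the spec's units: scan interval and window tick in 0.625 ms steps, connection interval in 1.25 ms steps, supervision timeout in 10 ms steps. The old 0x0004/0x0004 pair asked for a 2.5 ms scan window every 2.5 ms; the new pair scans 30 ms out of every 60 ms, a 50% duty cycle:

#include <stdio.h>

int main(void)
{
	/* LE Create Connection parameter units per the core spec */
	printf("scan_interval  0x0060 = %.2f ms\n", 0x0060 * 0.625);
	printf("scan_window    0x0030 = %.2f ms\n", 0x0030 * 0.625);
	printf("conn_int_min   0x0028 = %.2f ms\n", 0x0028 * 1.25);
	printf("conn_int_max   0x0038 = %.2f ms\n", 0x0038 * 1.25);
	printf("superv_timeout 0x002a = %d ms\n",   0x002a * 10);
	return 0;
}
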
@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
218 cp.handle = cpu_to_le16(conn->handle); 218 cp.handle = cpu_to_le16(conn->handle);
219 memcpy(cp.ltk, ltk, sizeof(cp.ltk)); 219 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
220 cp.ediv = ediv; 220 cp.ediv = ediv;
221 memcpy(cp.rand, rand, sizeof(rand)); 221 memcpy(cp.rand, rand, sizeof(cp.rand));
222 222
223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
224} 224}
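
The memcpy fix above is the classic array-parameter pitfall: rand is declared as __u8 rand[8] in the parameter list, so inside the function it is a pointer and sizeof(rand) yields the pointer size (4 on 32-bit targets, i.e. a short copy), while sizeof(cp.rand) measures the real 8-byte destination. Demonstrated standalone:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd { uint8_t rand[8]; };

/* 'uint8_t rand[8]' in a parameter list is really 'uint8_t *rand' */
static void fill(struct cmd *cp, uint8_t rand[8])
{
	printf("sizeof(rand)     = %zu  (pointer size; compilers warn)\n",
	       sizeof(rand));
	printf("sizeof(cp->rand) = %zu  (the destination array)\n",
	       sizeof(cp->rand));

	memcpy(cp->rand, rand, sizeof(cp->rand));	/* the fixed form */
}

int main(void)
{
	struct cmd cp;
	uint8_t r[8] = { 0x42 };

	fill(&cp, r);
	printf("cp.rand[0] = 0x%x\n", cp.rand[0]);
	return 0;
}
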
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 56943add45cc..be84ae33ae36 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -613,7 +613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
613 if (!test_bit(HCI_RAW, &hdev->flags)) { 613 if (!test_bit(HCI_RAW, &hdev->flags)) {
614 set_bit(HCI_INIT, &hdev->flags); 614 set_bit(HCI_INIT, &hdev->flags);
615 __hci_request(hdev, hci_reset_req, 0, 615 __hci_request(hdev, hci_reset_req, 0,
616 msecs_to_jiffies(250)); 616 msecs_to_jiffies(HCI_INIT_TIMEOUT));
617 clear_bit(HCI_INIT, &hdev->flags); 617 clear_bit(HCI_INIT, &hdev->flags);
618 } 618 }
619 619
@@ -1312,59 +1312,41 @@ int hci_blacklist_clear(struct hci_dev *hdev)
1312int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) 1312int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1313{ 1313{
1314 struct bdaddr_list *entry; 1314 struct bdaddr_list *entry;
1315 int err;
1316 1315
1317 if (bacmp(bdaddr, BDADDR_ANY) == 0) 1316 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1318 return -EBADF; 1317 return -EBADF;
1319 1318
1320 hci_dev_lock_bh(hdev); 1319 if (hci_blacklist_lookup(hdev, bdaddr))
1321 1320 return -EEXIST;
1322 if (hci_blacklist_lookup(hdev, bdaddr)) {
1323 err = -EEXIST;
1324 goto err;
1325 }
1326 1321
1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 1322 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1328 if (!entry) { 1323 if (!entry)
1329 err = -ENOMEM; 1324 return -ENOMEM;
1330 goto err;
1331 }
1332 1325
1333 bacpy(&entry->bdaddr, bdaddr); 1326 bacpy(&entry->bdaddr, bdaddr);
1334 1327
1335 list_add(&entry->list, &hdev->blacklist); 1328 list_add(&entry->list, &hdev->blacklist);
1336 1329
1337 err = 0; 1330 return mgmt_device_blocked(hdev->id, bdaddr);
1338
1339err:
1340 hci_dev_unlock_bh(hdev);
1341 return err;
1342} 1331}
1343 1332
1344int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) 1333int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{ 1334{
1346 struct bdaddr_list *entry; 1335 struct bdaddr_list *entry;
1347 int err = 0;
1348
1349 hci_dev_lock_bh(hdev);
1350 1336
1351 if (bacmp(bdaddr, BDADDR_ANY) == 0) { 1337 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1352 hci_blacklist_clear(hdev); 1338 return hci_blacklist_clear(hdev);
1353 goto done;
1354 } 1339 }
1355 1340
1356 entry = hci_blacklist_lookup(hdev, bdaddr); 1341 entry = hci_blacklist_lookup(hdev, bdaddr);
1357 if (!entry) { 1342 if (!entry) {
1358 err = -ENOENT; 1343 return -ENOENT;
1359 goto done;
1360 } 1344 }
1361 1345
1362 list_del(&entry->list); 1346 list_del(&entry->list);
1363 kfree(entry); 1347 kfree(entry);
1364 1348
1365done: 1349 return mgmt_device_unblocked(hdev->id, bdaddr);
1366 hci_dev_unlock_bh(hdev);
1367 return err;
1368} 1350}
1369 1351
1370static void hci_clear_adv_cache(unsigned long arg) 1352static void hci_clear_adv_cache(unsigned long arg)
@@ -1523,11 +1505,6 @@ int hci_register_dev(struct hci_dev *hdev)
1523 if (!hdev->workqueue) 1505 if (!hdev->workqueue)
1524 goto nomem; 1506 goto nomem;
1525 1507
1526 hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1527 if (IS_ERR(hdev->tfm))
1528 BT_INFO("Failed to load transform for ecb(aes): %ld",
1529 PTR_ERR(hdev->tfm));
1530
1531 hci_register_sysfs(hdev); 1508 hci_register_sysfs(hdev);
1532 1509
1533 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1510 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1576,9 +1553,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
1576 !test_bit(HCI_SETUP, &hdev->flags)) 1553 !test_bit(HCI_SETUP, &hdev->flags))
1577 mgmt_index_removed(hdev->id); 1554 mgmt_index_removed(hdev->id);
1578 1555
1579 if (!IS_ERR(hdev->tfm))
1580 crypto_free_blkcipher(hdev->tfm);
1581
1582 hci_notify(hdev, HCI_DEV_UNREG); 1556 hci_notify(hdev, HCI_DEV_UNREG);
1583 1557
1584 if (hdev->rfkill) { 1558 if (hdev->rfkill) {
@@ -2074,6 +2048,9 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2074 min = c->sent; 2048 min = c->sent;
2075 conn = c; 2049 conn = c;
2076 } 2050 }
2051
2052 if (hci_conn_num(hdev, type) == num)
2053 break;
2077 } 2054 }
2078 2055
2079 if (conn) { 2056 if (conn) {
@@ -2131,6 +2108,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2131 2108
2132 BT_DBG("%s", hdev->name); 2109 BT_DBG("%s", hdev->name);
2133 2110
2111 if (!hci_conn_num(hdev, ACL_LINK))
2112 return;
2113
2134 if (!test_bit(HCI_RAW, &hdev->flags)) { 2114 if (!test_bit(HCI_RAW, &hdev->flags)) {
2135 /* ACL tx timeout must be longer than maximum 2115 /* ACL tx timeout must be longer than maximum
2136 * link supervision timeout (40.9 seconds) */ 2116 * link supervision timeout (40.9 seconds) */
@@ -2162,6 +2142,9 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2162 2142
2163 BT_DBG("%s", hdev->name); 2143 BT_DBG("%s", hdev->name);
2164 2144
2145 if (!hci_conn_num(hdev, SCO_LINK))
2146 return;
2147
2165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { 2148 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2149 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2167 BT_DBG("skb %p len %d", skb, skb->len); 2150 BT_DBG("skb %p len %d", skb, skb->len);
@@ -2182,6 +2165,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2182 2165
2183 BT_DBG("%s", hdev->name); 2166 BT_DBG("%s", hdev->name);
2184 2167
2168 if (!hci_conn_num(hdev, ESCO_LINK))
2169 return;
2170
2185 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2171 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2186 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2172 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2187 BT_DBG("skb %p len %d", skb, skb->len); 2173 BT_DBG("skb %p len %d", skb, skb->len);
@@ -2202,6 +2188,9 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2202 2188
2203 BT_DBG("%s", hdev->name); 2189 BT_DBG("%s", hdev->name);
2204 2190
2191 if (!hci_conn_num(hdev, LE_LINK))
2192 return;
2193
2205 if (!test_bit(HCI_RAW, &hdev->flags)) { 2194 if (!test_bit(HCI_RAW, &hdev->flags)) {
2206 /* LE tx timeout must be longer than maximum 2195 /* LE tx timeout must be longer than maximum
2207 * link supervision timeout (40.9 seconds) */ 2196 * link supervision timeout (40.9 seconds) */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7ef4eb4435fb..d7d96b6b1f0d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -898,16 +898,15 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
898 if (!cp) 898 if (!cp)
899 return; 899 return;
900 900
901 hci_dev_lock(hdev);
902
903 if (cp->enable == 0x01) { 901 if (cp->enable == 0x01) {
904 del_timer(&hdev->adv_timer); 902 del_timer(&hdev->adv_timer);
903
904 hci_dev_lock(hdev);
905 hci_adv_entries_clear(hdev); 905 hci_adv_entries_clear(hdev);
906 hci_dev_unlock(hdev);
906 } else if (cp->enable == 0x00) { 907 } else if (cp->enable == 0x00) {
907 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT); 908 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
908 } 909 }
909
910 hci_dev_unlock(hdev);
911} 910}
912 911
913static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 912static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1103,9 +1102,10 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1103 return 0; 1102 return 0;
1104 1103
1105 /* Only request authentication for SSP connections or non-SSP 1104 /* Only request authentication for SSP connections or non-SSP
1106 * devices with sec_level HIGH */ 1105 * devices with sec_level HIGH or if MITM protection is requested */
1107 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 1106 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1108 conn->pending_sec_level != BT_SECURITY_HIGH) 1107 conn->pending_sec_level != BT_SECURITY_HIGH &&
1108 !(conn->auth_type & 0x01))
1109 return 0; 1109 return 0;
1110 1110
1111 return 1; 1111 return 1;
@@ -1412,7 +1412,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1412 conn->state = BT_CONFIG; 1412 conn->state = BT_CONFIG;
1413 hci_conn_hold(conn); 1413 hci_conn_hold(conn);
1414 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1414 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1415 mgmt_connected(hdev->id, &ev->bdaddr); 1415 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
1416 } else 1416 } else
1417 conn->state = BT_CONNECTED; 1417 conn->state = BT_CONNECTED;
1418 1418
@@ -2174,7 +2174,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2174 hci_dev_lock(hdev); 2174 hci_dev_lock(hdev);
2175 2175
2176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2177 if (conn && conn->state == BT_CONNECTED) { 2177 if (!conn)
2178 goto unlock;
2179
2180 if (conn->state == BT_CONNECTED) {
2178 hci_conn_hold(conn); 2181 hci_conn_hold(conn);
2179 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2182 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2180 hci_conn_put(conn); 2183 hci_conn_put(conn);
@@ -2194,6 +2197,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2194 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); 2197 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2195 } 2198 }
2196 2199
2200unlock:
2197 hci_dev_unlock(hdev); 2201 hci_dev_unlock(hdev);
2198} 2202}
2199 2203
@@ -2816,7 +2820,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2816 goto unlock; 2820 goto unlock;
2817 } 2821 }
2818 2822
2819 mgmt_connected(hdev->id, &ev->bdaddr); 2823 mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
2820 2824
2821 conn->sec_level = BT_SECURITY_LOW; 2825 conn->sec_level = BT_SECURITY_LOW;
2822 conn->handle = __le16_to_cpu(ev->handle); 2826 conn->handle = __le16_to_cpu(ev->handle);
@@ -2834,19 +2838,17 @@ unlock:
2834static inline void hci_le_adv_report_evt(struct hci_dev *hdev, 2838static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2835 struct sk_buff *skb) 2839 struct sk_buff *skb)
2836{ 2840{
2837 struct hci_ev_le_advertising_info *ev; 2841 u8 num_reports = skb->data[0];
2838 u8 num_reports; 2842 void *ptr = &skb->data[1];
2839
2840 num_reports = skb->data[0];
2841 ev = (void *) &skb->data[1];
2842 2843
2843 hci_dev_lock(hdev); 2844 hci_dev_lock(hdev);
2844 2845
2845 hci_add_adv_entry(hdev, ev); 2846 while (num_reports--) {
2847 struct hci_ev_le_advertising_info *ev = ptr;
2846 2848
2847 while (--num_reports) {
2848 ev = (void *) (ev->data + ev->length + 1);
2849 hci_add_adv_entry(hdev, ev); 2849 hci_add_adv_entry(hdev, ev);
2850
2851 ptr += sizeof(*ev) + ev->length + 1;
2850 } 2852 }
2851 2853
2852 hci_dev_unlock(hdev); 2854 hci_dev_unlock(hdev);
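
The old loop processed one report before ever looking at the count, so an event carrying zero reports would be parsed as garbage (and --num_reports would underflow). The rewrite tests the count first and advances by the whole record: fixed header, variable data, plus one trailing byte, which per the LE Advertising Report format should be the per-report RSSI. A standalone model with a simplified record (the real event also carries an address-type field):

#include <stdint.h>
#include <stdio.h>

/* simplified per-report layout: fixed header, variable data, then one
 * trailing RSSI byte */
struct adv_info {
	uint8_t evt_type;
	uint8_t bdaddr[6];
	uint8_t length;
	uint8_t data[];
};

static void parse(const uint8_t *buf)
{
	uint8_t num_reports = buf[0];
	const uint8_t *ptr = &buf[1];

	/* testing the count first means zero reports parse cleanly */
	while (num_reports--) {
		const struct adv_info *ev = (const void *)ptr;

		printf("report: type=%u len=%u rssi=%d\n",
		       ev->evt_type, ev->length,
		       (int8_t)ptr[sizeof(*ev) + ev->length]);

		ptr += sizeof(*ev) + ev->length + 1;	/* whole record */
	}
}

int main(void)
{
	uint8_t buf[] = {
		2,					/* two reports */
		0, 1, 2, 3, 4, 5, 6, 1, 0xaa, (uint8_t)-40,
		3, 6, 5, 4, 3, 2, 1, 0, (uint8_t)-55,
	};

	parse(buf);
	return 0;
}
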
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ff02cf5e77cc..f6afe3d76a66 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -183,21 +183,35 @@ static int hci_sock_release(struct socket *sock)
183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) 183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
184{ 184{
185 bdaddr_t bdaddr; 185 bdaddr_t bdaddr;
186 int err;
186 187
187 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 188 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
188 return -EFAULT; 189 return -EFAULT;
189 190
190 return hci_blacklist_add(hdev, &bdaddr); 191 hci_dev_lock_bh(hdev);
192
193 err = hci_blacklist_add(hdev, &bdaddr);
194
195 hci_dev_unlock_bh(hdev);
196
197 return err;
191} 198}
192 199
193static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) 200static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
194{ 201{
195 bdaddr_t bdaddr; 202 bdaddr_t bdaddr;
203 int err;
196 204
197 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 205 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
198 return -EFAULT; 206 return -EFAULT;
199 207
200 return hci_blacklist_del(hdev, &bdaddr); 208 hci_dev_lock_bh(hdev);
209
210 err = hci_blacklist_del(hdev, &bdaddr);
211
212 hci_dev_unlock_bh(hdev);
213
214 return err;
201} 215}
202 216
203/* Ioctls that require bound socket */ 217/* Ioctls that require bound socket */
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a6c3aa8be1f7..661b461cf0b0 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -5,6 +5,7 @@
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/debugfs.h> 6#include <linux/debugfs.h>
7#include <linux/seq_file.h> 7#include <linux/seq_file.h>
8#include <linux/module.h>
8 9
9#include <net/bluetooth/bluetooth.h> 10#include <net/bluetooth/bluetooth.h>
10#include <net/bluetooth/hci_core.h> 11#include <net/bluetooth/hci_core.h>
@@ -23,6 +24,8 @@ static inline char *link_typetostr(int type)
23 return "SCO"; 24 return "SCO";
24 case ESCO_LINK: 25 case ESCO_LINK:
25 return "eSCO"; 26 return "eSCO";
27 case LE_LINK:
28 return "LE";
26 default: 29 default:
27 return "UNKNOWN"; 30 return "UNKNOWN";
28 } 31 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fb68f344c34a..075a3e920caf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -716,12 +716,18 @@ static int hidp_session(void *arg)
716 716
717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
718 skb_orphan(skb); 718 skb_orphan(skb);
719 hidp_recv_ctrl_frame(session, skb); 719 if (!skb_linearize(skb))
720 hidp_recv_ctrl_frame(session, skb);
721 else
722 kfree_skb(skb);
720 } 723 }
721 724
722 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { 725 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
723 skb_orphan(skb); 726 skb_orphan(skb);
724 hidp_recv_intr_frame(session, skb); 727 if (!skb_linearize(skb))
728 hidp_recv_intr_frame(session, skb);
729 else
730 kfree_skb(skb);
725 } 731 }
726 732
727 hidp_process_transmit(session); 733 hidp_process_transmit(session);
@@ -872,6 +878,9 @@ static int hidp_start(struct hid_device *hid)
872 struct hidp_session *session = hid->driver_data; 878 struct hidp_session *session = hid->driver_data;
873 struct hid_report *report; 879 struct hid_report *report;
874 880
881 if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
882 return 0;
883
875 list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. 884 list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
876 report_list, list) 885 report_list, list)
877 hidp_send_report(session, report); 886 hidp_send_report(session, report);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bdb482bbe6..8cd12917733b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -907,6 +907,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK) 907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn); 908 l2cap_le_conn_ready(conn);
909 909
910 if (conn->hcon->out && conn->hcon->type == LE_LINK)
911 smp_conn_security(conn, conn->hcon->pending_sec_level);
912
910 read_lock(&conn->chan_lock); 913 read_lock(&conn->chan_lock);
911 914
912 list_for_each_entry(chan, &conn->chan_l, list) { 915 list_for_each_entry(chan, &conn->chan_l, list) {
@@ -986,8 +989,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 989 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer); 990 del_timer_sync(&conn->info_timer);
988 991
989 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) 992 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
990 del_timer(&conn->security_timer); 993 del_timer(&conn->security_timer);
994 smp_chan_destroy(conn);
995 }
991 996
992 hcon->l2cap_data = NULL; 997 hcon->l2cap_data = NULL;
993 kfree(conn); 998 kfree(conn);
@@ -1240,7 +1245,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1240 __clear_retrans_timer(chan); 1245 __clear_retrans_timer(chan);
1241} 1246}
1242 1247
1243void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 1248static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1244{ 1249{
1245 struct hci_conn *hcon = chan->conn->hcon; 1250 struct hci_conn *hcon = chan->conn->hcon;
1246 u16 flags; 1251 u16 flags;
@@ -1256,7 +1261,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1256 hci_send_acl(hcon, skb, flags); 1261 hci_send_acl(hcon, skb, flags);
1257} 1262}
1258 1263
1259void l2cap_streaming_send(struct l2cap_chan *chan) 1264static void l2cap_streaming_send(struct l2cap_chan *chan)
1260{ 1265{
1261 struct sk_buff *skb; 1266 struct sk_buff *skb;
1262 u16 control, fcs; 1267 u16 control, fcs;
@@ -1322,7 +1327,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1322 l2cap_do_send(chan, tx_skb); 1327 l2cap_do_send(chan, tx_skb);
1323} 1328}
1324 1329
1325int l2cap_ertm_send(struct l2cap_chan *chan) 1330static int l2cap_ertm_send(struct l2cap_chan *chan)
1326{ 1331{
1327 struct sk_buff *skb, *tx_skb; 1332 struct sk_buff *skb, *tx_skb;
1328 u16 control, fcs; 1333 u16 control, fcs;
@@ -1460,7 +1465,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1460 return sent; 1465 return sent;
1461} 1466}
1462 1467
1463struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1468static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1464{ 1469{
1465 struct sock *sk = chan->sk; 1470 struct sock *sk = chan->sk;
1466 struct l2cap_conn *conn = chan->conn; 1471 struct l2cap_conn *conn = chan->conn;
@@ -1490,7 +1495,7 @@ struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr
1490 return skb; 1495 return skb;
1491} 1496}
1492 1497
1493struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1498static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1494{ 1499{
1495 struct sock *sk = chan->sk; 1500 struct sock *sk = chan->sk;
1496 struct l2cap_conn *conn = chan->conn; 1501 struct l2cap_conn *conn = chan->conn;
@@ -1519,7 +1524,9 @@ struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *m
1519 return skb; 1524 return skb;
1520} 1525}
1521 1526
1522struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1527static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1528 struct msghdr *msg, size_t len,
1529 u16 control, u16 sdulen)
1523{ 1530{
1524 struct sock *sk = chan->sk; 1531 struct sock *sk = chan->sk;
1525 struct l2cap_conn *conn = chan->conn; 1532 struct l2cap_conn *conn = chan->conn;
@@ -1565,7 +1572,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *
1565 return skb; 1572 return skb;
1566} 1573}
1567 1574
1568int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1575static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1569{ 1576{
1570 struct sk_buff *skb; 1577 struct sk_buff *skb;
1571 struct sk_buff_head sar_queue; 1578 struct sk_buff_head sar_queue;
@@ -3121,102 +3128,104 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
3121 return 0; 3128 return 0;
3122} 3129}
3123 3130
3124static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3131static void append_skb_frag(struct sk_buff *skb,
3132 struct sk_buff *new_frag, struct sk_buff **last_frag)
3125{ 3133{
3126 struct sk_buff *_skb; 3134 /* skb->len reflects data in skb as well as all fragments
3127 int err; 3135 * skb->data_len reflects only data in fragments
3136 */
3137 if (!skb_has_frag_list(skb))
3138 skb_shinfo(skb)->frag_list = new_frag;
3139
3140 new_frag->next = NULL;
3141
3142 (*last_frag)->next = new_frag;
3143 *last_frag = new_frag;
3144
3145 skb->len += new_frag->len;
3146 skb->data_len += new_frag->len;
3147 skb->truesize += new_frag->truesize;
3148}
3149
3150static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3151{
3152 int err = -EINVAL;
3128 3153
3129 switch (control & L2CAP_CTRL_SAR) { 3154 switch (control & L2CAP_CTRL_SAR) {
3130 case L2CAP_SDU_UNSEGMENTED: 3155 case L2CAP_SDU_UNSEGMENTED:
3131 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) 3156 if (chan->sdu)
3132 goto drop; 3157 break;
3133 3158
3134 return chan->ops->recv(chan->data, skb); 3159 err = chan->ops->recv(chan->data, skb);
3160 break;
3135 3161
3136 case L2CAP_SDU_START: 3162 case L2CAP_SDU_START:
3137 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) 3163 if (chan->sdu)
3138 goto drop; 3164 break;
3139 3165
3140 chan->sdu_len = get_unaligned_le16(skb->data); 3166 chan->sdu_len = get_unaligned_le16(skb->data);
3167 skb_pull(skb, 2);
3141 3168
3142 if (chan->sdu_len > chan->imtu) 3169 if (chan->sdu_len > chan->imtu) {
3143 goto disconnect; 3170 err = -EMSGSIZE;
3144 3171 break;
3145 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC); 3172 }
3146 if (!chan->sdu)
3147 return -ENOMEM;
3148 3173
3149 /* pull sdu_len bytes only after alloc, because of Local Busy 3174 if (skb->len >= chan->sdu_len)
3150 * condition we have to be sure that this will be executed 3175 break;
3151 * only once, i.e., when alloc does not fail */
3152 skb_pull(skb, 2);
3153 3176
3154 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3177 chan->sdu = skb;
3178 chan->sdu_last_frag = skb;
3155 3179
3156 set_bit(CONN_SAR_SDU, &chan->conn_state); 3180 skb = NULL;
3157 chan->partial_sdu_len = skb->len; 3181 err = 0;
3158 break; 3182 break;
3159 3183
3160 case L2CAP_SDU_CONTINUE: 3184 case L2CAP_SDU_CONTINUE:
3161 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3162 goto disconnect;
3163
3164 if (!chan->sdu) 3185 if (!chan->sdu)
3165 goto disconnect; 3186 break;
3166 3187
3167 chan->partial_sdu_len += skb->len; 3188 append_skb_frag(chan->sdu, skb,
3168 if (chan->partial_sdu_len > chan->sdu_len) 3189 &chan->sdu_last_frag);
3169 goto drop; 3190 skb = NULL;
3170 3191
3171 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3192 if (chan->sdu->len >= chan->sdu_len)
3193 break;
3172 3194
3195 err = 0;
3173 break; 3196 break;
3174 3197
3175 case L2CAP_SDU_END: 3198 case L2CAP_SDU_END:
3176 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3177 goto disconnect;
3178
3179 if (!chan->sdu) 3199 if (!chan->sdu)
3180 goto disconnect; 3200 break;
3181
3182 chan->partial_sdu_len += skb->len;
3183
3184 if (chan->partial_sdu_len > chan->imtu)
3185 goto drop;
3186 3201
3187 if (chan->partial_sdu_len != chan->sdu_len) 3202 append_skb_frag(chan->sdu, skb,
3188 goto drop; 3203 &chan->sdu_last_frag);
3204 skb = NULL;
3189 3205
3190 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3206 if (chan->sdu->len != chan->sdu_len)
3207 break;
3191 3208
3192 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3209 err = chan->ops->recv(chan->data, chan->sdu);
3193 if (!_skb) {
3194 return -ENOMEM;
3195 }
3196 3210
3197 err = chan->ops->recv(chan->data, _skb); 3211 if (!err) {
3198 if (err < 0) { 3212 /* Reassembly complete */
3199 kfree_skb(_skb); 3213 chan->sdu = NULL;
3200 return err; 3214 chan->sdu_last_frag = NULL;
3215 chan->sdu_len = 0;
3201 } 3216 }
3202
3203 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3204
3205 kfree_skb(chan->sdu);
3206 break; 3217 break;
3207 } 3218 }
3208 3219
3209 kfree_skb(skb); 3220 if (err) {
3210 return 0; 3221 kfree_skb(skb);
3211 3222 kfree_skb(chan->sdu);
3212drop: 3223 chan->sdu = NULL;
3213 kfree_skb(chan->sdu); 3224 chan->sdu_last_frag = NULL;
3214 chan->sdu = NULL; 3225 chan->sdu_len = 0;
3226 }
3215 3227
3216disconnect: 3228 return err;
3217 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3218 kfree_skb(skb);
3219 return 0;
3220} 3229}
3221 3230
3222static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 3231static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
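
The rewritten reassembly no longer memcpy()s every segment into a preallocated SDU; append_skb_frag() links the incoming buffer onto the SDU's fragment list in O(1) through a tail pointer and only adjusts the byte counters. The accounting invariant, modeled in userspace:

#include <stdio.h>

struct buf {
	int len, data_len;
	struct buf *frag_list, *next;
};

/* link new_frag onto the SDU's fragment list in O(1) via a tail
 * pointer, adjusting counters instead of copying payload */
static void append_frag(struct buf *sdu, struct buf *new_frag,
			struct buf **last_frag)
{
	if (!sdu->frag_list)
		sdu->frag_list = new_frag;
	else
		(*last_frag)->next = new_frag;

	new_frag->next = NULL;
	*last_frag = new_frag;

	sdu->len += new_frag->len;	/* total grows by the child ... */
	sdu->data_len += new_frag->len;	/* ... all of it as frag data */
}

int main(void)
{
	struct buf sdu = { .len = 100 };	/* SDU start segment */
	struct buf c1 = { .len = 40 }, c2 = { .len = 25 };
	struct buf *tail = &sdu;

	append_frag(&sdu, &c1, &tail);
	append_frag(&sdu, &c2, &tail);
	printf("sdu len=%d data_len=%d\n", sdu.len, sdu.data_len);
	return 0;
}
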
@@ -3270,99 +3279,6 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3270 } 3279 }
3271} 3280}
3272 3281
3273static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3274{
3275 struct sk_buff *_skb;
3276 int err = -EINVAL;
3277
3278 /*
3279 * TODO: We have to notify the userland if some data is lost with the
3280 * Streaming Mode.
3281 */
3282
3283 switch (control & L2CAP_CTRL_SAR) {
3284 case L2CAP_SDU_UNSEGMENTED:
3285 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3286 kfree_skb(chan->sdu);
3287 break;
3288 }
3289
3290 err = chan->ops->recv(chan->data, skb);
3291 if (!err)
3292 return 0;
3293
3294 break;
3295
3296 case L2CAP_SDU_START:
3297 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3298 kfree_skb(chan->sdu);
3299 break;
3300 }
3301
3302 chan->sdu_len = get_unaligned_le16(skb->data);
3303 skb_pull(skb, 2);
3304
3305 if (chan->sdu_len > chan->imtu) {
3306 err = -EMSGSIZE;
3307 break;
3308 }
3309
3310 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3311 if (!chan->sdu) {
3312 err = -ENOMEM;
3313 break;
3314 }
3315
3316 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3317
3318 set_bit(CONN_SAR_SDU, &chan->conn_state);
3319 chan->partial_sdu_len = skb->len;
3320 err = 0;
3321 break;
3322
3323 case L2CAP_SDU_CONTINUE:
3324 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3325 break;
3326
3327 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3328
3329 chan->partial_sdu_len += skb->len;
3330 if (chan->partial_sdu_len > chan->sdu_len)
3331 kfree_skb(chan->sdu);
3332 else
3333 err = 0;
3334
3335 break;
3336
3337 case L2CAP_SDU_END:
3338 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3339 break;
3340
3341 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3342
3343 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3344 chan->partial_sdu_len += skb->len;
3345
3346 if (chan->partial_sdu_len > chan->imtu)
3347 goto drop;
3348
3349 if (chan->partial_sdu_len == chan->sdu_len) {
3350 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3351 err = chan->ops->recv(chan->data, _skb);
3352 if (err < 0)
3353 kfree_skb(_skb);
3354 }
3355 err = 0;
3356
3357drop:
3358 kfree_skb(chan->sdu);
3359 break;
3360 }
3361
3362 kfree_skb(skb);
3363 return err;
3364}
3365
3366static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) 3282static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3367{ 3283{
3368 struct sk_buff *skb; 3284 struct sk_buff *skb;
@@ -3377,7 +3293,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3377 3293
3378 skb = skb_dequeue(&chan->srej_q); 3294 skb = skb_dequeue(&chan->srej_q);
3379 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3295 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3380 err = l2cap_ertm_reassembly_sdu(chan, skb, control); 3296 err = l2cap_reassemble_sdu(chan, skb, control);
3381 3297
3382 if (err < 0) { 3298 if (err < 0) {
3383 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3299 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3537,7 +3453,7 @@ expected:
3537 return 0; 3453 return 0;
3538 } 3454 }
3539 3455
3540 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control); 3456 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3541 chan->buffer_seq = (chan->buffer_seq + 1) % 64; 3457 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3542 if (err < 0) { 3458 if (err < 0) {
3543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3459 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3853,12 +3769,20 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3853 3769
3854 tx_seq = __get_txseq(control); 3770 tx_seq = __get_txseq(control);
3855 3771
3856 if (chan->expected_tx_seq == tx_seq) 3772 if (chan->expected_tx_seq != tx_seq) {
3857 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3773 /* Frame(s) missing - must discard partial SDU */
3858 else 3774 kfree_skb(chan->sdu);
3859 chan->expected_tx_seq = (tx_seq + 1) % 64; 3775 chan->sdu = NULL;
3776 chan->sdu_last_frag = NULL;
3777 chan->sdu_len = 0;
3778
3779 /* TODO: Notify userland of missing data */
3780 }
3781
3782 chan->expected_tx_seq = (tx_seq + 1) % 64;
3860 3783
3861 l2cap_streaming_reassembly_sdu(chan, skb, control); 3784 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3785 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3862 3786
3863 goto done; 3787 goto done;
3864 3788
@@ -4093,6 +4017,11 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4093 4017
4094 BT_DBG("conn %p", conn); 4018 BT_DBG("conn %p", conn);
4095 4019
4020 if (hcon->type == LE_LINK) {
4021 smp_distribute_keys(conn, 0);
4022 del_timer(&conn->security_timer);
4023 }
4024
4096 read_lock(&conn->chan_lock); 4025 read_lock(&conn->chan_lock);
4097 4026
4098 list_for_each_entry(chan, &conn->chan_l, list) { 4027 list_for_each_entry(chan, &conn->chan_l, list) {
@@ -4105,9 +4034,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4105 if (chan->scid == L2CAP_CID_LE_DATA) { 4034 if (chan->scid == L2CAP_CID_LE_DATA) {
4106 if (!status && encrypt) { 4035 if (!status && encrypt) {
4107 chan->sec_level = hcon->sec_level; 4036 chan->sec_level = hcon->sec_level;
4108 del_timer(&conn->security_timer);
4109 l2cap_chan_ready(sk); 4037 l2cap_chan_ready(sk);
4110 smp_distribute_keys(conn, 0);
4111 } 4038 }
4112 4039
4113 bh_unlock_sock(sk); 4040 bh_unlock_sock(sk);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e8292369cdcf..5c406d3136f7 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,6 +27,7 @@
27/* Bluetooth L2CAP sockets. */ 27/* Bluetooth L2CAP sockets. */
28 28
29#include <linux/security.h> 29#include <linux/security.h>
30#include <linux/export.h>
30 31
31#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
32#include <net/bluetooth/hci_core.h> 33#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 53e109eb043e..2c7634296866 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -23,6 +23,7 @@
23/* Bluetooth HCI Management interface */ 23/* Bluetooth HCI Management interface */
24 24
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/module.h>
26#include <asm/unaligned.h> 27#include <asm/unaligned.h>
27 28
28#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
@@ -147,8 +148,6 @@ static int read_index_list(struct sock *sk)
147 148
148 hci_del_off_timer(d); 149 hci_del_off_timer(d);
149 150
150 set_bit(HCI_MGMT, &d->flags);
151
152 if (test_bit(HCI_SETUP, &d->flags)) 151 if (test_bit(HCI_SETUP, &d->flags))
153 continue; 152 continue;
154 153
@@ -908,7 +907,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
908 struct hci_dev *hdev; 907 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp; 908 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len; 909 u16 key_count, expected_len;
911 int i, err; 910 int i;
912 911
913 cp = (void *) data; 912 cp = (void *) data;
914 913
@@ -918,9 +917,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
918 key_count = get_unaligned_le16(&cp->key_count); 917 key_count = get_unaligned_le16(&cp->key_count);
919 918
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); 919 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len > len) { 920 if (expected_len != len) {
922 BT_ERR("load_keys: expected at least %u bytes, got %u bytes", 921 BT_ERR("load_keys: expected %u bytes, got %u bytes",
923 expected_len, len); 922 expected_len, len);
924 return -EINVAL; 923 return -EINVAL;
925 } 924 }
926 925
@@ -942,36 +941,17 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
942 else 941 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 942 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944 943
945 len -= sizeof(*cp); 944 for (i = 0; i < key_count; i++) {
946 i = 0; 945 struct mgmt_key_info *key = &cp->keys[i];
947
948 while (i < len) {
949 struct mgmt_key_info *key = (void *) cp->keys + i;
950
951 i += sizeof(*key) + key->dlen;
952
953 if (key->type == HCI_LK_SMP_LTK) {
954 struct key_master_id *id = (void *) key->data;
955
956 if (key->dlen != sizeof(struct key_master_id))
957 continue;
958
959 hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
960 id->ediv, id->rand, key->val);
961
962 continue;
963 }
964 946
965 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 947 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
966 key->pin_len); 948 key->pin_len);
967 } 949 }
968 950
969 err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
970
971 hci_dev_unlock_bh(hdev); 951 hci_dev_unlock_bh(hdev);
972 hci_dev_put(hdev); 952 hci_dev_put(hdev);
973 953
974 return err; 954 return 0;
975} 955}
976 956
977static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 957static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -1347,6 +1327,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1347 struct hci_dev *hdev; 1327 struct hci_dev *hdev;
1348 struct mgmt_cp_pair_device *cp; 1328 struct mgmt_cp_pair_device *cp;
1349 struct pending_cmd *cmd; 1329 struct pending_cmd *cmd;
1330 struct adv_entry *entry;
1350 u8 sec_level, auth_type; 1331 u8 sec_level, auth_type;
1351 struct hci_conn *conn; 1332 struct hci_conn *conn;
1352 int err; 1333 int err;
@@ -1364,15 +1345,20 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1364 1345
1365 hci_dev_lock_bh(hdev); 1346 hci_dev_lock_bh(hdev);
1366 1347
1367 if (cp->io_cap == 0x03) { 1348 sec_level = BT_SECURITY_MEDIUM;
1368 sec_level = BT_SECURITY_MEDIUM; 1349 if (cp->io_cap == 0x03)
1369 auth_type = HCI_AT_DEDICATED_BONDING; 1350 auth_type = HCI_AT_DEDICATED_BONDING;
1370 } else { 1351 else
1371 sec_level = BT_SECURITY_HIGH;
1372 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1352 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1373 }
1374 1353
1375 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); 1354 entry = hci_find_adv_entry(hdev, &cp->bdaddr);
1355 if (entry)
1356 conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
1357 auth_type);
1358 else
1359 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
1360 auth_type);
1361
1376 if (IS_ERR(conn)) { 1362 if (IS_ERR(conn)) {
1377 err = PTR_ERR(conn); 1363 err = PTR_ERR(conn);
1378 goto unlock; 1364 goto unlock;
@@ -1391,7 +1377,10 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1391 goto unlock; 1377 goto unlock;
1392 } 1378 }
1393 1379
1394 conn->connect_cfm_cb = pairing_complete_cb; 1380 /* For LE, just connecting isn't a proof that the pairing finished */
1381 if (!entry)
1382 conn->connect_cfm_cb = pairing_complete_cb;
1383
1395 conn->security_cfm_cb = pairing_complete_cb; 1384 conn->security_cfm_cb = pairing_complete_cb;
1396 conn->disconn_cfm_cb = pairing_complete_cb; 1385 conn->disconn_cfm_cb = pairing_complete_cb;
1397 conn->io_capability = cp->io_cap; 1386 conn->io_capability = cp->io_cap;
@@ -1689,13 +1678,12 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1689 u16 len) 1678 u16 len)
1690{ 1679{
1691 struct hci_dev *hdev; 1680 struct hci_dev *hdev;
1692 struct mgmt_cp_block_device *cp; 1681 struct pending_cmd *cmd;
1682 struct mgmt_cp_block_device *cp = (void *) data;
1693 int err; 1683 int err;
1694 1684
1695 BT_DBG("hci%u", index); 1685 BT_DBG("hci%u", index);
1696 1686
1697 cp = (void *) data;
1698
1699 if (len != sizeof(*cp)) 1687 if (len != sizeof(*cp))
1700 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 1688 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1701 EINVAL); 1689 EINVAL);
@@ -1705,6 +1693,14 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1705 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 1693 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1706 ENODEV); 1694 ENODEV);
1707 1695
1696 hci_dev_lock_bh(hdev);
1697
1698 cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
1699 if (!cmd) {
1700 err = -ENOMEM;
1701 goto failed;
1702 }
1703
1708 err = hci_blacklist_add(hdev, &cp->bdaddr); 1704 err = hci_blacklist_add(hdev, &cp->bdaddr);
1709 1705
1710 if (err < 0) 1706 if (err < 0)
@@ -1712,6 +1708,11 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1712 else 1708 else
1713 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 1709 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1714 NULL, 0); 1710 NULL, 0);
1711
1712 mgmt_pending_remove(cmd);
1713
1714failed:
1715 hci_dev_unlock_bh(hdev);
1715 hci_dev_put(hdev); 1716 hci_dev_put(hdev);
1716 1717
1717 return err; 1718 return err;
@@ -1721,13 +1722,12 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len) 1722 u16 len)
1722{ 1723{
1723 struct hci_dev *hdev; 1724 struct hci_dev *hdev;
1724 struct mgmt_cp_unblock_device *cp; 1725 struct pending_cmd *cmd;
1726 struct mgmt_cp_unblock_device *cp = (void *) data;
1725 int err; 1727 int err;
1726 1728
1727 BT_DBG("hci%u", index); 1729 BT_DBG("hci%u", index);
1728 1730
1729 cp = (void *) data;
1730
1731 if (len != sizeof(*cp)) 1731 if (len != sizeof(*cp))
1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1733 EINVAL); 1733 EINVAL);
@@ -1737,6 +1737,14 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1738 ENODEV); 1738 ENODEV);
1739 1739
1740 hci_dev_lock_bh(hdev);
1741
1742 cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
1743 if (!cmd) {
1744 err = -ENOMEM;
1745 goto failed;
1746 }
1747
1740 err = hci_blacklist_del(hdev, &cp->bdaddr); 1748 err = hci_blacklist_del(hdev, &cp->bdaddr);
1741 1749
1742 if (err < 0) 1750 if (err < 0)
@@ -1744,6 +1752,67 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1744 else 1752 else
1745 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1753 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1746 NULL, 0); 1754 NULL, 0);
1755
1756 mgmt_pending_remove(cmd);
1757
1758failed:
1759 hci_dev_unlock_bh(hdev);
1760 hci_dev_put(hdev);
1761
1762 return err;
1763}
1764
1765static int set_fast_connectable(struct sock *sk, u16 index,
1766 unsigned char *data, u16 len)
1767{
1768 struct hci_dev *hdev;
1769 struct mgmt_cp_set_fast_connectable *cp = (void *) data;
1770 struct hci_cp_write_page_scan_activity acp;
1771 u8 type;
1772 int err;
1773
1774 BT_DBG("hci%u", index);
1775
1776 if (len != sizeof(*cp))
1777 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1778 EINVAL);
1779
1780 hdev = hci_dev_get(index);
1781 if (!hdev)
1782 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1783 ENODEV);
1784
1785 hci_dev_lock(hdev);
1786
1787 if (cp->enable) {
1788 type = PAGE_SCAN_TYPE_INTERLACED;
1789 acp.interval = 0x0024; /* 22.5 msec page scan interval */
1790 } else {
1791 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1792 acp.interval = 0x0800; /* default 1.28 sec page scan */
1793 }
1794
1795 acp.window = 0x0012; /* default 11.25 msec page scan window */
1796
1797 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1798 sizeof(acp), &acp);
1799 if (err < 0) {
1800 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1801 -err);
1802 goto done;
1803 }
1804
1805 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1806 if (err < 0) {
1807 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1808 -err);
1809 goto done;
1810 }
1811
1812 err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
1813 NULL, 0);
1814done:
1815 hci_dev_unlock(hdev);
1747 hci_dev_put(hdev); 1816 hci_dev_put(hdev);
1748 1817
1749 return err; 1818 return err;
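
Worked conversion for the page-scan values in set_fast_connectable() above: HCI expresses interval and window in 0.625 ms baseband slots, so 0x0024 is 22.5 ms, 0x0800 is 1.28 s and 0x0012 is 11.25 ms, matching the comments. A runnable check:

    #include <stdio.h>

    static double slots_to_ms(unsigned slots)
    {
        return slots * 0.625;   /* one baseband slot = 625 microseconds */
    }

    int main(void)
    {
        printf("interval 0x0024 = %.2f ms\n", slots_to_ms(0x0024)); /* 22.50 */
        printf("interval 0x0800 = %.2f ms\n", slots_to_ms(0x0800)); /* 1280.00 */
        printf("window   0x0012 = %.2f ms\n", slots_to_ms(0x0012)); /* 11.25 */
        return 0;
    }
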
@@ -1869,6 +1938,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1869 case MGMT_OP_UNBLOCK_DEVICE: 1938 case MGMT_OP_UNBLOCK_DEVICE:
1870 err = unblock_device(sk, index, buf + sizeof(*hdr), len); 1939 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
1871 break; 1940 break;
1941 case MGMT_OP_SET_FAST_CONNECTABLE:
1942 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
1943 len);
1944 break;
1872 default: 1945 default:
1873 BT_DBG("Unknown op %u", opcode); 1946 BT_DBG("Unknown op %u", opcode);
1874 err = cmd_status(sk, index, opcode, 0x01); 1947 err = cmd_status(sk, index, opcode, 0x01);
@@ -1977,35 +2050,25 @@ int mgmt_connectable(u16 index, u8 connectable)
1977 2050
1978int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 2051int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1979{ 2052{
1980 struct mgmt_ev_new_key *ev; 2053 struct mgmt_ev_new_key ev;
1981 int err, total;
1982
1983 total = sizeof(struct mgmt_ev_new_key) + key->dlen;
1984 ev = kzalloc(total, GFP_ATOMIC);
1985 if (!ev)
1986 return -ENOMEM;
1987
1988 bacpy(&ev->key.bdaddr, &key->bdaddr);
1989 ev->key.type = key->type;
1990 memcpy(ev->key.val, key->val, 16);
1991 ev->key.pin_len = key->pin_len;
1992 ev->key.dlen = key->dlen;
1993 ev->store_hint = persistent;
1994 2054
1995 memcpy(ev->key.data, key->data, key->dlen); 2055 memset(&ev, 0, sizeof(ev));
1996
1997 err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
1998 2056
1999 kfree(ev); 2057 ev.store_hint = persistent;
2058 bacpy(&ev.key.bdaddr, &key->bdaddr);
2059 ev.key.type = key->type;
2060 memcpy(ev.key.val, key->val, 16);
2061 ev.key.pin_len = key->pin_len;
2000 2062
2001 return err; 2063 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
2002} 2064}
2003 2065
2004int mgmt_connected(u16 index, bdaddr_t *bdaddr) 2066int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
2005{ 2067{
2006 struct mgmt_ev_connected ev; 2068 struct mgmt_ev_connected ev;
2007 2069
2008 bacpy(&ev.bdaddr, bdaddr); 2070 bacpy(&ev.bdaddr, bdaddr);
2071 ev.link_type = link_type;
2009 2072
2010 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); 2073 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
2011} 2074}
@@ -2260,12 +2323,14 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2260 memset(&ev, 0, sizeof(ev)); 2323 memset(&ev, 0, sizeof(ev));
2261 2324
2262 bacpy(&ev.bdaddr, bdaddr); 2325 bacpy(&ev.bdaddr, bdaddr);
2263 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2264 ev.rssi = rssi; 2326 ev.rssi = rssi;
2265 2327
2266 if (eir) 2328 if (eir)
2267 memcpy(ev.eir, eir, sizeof(ev.eir)); 2329 memcpy(ev.eir, eir, sizeof(ev.eir));
2268 2330
2331 if (dev_class)
2332 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2333
2269 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); 2334 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
2270} 2335}
2271 2336
@@ -2286,3 +2351,29 @@ int mgmt_discovering(u16 index, u8 discovering)
2286 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, 2351 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
2287 sizeof(discovering), NULL); 2352 sizeof(discovering), NULL);
2288} 2353}
2354
2355int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
2356{
2357 struct pending_cmd *cmd;
2358 struct mgmt_ev_device_blocked ev;
2359
2360 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
2361
2362 bacpy(&ev.bdaddr, bdaddr);
2363
2364 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
2365 cmd ? cmd->sk : NULL);
2366}
2367
2368int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
2369{
2370 struct pending_cmd *cmd;
2371 struct mgmt_ev_device_unblocked ev;
2372
2373 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
2374
2375 bacpy(&ev.bdaddr, bdaddr);
2376
2377 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
2378 cmd ? cmd->sk : NULL);
2379}
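
Note on the mgmt_new_key() hunk above: the kmalloc'd variable-length event becomes a fixed-size struct on the stack, and the memset() is what prevents uninitialized stack padding from leaking into the event sent to userspace. A sketch of the pattern, assuming an illustrative layout rather than the exact mgmt_ev_new_key definition:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct ev_new_key_sketch {      /* layout is illustrative only */
        uint8_t store_hint;
        uint8_t bdaddr[6];
        uint8_t type;
        uint8_t val[16];
        uint8_t pin_len;
    };

    static void fill_event(struct ev_new_key_sketch *ev, const uint8_t bdaddr[6],
                           uint8_t type, const uint8_t val[16], uint8_t pin_len,
                           uint8_t persistent)
    {
        memset(ev, 0, sizeof(*ev)); /* zero the padding before it is copied out */
        ev->store_hint = persistent;
        memcpy(ev->bdaddr, bdaddr, 6);  /* bacpy() equivalent */
        ev->type = type;
        memcpy(ev->val, val, 16);
        ev->pin_len = pin_len;
    }

    int main(void)
    {
        static const uint8_t addr[6] = { 1, 2, 3, 4, 5, 6 };
        static const uint8_t ltk[16];
        struct ev_new_key_sketch ev;

        fill_event(&ev, addr, 0, ltk, 0, 1);
        printf("event is %zu bytes, no heap allocation needed\n", sizeof(ev));
        return 0;
    }
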
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5ba3f6df665c..4e32e18211f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1802,6 +1802,11 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1802 continue; 1802 continue;
1803 } 1803 }
1804 1804
1805 if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
1806 __rfcomm_dlc_close(d, ECONNREFUSED);
1807 continue;
1808 }
1809
1805 if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { 1810 if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
1806 rfcomm_dlc_clear_timer(d); 1811 rfcomm_dlc_clear_timer(d);
1807 if (d->out) { 1812 if (d->out) {
@@ -1853,7 +1858,10 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1853 /* Get data directly from socket receive queue without copying it. */ 1858 /* Get data directly from socket receive queue without copying it. */
1854 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 1859 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1855 skb_orphan(skb); 1860 skb_orphan(skb);
1856 rfcomm_recv_frame(s, skb); 1861 if (!skb_linearize(skb))
1862 rfcomm_recv_frame(s, skb);
1863 else
1864 kfree_skb(skb);
1857 } 1865 }
1858 1866
1859 if (sk->sk_state == BT_CLOSED) { 1867 if (sk->sk_state == BT_CLOSED) {
@@ -2074,7 +2082,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2074 if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { 2082 if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
2075 rfcomm_dlc_clear_timer(d); 2083 rfcomm_dlc_clear_timer(d);
2076 if (status || encrypt == 0x00) { 2084 if (status || encrypt == 0x00) {
2077 __rfcomm_dlc_close(d, ECONNREFUSED); 2085 set_bit(RFCOMM_ENC_DROP, &d->flags);
2078 continue; 2086 continue;
2079 } 2087 }
2080 } 2088 }
@@ -2085,7 +2093,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2085 rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); 2093 rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
2086 continue; 2094 continue;
2087 } else if (d->sec_level == BT_SECURITY_HIGH) { 2095 } else if (d->sec_level == BT_SECURITY_HIGH) {
2088 __rfcomm_dlc_close(d, ECONNREFUSED); 2096 set_bit(RFCOMM_ENC_DROP, &d->flags);
2089 continue; 2097 continue;
2090 } 2098 }
2091 } 2099 }
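
Note on the rfcomm/core.c hunks above: the security callback no longer closes DLCs directly (it runs in the caller's locking context); it only sets RFCOMM_ENC_DROP, and rfcomm_process_dlcs() performs the close later. The rx path also linearizes each skb before parsing and drops frames it cannot linearize. A userspace sketch of the deferred-close pattern, with illustrative types:

    #include <stdio.h>
    #include <stdbool.h>

    struct dlc {
        bool enc_drop;          /* stands in for RFCOMM_ENC_DROP */
        bool open;
    };

    static void security_cfm(struct dlc *d, int status, int encrypt)
    {
        /* callback context: unsafe to tear down here, so just flag it */
        if (status || encrypt == 0)
            d->enc_drop = true;
    }

    static void process_dlcs(struct dlc *d)
    {
        /* worker context: safe to close */
        if (d->enc_drop && d->open) {
            d->open = false;
            printf("dlc closed with ECONNREFUSED\n");
        }
    }

    int main(void)
    {
        struct dlc d = { .open = true };
        security_cfm(&d, 0, 0);  /* encryption was dropped */
        process_dlcs(&d);
        return 0;
    }
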
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b88a92..759b63572641 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -182,18 +182,9 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
182 return; 182 return;
183 183
184 hci_send_acl(conn->hcon, skb, 0); 184 hci_send_acl(conn->hcon, skb, 0);
185}
186
187static __u8 seclevel_to_authreq(__u8 level)
188{
189 switch (level) {
190 case BT_SECURITY_HIGH:
191 /* Right now we don't support bonding */
192 return SMP_AUTH_MITM;
193 185
194 default: 186 mod_timer(&conn->security_timer, jiffies +
195 return SMP_AUTH_NONE; 187 msecs_to_jiffies(SMP_TIMEOUT));
196 }
197} 188}
198 189
199static void build_pairing_cmd(struct l2cap_conn *conn, 190static void build_pairing_cmd(struct l2cap_conn *conn,
@@ -205,7 +196,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
205 196
206 dist_keys = 0; 197 dist_keys = 0;
207 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { 198 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
208 dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN; 199 dist_keys = SMP_DIST_ENC_KEY;
209 authreq |= SMP_AUTH_BONDING; 200 authreq |= SMP_AUTH_BONDING;
210 } 201 }
211 202
@@ -229,24 +220,184 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
229 220
230static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) 221static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
231{ 222{
223 struct smp_chan *smp = conn->smp_chan;
224
232 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || 225 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
233 (max_key_size < SMP_MIN_ENC_KEY_SIZE)) 226 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
234 return SMP_ENC_KEY_SIZE; 227 return SMP_ENC_KEY_SIZE;
235 228
236 conn->smp_key_size = max_key_size; 229 smp->smp_key_size = max_key_size;
237 230
238 return 0; 231 return 0;
239} 232}
240 233
234static void confirm_work(struct work_struct *work)
235{
236 struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
237 struct l2cap_conn *conn = smp->conn;
238 struct crypto_blkcipher *tfm;
239 struct smp_cmd_pairing_confirm cp;
240 int ret;
241 u8 res[16], reason;
242
243 BT_DBG("conn %p", conn);
244
245 tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
246 if (IS_ERR(tfm)) {
247 reason = SMP_UNSPECIFIED;
248 goto error;
249 }
250
251 smp->tfm = tfm;
252
253 if (conn->hcon->out)
254 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
255 conn->src, conn->hcon->dst_type, conn->dst,
256 res);
257 else
258 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
259 conn->hcon->dst_type, conn->dst, 0, conn->src,
260 res);
261 if (ret) {
262 reason = SMP_UNSPECIFIED;
263 goto error;
264 }
265
266 swap128(res, cp.confirm_val);
267 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
268
269 return;
270
271error:
272 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
273 smp_chan_destroy(conn);
274}
275
276static void random_work(struct work_struct *work)
277{
278 struct smp_chan *smp = container_of(work, struct smp_chan, random);
279 struct l2cap_conn *conn = smp->conn;
280 struct hci_conn *hcon = conn->hcon;
281 struct crypto_blkcipher *tfm = smp->tfm;
282 u8 reason, confirm[16], res[16], key[16];
283 int ret;
284
285 if (IS_ERR_OR_NULL(tfm)) {
286 reason = SMP_UNSPECIFIED;
287 goto error;
288 }
289
290 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
291
292 if (hcon->out)
293 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
294 conn->src, hcon->dst_type, conn->dst,
295 res);
296 else
297 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
298 hcon->dst_type, conn->dst, 0, conn->src,
299 res);
300 if (ret) {
301 reason = SMP_UNSPECIFIED;
302 goto error;
303 }
304
305 swap128(res, confirm);
306
307 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
308 BT_ERR("Pairing failed (confirmation values mismatch)");
309 reason = SMP_CONFIRM_FAILED;
310 goto error;
311 }
312
313 if (hcon->out) {
314 u8 stk[16], rand[8];
315 __le16 ediv;
316
317 memset(rand, 0, sizeof(rand));
318 ediv = 0;
319
320 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
321 swap128(key, stk);
322
323 memset(stk + smp->smp_key_size, 0,
324 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
325
326 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) {
327 reason = SMP_UNSPECIFIED;
328 goto error;
329 }
330
331 hci_le_start_enc(hcon, ediv, rand, stk);
332 hcon->enc_key_size = smp->smp_key_size;
333 } else {
334 u8 stk[16], r[16], rand[8];
335 __le16 ediv;
336
337 memset(rand, 0, sizeof(rand));
338 ediv = 0;
339
340 swap128(smp->prnd, r);
341 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
342
343 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
344 swap128(key, stk);
345
346 memset(stk + smp->smp_key_size, 0,
347 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
348
349 hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size,
350 ediv, rand, stk);
351 }
352
353 return;
354
355error:
356 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
357 smp_chan_destroy(conn);
358}
359
360static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
361{
362 struct smp_chan *smp;
363
364 smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
365 if (!smp)
366 return NULL;
367
368 INIT_WORK(&smp->confirm, confirm_work);
369 INIT_WORK(&smp->random, random_work);
370
371 smp->conn = conn;
372 conn->smp_chan = smp;
373
374 hci_conn_hold(conn->hcon);
375
376 return smp;
377}
378
379void smp_chan_destroy(struct l2cap_conn *conn)
380{
381 kfree(conn->smp_chan);
382 hci_conn_put(conn->hcon);
383}
384
241static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) 385static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
242{ 386{
243 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 387 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
388 struct smp_chan *smp;
244 u8 key_size; 389 u8 key_size;
390 int ret;
245 391
246 BT_DBG("conn %p", conn); 392 BT_DBG("conn %p", conn);
247 393
248 conn->preq[0] = SMP_CMD_PAIRING_REQ; 394 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
249 memcpy(&conn->preq[1], req, sizeof(*req)); 395 smp = smp_chan_create(conn);
396
397 smp = conn->smp_chan;
398
399 smp->preq[0] = SMP_CMD_PAIRING_REQ;
400 memcpy(&smp->preq[1], req, sizeof(*req));
250 skb_pull(skb, sizeof(*req)); 401 skb_pull(skb, sizeof(*req));
251 402
252 if (req->oob_flag) 403 if (req->oob_flag)
@@ -260,32 +411,33 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
260 return SMP_ENC_KEY_SIZE; 411 return SMP_ENC_KEY_SIZE;
261 412
262 /* Just works */ 413 /* Just works */
263 memset(conn->tk, 0, sizeof(conn->tk)); 414 memset(smp->tk, 0, sizeof(smp->tk));
415
416 ret = smp_rand(smp->prnd);
417 if (ret)
418 return SMP_UNSPECIFIED;
264 419
265 conn->prsp[0] = SMP_CMD_PAIRING_RSP; 420 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
266 memcpy(&conn->prsp[1], &rsp, sizeof(rsp)); 421 memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
267 422
268 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); 423 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
269 424
270 mod_timer(&conn->security_timer, jiffies +
271 msecs_to_jiffies(SMP_TIMEOUT));
272
273 return 0; 425 return 0;
274} 426}
275 427
276static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) 428static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
277{ 429{
278 struct smp_cmd_pairing *req, *rsp = (void *) skb->data; 430 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
279 struct smp_cmd_pairing_confirm cp; 431 struct smp_chan *smp = conn->smp_chan;
280 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; 432 struct hci_dev *hdev = conn->hcon->hdev;
433 u8 key_size;
281 int ret; 434 int ret;
282 u8 res[16], key_size;
283 435
284 BT_DBG("conn %p", conn); 436 BT_DBG("conn %p", conn);
285 437
286 skb_pull(skb, sizeof(*rsp)); 438 skb_pull(skb, sizeof(*rsp));
287 439
288 req = (void *) &conn->preq[1]; 440 req = (void *) &smp->preq[1];
289 441
290 key_size = min(req->max_key_size, rsp->max_key_size); 442 key_size = min(req->max_key_size, rsp->max_key_size);
291 if (check_enc_key_size(conn, key_size)) 443 if (check_enc_key_size(conn, key_size))
@@ -295,222 +447,154 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
295 return SMP_OOB_NOT_AVAIL; 447 return SMP_OOB_NOT_AVAIL;
296 448
297 /* Just works */ 449 /* Just works */
298 memset(conn->tk, 0, sizeof(conn->tk)); 450 memset(smp->tk, 0, sizeof(smp->tk));
299
300 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
301 memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
302
303 ret = smp_rand(conn->prnd);
304 if (ret)
305 return SMP_UNSPECIFIED;
306 451
307 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0, 452 ret = smp_rand(smp->prnd);
308 conn->src, conn->hcon->dst_type, conn->dst, res);
309 if (ret) 453 if (ret)
310 return SMP_UNSPECIFIED; 454 return SMP_UNSPECIFIED;
311 455
312 swap128(res, cp.confirm_val); 456 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
457 memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
313 458
314 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); 459 queue_work(hdev->workqueue, &smp->confirm);
315 460
316 return 0; 461 return 0;
317} 462}
318 463
319static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) 464static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
320{ 465{
321 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; 466 struct smp_chan *smp = conn->smp_chan;
467 struct hci_dev *hdev = conn->hcon->hdev;
322 468
323 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 469 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
324 470
325 memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf)); 471 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
326 skb_pull(skb, sizeof(conn->pcnf)); 472 skb_pull(skb, sizeof(smp->pcnf));
327 473
328 if (conn->hcon->out) { 474 if (conn->hcon->out) {
329 u8 random[16]; 475 u8 random[16];
330 476
331 swap128(conn->prnd, random); 477 swap128(smp->prnd, random);
332 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), 478 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
333 random); 479 random);
334 } else { 480 } else {
335 struct smp_cmd_pairing_confirm cp; 481 queue_work(hdev->workqueue, &smp->confirm);
336 int ret;
337 u8 res[16];
338
339 ret = smp_rand(conn->prnd);
340 if (ret)
341 return SMP_UNSPECIFIED;
342
343 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
344 conn->hcon->dst_type, conn->dst,
345 0, conn->src, res);
346 if (ret)
347 return SMP_CONFIRM_FAILED;
348
349 swap128(res, cp.confirm_val);
350
351 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
352 } 482 }
353 483
354 mod_timer(&conn->security_timer, jiffies +
355 msecs_to_jiffies(SMP_TIMEOUT));
356
357 return 0; 484 return 0;
358} 485}
359 486
360static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) 487static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
361{ 488{
362 struct hci_conn *hcon = conn->hcon; 489 struct smp_chan *smp = conn->smp_chan;
363 struct crypto_blkcipher *tfm = hcon->hdev->tfm; 490 struct hci_dev *hdev = conn->hcon->hdev;
364 int ret;
365 u8 key[16], res[16], random[16], confirm[16];
366 491
367 swap128(skb->data, random); 492 BT_DBG("conn %p", conn);
368 skb_pull(skb, sizeof(random));
369
370 if (conn->hcon->out)
371 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
372 conn->src, conn->hcon->dst_type, conn->dst,
373 res);
374 else
375 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
376 conn->hcon->dst_type, conn->dst, 0, conn->src,
377 res);
378 if (ret)
379 return SMP_UNSPECIFIED;
380
381 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
382
383 swap128(res, confirm);
384
385 if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
386 BT_ERR("Pairing failed (confirmation values mismatch)");
387 return SMP_CONFIRM_FAILED;
388 }
389
390 if (conn->hcon->out) {
391 u8 stk[16], rand[8];
392 __le16 ediv;
393
394 memset(rand, 0, sizeof(rand));
395 ediv = 0;
396 493
397 smp_s1(tfm, conn->tk, random, conn->prnd, key); 494 swap128(skb->data, smp->rrnd);
398 swap128(key, stk); 495 skb_pull(skb, sizeof(smp->rrnd));
399 496
400 memset(stk + conn->smp_key_size, 0, 497 queue_work(hdev->workqueue, &smp->random);
401 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
402 498
403 hci_le_start_enc(hcon, ediv, rand, stk); 499 return 0;
404 hcon->enc_key_size = conn->smp_key_size; 500}
405 } else {
406 u8 stk[16], r[16], rand[8];
407 __le16 ediv;
408 501
409 memset(rand, 0, sizeof(rand)); 502static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
410 ediv = 0; 503{
504 struct link_key *key;
505 struct key_master_id *master;
506 struct hci_conn *hcon = conn->hcon;
411 507
412 swap128(conn->prnd, r); 508 key = hci_find_link_key_type(hcon->hdev, conn->dst,
413 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); 509 HCI_LK_SMP_LTK);
510 if (!key)
511 return 0;
414 512
415 smp_s1(tfm, conn->tk, conn->prnd, random, key); 513 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND,
416 swap128(key, stk); 514 &hcon->pend))
515 return 1;
417 516
418 memset(stk + conn->smp_key_size, 0, 517 master = (void *) key->data;
419 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size); 518 hci_le_start_enc(hcon, master->ediv, master->rand,
519 key->val);
520 hcon->enc_key_size = key->pin_len;
420 521
421 hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size, 522 return 1;
422 ediv, rand, stk);
423 }
424 523
425 return 0;
426} 524}
427
428static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) 525static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
429{ 526{
430 struct smp_cmd_security_req *rp = (void *) skb->data; 527 struct smp_cmd_security_req *rp = (void *) skb->data;
431 struct smp_cmd_pairing cp; 528 struct smp_cmd_pairing cp;
432 struct hci_conn *hcon = conn->hcon; 529 struct hci_conn *hcon = conn->hcon;
530 struct smp_chan *smp;
433 531
434 BT_DBG("conn %p", conn); 532 BT_DBG("conn %p", conn);
435 533
436 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) 534 hcon->pending_sec_level = BT_SECURITY_MEDIUM;
535
536 if (smp_ltk_encrypt(conn))
437 return 0; 537 return 0;
438 538
539 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
540 return 0;
541
542 smp = smp_chan_create(conn);
543
439 skb_pull(skb, sizeof(*rp)); 544 skb_pull(skb, sizeof(*rp));
440 545
441 memset(&cp, 0, sizeof(cp)); 546 memset(&cp, 0, sizeof(cp));
442 build_pairing_cmd(conn, &cp, NULL, rp->auth_req); 547 build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
443 548
444 conn->preq[0] = SMP_CMD_PAIRING_REQ; 549 smp->preq[0] = SMP_CMD_PAIRING_REQ;
445 memcpy(&conn->preq[1], &cp, sizeof(cp)); 550 memcpy(&smp->preq[1], &cp, sizeof(cp));
446 551
447 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 552 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
448 553
449 mod_timer(&conn->security_timer, jiffies +
450 msecs_to_jiffies(SMP_TIMEOUT));
451
452 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
453
454 return 0; 554 return 0;
455} 555}
456 556
457int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) 557int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
458{ 558{
459 struct hci_conn *hcon = conn->hcon; 559 struct hci_conn *hcon = conn->hcon;
460 __u8 authreq; 560 struct smp_chan *smp = conn->smp_chan;
461 561
462 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); 562 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
463 563
464 if (!lmp_host_le_capable(hcon->hdev)) 564 if (!lmp_host_le_capable(hcon->hdev))
465 return 1; 565 return 1;
466 566
467 if (IS_ERR(hcon->hdev->tfm))
468 return 1;
469
470 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
471 return 0;
472
473 if (sec_level == BT_SECURITY_LOW) 567 if (sec_level == BT_SECURITY_LOW)
474 return 1; 568 return 1;
475 569
476 if (hcon->sec_level >= sec_level) 570 if (hcon->sec_level >= sec_level)
477 return 1; 571 return 1;
478 572
479 authreq = seclevel_to_authreq(sec_level); 573 if (hcon->link_mode & HCI_LM_MASTER)
480 574 if (smp_ltk_encrypt(conn))
481 if (hcon->link_mode & HCI_LM_MASTER) { 575 goto done;
482 struct smp_cmd_pairing cp;
483 struct link_key *key;
484 576
485 key = hci_find_link_key_type(hcon->hdev, conn->dst, 577 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
486 HCI_LK_SMP_LTK); 578 return 0;
487 if (key) {
488 struct key_master_id *master = (void *) key->data;
489 579
490 hci_le_start_enc(hcon, master->ediv, master->rand, 580 smp = smp_chan_create(conn);
491 key->val);
492 hcon->enc_key_size = key->pin_len;
493 581
494 goto done; 582 if (hcon->link_mode & HCI_LM_MASTER) {
495 } 583 struct smp_cmd_pairing cp;
496
497 build_pairing_cmd(conn, &cp, NULL, authreq);
498 conn->preq[0] = SMP_CMD_PAIRING_REQ;
499 memcpy(&conn->preq[1], &cp, sizeof(cp));
500 584
501 mod_timer(&conn->security_timer, jiffies + 585 build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
502 msecs_to_jiffies(SMP_TIMEOUT)); 586 smp->preq[0] = SMP_CMD_PAIRING_REQ;
587 memcpy(&smp->preq[1], &cp, sizeof(cp));
503 588
504 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 589 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
505 } else { 590 } else {
506 struct smp_cmd_security_req cp; 591 struct smp_cmd_security_req cp;
507 cp.auth_req = authreq; 592 cp.auth_req = SMP_AUTH_NONE;
508 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); 593 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
509 } 594 }
510 595
511done: 596done:
512 hcon->pending_sec_level = sec_level; 597 hcon->pending_sec_level = sec_level;
513 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
514 598
515 return 0; 599 return 0;
516} 600}
@@ -518,10 +602,11 @@ done:
518static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) 602static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
519{ 603{
520 struct smp_cmd_encrypt_info *rp = (void *) skb->data; 604 struct smp_cmd_encrypt_info *rp = (void *) skb->data;
605 struct smp_chan *smp = conn->smp_chan;
521 606
522 skb_pull(skb, sizeof(*rp)); 607 skb_pull(skb, sizeof(*rp));
523 608
524 memcpy(conn->tk, rp->ltk, sizeof(conn->tk)); 609 memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
525 610
526 return 0; 611 return 0;
527} 612}
@@ -529,11 +614,12 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
529static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) 614static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
530{ 615{
531 struct smp_cmd_master_ident *rp = (void *) skb->data; 616 struct smp_cmd_master_ident *rp = (void *) skb->data;
617 struct smp_chan *smp = conn->smp_chan;
532 618
533 skb_pull(skb, sizeof(*rp)); 619 skb_pull(skb, sizeof(*rp));
534 620
535 hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size, 621 hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
536 rp->ediv, rp->rand, conn->tk); 622 rp->ediv, rp->rand, smp->tk);
537 623
538 smp_distribute_keys(conn, 1); 624 smp_distribute_keys(conn, 1);
539 625
@@ -552,12 +638,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
552 goto done; 638 goto done;
553 } 639 }
554 640
555 if (IS_ERR(conn->hcon->hdev->tfm)) {
556 err = PTR_ERR(conn->hcon->hdev->tfm);
557 reason = SMP_PAIRING_NOTSUPP;
558 goto done;
559 }
560
561 skb_pull(skb, sizeof(code)); 641 skb_pull(skb, sizeof(code));
562 642
563 switch (code) { 643 switch (code) {
@@ -621,20 +701,21 @@ done:
621int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) 701int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
622{ 702{
623 struct smp_cmd_pairing *req, *rsp; 703 struct smp_cmd_pairing *req, *rsp;
704 struct smp_chan *smp = conn->smp_chan;
624 __u8 *keydist; 705 __u8 *keydist;
625 706
626 BT_DBG("conn %p force %d", conn, force); 707 BT_DBG("conn %p force %d", conn, force);
627 708
628 if (IS_ERR(conn->hcon->hdev->tfm)) 709 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
629 return PTR_ERR(conn->hcon->hdev->tfm); 710 return 0;
630 711
631 rsp = (void *) &conn->prsp[1]; 712 rsp = (void *) &smp->prsp[1];
632 713
633 /* The responder sends its keys first */ 714 /* The responder sends its keys first */
634 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07)) 715 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
635 return 0; 716 return 0;
636 717
637 req = (void *) &conn->preq[1]; 718 req = (void *) &smp->preq[1];
638 719
639 if (conn->hcon->out) { 720 if (conn->hcon->out) {
640 keydist = &rsp->init_key_dist; 721 keydist = &rsp->init_key_dist;
@@ -658,7 +739,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
658 739
659 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); 740 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
660 741
661 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size, 742 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
662 ediv, ident.rand, enc.ltk); 743 ediv, ident.rand, enc.ltk);
663 744
664 ident.ediv = cpu_to_le16(ediv); 745 ident.ediv = cpu_to_le16(ediv);
@@ -698,5 +779,11 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
698 *keydist &= ~SMP_DIST_SIGN; 779 *keydist &= ~SMP_DIST_SIGN;
699 } 780 }
700 781
782 if (conn->hcon->out || force) {
783 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
784 del_timer(&conn->security_timer);
785 smp_chan_destroy(conn);
786 }
787
701 return 0; 788 return 0;
702} 789}
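
Note on the smp.c refactor above: per-pairing state (preq/prsp, tk, prnd/rrnd, pcnf, key size) moves from l2cap_conn into a kzalloc'd struct smp_chan guarded by the HCI_CONN_LE_SMP_PEND bit, the blocking AES work runs from confirm_work/random_work on hdev->workqueue, and smp_distribute_keys() tears the channel down once keys are exchanged. A plain-C sketch of the create/destroy lifecycle; field sizes are illustrative:

    #include <stdlib.h>
    #include <stdbool.h>

    struct smp_chan_sketch {
        unsigned char preq[7], prsp[7];     /* sizes illustrative only */
        unsigned char tk[16], prnd[16], rrnd[16], pcnf[16];
        unsigned char smp_key_size;
    };

    struct conn_sketch {
        bool smp_pend;                      /* HCI_CONN_LE_SMP_PEND stand-in */
        struct smp_chan_sketch *smp_chan;
    };

    static struct smp_chan_sketch *smp_chan_create(struct conn_sketch *c)
    {
        /* mirrors test_and_set_bit(): only the first caller allocates */
        if (c->smp_pend)
            return c->smp_chan;
        c->smp_pend = true;
        c->smp_chan = calloc(1, sizeof(*c->smp_chan));
        return c->smp_chan;
    }

    static void smp_chan_destroy(struct conn_sketch *c)
    {
        free(c->smp_chan);
        c->smp_chan = NULL;
        c->smp_pend = false;
    }

    int main(void)
    {
        struct conn_sketch c = { 0 };

        if (!smp_chan_create(&c))
            return 1;
        /* ... pairing runs; work items use c.smp_chan ... */
        smp_chan_destroy(&c);
        return 0;
    }
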
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ff3ed6086ce1..feb77ea7b58e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -301,7 +301,7 @@ static const struct net_device_ops br_netdev_ops = {
301 .ndo_start_xmit = br_dev_xmit, 301 .ndo_start_xmit = br_dev_xmit,
302 .ndo_get_stats64 = br_get_stats64, 302 .ndo_get_stats64 = br_get_stats64,
303 .ndo_set_mac_address = br_set_mac_address, 303 .ndo_set_mac_address = br_set_mac_address,
304 .ndo_set_multicast_list = br_dev_set_multicast_list, 304 .ndo_set_rx_mode = br_dev_set_multicast_list,
305 .ndo_change_mtu = br_change_mtu, 305 .ndo_change_mtu = br_change_mtu,
306 .ndo_do_ioctl = br_dev_ioctl, 306 .ndo_do_ioctl = br_dev_ioctl,
307#ifdef CONFIG_NET_POLL_CONTROLLER 307#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -358,6 +358,8 @@ void br_dev_setup(struct net_device *dev)
358 memcpy(br->group_addr, br_group_address, ETH_ALEN); 358 memcpy(br->group_addr, br_group_address, ETH_ALEN);
359 359
360 br->stp_enabled = BR_NO_STP; 360 br->stp_enabled = BR_NO_STP;
361 br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
362
361 br->designated_root = br->bridge_id; 363 br->designated_root = br->bridge_id;
362 br->bridge_max_age = br->max_age = 20 * HZ; 364 br->bridge_max_age = br->max_age = 20 * HZ;
363 br->bridge_hello_time = br->hello_time = 2 * HZ; 365 br->bridge_hello_time = br->hello_time = 2 * HZ;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 68def3b7fb49..c8e7861b88b0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -558,19 +558,28 @@ skip:
558 558
559/* Create new static fdb entry */ 559/* Create new static fdb entry */
560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
561 __u16 state) 561 __u16 state, __u16 flags)
562{ 562{
563 struct net_bridge *br = source->br; 563 struct net_bridge *br = source->br;
564 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 564 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
565 struct net_bridge_fdb_entry *fdb; 565 struct net_bridge_fdb_entry *fdb;
566 566
567 fdb = fdb_find(head, addr); 567 fdb = fdb_find(head, addr);
568 if (fdb) 568 if (fdb == NULL) {
569 return -EEXIST; 569 if (!(flags & NLM_F_CREATE))
570 return -ENOENT;
570 571
571 fdb = fdb_create(head, source, addr); 572 fdb = fdb_create(head, source, addr);
572 if (!fdb) 573 if (!fdb)
573 return -ENOMEM; 574 return -ENOMEM;
575 } else {
576 if (flags & NLM_F_EXCL)
577 return -EEXIST;
578
579 if (flags & NLM_F_REPLACE)
580 fdb->updated = fdb->used = jiffies;
581 fdb->is_local = fdb->is_static = 0;
582 }
574 583
575 if (state & NUD_PERMANENT) 584 if (state & NUD_PERMANENT)
576 fdb->is_local = fdb->is_static = 1; 585 fdb->is_local = fdb->is_static = 1;
@@ -626,7 +635,7 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
626 } 635 }
627 636
628 spin_lock_bh(&p->br->hash_lock); 637 spin_lock_bh(&p->br->hash_lock);
629 err = fdb_add_entry(p, addr, ndm->ndm_state); 638 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
630 spin_unlock_bh(&p->br->hash_lock); 639 spin_unlock_bh(&p->br->hash_lock);
631 640
632 return err; 641 return err;
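
Note on the fdb_add_entry() hunk above: the netlink flags give it open(2)-like semantics (NLM_F_CREATE behaves like O_CREAT, NLM_F_EXCL like O_EXCL). A compact, runnable illustration using the real uapi flag values from <linux/netlink.h>:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    #define NLM_F_REPLACE 0x100 /* values match include/linux/netlink.h */
    #define NLM_F_EXCL    0x200
    #define NLM_F_CREATE  0x400

    static int fdb_add_sketch(bool exists, unsigned int flags)
    {
        if (!exists) {
            if (!(flags & NLM_F_CREATE))
                return -ENOENT;     /* nothing to update */
            return 0;               /* create a new entry */
        }
        if (flags & NLM_F_EXCL)
            return -EEXIST;         /* caller required absence */
        return 0;                   /* update; REPLACE refreshes timestamps */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               fdb_add_sketch(false, 0),                        /* -ENOENT */
               fdb_add_sketch(false, NLM_F_CREATE),             /* 0 */
               fdb_add_sketch(true, NLM_F_CREATE | NLM_F_EXCL)); /* -EEXIST */
        return 0;
    }
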
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1d420f64ff27..f603e5b0b930 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
16#include <linux/netpoll.h> 17#include <linux/netpoll.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
18#include <linux/if_arp.h> 19#include <linux/if_arp.h>
@@ -33,20 +34,18 @@
33 */ 34 */
34static int port_cost(struct net_device *dev) 35static int port_cost(struct net_device *dev)
35{ 36{
36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { 37 struct ethtool_cmd ecmd;
37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; 38
38 39 if (!__ethtool_get_settings(dev, &ecmd)) {
39 if (!dev_ethtool_get_settings(dev, &ecmd)) { 40 switch (ethtool_cmd_speed(&ecmd)) {
40 switch (ethtool_cmd_speed(&ecmd)) { 41 case SPEED_10000:
41 case SPEED_10000: 42 return 2;
42 return 2; 43 case SPEED_1000:
43 case SPEED_1000: 44 return 4;
44 return 4; 45 case SPEED_100:
45 case SPEED_100: 46 return 19;
46 return 19; 47 case SPEED_10:
47 case SPEED_10: 48 return 100;
48 return 100;
49 }
50 } 49 }
51 } 50 }
52 51
@@ -325,7 +324,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
325 324
326 /* Don't allow bridging non-ethernet like devices */ 325 /* Don't allow bridging non-ethernet like devices */
327 if ((dev->flags & IFF_LOOPBACK) || 326 if ((dev->flags & IFF_LOOPBACK) ||
328 dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN) 327 dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
328 !is_valid_ether_addr(dev->dev_addr))
329 return -EINVAL; 329 return -EINVAL;
330 330
331 /* No bridging of bridges */ 331 /* No bridging of bridges */
@@ -353,10 +353,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
353 err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), 353 err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
354 SYSFS_BRIDGE_PORT_ATTR); 354 SYSFS_BRIDGE_PORT_ATTR);
355 if (err) 355 if (err)
356 goto err0;
357
358 err = br_fdb_insert(br, p, dev->dev_addr);
359 if (err)
360 goto err1; 356 goto err1;
361 357
362 err = br_sysfs_addif(p); 358 err = br_sysfs_addif(p);
@@ -397,6 +393,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
397 393
398 dev_set_mtu(br->dev, br_min_mtu(br)); 394 dev_set_mtu(br->dev, br_min_mtu(br));
399 395
396 if (br_fdb_insert(br, p, dev->dev_addr))
397 netdev_err(dev, "failed to insert local address into bridge forwarding table\n");
398
400 kobject_uevent(&p->kobj, KOBJ_ADD); 399 kobject_uevent(&p->kobj, KOBJ_ADD);
401 400
402 return 0; 401 return 0;
@@ -406,11 +405,9 @@ err4:
406err3: 405err3:
407 sysfs_remove_link(br->ifobj, p->dev->name); 406 sysfs_remove_link(br->ifobj, p->dev->name);
408err2: 407err2:
409 br_fdb_delete_by_port(br, p, 1);
410err1:
411 kobject_put(&p->kobj); 408 kobject_put(&p->kobj);
412 p = NULL; /* kobject_put frees */ 409 p = NULL; /* kobject_put frees */
413err0: 410err1:
414 dev_set_promiscuity(dev, -1); 411 dev_set_promiscuity(dev, -1);
415put_back: 412put_back:
416 dev_put(dev); 413 dev_put(dev);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f06ee39c73fd..5a31731be4d0 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -16,6 +16,7 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/netfilter_bridge.h> 18#include <linux/netfilter_bridge.h>
19#include <linux/export.h>
19#include "br_private.h" 20#include "br_private.h"
20 21
21/* Bridge group multicast address 802.1d (pg 51). */ 22/* Bridge group multicast address 802.1d (pg 51). */
@@ -162,14 +163,37 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
162 p = br_port_get_rcu(skb->dev); 163 p = br_port_get_rcu(skb->dev);
163 164
164 if (unlikely(is_link_local(dest))) { 165 if (unlikely(is_link_local(dest))) {
165 /* Pause frames shouldn't be passed up by driver anyway */ 166 /*
166 if (skb->protocol == htons(ETH_P_PAUSE)) 167 * See IEEE 802.1D Table 7-10 Reserved addresses
168 *
169 * Assignment Value
170 * Bridge Group Address 01-80-C2-00-00-00
171 * (MAC Control) 802.3 01-80-C2-00-00-01
172 * (Link Aggregation) 802.3 01-80-C2-00-00-02
173 * 802.1X PAE address 01-80-C2-00-00-03
174 *
175 * 802.1AB LLDP 01-80-C2-00-00-0E
176 *
177 * Others reserved for future standardization
178 */
179 switch (dest[5]) {
180 case 0x00: /* Bridge Group Address */
181 /* If STP is turned off,
182 we must still forward to keep loop detection working */
183 if (p->br->stp_enabled == BR_NO_STP)
184 goto forward;
185 break;
186
187 case 0x01: /* IEEE MAC (Pause) */
167 goto drop; 188 goto drop;
168 189
169 /* If STP is turned off, then forward */ 190 default:
170 if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) 191 /* Allow selective forwarding for most other protocols */
171 goto forward; 192 if (p->br->group_fwd_mask & (1u << dest[5]))
193 goto forward;
194 }
172 195
196 /* Deliver packet to local host only */
173 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, 197 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
174 NULL, br_handle_local_finish)) { 198 NULL, br_handle_local_finish)) {
175 return RX_HANDLER_CONSUMED; /* consumed by filter */ 199 return RX_HANDLER_CONSUMED; /* consumed by filter */
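
Note on the br_handle_frame() hunk above: for link-local destinations 01-80-C2-00-00-XX, the last address byte indexes a bit in br->group_fwd_mask; the default mask of 0 preserves the old deliver-locally behaviour, and BR_GROUPFWD_RESTRICTED (defined in br_private.h below) keeps STP, MAC control, link aggregation and LLDP frames unforwardable. A runnable sketch of the bit test:

    #include <stdio.h>

    #define BR_GROUPFWD_RESTRICTED 0x4007u  /* bits 0x00, 0x01, 0x02 and 0x0E */

    static int may_forward(unsigned short group_fwd_mask, unsigned char dest5)
    {
        return (group_fwd_mask & (1u << dest5)) != 0;
    }

    int main(void)
    {
        /* e.g. forward 802.1X PAE (01-80-C2-00-00-03) but nothing else */
        unsigned short mask = 1u << 0x03;

        printf("PAE: %d, LLDP: %d\n",
               may_forward(mask, 0x03), may_forward(mask, 0x0e));
        printf("mask clashes with restricted bits: %s\n",
               (mask & BR_GROUPFWD_RESTRICTED) ? "yes (sysfs rejects it)" : "no");
        return 0;
    }
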
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 857a021deea9..d7d6fb05411f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -29,6 +29,11 @@
29 29
30#define BR_VERSION "2.3" 30#define BR_VERSION "2.3"
31 31
32/* Control of forwarding link local multicast */
33#define BR_GROUPFWD_DEFAULT 0
34/* Don't allow forwarding control protocols like STP and LLDP */
35#define BR_GROUPFWD_RESTRICTED 0x4007u
36
32/* Path to usermode spanning tree program */ 37/* Path to usermode spanning tree program */
33#define BR_STP_PROG "/sbin/bridge-stp" 38#define BR_STP_PROG "/sbin/bridge-stp"
34 39
@@ -193,6 +198,8 @@ struct net_bridge
193 unsigned long flags; 198 unsigned long flags;
194#define BR_SET_MAC_ADDR 0x00000001 199#define BR_SET_MAC_ADDR 0x00000001
195 200
201 u16 group_fwd_mask;
202
196 /* STP */ 203 /* STP */
197 bridge_id designated_root; 204 bridge_id designated_root;
198 bridge_id bridge_id; 205 bridge_id bridge_id;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 10eda3cd1d71..19308e305d85 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/kmod.h>
15#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
17 18
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 68b893ea8c3a..c236c0e43984 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -149,6 +149,39 @@ static ssize_t store_stp_state(struct device *d,
149static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state, 149static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
150 store_stp_state); 150 store_stp_state);
151 151
152static ssize_t show_group_fwd_mask(struct device *d,
153 struct device_attribute *attr, char *buf)
154{
155 struct net_bridge *br = to_bridge(d);
156 return sprintf(buf, "%#x\n", br->group_fwd_mask);
157}
158
159
160static ssize_t store_group_fwd_mask(struct device *d,
161 struct device_attribute *attr, const char *buf,
162 size_t len)
163{
164 struct net_bridge *br = to_bridge(d);
165 char *endp;
166 unsigned long val;
167
168 if (!capable(CAP_NET_ADMIN))
169 return -EPERM;
170
171 val = simple_strtoul(buf, &endp, 0);
172 if (endp == buf)
173 return -EINVAL;
174
175 if (val & BR_GROUPFWD_RESTRICTED)
176 return -EINVAL;
177
178 br->group_fwd_mask = val;
179
180 return len;
181}
182static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
183 store_group_fwd_mask);
184
152static ssize_t show_priority(struct device *d, struct device_attribute *attr, 185static ssize_t show_priority(struct device *d, struct device_attribute *attr,
153 char *buf) 186 char *buf)
154{ 187{
@@ -652,6 +685,7 @@ static struct attribute *bridge_attrs[] = {
652 &dev_attr_max_age.attr, 685 &dev_attr_max_age.attr,
653 &dev_attr_ageing_time.attr, 686 &dev_attr_ageing_time.attr,
654 &dev_attr_stp_state.attr, 687 &dev_attr_stp_state.attr,
688 &dev_attr_group_fwd_mask.attr,
655 &dev_attr_priority.attr, 689 &dev_attr_priority.attr,
656 &dev_attr_bridge_id.attr, 690 &dev_attr_bridge_id.attr,
657 &dev_attr_root_id.attr, 691 &dev_attr_root_id.attr,
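
Usage note for the sysfs attribute added above, as a hedged userspace sketch: the path assumes a bridge device named br0, and since store_group_fwd_mask() parses with base 0, hex input works; anything overlapping BR_GROUPFWD_RESTRICTED is rejected with EINVAL.

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/net/br0/bridge/group_fwd_mask", "w");

        if (!f) {
            perror("open group_fwd_mask");
            return 1;
        }
        /* allow forwarding of 802.1X PAE frames (bit 0x03 = 0x8) */
        fprintf(f, "0x8\n");
        fclose(f);
        return 0;
    }
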
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index bf2a333ca7c7..5449294bdd5e 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -102,16 +102,15 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
102 unsigned int n; 102 unsigned int n;
103 103
104 n = max(size, nlbufsiz); 104 n = max(size, nlbufsiz);
105 skb = alloc_skb(n, GFP_ATOMIC); 105 skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
106 if (!skb) { 106 if (!skb) {
107 pr_debug("cannot alloc whole buffer of size %ub!\n", n);
108 if (n > size) { 107 if (n > size) {
109 /* try to allocate only as much as we need for 108 /* try to allocate only as much as we need for
110 * current packet */ 109 * current packet */
111 skb = alloc_skb(size, GFP_ATOMIC); 110 skb = alloc_skb(size, GFP_ATOMIC);
112 if (!skb) 111 if (!skb)
113 pr_debug("cannot even allocate " 112 pr_debug("cannot even allocate buffer of size %ub\n",
114 "buffer of size %ub\n", size); 113 size);
115 } 114 }
116 } 115 }
117 116
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1bcaf36ad612..40d8258bf74f 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void)
87 if (ret < 0) 87 if (ret < 0)
88 return ret; 88 return ret;
89 /* see br_input.c */ 89 /* see br_input.c */
90 rcu_assign_pointer(br_should_route_hook, 90 RCU_INIT_POINTER(br_should_route_hook,
91 (br_should_route_hook_t *)ebt_broute); 91 (br_should_route_hook_t *)ebt_broute);
92 return 0; 92 return 0;
93} 93}
94 94
95static void __exit ebtable_broute_fini(void) 95static void __exit ebtable_broute_fini(void)
96{ 96{
97 rcu_assign_pointer(br_should_route_hook, NULL); 97 RCU_INIT_POINTER(br_should_route_hook, NULL);
98 synchronize_net(); 98 synchronize_net();
99 unregister_pernet_subsys(&broute_net_ops); 99 unregister_pernet_subsys(&broute_net_ops);
100} 100}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7f9ac0742d19..f1fa1f6e658d 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -16,6 +16,7 @@
16#include <linux/net.h> 16#include <linux/net.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/module.h>
19#include <net/netns/generic.h> 20#include <net/netns/generic.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
@@ -212,8 +213,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
212 enum cfcnfg_phy_preference pref; 213 enum cfcnfg_phy_preference pref;
213 enum cfcnfg_phy_type phy_type; 214 enum cfcnfg_phy_type phy_type;
214 struct cfcnfg *cfg; 215 struct cfcnfg *cfg;
215 struct caif_device_entry_list *caifdevs = 216 struct caif_device_entry_list *caifdevs;
216 caif_device_list(dev_net(dev));
217 217
218 if (dev->type != ARPHRD_CAIF) 218 if (dev->type != ARPHRD_CAIF)
219 return 0; 219 return 0;
@@ -222,6 +222,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
222 if (cfg == NULL) 222 if (cfg == NULL)
223 return 0; 223 return 0;
224 224
225 caifdevs = caif_device_list(dev_net(dev));
226
225 switch (what) { 227 switch (what) {
226 case NETDEV_REGISTER: 228 case NETDEV_REGISTER:
227 caifd = caif_device_alloc(dev); 229 caifd = caif_device_alloc(dev);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 52fe33bee029..00523ecc4ced 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -78,10 +78,8 @@ struct cfcnfg *cfcnfg_create(void)
78 78
79 /* Initiate this layer */ 79 /* Initiate this layer */
80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); 80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
81 if (!this) { 81 if (!this)
82 pr_warn("Out of memory\n");
83 return NULL; 82 return NULL;
84 }
85 this->mux = cfmuxl_create(); 83 this->mux = cfmuxl_create();
86 if (!this->mux) 84 if (!this->mux)
87 goto out_of_mem; 85 goto out_of_mem;
@@ -108,8 +106,6 @@ struct cfcnfg *cfcnfg_create(void)
108 106
109 return this; 107 return this;
110out_of_mem: 108out_of_mem:
111 pr_warn("Out of memory\n");
112
113 synchronize_rcu(); 109 synchronize_rcu();
114 110
115 kfree(this->mux); 111 kfree(this->mux);
@@ -448,10 +444,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
448 "- unknown channel type\n"); 444 "- unknown channel type\n");
449 goto unlock; 445 goto unlock;
450 } 446 }
451 if (!servicel) { 447 if (!servicel)
452 pr_warn("Out of memory\n");
453 goto unlock; 448 goto unlock;
454 }
455 layer_set_dn(servicel, cnfg->mux); 449 layer_set_dn(servicel, cnfg->mux);
456 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); 450 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
457 layer_set_up(servicel, adapt_layer); 451 layer_set_up(servicel, adapt_layer);
@@ -473,7 +467,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
473{ 467{
474 struct cflayer *frml; 468 struct cflayer *frml;
475 struct cflayer *phy_driver = NULL; 469 struct cflayer *phy_driver = NULL;
476 struct cfcnfg_phyinfo *phyinfo; 470 struct cfcnfg_phyinfo *phyinfo = NULL;
477 int i; 471 int i;
478 u8 phyid; 472 u8 phyid;
479 473
@@ -488,25 +482,25 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
488 goto got_phyid; 482 goto got_phyid;
489 } 483 }
490 pr_warn("Too many CAIF Link Layers (max 6)\n"); 484 pr_warn("Too many CAIF Link Layers (max 6)\n");
491 goto out; 485 goto out_err;
492 486
493got_phyid: 487got_phyid:
494 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); 488 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
489 if (!phyinfo)
490 goto out_err;
495 491
496 switch (phy_type) { 492 switch (phy_type) {
497 case CFPHYTYPE_FRAG: 493 case CFPHYTYPE_FRAG:
498 phy_driver = 494 phy_driver =
499 cfserl_create(CFPHYTYPE_FRAG, phyid, stx); 495 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
500 if (!phy_driver) { 496 if (!phy_driver)
501 pr_warn("Out of memory\n"); 497 goto out_err;
502 goto out;
503 }
504 break; 498 break;
505 case CFPHYTYPE_CAIF: 499 case CFPHYTYPE_CAIF:
506 phy_driver = NULL; 500 phy_driver = NULL;
507 break; 501 break;
508 default: 502 default:
509 goto out; 503 goto out_err;
510 } 504 }
511 phy_layer->id = phyid; 505 phy_layer->id = phyid;
512 phyinfo->pref = pref; 506 phyinfo->pref = pref;
@@ -520,11 +514,8 @@ got_phyid:
520 514
521 frml = cffrml_create(phyid, fcs); 515 frml = cffrml_create(phyid, fcs);
522 516
523 if (!frml) { 517 if (!frml)
524 pr_warn("Out of memory\n"); 518 goto out_err;
525 kfree(phyinfo);
526 goto out;
527 }
528 phyinfo->frm_layer = frml; 519 phyinfo->frm_layer = frml;
529 layer_set_up(frml, cnfg->mux); 520 layer_set_up(frml, cnfg->mux);
530 521
@@ -540,7 +531,12 @@ got_phyid:
540 } 531 }
541 532
542 list_add_rcu(&phyinfo->node, &cnfg->phys); 533 list_add_rcu(&phyinfo->node, &cnfg->phys);
543out: 534 mutex_unlock(&cnfg->lock);
535 return;
536
537out_err:
538 kfree(phy_driver);
539 kfree(phyinfo);
544 mutex_unlock(&cnfg->lock); 540 mutex_unlock(&cnfg->lock);
545} 541}
546EXPORT_SYMBOL(cfcnfg_add_phy_layer); 542EXPORT_SYMBOL(cfcnfg_add_phy_layer);
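
Note on the cfcnfg_add_phy_layer() hunk above: the scattered allocation-failure branches collapse into a single out_err label, relying on kfree(NULL) being a no-op. A userspace sketch of the same shape; on success, ownership would pass to the registered layer list, so nothing is freed there:

    #include <stdlib.h>

    static int setup_sketch(void)
    {
        char *phy_driver = NULL;    /* both start NULL so out_err is always safe */
        char *phyinfo = NULL;

        phyinfo = malloc(32);
        if (!phyinfo)
            goto out_err;

        phy_driver = malloc(64);
        if (!phy_driver)
            goto out_err;

        /* ... hook the layers up; ownership passes to a registry here ... */
        return 0;

    out_err:
        free(phy_driver);   /* free(NULL), like kfree(NULL), does nothing */
        free(phyinfo);
        return -1;
    }

    int main(void)
    {
        return setup_sketch() ? 1 : 0;
    }
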
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index e22671bed669..5cf52225692e 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -35,15 +35,12 @@ struct cflayer *cfctrl_create(void)
35{ 35{
36 struct dev_info dev_info; 36 struct dev_info dev_info;
37 struct cfctrl *this = 37 struct cfctrl *this =
38 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 38 kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
39 if (!this) { 39 if (!this)
40 pr_warn("Out of memory\n");
41 return NULL; 40 return NULL;
42 }
43 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 41 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
44 memset(&dev_info, 0, sizeof(dev_info)); 42 memset(&dev_info, 0, sizeof(dev_info));
45 dev_info.id = 0xff; 43 dev_info.id = 0xff;
46 memset(this, 0, sizeof(*this));
47 cfsrvl_init(&this->serv, 0, &dev_info, false); 44 cfsrvl_init(&this->serv, 0, &dev_info, false);
48 atomic_set(&this->req_seq_no, 1); 45 atomic_set(&this->req_seq_no, 1);
49 atomic_set(&this->rsp_seq_no, 1); 46 atomic_set(&this->rsp_seq_no, 1);
@@ -180,10 +177,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
180 struct cfctrl *cfctrl = container_obj(layer); 177 struct cfctrl *cfctrl = container_obj(layer);
181 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
182 struct cflayer *dn = cfctrl->serv.layer.dn; 179 struct cflayer *dn = cfctrl->serv.layer.dn;
183 if (!pkt) { 180 if (!pkt)
184 pr_warn("Out of memory\n");
185 return; 181 return;
186 }
187 if (!dn) { 182 if (!dn) {
188 pr_debug("not able to send enum request\n"); 183 pr_debug("not able to send enum request\n");
189 return; 184 return;
@@ -224,10 +219,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
224 } 219 }
225 220
226 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 221 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
227 if (!pkt) { 222 if (!pkt)
228 pr_warn("Out of memory\n");
229 return -ENOMEM; 223 return -ENOMEM;
230 }
231 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 224 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
232 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype); 225 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
233 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid); 226 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
@@ -275,10 +268,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
275 return -EINVAL; 268 return -EINVAL;
276 } 269 }
277 req = kzalloc(sizeof(*req), GFP_KERNEL); 270 req = kzalloc(sizeof(*req), GFP_KERNEL);
278 if (!req) { 271 if (!req)
279 pr_warn("Out of memory\n");
280 return -ENOMEM; 272 return -ENOMEM;
281 }
282 req->client_layer = user_layer; 273 req->client_layer = user_layer;
283 req->cmd = CFCTRL_CMD_LINK_SETUP; 274 req->cmd = CFCTRL_CMD_LINK_SETUP;
284 req->param = *param; 275 req->param = *param;
@@ -312,10 +303,8 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
312 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 303 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
313 struct cflayer *dn = cfctrl->serv.layer.dn; 304 struct cflayer *dn = cfctrl->serv.layer.dn;
314 305
315 if (!pkt) { 306 if (!pkt)
316 pr_warn("Out of memory\n");
317 return -ENOMEM; 307 return -ENOMEM;
318 }
319 308
320 if (!dn) { 309 if (!dn) {
321 pr_debug("not able to send link-down request\n"); 310 pr_debug("not able to send link-down request\n");
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 11a2af4c162a..65d6ef3cf9aa 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -19,13 +19,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
19 19
20struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) 20struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
21{ 21{
22 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 22 struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!dbg) { 23 if (!dbg)
24 pr_warn("Out of memory\n");
25 return NULL; 24 return NULL;
26 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); 25 caif_assert(offsetof(struct cfsrvl, layer) == 0);
28 memset(dbg, 0, sizeof(struct cfsrvl));
29 cfsrvl_init(dbg, channel_id, dev_info, false); 26 cfsrvl_init(dbg, channel_id, dev_info, false);
30 dbg->layer.receive = cfdbgl_receive; 27 dbg->layer.receive = cfdbgl_receive;
31 dbg->layer.transmit = cfdbgl_transmit; 28 dbg->layer.transmit = cfdbgl_transmit;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0382dec84fdc..0f5ff27aa41c 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -26,13 +26,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26 26
27struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) 27struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
28{ 28{
29 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *dgm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
30 if (!dgm) { 30 if (!dgm)
31 pr_warn("Out of memory\n");
32 return NULL; 31 return NULL;
33 }
34 caif_assert(offsetof(struct cfsrvl, layer) == 0); 32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
35 memset(dgm, 0, sizeof(struct cfsrvl));
36 cfsrvl_init(dgm, channel_id, dev_info, true); 33 cfsrvl_init(dgm, channel_id, dev_info, true);
37 dgm->layer.receive = cfdgml_receive; 34 dgm->layer.receive = cfdgml_receive;
38 dgm->layer.transmit = cfdgml_transmit; 35 dgm->layer.transmit = cfdgml_transmit;
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 04204b202718..f39921171d0d 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -34,11 +34,9 @@ static u32 cffrml_rcv_error;
34static u32 cffrml_rcv_checsum_error; 34static u32 cffrml_rcv_checsum_error;
35struct cflayer *cffrml_create(u16 phyid, bool use_fcs) 35struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
36{ 36{
37 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); 37 struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC);
38 if (!this) { 38 if (!this)
39 pr_warn("Out of memory\n");
40 return NULL; 39 return NULL;
41 }
42 this->pcpu_refcnt = alloc_percpu(int); 40 this->pcpu_refcnt = alloc_percpu(int);
43 if (this->pcpu_refcnt == NULL) { 41 if (this->pcpu_refcnt == NULL) {
44 kfree(this); 42 kfree(this);
@@ -47,7 +45,6 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
47 45
48 caif_assert(offsetof(struct cffrml, layer) == 0); 46 caif_assert(offsetof(struct cffrml, layer) == 0);
49 47
50 memset(this, 0, sizeof(struct cflayer));
51 this->layer.receive = cffrml_receive; 48 this->layer.receive = cffrml_receive;
52 this->layer.transmit = cffrml_transmit; 49 this->layer.transmit = cffrml_transmit;
53 this->layer.ctrlcmd = cffrml_ctrlcmd; 50 this->layer.ctrlcmd = cffrml_ctrlcmd;
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index c23979e79dfa..b36f24a4c8e7 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
108 int idx = phyid % DN_CACHE_SIZE; 108 int idx = phyid % DN_CACHE_SIZE;
109 109
110 spin_lock_bh(&muxl->transmit_lock); 110 spin_lock_bh(&muxl->transmit_lock);
111 rcu_assign_pointer(muxl->dn_cache[idx], NULL); 111 RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
112 dn = get_from_id(&muxl->frml_list, phyid); 112 dn = get_from_id(&muxl->frml_list, phyid);
113 if (dn == NULL) 113 if (dn == NULL)
114 goto out; 114 goto out;
@@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
164 if (up == NULL) 164 if (up == NULL)
165 goto out; 165 goto out;
166 166
167 rcu_assign_pointer(muxl->up_cache[idx], NULL); 167 RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
168 list_del_rcu(&up->node); 168 list_del_rcu(&up->node);
169out: 169out:
170 spin_unlock_bh(&muxl->receive_lock); 170 spin_unlock_bh(&muxl->receive_lock);
@@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
261 261
262 idx = layer->id % UP_CACHE_SIZE; 262 idx = layer->id % UP_CACHE_SIZE;
263 spin_lock_bh(&muxl->receive_lock); 263 spin_lock_bh(&muxl->receive_lock);
264 rcu_assign_pointer(muxl->up_cache[idx], NULL); 264 RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
265 list_del_rcu(&layer->node); 265 list_del_rcu(&layer->node);
266 spin_unlock_bh(&muxl->receive_lock); 266 spin_unlock_bh(&muxl->receive_lock);
267 } 267 }
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 75d4bfae1a78..df08c47183d4 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,6 +9,7 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/export.h>
12#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
13 14
14#define PKT_PREFIX 48 15#define PKT_PREFIX 48
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 0deabb440051..81660f809713 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -46,13 +46,10 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
46 int mtu_size) 46 int mtu_size)
47{ 47{
48 int tmp; 48 int tmp;
49 struct cfrfml *this = 49 struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
50 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
51 50
52 if (!this) { 51 if (!this)
53 pr_warn("Out of memory\n");
54 return NULL; 52 return NULL;
55 }
56 53
57 cfsrvl_init(&this->serv, channel_id, dev_info, false); 54 cfsrvl_init(&this->serv, channel_id, dev_info, false);
58 this->serv.release = cfrfml_release; 55 this->serv.release = cfrfml_release;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 2715c84cfa87..797c8d165993 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -33,13 +33,10 @@ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
33 33
34struct cflayer *cfserl_create(int type, int instance, bool use_stx) 34struct cflayer *cfserl_create(int type, int instance, bool use_stx)
35{ 35{
36 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); 36 struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
37 if (!this) { 37 if (!this)
38 pr_warn("Out of memory\n");
39 return NULL; 38 return NULL;
40 }
41 caif_assert(offsetof(struct cfserl, layer) == 0); 39 caif_assert(offsetof(struct cfserl, layer) == 0);
42 memset(this, 0, sizeof(struct cfserl));
43 this->layer.receive = cfserl_receive; 40 this->layer.receive = cfserl_receive;
44 this->layer.transmit = cfserl_transmit; 41 this->layer.transmit = cfserl_transmit;
45 this->layer.ctrlcmd = cfserl_ctrlcmd; 42 this->layer.ctrlcmd = cfserl_ctrlcmd;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 535a1e72b366..b99f5b22689d 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -108,10 +108,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
108 struct caif_payload_info *info; 108 struct caif_payload_info *info;
109 u8 flow_on = SRVL_FLOW_ON; 109 u8 flow_on = SRVL_FLOW_ON;
110 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 110 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
111 if (!pkt) { 111 if (!pkt)
112 pr_warn("Out of memory\n");
113 return -ENOMEM; 112 return -ENOMEM;
114 }
115 113
116 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { 114 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
117 pr_err("Packet is erroneous!\n"); 115 pr_err("Packet is erroneous!\n");
@@ -130,10 +128,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
130 struct caif_payload_info *info; 128 struct caif_payload_info *info;
131 u8 flow_off = SRVL_FLOW_OFF; 129 u8 flow_off = SRVL_FLOW_OFF;
132 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 130 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
133 if (!pkt) { 131 if (!pkt)
134 pr_warn("Out of memory\n");
135 return -ENOMEM; 132 return -ENOMEM;
136 }
137 133
138 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 134 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
139 pr_err("Packet is erroneous!\n"); 135 pr_err("Packet is erroneous!\n");
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 98e027db18ed..53e49f3e3af3 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -26,13 +26,10 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
26 26
27struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) 27struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
28{ 28{
29 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
30 if (!util) { 30 if (!util)
31 pr_warn("Out of memory\n");
32 return NULL; 31 return NULL;
33 }
34 caif_assert(offsetof(struct cfsrvl, layer) == 0); 32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
35 memset(util, 0, sizeof(struct cfsrvl));
36 cfsrvl_init(util, channel_id, dev_info, true); 33 cfsrvl_init(util, channel_id, dev_info, true);
37 util->layer.receive = cfutill_receive; 34 util->layer.receive = cfutill_receive;
38 util->layer.transmit = cfutill_transmit; 35 util->layer.transmit = cfutill_transmit;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3ec83fbc2887..910ab0661f66 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -25,13 +25,10 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
25 25
26struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) 26struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
27{ 27{
28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 28 struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
29 if (!vei) { 29 if (!vei)
30 pr_warn("Out of memory\n");
31 return NULL; 30 return NULL;
32 }
33 caif_assert(offsetof(struct cfsrvl, layer) == 0); 31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
34 memset(vei, 0, sizeof(struct cfsrvl));
35 cfsrvl_init(vei, channel_id, dev_info, true); 32 cfsrvl_init(vei, channel_id, dev_info, true);
36 vei->layer.receive = cfvei_receive; 33 vei->layer.receive = cfvei_receive;
37 vei->layer.transmit = cfvei_transmit; 34 vei->layer.transmit = cfvei_transmit;
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index b2f5989ad455..e3f37db40ac3 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -21,14 +21,11 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
21 21
22struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) 22struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
23{ 23{
24 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 24 struct cfsrvl *vid = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
25 if (!vid) { 25 if (!vid)
26 pr_warn("Out of memory\n");
27 return NULL; 26 return NULL;
28 }
29 caif_assert(offsetof(struct cfsrvl, layer) == 0); 27 caif_assert(offsetof(struct cfsrvl, layer) == 0);
30 28
31 memset(vid, 0, sizeof(struct cfsrvl));
32 cfsrvl_init(vid, channel_id, dev_info, false); 29 cfsrvl_init(vid, channel_id, dev_info, false);
33 vid->layer.receive = cfvidl_receive; 30 vid->layer.receive = cfvidl_receive;
34 vid->layer.transmit = cfvidl_transmit; 31 vid->layer.transmit = cfvidl_transmit;
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 89395b2c8bca..03200699d274 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -40,5 +40,16 @@ config CAN_BCM
40 CAN messages are used on the bus (e.g. in automotive environments). 40 CAN messages are used on the bus (e.g. in automotive environments).
41 To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM. 41 To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM.
42 42
43config CAN_GW
44 tristate "CAN Gateway/Router (with netlink configuration)"
45 depends on CAN
46	default n
47 ---help---
48 The CAN Gateway/Router is used to route (and modify) CAN frames.
49 It is based on the PF_CAN core infrastructure for msg filtering and
50	  msg sending, and it can optionally modify routed CAN frames on the fly.
51 CAN frames can be routed between CAN network interfaces (one hop).
52 They can be modified with AND/OR/XOR/SET operations as configured
53 by the netlink configuration interface known e.g. from iptables.
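A hedged sketch (not part of the patch) of how the four modification types act on a CAN identifier, applied in the AND -> OR -> XOR -> SET order in which net/can/gw.c installs them; the *_mask/set_val names are illustrative only:

	static canid_t apply_mods(canid_t id, canid_t and_mask, canid_t or_mask,
				  canid_t xor_mask, canid_t set_val)
	{
		id &= and_mask;		/* AND: clear unwanted bits */
		id |= or_mask;		/* OR:  force bits on */
		id ^= xor_mask;		/* XOR: toggle bits */
		id = set_val;		/* SET: replace the id entirely */
		return id;
	}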
43 54
44source "drivers/net/can/Kconfig" 55source "drivers/net/can/Kconfig"
diff --git a/net/can/Makefile b/net/can/Makefile
index 2d3894b32742..cef49eb1f5c7 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -10,3 +10,6 @@ can-raw-y := raw.o
10 10
11obj-$(CONFIG_CAN_BCM) += can-bcm.o 11obj-$(CONFIG_CAN_BCM) += can-bcm.o
12can-bcm-y := bcm.o 12can-bcm-y := bcm.o
13
14obj-$(CONFIG_CAN_GW) += can-gw.o
15can-gw-y := gw.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 9b0c32a2690c..0ce2ad0696da 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -38,8 +38,6 @@
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE. 39 * DAMAGE.
40 * 40 *
41 * Send feedback to <socketcan-users@lists.berlios.de>
42 *
43 */ 41 */
44 42
45#include <linux/module.h> 43#include <linux/module.h>
@@ -719,7 +717,7 @@ int can_proto_register(const struct can_proto *cp)
719 proto); 717 proto);
720 err = -EBUSY; 718 err = -EBUSY;
721 } else 719 } else
722 rcu_assign_pointer(proto_tab[proto], cp); 720 RCU_INIT_POINTER(proto_tab[proto], cp);
723 721
724 mutex_unlock(&proto_tab_lock); 722 mutex_unlock(&proto_tab_lock);
725 723
@@ -740,7 +738,7 @@ void can_proto_unregister(const struct can_proto *cp)
740 738
741 mutex_lock(&proto_tab_lock); 739 mutex_lock(&proto_tab_lock);
742 BUG_ON(proto_tab[proto] != cp); 740 BUG_ON(proto_tab[proto] != cp);
743 rcu_assign_pointer(proto_tab[proto], NULL); 741 RCU_INIT_POINTER(proto_tab[proto], NULL);
744 mutex_unlock(&proto_tab_lock); 742 mutex_unlock(&proto_tab_lock);
745 743
746 synchronize_rcu(); 744 synchronize_rcu();
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 34253b84e30f..fd882dbadad3 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -35,8 +35,6 @@
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
36 * DAMAGE. 36 * DAMAGE.
37 * 37 *
38 * Send feedback to <socketcan-users@lists.berlios.de>
39 *
40 */ 38 */
41 39
42#ifndef AF_CAN_H 40#ifndef AF_CAN_H
diff --git a/net/can/bcm.c b/net/can/bcm.c
index c84963d2dee6..151b7730c12c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/can/gw.c b/net/can/gw.c
new file mode 100644
index 000000000000..3d79b127881e
--- /dev/null
+++ b/net/can/gw.c
@@ -0,0 +1,957 @@
1/*
2 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
3 *
4 * Copyright (c) 2011 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/types.h>
45#include <linux/list.h>
46#include <linux/spinlock.h>
47#include <linux/rcupdate.h>
48#include <linux/rculist.h>
49#include <linux/net.h>
50#include <linux/netdevice.h>
51#include <linux/if_arp.h>
52#include <linux/skbuff.h>
53#include <linux/can.h>
54#include <linux/can/core.h>
55#include <linux/can/gw.h>
56#include <net/rtnetlink.h>
57#include <net/net_namespace.h>
58#include <net/sock.h>
59
60#define CAN_GW_VERSION "20101209"
61static __initdata const char banner[] =
62 KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
63
64MODULE_DESCRIPTION("PF_CAN netlink gateway");
65MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
67MODULE_ALIAS("can-gw");
68
69HLIST_HEAD(cgw_list);
70static struct notifier_block notifier;
71
72static struct kmem_cache *cgw_cache __read_mostly;
73
74/* structure that contains the (on-the-fly) CAN frame modifications */
75struct cf_mod {
76 struct {
77 struct can_frame and;
78 struct can_frame or;
79 struct can_frame xor;
80 struct can_frame set;
81 } modframe;
82 struct {
83 u8 and;
84 u8 or;
85 u8 xor;
86 u8 set;
87 } modtype;
88 void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
89 struct cf_mod *mod);
90
91 /* CAN frame checksum calculation after CAN frame modifications */
92 struct {
93 struct cgw_csum_xor xor;
94 struct cgw_csum_crc8 crc8;
95 } csum;
96 struct {
97 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
98 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
99 } csumfunc;
100};
101
102
103/*
104 * So far we just support CAN -> CAN routing and frame modifications.
105 *
106 * The internal can_can_gw structure contains data and attributes for
107 * a CAN -> CAN gateway job.
108 */
109struct can_can_gw {
110 struct can_filter filter;
111 int src_idx;
112 int dst_idx;
113};
114
115/* list entry for CAN gateways jobs */
116struct cgw_job {
117 struct hlist_node list;
118 struct rcu_head rcu;
119 u32 handled_frames;
120 u32 dropped_frames;
121 struct cf_mod mod;
122 union {
123 /* CAN frame data source */
124 struct net_device *dev;
125 } src;
126 union {
127 /* CAN frame data destination */
128 struct net_device *dev;
129 } dst;
130 union {
131 struct can_can_gw ccgw;
132 /* tbc */
133 };
134 u8 gwtype;
135 u16 flags;
136};
137
138/* modification functions that are invoked in the hot path in can_can_gw_rcv */
139
140#define MODFUNC(func, op) static void func(struct can_frame *cf, \
141 struct cf_mod *mod) { op ; }
142
143MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
144MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
145MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
146MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
147MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
148MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
149MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
150MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
151MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
152MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
153MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
154MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
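/* Each MODFUNC() line above expands to a tiny static function, e.g.
 * MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) becomes
 *
 *	static void mod_and_id(struct can_frame *cf, struct cf_mod *mod)
 *	{
 *		cf->can_id &= mod->modframe.and.can_id;
 *	}
 *
 * so the receive hot path can invoke the preselected operations via
 * mod->modfunc[] without branching on modtype per frame.
 */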
155
156static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
157{
158 /*
159	 * Copy the struct members separately to ensure that no uninitialized
160	 * data are copied in the 3-byte hole of the struct. This is needed
161	 * to allow simple memcmp() comparisons of data in struct cf_mod.
162 */
163
164 dst->can_id = src->can_id;
165 dst->can_dlc = src->can_dlc;
166 *(u64 *)dst->data = *(u64 *)src->data;
167}
168
169static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
170{
171 /*
172 * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
173 * relative to received dlc -1 .. -8 :
174 * e.g. for received dlc = 8
175 * -1 => index = 7 (data[7])
176 * -3 => index = 5 (data[5])
177 * -8 => index = 0 (data[0])
178 */
179
180 if (fr > -9 && fr < 8 &&
181 to > -9 && to < 8 &&
182 re > -9 && re < 8)
183 return 0;
184 else
185 return -EINVAL;
186}
187
188static inline int calc_idx(int idx, int rx_dlc)
189{
190 if (idx < 0)
191 return rx_dlc + idx;
192 else
193 return idx;
194}
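/* Worked example: for a received dlc of 8, calc_idx(-1, 8) == 7 (last
 * data byte) and calc_idx(-8, 8) == 0 (first data byte); non-negative
 * indices pass through unchanged. */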
195
196static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
197{
198 int from = calc_idx(xor->from_idx, cf->can_dlc);
199 int to = calc_idx(xor->to_idx, cf->can_dlc);
200 int res = calc_idx(xor->result_idx, cf->can_dlc);
201 u8 val = xor->init_xor_val;
202 int i;
203
204 if (from < 0 || to < 0 || res < 0)
205 return;
206
207 if (from <= to) {
208 for (i = from; i <= to; i++)
209 val ^= cf->data[i];
210 } else {
211 for (i = from; i >= to; i--)
212 val ^= cf->data[i];
213 }
214
215 cf->data[res] = val;
216}
217
218static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
219{
220 u8 val = xor->init_xor_val;
221 int i;
222
223 for (i = xor->from_idx; i <= xor->to_idx; i++)
224 val ^= cf->data[i];
225
226 cf->data[xor->result_idx] = val;
227}
228
229static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
230{
231 u8 val = xor->init_xor_val;
232 int i;
233
234 for (i = xor->from_idx; i >= xor->to_idx; i--)
235 val ^= cf->data[i];
236
237 cf->data[xor->result_idx] = val;
238}
239
240static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
241{
242 int from = calc_idx(crc8->from_idx, cf->can_dlc);
243 int to = calc_idx(crc8->to_idx, cf->can_dlc);
244 int res = calc_idx(crc8->result_idx, cf->can_dlc);
245 u8 crc = crc8->init_crc_val;
246 int i;
247
248 if (from < 0 || to < 0 || res < 0)
249 return;
250
251 if (from <= to) {
252		for (i = from; i <= to; i++)
253			crc = crc8->crctab[crc^cf->data[i]];
254	} else {
255		for (i = from; i >= to; i--)
256			crc = crc8->crctab[crc^cf->data[i]];
257 }
258
259 switch (crc8->profile) {
260
261 case CGW_CRC8PRF_1U8:
262 crc = crc8->crctab[crc^crc8->profile_data[0]];
263 break;
264
265 case CGW_CRC8PRF_16U8:
266 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
267 break;
268
269 case CGW_CRC8PRF_SFFID_XOR:
270 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
271 (cf->can_id >> 8 & 0xFF)];
272 break;
273
274 }
275
276	cf->data[res] = crc^crc8->final_xor_val;
277}
278
279static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
280{
281 u8 crc = crc8->init_crc_val;
282 int i;
283
284 for (i = crc8->from_idx; i <= crc8->to_idx; i++)
285 crc = crc8->crctab[crc^cf->data[i]];
286
287 switch (crc8->profile) {
288
289 case CGW_CRC8PRF_1U8:
290 crc = crc8->crctab[crc^crc8->profile_data[0]];
291 break;
292
293 case CGW_CRC8PRF_16U8:
294 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
295 break;
296
297 case CGW_CRC8PRF_SFFID_XOR:
298 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
299 (cf->can_id >> 8 & 0xFF)];
300 break;
301 }
302
303 cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
304}
305
306static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
307{
308 u8 crc = crc8->init_crc_val;
309 int i;
310
311 for (i = crc8->from_idx; i >= crc8->to_idx; i--)
312 crc = crc8->crctab[crc^cf->data[i]];
313
314 switch (crc8->profile) {
315
316 case CGW_CRC8PRF_1U8:
317 crc = crc8->crctab[crc^crc8->profile_data[0]];
318 break;
319
320 case CGW_CRC8PRF_16U8:
321 crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
322 break;
323
324 case CGW_CRC8PRF_SFFID_XOR:
325 crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
326 (cf->can_id >> 8 & 0xFF)];
327 break;
328 }
329
330 cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
331}
332
333/* the receive & process & send function */
334static void can_can_gw_rcv(struct sk_buff *skb, void *data)
335{
336 struct cgw_job *gwj = (struct cgw_job *)data;
337 struct can_frame *cf;
338 struct sk_buff *nskb;
339 int modidx = 0;
340
341 /* do not handle already routed frames - see comment below */
342 if (skb_mac_header_was_set(skb))
343 return;
344
345 if (!(gwj->dst.dev->flags & IFF_UP)) {
346 gwj->dropped_frames++;
347 return;
348 }
349
350 /*
351 * clone the given skb, which has not been done in can_rcv()
352 *
353 * When there is at least one modification function activated,
354 * we need to copy the skb as we want to modify skb->data.
355 */
356 if (gwj->mod.modfunc[0])
357 nskb = skb_copy(skb, GFP_ATOMIC);
358 else
359 nskb = skb_clone(skb, GFP_ATOMIC);
360
361 if (!nskb) {
362 gwj->dropped_frames++;
363 return;
364 }
365
366 /*
367 * Mark routed frames by setting some mac header length which is
368 * not relevant for the CAN frames located in the skb->data section.
369 *
370 * As dev->header_ops is not set in CAN netdevices no one is ever
371 * accessing the various header offsets in the CAN skbuffs anyway.
372 * E.g. using the packet socket to read CAN frames is still working.
373 */
374 skb_set_mac_header(nskb, 8);
375 nskb->dev = gwj->dst.dev;
376
377 /* pointer to modifiable CAN frame */
378 cf = (struct can_frame *)nskb->data;
379
380 /* perform preprocessed modification functions if there are any */
381 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
382 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
383
384 /* check for checksum updates when the CAN frame has been modified */
385 if (modidx) {
386 if (gwj->mod.csumfunc.crc8)
387 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
388
389 if (gwj->mod.csumfunc.xor)
390 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
391 }
392
393 /* clear the skb timestamp if not configured the other way */
394 if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
395 nskb->tstamp.tv64 = 0;
396
397 /* send to netdevice */
398 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
399 gwj->dropped_frames++;
400 else
401 gwj->handled_frames++;
402}
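/* Design note on the copy-vs-clone choice above: skb_clone() shares
 * skb->data with the original, so it is only safe for pure routing;
 * as soon as one modification function is set, skb_copy() is used so
 * that rewriting the frame cannot affect other receivers of the
 * original skb. */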
403
404static inline int cgw_register_filter(struct cgw_job *gwj)
405{
406 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
407 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
408 gwj, "gw");
409}
410
411static inline void cgw_unregister_filter(struct cgw_job *gwj)
412{
413 can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
414 gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
415}
416
417static int cgw_notifier(struct notifier_block *nb,
418 unsigned long msg, void *data)
419{
420 struct net_device *dev = (struct net_device *)data;
421
422 if (!net_eq(dev_net(dev), &init_net))
423 return NOTIFY_DONE;
424 if (dev->type != ARPHRD_CAN)
425 return NOTIFY_DONE;
426
427 if (msg == NETDEV_UNREGISTER) {
428
429 struct cgw_job *gwj = NULL;
430 struct hlist_node *n, *nx;
431
432 ASSERT_RTNL();
433
434 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
435
436 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
437 hlist_del(&gwj->list);
438 cgw_unregister_filter(gwj);
439				kmem_cache_free(cgw_cache, gwj);
440 }
441 }
442 }
443
444 return NOTIFY_DONE;
445}
446
447static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
448{
449 struct cgw_frame_mod mb;
450 struct rtcanmsg *rtcan;
451 struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
452 if (!nlh)
453 return -EMSGSIZE;
454
455 rtcan = nlmsg_data(nlh);
456 rtcan->can_family = AF_CAN;
457 rtcan->gwtype = gwj->gwtype;
458 rtcan->flags = gwj->flags;
459
460 /* add statistics if available */
461
462 if (gwj->handled_frames) {
463 if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
464 goto cancel;
465 else
466 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
467 }
468
469 if (gwj->dropped_frames) {
470 if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
471 goto cancel;
472 else
473 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
474 }
475
476 /* check non default settings of attributes */
477
478 if (gwj->mod.modtype.and) {
479 memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
480 mb.modtype = gwj->mod.modtype.and;
481 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
482 goto cancel;
483 else
484 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
485 }
486
487 if (gwj->mod.modtype.or) {
488 memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
489 mb.modtype = gwj->mod.modtype.or;
490 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
491 goto cancel;
492 else
493 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
494 }
495
496 if (gwj->mod.modtype.xor) {
497 memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
498 mb.modtype = gwj->mod.modtype.xor;
499 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
500 goto cancel;
501 else
502 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
503 }
504
505 if (gwj->mod.modtype.set) {
506 memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
507 mb.modtype = gwj->mod.modtype.set;
508 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
509 goto cancel;
510 else
511 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
512 }
513
514 if (gwj->mod.csumfunc.crc8) {
515 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
516 &gwj->mod.csum.crc8) < 0)
517 goto cancel;
518 else
519 nlh->nlmsg_len += NLA_HDRLEN + \
520 NLA_ALIGN(CGW_CS_CRC8_LEN);
521 }
522
523 if (gwj->mod.csumfunc.xor) {
524 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
525 &gwj->mod.csum.xor) < 0)
526 goto cancel;
527 else
528 nlh->nlmsg_len += NLA_HDRLEN + \
529 NLA_ALIGN(CGW_CS_XOR_LEN);
530 }
531
532 if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
533
534 if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
535 if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
536 &gwj->ccgw.filter) < 0)
537 goto cancel;
538 else
539 nlh->nlmsg_len += NLA_HDRLEN +
540 NLA_ALIGN(sizeof(struct can_filter));
541 }
542
543 if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
544 goto cancel;
545 else
546 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
547
548 if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
549 goto cancel;
550 else
551 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
552 }
553
554 return skb->len;
555
556cancel:
557 nlmsg_cancel(skb, nlh);
558 return -EMSGSIZE;
559}
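/* Note on the pattern above: nlmsg_put() only reserves the rtcanmsg
 * header, so after each successful nla_put() the message length is
 * advanced by hand by NLA_HDRLEN plus the NLA_ALIGN()ed payload size. */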
560
561/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
562static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
563{
564 struct cgw_job *gwj = NULL;
565 struct hlist_node *n;
566 int idx = 0;
567 int s_idx = cb->args[0];
568
569 rcu_read_lock();
570 hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
571 if (idx < s_idx)
572 goto cont;
573
574 if (cgw_put_job(skb, gwj) < 0)
575 break;
576cont:
577 idx++;
578 }
579 rcu_read_unlock();
580
581 cb->args[0] = idx;
582
583 return skb->len;
584}
585
586/* check for common and gwtype specific attributes */
587static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
588 u8 gwtype, void *gwtypeattr)
589{
590 struct nlattr *tb[CGW_MAX+1];
591 struct cgw_frame_mod mb;
592 int modidx = 0;
593 int err = 0;
594
595 /* initialize modification & checksum data space */
596 memset(mod, 0, sizeof(*mod));
597
598 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
599 if (err < 0)
600 return err;
601
602 /* check for AND/OR/XOR/SET modifications */
603
604 if (tb[CGW_MOD_AND] &&
605 nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
606 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
607
608 canframecpy(&mod->modframe.and, &mb.cf);
609 mod->modtype.and = mb.modtype;
610
611 if (mb.modtype & CGW_MOD_ID)
612 mod->modfunc[modidx++] = mod_and_id;
613
614 if (mb.modtype & CGW_MOD_DLC)
615 mod->modfunc[modidx++] = mod_and_dlc;
616
617 if (mb.modtype & CGW_MOD_DATA)
618 mod->modfunc[modidx++] = mod_and_data;
619 }
620
621 if (tb[CGW_MOD_OR] &&
622 nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
623 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
624
625 canframecpy(&mod->modframe.or, &mb.cf);
626 mod->modtype.or = mb.modtype;
627
628 if (mb.modtype & CGW_MOD_ID)
629 mod->modfunc[modidx++] = mod_or_id;
630
631 if (mb.modtype & CGW_MOD_DLC)
632 mod->modfunc[modidx++] = mod_or_dlc;
633
634 if (mb.modtype & CGW_MOD_DATA)
635 mod->modfunc[modidx++] = mod_or_data;
636 }
637
638 if (tb[CGW_MOD_XOR] &&
639 nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
640 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
641
642 canframecpy(&mod->modframe.xor, &mb.cf);
643 mod->modtype.xor = mb.modtype;
644
645 if (mb.modtype & CGW_MOD_ID)
646 mod->modfunc[modidx++] = mod_xor_id;
647
648 if (mb.modtype & CGW_MOD_DLC)
649 mod->modfunc[modidx++] = mod_xor_dlc;
650
651 if (mb.modtype & CGW_MOD_DATA)
652 mod->modfunc[modidx++] = mod_xor_data;
653 }
654
655 if (tb[CGW_MOD_SET] &&
656 nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
657 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
658
659 canframecpy(&mod->modframe.set, &mb.cf);
660 mod->modtype.set = mb.modtype;
661
662 if (mb.modtype & CGW_MOD_ID)
663 mod->modfunc[modidx++] = mod_set_id;
664
665 if (mb.modtype & CGW_MOD_DLC)
666 mod->modfunc[modidx++] = mod_set_dlc;
667
668 if (mb.modtype & CGW_MOD_DATA)
669 mod->modfunc[modidx++] = mod_set_data;
670 }
671
672 /* check for checksum operations after CAN frame modifications */
673 if (modidx) {
674
675 if (tb[CGW_CS_CRC8] &&
676 nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
677
678 struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
679 nla_data(tb[CGW_CS_CRC8]);
680
681 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
682 c->result_idx);
683 if (err)
684 return err;
685
686 nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
687 CGW_CS_CRC8_LEN);
688
689 /*
690 * select dedicated processing function to reduce
691 * runtime operations in receive hot path.
692 */
693 if (c->from_idx < 0 || c->to_idx < 0 ||
694 c->result_idx < 0)
695 mod->csumfunc.crc8 = cgw_csum_crc8_rel;
696 else if (c->from_idx <= c->to_idx)
697 mod->csumfunc.crc8 = cgw_csum_crc8_pos;
698 else
699 mod->csumfunc.crc8 = cgw_csum_crc8_neg;
700 }
701
702 if (tb[CGW_CS_XOR] &&
703 nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
704
705 struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
706 nla_data(tb[CGW_CS_XOR]);
707
708 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
709 c->result_idx);
710 if (err)
711 return err;
712
713 nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
714 CGW_CS_XOR_LEN);
715
716 /*
717 * select dedicated processing function to reduce
718 * runtime operations in receive hot path.
719 */
720 if (c->from_idx < 0 || c->to_idx < 0 ||
721 c->result_idx < 0)
722 mod->csumfunc.xor = cgw_csum_xor_rel;
723 else if (c->from_idx <= c->to_idx)
724 mod->csumfunc.xor = cgw_csum_xor_pos;
725 else
726 mod->csumfunc.xor = cgw_csum_xor_neg;
727 }
728 }
729
730 if (gwtype == CGW_TYPE_CAN_CAN) {
731
732 /* check CGW_TYPE_CAN_CAN specific attributes */
733
734 struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
735 memset(ccgw, 0, sizeof(*ccgw));
736
737 /* check for can_filter in attributes */
738 if (tb[CGW_FILTER] &&
739 nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
740 nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
741 sizeof(struct can_filter));
742
743 err = -ENODEV;
744
745 /* specifying two interfaces is mandatory */
746 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
747 return err;
748
749 if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
750 nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
751 sizeof(u32));
752
753 if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
754 nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
755 sizeof(u32));
756
757 /* both indices set to 0 for flushing all routing entries */
758 if (!ccgw->src_idx && !ccgw->dst_idx)
759 return 0;
760
761 /* only one index set to 0 is an error */
762 if (!ccgw->src_idx || !ccgw->dst_idx)
763 return err;
764 }
765
766 /* add the checks for other gwtypes here */
767
768 return 0;
769}
770
771static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
772 void *arg)
773{
774 struct rtcanmsg *r;
775 struct cgw_job *gwj;
776 int err = 0;
777
778 if (nlmsg_len(nlh) < sizeof(*r))
779 return -EINVAL;
780
781 r = nlmsg_data(nlh);
782 if (r->can_family != AF_CAN)
783 return -EPFNOSUPPORT;
784
785 /* so far we only support CAN -> CAN routings */
786 if (r->gwtype != CGW_TYPE_CAN_CAN)
787 return -EINVAL;
788
789 gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
790 if (!gwj)
791 return -ENOMEM;
792
793 gwj->handled_frames = 0;
794 gwj->dropped_frames = 0;
795 gwj->flags = r->flags;
796 gwj->gwtype = r->gwtype;
797
798 err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
799 if (err < 0)
800 goto out;
801
802 err = -ENODEV;
803
804 /* ifindex == 0 is not allowed for job creation */
805 if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
806 goto out;
807
808 gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);
809
810 if (!gwj->src.dev)
811 goto out;
812
813 /* check for CAN netdev not using header_ops - see gw_rcv() */
814 if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
815 goto put_src_out;
816
817 gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
818
819 if (!gwj->dst.dev)
820 goto put_src_out;
821
822 /* check for CAN netdev not using header_ops - see gw_rcv() */
823 if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
824 goto put_src_dst_out;
825
826 ASSERT_RTNL();
827
828 err = cgw_register_filter(gwj);
829 if (!err)
830 hlist_add_head_rcu(&gwj->list, &cgw_list);
831
832put_src_dst_out:
833 dev_put(gwj->dst.dev);
834put_src_out:
835 dev_put(gwj->src.dev);
836out:
837 if (err)
838 kmem_cache_free(cgw_cache, gwj);
839
840 return err;
841}
842
843static void cgw_remove_all_jobs(void)
844{
845 struct cgw_job *gwj = NULL;
846 struct hlist_node *n, *nx;
847
848 ASSERT_RTNL();
849
850 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
851 hlist_del(&gwj->list);
852 cgw_unregister_filter(gwj);
853		kmem_cache_free(cgw_cache, gwj);
854 }
855}
856
857static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
858{
859 struct cgw_job *gwj = NULL;
860 struct hlist_node *n, *nx;
861 struct rtcanmsg *r;
862 struct cf_mod mod;
863 struct can_can_gw ccgw;
864 int err = 0;
865
866 if (nlmsg_len(nlh) < sizeof(*r))
867 return -EINVAL;
868
869 r = nlmsg_data(nlh);
870 if (r->can_family != AF_CAN)
871 return -EPFNOSUPPORT;
872
873 /* so far we only support CAN -> CAN routings */
874 if (r->gwtype != CGW_TYPE_CAN_CAN)
875 return -EINVAL;
876
877 err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
878 if (err < 0)
879 return err;
880
881 /* two interface indices both set to 0 => remove all entries */
882 if (!ccgw.src_idx && !ccgw.dst_idx) {
883 cgw_remove_all_jobs();
884 return 0;
885 }
886
887 err = -EINVAL;
888
889 ASSERT_RTNL();
890
891 /* remove only the first matching entry */
892 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
893
894 if (gwj->flags != r->flags)
895 continue;
896
897 if (memcmp(&gwj->mod, &mod, sizeof(mod)))
898 continue;
899
900 /* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
901 if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
902 continue;
903
904 hlist_del(&gwj->list);
905 cgw_unregister_filter(gwj);
906		kmem_cache_free(cgw_cache, gwj);
907 err = 0;
908 break;
909 }
910
911 return err;
912}
913
914static __init int cgw_module_init(void)
915{
916 printk(banner);
917
918 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
919 0, 0, NULL);
920
921 if (!cgw_cache)
922 return -ENOMEM;
923
924 /* set notifier */
925 notifier.notifier_call = cgw_notifier;
926 register_netdevice_notifier(&notifier);
927
928 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
929 unregister_netdevice_notifier(&notifier);
930 kmem_cache_destroy(cgw_cache);
931 return -ENOBUFS;
932 }
933
934 /* Only the first call to __rtnl_register can fail */
935 __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
936 __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);
937
938 return 0;
939}
940
941static __exit void cgw_module_exit(void)
942{
943 rtnl_unregister_all(PF_CAN);
944
945 unregister_netdevice_notifier(&notifier);
946
947 rtnl_lock();
948 cgw_remove_all_jobs();
949 rtnl_unlock();
950
951 rcu_barrier(); /* Wait for completion of call_rcu()'s */
952
953 kmem_cache_destroy(cgw_cache);
954}
955
956module_init(cgw_module_init);
957module_exit(cgw_module_exit);
diff --git a/net/can/proc.c b/net/can/proc.c
index 0016f7339699..ba873c36d2fd 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/can/raw.c b/net/can/raw.c
index dea99a6e596c..cde1b4a20f75 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -37,8 +37,6 @@
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE. 38 * DAMAGE.
39 * 39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */ 40 */
43 41
44#include <linux/module.h> 42#include <linux/module.h>
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index be683f2d401f..cc04dd667a10 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -27,3 +27,17 @@ config CEPH_LIB_PRETTYDEBUG
27 27
28 If unsure, say N. 28 If unsure, say N.
29 29
30config CEPH_LIB_USE_DNS_RESOLVER
31 bool "Use in-kernel support for DNS lookup"
32 depends on CEPH_LIB
33 select DNS_RESOLVER
34 default n
35 help
36 If you say Y here, hostnames (e.g. monitor addresses) will
37 be resolved using the CONFIG_DNS_RESOLVER facility.
38
39 For information on how to use CONFIG_DNS_RESOLVER consult
40 Documentation/networking/dns_resolver.txt
41
42 If unsure, say N.
43
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 2883ea01e680..97f70e50ad3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -432,9 +432,12 @@ EXPORT_SYMBOL(ceph_client_id);
432/* 432/*
433 * create a fresh client instance 433 * create a fresh client instance
434 */ 434 */
435struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private) 435struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
436 unsigned supported_features,
437 unsigned required_features)
436{ 438{
437 struct ceph_client *client; 439 struct ceph_client *client;
440 struct ceph_entity_addr *myaddr = NULL;
438 int err = -ENOMEM; 441 int err = -ENOMEM;
439 442
440 client = kzalloc(sizeof(*client), GFP_KERNEL); 443 client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -449,15 +452,27 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
449 client->auth_err = 0; 452 client->auth_err = 0;
450 453
451 client->extra_mon_dispatch = NULL; 454 client->extra_mon_dispatch = NULL;
452 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT; 455 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT |
453 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT; 456 supported_features;
454 457 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT |
455 client->msgr = NULL; 458 required_features;
459
460 /* msgr */
461 if (ceph_test_opt(client, MYIP))
462 myaddr = &client->options->my_addr;
463 client->msgr = ceph_messenger_create(myaddr,
464 client->supported_features,
465 client->required_features);
466 if (IS_ERR(client->msgr)) {
467 err = PTR_ERR(client->msgr);
468 goto fail;
469 }
470 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
456 471
457 /* subsystems */ 472 /* subsystems */
458 err = ceph_monc_init(&client->monc, client); 473 err = ceph_monc_init(&client->monc, client);
459 if (err < 0) 474 if (err < 0)
460 goto fail; 475 goto fail_msgr;
461 err = ceph_osdc_init(&client->osdc, client); 476 err = ceph_osdc_init(&client->osdc, client);
462 if (err < 0) 477 if (err < 0)
463 goto fail_monc; 478 goto fail_monc;
@@ -466,6 +481,8 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
466 481
467fail_monc: 482fail_monc:
468 ceph_monc_stop(&client->monc); 483 ceph_monc_stop(&client->monc);
484fail_msgr:
485 ceph_messenger_destroy(client->msgr);
469fail: 486fail:
470 kfree(client); 487 kfree(client);
471 return ERR_PTR(err); 488 return ERR_PTR(err);
@@ -490,8 +507,7 @@ void ceph_destroy_client(struct ceph_client *client)
490 507
491 ceph_debugfs_client_cleanup(client); 508 ceph_debugfs_client_cleanup(client);
492 509
493 if (client->msgr) 510 ceph_messenger_destroy(client->msgr);
494 ceph_messenger_destroy(client->msgr);
495 511
496 ceph_destroy_options(client->options); 512 ceph_destroy_options(client->options);
497 513
@@ -514,24 +530,9 @@ static int have_mon_and_osd_map(struct ceph_client *client)
514 */ 530 */
515int __ceph_open_session(struct ceph_client *client, unsigned long started) 531int __ceph_open_session(struct ceph_client *client, unsigned long started)
516{ 532{
517 struct ceph_entity_addr *myaddr = NULL;
518 int err; 533 int err;
519 unsigned long timeout = client->options->mount_timeout * HZ; 534 unsigned long timeout = client->options->mount_timeout * HZ;
520 535
521 /* initialize the messenger */
522 if (client->msgr == NULL) {
523 if (ceph_test_opt(client, MYIP))
524 myaddr = &client->options->my_addr;
525 client->msgr = ceph_messenger_create(myaddr,
526 client->supported_features,
527 client->required_features);
528 if (IS_ERR(client->msgr)) {
529 client->msgr = NULL;
530 return PTR_ERR(client->msgr);
531 }
532 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
533 }
534
535 /* open session, and wait for mon and osd maps */ 536 /* open session, and wait for mon and osd maps */
536 err = ceph_monc_open_session(&client->monc); 537 err = ceph_monc_open_session(&client->monc);
537 if (err < 0) 538 if (err < 0)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9918e9eb276e..ad5b70801f37 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -11,12 +11,14 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/blkdev.h> 13#include <linux/blkdev.h>
14#include <linux/dns_resolver.h>
14#include <net/tcp.h> 15#include <net/tcp.h>
15 16
16#include <linux/ceph/libceph.h> 17#include <linux/ceph/libceph.h>
17#include <linux/ceph/messenger.h> 18#include <linux/ceph/messenger.h>
18#include <linux/ceph/decode.h> 19#include <linux/ceph/decode.h>
19#include <linux/ceph/pagelist.h> 20#include <linux/ceph/pagelist.h>
21#include <linux/export.h>
20 22
21/* 23/*
22 * Ceph uses the messenger to exchange ceph_msg messages with other 24 * Ceph uses the messenger to exchange ceph_msg messages with other
@@ -1078,6 +1080,101 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
1078} 1080}
1079 1081
1080/* 1082/*
1083 * Unlike the other *_pton() helpers, a return value of zero indicates success.
1084 */
1085static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1086 char delim, const char **ipend)
1087{
1088 struct sockaddr_in *in4 = (void *)ss;
1089 struct sockaddr_in6 *in6 = (void *)ss;
1090
1091 memset(ss, 0, sizeof(*ss));
1092
1093 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1094 ss->ss_family = AF_INET;
1095 return 0;
1096 }
1097
1098 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1099 ss->ss_family = AF_INET6;
1100 return 0;
1101 }
1102
1103 return -EINVAL;
1104}
1105
1106/*
1107 * Extract hostname string and resolve using kernel DNS facility.
1108 */
1109#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1110static int ceph_dns_resolve_name(const char *name, size_t namelen,
1111 struct sockaddr_storage *ss, char delim, const char **ipend)
1112{
1113 const char *end, *delim_p;
1114 char *colon_p, *ip_addr = NULL;
1115 int ip_len, ret;
1116
1117 /*
1118 * The hostname ends immediately before the delimiter or the port
1119 * marker (':'), whichever occurs first in the string.
1120 */
1121 delim_p = memchr(name, delim, namelen);
1122 colon_p = memchr(name, ':', namelen);
1123
1124 if (delim_p && colon_p)
1125 end = delim_p < colon_p ? delim_p : colon_p;
1126 else if (!delim_p && colon_p)
1127 end = colon_p;
1128 else {
1129 end = delim_p;
1130 if (!end) /* case: hostname:/ */
1131 end = name + namelen;
1132 }
1133
1134 if (end <= name)
1135 return -EINVAL;
1136
1137 /* do dns_resolve upcall */
1138 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1139 if (ip_len > 0)
1140 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1141 else
1142 ret = -ESRCH;
1143
1144 kfree(ip_addr);
1145
1146 *ipend = end;
1147
1148 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1149 ret, ret ? "failed" : ceph_pr_addr(ss));
1150
1151 return ret;
1152}
1153#else
1154static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1155 struct sockaddr_storage *ss, char delim, const char **ipend)
1156{
1157 return -EINVAL;
1158}
1159#endif
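/* For example, given "mon1.example.com:6789,10.0.0.2" with delim ',',
 * colon_p precedes delim_p, so end lands on the ':' and only
 * "mon1.example.com" is handed to dns_query(). */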
1160
1161/*
1162 * Parse a server name (IP or hostname). If a valid IP address is not found
1163 * then try to extract a hostname to resolve using userspace DNS upcall.
1164 */
1165static int ceph_parse_server_name(const char *name, size_t namelen,
1166 struct sockaddr_storage *ss, char delim, const char **ipend)
1167{
1168 int ret;
1169
1170 ret = ceph_pton(name, namelen, ss, delim, ipend);
1171 if (ret)
1172 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1173
1174 return ret;
1175}
1176
1177/*
1081 * Parse an ip[:port] list into an addr array. Use the default 1178 * Parse an ip[:port] list into an addr array. Use the default
1082 * monitor port if a port isn't specified. 1179 * monitor port if a port isn't specified.
1083 */ 1180 */
@@ -1085,15 +1182,13 @@ int ceph_parse_ips(const char *c, const char *end,
1085 struct ceph_entity_addr *addr, 1182 struct ceph_entity_addr *addr,
1086 int max_count, int *count) 1183 int max_count, int *count)
1087{ 1184{
1088 int i; 1185 int i, ret = -EINVAL;
1089 const char *p = c; 1186 const char *p = c;
1090 1187
1091 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1188 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1092 for (i = 0; i < max_count; i++) { 1189 for (i = 0; i < max_count; i++) {
1093 const char *ipend; 1190 const char *ipend;
1094 struct sockaddr_storage *ss = &addr[i].in_addr; 1191 struct sockaddr_storage *ss = &addr[i].in_addr;
1095 struct sockaddr_in *in4 = (void *)ss;
1096 struct sockaddr_in6 *in6 = (void *)ss;
1097 int port; 1192 int port;
1098 char delim = ','; 1193 char delim = ',';
1099 1194
@@ -1102,15 +1197,11 @@ int ceph_parse_ips(const char *c, const char *end,
1102 p++; 1197 p++;
1103 } 1198 }
1104 1199
1105 memset(ss, 0, sizeof(*ss)); 1200 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1106 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, 1201 if (ret)
1107 delim, &ipend))
1108 ss->ss_family = AF_INET;
1109 else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1110 delim, &ipend))
1111 ss->ss_family = AF_INET6;
1112 else
1113 goto bad; 1202 goto bad;
1203 ret = -EINVAL;
1204
1114 p = ipend; 1205 p = ipend;
1115 1206
1116 if (delim == ']') { 1207 if (delim == ']') {
@@ -1155,7 +1246,7 @@ int ceph_parse_ips(const char *c, const char *end,
1155 1246
1156bad: 1247bad:
1157 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1248 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1158 return -EINVAL; 1249 return ret;
1159} 1250}
1160EXPORT_SYMBOL(ceph_parse_ips); 1251EXPORT_SYMBOL(ceph_parse_ips);
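A minimal usage sketch for the exported parser; the address string, array size and caller context are invented for illustration:

	struct ceph_entity_addr addrs[2];
	int count;
	const char *s = "mon1.example.com:6789,10.0.0.2";
	int err = ceph_parse_ips(s, s + strlen(s), addrs, 2, &count);
	/* on success (err == 0) count holds the number of parsed entries;
	 * hostnames are resolved via the DNS upcall only when
	 * CONFIG_CEPH_LIB_USE_DNS_RESOLVER is enabled */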
1161 1252
@@ -2281,7 +2372,8 @@ EXPORT_SYMBOL(ceph_con_keepalive);
2281 * construct a new message with given type, size 2372 * construct a new message with given type, size
2282 * the new msg has a ref count of 1. 2373 * the new msg has a ref count of 1.
2283 */ 2374 */
2284struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) 2375struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2376 bool can_fail)
2285{ 2377{
2286 struct ceph_msg *m; 2378 struct ceph_msg *m;
2287 2379
@@ -2333,7 +2425,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2333 m->front.iov_base = kmalloc(front_len, flags); 2425 m->front.iov_base = kmalloc(front_len, flags);
2334 } 2426 }
2335 if (m->front.iov_base == NULL) { 2427 if (m->front.iov_base == NULL) {
2336 pr_err("msg_new can't allocate %d bytes\n", 2428 dout("ceph_msg_new can't allocate %d bytes\n",
2337 front_len); 2429 front_len);
2338 goto out2; 2430 goto out2;
2339 } 2431 }
@@ -2348,7 +2440,14 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2348out2: 2440out2:
2349 ceph_msg_put(m); 2441 ceph_msg_put(m);
2350out: 2442out:
2351 pr_err("msg_new can't create type %d front %d\n", type, front_len); 2443 if (!can_fail) {
2444 pr_err("msg_new can't create type %d front %d\n", type,
2445 front_len);
2446 WARN_ON(1);
2447 } else {
2448 dout("msg_new can't create type %d front %d\n", type,
2449 front_len);
2450 }
2352 return NULL; 2451 return NULL;
2353} 2452}
2354EXPORT_SYMBOL(ceph_msg_new); 2453EXPORT_SYMBOL(ceph_msg_new);
@@ -2398,7 +2497,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2398 } 2497 }
2399 if (!msg) { 2498 if (!msg) {
2400 *skip = 0; 2499 *skip = 0;
2401 msg = ceph_msg_new(type, front_len, GFP_NOFS); 2500 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
2402 if (!msg) { 2501 if (!msg) {
2403 pr_err("unable to allocate msg type %d len %d\n", 2502 pr_err("unable to allocate msg type %d len %d\n",
2404 type, front_len); 2503 type, front_len);
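
As a reading aid for the new can_fail flag threaded through ceph_msg_new() above: callers that can tolerate a NULL return (a mempool refill, a request path that reports -ENOMEM upward) pass true and get only a dout() on failure, while callers for which failure is a bug pass false and get the pr_err() plus WARN_ON(1). A minimal sketch of a tolerant caller, with the helper name hypothetical:

#include <linux/ceph/ceph_fs.h>	/* CEPH_MSG_STATFS_REPLY */
#include <linux/ceph/messenger.h>	/* ceph_msg_new() */
#include <linux/gfp.h>

/* Hypothetical caller: it copes with NULL itself, so can_fail = true
 * keeps the allocation failure quiet (dout() only, no WARN_ON). */
static struct ceph_msg *alloc_statfs_reply(int front_len)
{
	return ceph_msg_new(CEPH_MSG_STATFS_REPLY, front_len, GFP_NOFS, true);
}
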
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index cbe31fa45508..0b62deae42bd 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -116,14 +116,12 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
116 */ 116 */
117static void __close_session(struct ceph_mon_client *monc) 117static void __close_session(struct ceph_mon_client *monc)
118{ 118{
119 if (monc->con) { 119 dout("__close_session closing mon%d\n", monc->cur_mon);
120 dout("__close_session closing mon%d\n", monc->cur_mon); 120 ceph_con_revoke(monc->con, monc->m_auth);
121 ceph_con_revoke(monc->con, monc->m_auth); 121 ceph_con_close(monc->con);
122 ceph_con_close(monc->con); 122 monc->cur_mon = -1;
123 monc->cur_mon = -1; 123 monc->pending_auth = 0;
124 monc->pending_auth = 0; 124 ceph_auth_reset(monc->auth);
125 ceph_auth_reset(monc->auth);
126 }
127} 125}
128 126
129/* 127/*
@@ -302,15 +300,6 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
302 */ 300 */
303int ceph_monc_open_session(struct ceph_mon_client *monc) 301int ceph_monc_open_session(struct ceph_mon_client *monc)
304{ 302{
305 if (!monc->con) {
306 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
307 if (!monc->con)
308 return -ENOMEM;
309 ceph_con_init(monc->client->msgr, monc->con);
310 monc->con->private = monc;
311 monc->con->ops = &mon_con_ops;
312 }
313
314 mutex_lock(&monc->mutex); 303 mutex_lock(&monc->mutex);
315 __open_session(monc); 304 __open_session(monc);
316 __schedule_delayed(monc); 305 __schedule_delayed(monc);
@@ -528,10 +517,12 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
528 init_completion(&req->completion); 517 init_completion(&req->completion);
529 518
530 err = -ENOMEM; 519 err = -ENOMEM;
531 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS); 520 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
521 true);
532 if (!req->request) 522 if (!req->request)
533 goto out; 523 goto out;
534 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS); 524 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
525 true);
535 if (!req->reply) 526 if (!req->reply)
536 goto out; 527 goto out;
537 528
@@ -626,10 +617,12 @@ int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
626 init_completion(&req->completion); 617 init_completion(&req->completion);
627 618
628 err = -ENOMEM; 619 err = -ENOMEM;
629 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS); 620 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
621 true);
630 if (!req->request) 622 if (!req->request)
631 goto out; 623 goto out;
632 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS); 624 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
625 true);
633 if (!req->reply) 626 if (!req->reply)
634 goto out; 627 goto out;
635 628
@@ -755,13 +748,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
755 if (err) 748 if (err)
756 goto out; 749 goto out;
757 750
758 monc->con = NULL; 751 /* connection */
752 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
753 if (!monc->con)
754 goto out_monmap;
755 ceph_con_init(monc->client->msgr, monc->con);
756 monc->con->private = monc;
757 monc->con->ops = &mon_con_ops;
759 758
760 /* authentication */ 759 /* authentication */
761 monc->auth = ceph_auth_init(cl->options->name, 760 monc->auth = ceph_auth_init(cl->options->name,
762 cl->options->key); 761 cl->options->key);
763 if (IS_ERR(monc->auth)) 762 if (IS_ERR(monc->auth)) {
764 return PTR_ERR(monc->auth); 763 err = PTR_ERR(monc->auth);
764 goto out_con;
765 }
765 monc->auth->want_keys = 766 monc->auth->want_keys =
766 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | 767 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
767 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; 768 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
@@ -770,19 +771,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
770 err = -ENOMEM; 771 err = -ENOMEM;
771 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, 772 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
772 sizeof(struct ceph_mon_subscribe_ack), 773 sizeof(struct ceph_mon_subscribe_ack),
773 GFP_NOFS); 774 GFP_NOFS, true);
774 if (!monc->m_subscribe_ack) 775 if (!monc->m_subscribe_ack)
775 goto out_monmap; 776 goto out_auth;
776 777
777 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS); 778 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
779 true);
778 if (!monc->m_subscribe) 780 if (!monc->m_subscribe)
779 goto out_subscribe_ack; 781 goto out_subscribe_ack;
780 782
781 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS); 783 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
784 true);
782 if (!monc->m_auth_reply) 785 if (!monc->m_auth_reply)
783 goto out_subscribe; 786 goto out_subscribe;
784 787
785 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS); 788 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
786 monc->pending_auth = 0; 789 monc->pending_auth = 0;
787 if (!monc->m_auth) 790 if (!monc->m_auth)
788 goto out_auth_reply; 791 goto out_auth_reply;
@@ -808,6 +811,10 @@ out_subscribe:
808 ceph_msg_put(monc->m_subscribe); 811 ceph_msg_put(monc->m_subscribe);
809out_subscribe_ack: 812out_subscribe_ack:
810 ceph_msg_put(monc->m_subscribe_ack); 813 ceph_msg_put(monc->m_subscribe_ack);
814out_auth:
815 ceph_auth_destroy(monc->auth);
816out_con:
817 monc->con->ops->put(monc->con);
811out_monmap: 818out_monmap:
812 kfree(monc->monmap); 819 kfree(monc->monmap);
813out: 820out:
@@ -822,11 +829,11 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
822 829
823 mutex_lock(&monc->mutex); 830 mutex_lock(&monc->mutex);
824 __close_session(monc); 831 __close_session(monc);
825 if (monc->con) { 832
826 monc->con->private = NULL; 833 monc->con->private = NULL;
827 monc->con->ops->put(monc->con); 834 monc->con->ops->put(monc->con);
828 monc->con = NULL; 835 monc->con = NULL;
829 } 836
830 mutex_unlock(&monc->mutex); 837 mutex_unlock(&monc->mutex);
831 838
832 ceph_auth_destroy(monc->auth); 839 ceph_auth_destroy(monc->auth);
@@ -973,7 +980,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
973 case CEPH_MSG_MON_MAP: 980 case CEPH_MSG_MON_MAP:
974 case CEPH_MSG_MDS_MAP: 981 case CEPH_MSG_MDS_MAP:
975 case CEPH_MSG_OSD_MAP: 982 case CEPH_MSG_OSD_MAP:
976 m = ceph_msg_new(type, front_len, GFP_NOFS); 983 m = ceph_msg_new(type, front_len, GFP_NOFS, false);
977 break; 984 break;
978 } 985 }
979 986
@@ -1000,7 +1007,7 @@ static void mon_fault(struct ceph_connection *con)
1000 if (!con->private) 1007 if (!con->private)
1001 goto out; 1008 goto out;
1002 1009
1003 if (monc->con && !monc->hunting) 1010 if (!monc->hunting)
1004 pr_info("mon%d %s session lost, " 1011 pr_info("mon%d %s session lost, "
1005 "hunting for new mon\n", monc->cur_mon, 1012 "hunting for new mon\n", monc->cur_mon,
1006 ceph_pr_addr(&monc->con->peer_addr.in_addr)); 1013 ceph_pr_addr(&monc->con->peer_addr.in_addr));
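
The reworked ceph_monc_init() above is the standard kernel unwind ladder: each acquisition gets a label that releases everything acquired before it, and each failure jumps to the deepest label that applies (out_con puts the connection, out_auth additionally destroys the auth handle, and so on). A self-contained sketch of the same idiom, all names hypothetical:

#include <linux/errno.h>
#include <linux/slab.h>

struct three {
	int *a, *b, *c;
};

/* Hypothetical three-step init showing the goto-unwind ladder. */
static int init_three(struct three *t)
{
	t->a = kmalloc(sizeof(*t->a), GFP_KERNEL);
	if (!t->a)
		goto out;
	t->b = kmalloc(sizeof(*t->b), GFP_KERNEL);
	if (!t->b)
		goto out_a;
	t->c = kmalloc(sizeof(*t->c), GFP_KERNEL);
	if (!t->c)
		goto out_b;
	return 0;

out_b:			/* unwind strictly in reverse order of acquisition */
	kfree(t->b);
out_a:
	kfree(t->a);
out:
	return -ENOMEM;
}
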
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 1f4cb30a42c5..11d5f4196a73 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
12 struct ceph_msgpool *pool = arg; 12 struct ceph_msgpool *pool = arg;
13 struct ceph_msg *msg; 13 struct ceph_msg *msg;
14 14
15 msg = ceph_msg_new(0, pool->front_len, gfp_mask); 15 msg = ceph_msg_new(0, pool->front_len, gfp_mask, true);
16 if (!msg) { 16 if (!msg) {
17 dout("msgpool_alloc %s failed\n", pool->name); 17 dout("msgpool_alloc %s failed\n", pool->name);
18 } else { 18 } else {
@@ -61,7 +61,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
61 WARN_ON(1); 61 WARN_ON(1);
62 62
63 /* try to alloc a fresh message */ 63 /* try to alloc a fresh message */
64 return ceph_msg_new(0, front_len, GFP_NOFS); 64 return ceph_msg_new(0, front_len, GFP_NOFS, false);
65 } 65 }
66 66
67 msg = mempool_alloc(pool->pool, GFP_NOFS); 67 msg = mempool_alloc(pool->pool, GFP_NOFS);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 88ad8a2501b5..733e46008b89 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -227,7 +227,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
227 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); 227 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
228 else 228 else
229 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, 229 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
230 OSD_OPREPLY_FRONT_LEN, gfp_flags); 230 OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
231 if (!msg) { 231 if (!msg) {
232 ceph_osdc_put_request(req); 232 ceph_osdc_put_request(req);
233 return NULL; 233 return NULL;
@@ -250,7 +250,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
250 if (use_mempool) 250 if (use_mempool)
251 msg = ceph_msgpool_get(&osdc->msgpool_op, 0); 251 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
252 else 252 else
253 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags); 253 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
254 if (!msg) { 254 if (!msg) {
255 ceph_osdc_put_request(req); 255 ceph_osdc_put_request(req);
256 return NULL; 256 return NULL;
@@ -943,7 +943,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
943 * Caller should hold map_sem for read and request_mutex. 943 * Caller should hold map_sem for read and request_mutex.
944 */ 944 */
945static int __map_request(struct ceph_osd_client *osdc, 945static int __map_request(struct ceph_osd_client *osdc,
946 struct ceph_osd_request *req) 946 struct ceph_osd_request *req, int force_resend)
947{ 947{
948 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; 948 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
949 struct ceph_pg pgid; 949 struct ceph_pg pgid;
@@ -967,7 +967,8 @@ static int __map_request(struct ceph_osd_client *osdc,
967 num = err; 967 num = err;
968 } 968 }
969 969
970 if ((req->r_osd && req->r_osd->o_osd == o && 970 if ((!force_resend &&
971 req->r_osd && req->r_osd->o_osd == o &&
971 req->r_sent >= req->r_osd->o_incarnation && 972 req->r_sent >= req->r_osd->o_incarnation &&
972 req->r_num_pg_osds == num && 973 req->r_num_pg_osds == num &&
973 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || 974 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
@@ -1289,18 +1290,18 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
1289 * 1290 *
1290 * Caller should hold map_sem for read and request_mutex. 1291 * Caller should hold map_sem for read and request_mutex.
1291 */ 1292 */
1292static void kick_requests(struct ceph_osd_client *osdc) 1293static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1293{ 1294{
1294 struct ceph_osd_request *req, *nreq; 1295 struct ceph_osd_request *req, *nreq;
1295 struct rb_node *p; 1296 struct rb_node *p;
1296 int needmap = 0; 1297 int needmap = 0;
1297 int err; 1298 int err;
1298 1299
1299 dout("kick_requests\n"); 1300 dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
1300 mutex_lock(&osdc->request_mutex); 1301 mutex_lock(&osdc->request_mutex);
1301 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { 1302 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
1302 req = rb_entry(p, struct ceph_osd_request, r_node); 1303 req = rb_entry(p, struct ceph_osd_request, r_node);
1303 err = __map_request(osdc, req); 1304 err = __map_request(osdc, req, force_resend);
1304 if (err < 0) 1305 if (err < 0)
1305 continue; /* error */ 1306 continue; /* error */
1306 if (req->r_osd == NULL) { 1307 if (req->r_osd == NULL) {
@@ -1318,7 +1319,7 @@ static void kick_requests(struct ceph_osd_client *osdc)
1318 r_linger_item) { 1319 r_linger_item) {
1319 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); 1320 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1320 1321
1321 err = __map_request(osdc, req); 1322 err = __map_request(osdc, req, force_resend);
1322 if (err == 0) 1323 if (err == 0)
1323 continue; /* no change and no osd was specified */ 1324 continue; /* no change and no osd was specified */
1324 if (err < 0) 1325 if (err < 0)
@@ -1395,7 +1396,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1395 ceph_osdmap_destroy(osdc->osdmap); 1396 ceph_osdmap_destroy(osdc->osdmap);
1396 osdc->osdmap = newmap; 1397 osdc->osdmap = newmap;
1397 } 1398 }
1398 kick_requests(osdc); 1399 kick_requests(osdc, 0);
1399 reset_changed_osds(osdc); 1400 reset_changed_osds(osdc);
1400 } else { 1401 } else {
1401 dout("ignoring incremental map %u len %d\n", 1402 dout("ignoring incremental map %u len %d\n",
@@ -1423,6 +1424,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1423 "older than our %u\n", epoch, maplen, 1424 "older than our %u\n", epoch, maplen,
1424 osdc->osdmap->epoch); 1425 osdc->osdmap->epoch);
1425 } else { 1426 } else {
1427 int skipped_map = 0;
1428
1426 dout("taking full map %u len %d\n", epoch, maplen); 1429 dout("taking full map %u len %d\n", epoch, maplen);
1427 newmap = osdmap_decode(&p, p+maplen); 1430 newmap = osdmap_decode(&p, p+maplen);
1428 if (IS_ERR(newmap)) { 1431 if (IS_ERR(newmap)) {
@@ -1432,9 +1435,12 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1432 BUG_ON(!newmap); 1435 BUG_ON(!newmap);
1433 oldmap = osdc->osdmap; 1436 oldmap = osdc->osdmap;
1434 osdc->osdmap = newmap; 1437 osdc->osdmap = newmap;
1435 if (oldmap) 1438 if (oldmap) {
1439 if (oldmap->epoch + 1 < newmap->epoch)
1440 skipped_map = 1;
1436 ceph_osdmap_destroy(oldmap); 1441 ceph_osdmap_destroy(oldmap);
1437 kick_requests(osdc); 1442 }
1443 kick_requests(osdc, skipped_map);
1438 } 1444 }
1439 p += maplen; 1445 p += maplen;
1440 nr_maps--; 1446 nr_maps--;
@@ -1707,7 +1713,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1707 * the request still hasn't been touched yet. 1713 * the request still hasn't been touched yet.
1708 */ 1714 */
1709 if (req->r_sent == 0) { 1715 if (req->r_sent == 0) {
1710 rc = __map_request(osdc, req); 1716 rc = __map_request(osdc, req, 0);
1711 if (rc < 0) { 1717 if (rc < 0) {
1712 if (nofail) { 1718 if (nofail) {
1713 dout("osdc_start_request failed map, " 1719 dout("osdc_start_request failed map, "
@@ -2032,7 +2038,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2032 if (front > req->r_reply->front.iov_len) { 2038 if (front > req->r_reply->front.iov_len) {
2033 pr_warning("get_reply front %d > preallocated %d\n", 2039 pr_warning("get_reply front %d > preallocated %d\n",
2034 front, (int)req->r_reply->front.iov_len); 2040 front, (int)req->r_reply->front.iov_len);
2035 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS); 2041 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
2036 if (!m) 2042 if (!m)
2037 goto out; 2043 goto out;
2038 ceph_msg_put(req->r_reply); 2044 ceph_msg_put(req->r_reply);
@@ -2080,7 +2086,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2080 switch (type) { 2086 switch (type) {
2081 case CEPH_MSG_OSD_MAP: 2087 case CEPH_MSG_OSD_MAP:
2082 case CEPH_MSG_WATCH_NOTIFY: 2088 case CEPH_MSG_WATCH_NOTIFY:
2083 return ceph_msg_new(type, front, GFP_NOFS); 2089 return ceph_msg_new(type, front, GFP_NOFS, false);
2084 case CEPH_MSG_OSD_OPREPLY: 2090 case CEPH_MSG_OSD_OPREPLY:
2085 return get_reply(con, hdr, skip); 2091 return get_reply(con, hdr, skip);
2086 default: 2092 default:
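
The whole force-resend change above pivots on one predicate: when a full map is decoded whose epoch is more than one ahead of the map it replaces, at least one incremental map was never applied, so every request mapping is suspect and kick_requests() must resend unconditionally. Restated as a standalone helper (illustrative only):

#include <linux/types.h>

/* Mirrors the skipped_map test in ceph_osdc_handle_map() above. */
static inline int osdmap_epochs_skipped(u32 old_epoch, u32 new_epoch)
{
	/* old + 1 == new means every epoch in between was seen;
	 * anything larger means a gap, hence force_resend. */
	return old_epoch + 1 < new_epoch;
}
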
diff --git a/net/compat.c b/net/compat.c
index c578d9382e19..6def90e0a112 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -22,6 +22,7 @@
22#include <linux/filter.h> 22#include <linux/filter.h>
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/security.h> 24#include <linux/security.h>
25#include <linux/export.h>
25 26
26#include <net/scm.h> 27#include <net/scm.h>
27#include <net/sock.h> 28#include <net/sock.h>
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 18ac112ea7ae..68bbf9f65cb0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,15 +324,15 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
324 /* Copy paged appendix. Hmm... why does this look so complicated? */ 324 /* Copy paged appendix. Hmm... why does this look so complicated? */
325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
326 int end; 326 int end;
327 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
327 328
328 WARN_ON(start > offset + len); 329 WARN_ON(start > offset + len);
329 330
330 end = start + skb_shinfo(skb)->frags[i].size; 331 end = start + skb_frag_size(frag);
331 if ((copy = end - offset) > 0) { 332 if ((copy = end - offset) > 0) {
332 int err; 333 int err;
333 u8 *vaddr; 334 u8 *vaddr;
334 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 335 struct page *page = skb_frag_page(frag);
335 struct page *page = frag->page;
336 336
337 if (copy > len) 337 if (copy > len)
338 copy = len; 338 copy = len;
@@ -410,15 +410,15 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
410 /* Copy paged appendix. Hmm... why does this look so complicated? */ 410 /* Copy paged appendix. Hmm... why does this look so complicated? */
411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
412 int end; 412 int end;
413 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
413 414
414 WARN_ON(start > offset + len); 415 WARN_ON(start > offset + len);
415 416
416 end = start + skb_shinfo(skb)->frags[i].size; 417 end = start + skb_frag_size(frag);
417 if ((copy = end - offset) > 0) { 418 if ((copy = end - offset) > 0) {
418 int err; 419 int err;
419 u8 *vaddr; 420 u8 *vaddr;
420 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 421 struct page *page = skb_frag_page(frag);
421 struct page *page = frag->page;
422 422
423 if (copy > len) 423 if (copy > len)
424 copy = len; 424 copy = len;
@@ -500,15 +500,15 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
500 /* Copy paged appendix. Hmm... why does this look so complicated? */ 500 /* Copy paged appendix. Hmm... why does this look so complicated? */
501 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 501 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
502 int end; 502 int end;
503 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
503 504
504 WARN_ON(start > offset + len); 505 WARN_ON(start > offset + len);
505 506
506 end = start + skb_shinfo(skb)->frags[i].size; 507 end = start + skb_frag_size(frag);
507 if ((copy = end - offset) > 0) { 508 if ((copy = end - offset) > 0) {
508 int err; 509 int err;
509 u8 *vaddr; 510 u8 *vaddr;
510 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 511 struct page *page = skb_frag_page(frag);
511 struct page *page = frag->page;
512 512
513 if (copy > len) 513 if (copy > len)
514 copy = len; 514 copy = len;
@@ -585,16 +585,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
585 585
586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
587 int end; 587 int end;
588 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
588 589
589 WARN_ON(start > offset + len); 590 WARN_ON(start > offset + len);
590 591
591 end = start + skb_shinfo(skb)->frags[i].size; 592 end = start + skb_frag_size(frag);
592 if ((copy = end - offset) > 0) { 593 if ((copy = end - offset) > 0) {
593 __wsum csum2; 594 __wsum csum2;
594 int err = 0; 595 int err = 0;
595 u8 *vaddr; 596 u8 *vaddr;
596 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 597 struct page *page = skb_frag_page(frag);
597 struct page *page = frag->page;
598 598
599 if (copy > len) 599 if (copy > len)
600 copy = len; 600 copy = len;
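
All four datagram.c hunks above are one mechanical conversion: direct field pokes (frag->page, frag->size) become the accessor API (skb_frag_page(), skb_frag_size()), which lets the skb_frag_t layout change underneath. A sketch of the resulting walk, assuming only the accessors introduced by this series:

#include <linux/skbuff.h>

/* Illustrative accessor-based frag walk; the loop body is a stub. */
static void walk_frags(const struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		unsigned int size = skb_frag_size(frag);

		/* payload lives at page + frag->page_offset, 'size' bytes */
		(void)page;
		(void)size;
	}
}
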
diff --git a/net/core/dev.c b/net/core/dev.c
index b10ff0a71855..6ba50a1e404c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,10 @@
133#include <linux/pci.h> 133#include <linux/pci.h>
134#include <linux/inetdevice.h> 134#include <linux/inetdevice.h>
135#include <linux/cpu_rmap.h> 135#include <linux/cpu_rmap.h>
136#include <linux/if_tunnel.h>
137#include <linux/if_pppox.h>
138#include <linux/ppp_defs.h>
139#include <linux/net_tstamp.h>
136 140
137#include "net-sysfs.h" 141#include "net-sysfs.h"
138 142
@@ -1474,6 +1478,57 @@ static inline void net_timestamp_check(struct sk_buff *skb)
1474 __net_timestamp(skb); 1478 __net_timestamp(skb);
1475} 1479}
1476 1480
1481static int net_hwtstamp_validate(struct ifreq *ifr)
1482{
1483 struct hwtstamp_config cfg;
1484 enum hwtstamp_tx_types tx_type;
1485 enum hwtstamp_rx_filters rx_filter;
1486 int tx_type_valid = 0;
1487 int rx_filter_valid = 0;
1488
1489 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1490 return -EFAULT;
1491
1492 if (cfg.flags) /* reserved for future extensions */
1493 return -EINVAL;
1494
1495 tx_type = cfg.tx_type;
1496 rx_filter = cfg.rx_filter;
1497
1498 switch (tx_type) {
1499 case HWTSTAMP_TX_OFF:
1500 case HWTSTAMP_TX_ON:
1501 case HWTSTAMP_TX_ONESTEP_SYNC:
1502 tx_type_valid = 1;
1503 break;
1504 }
1505
1506 switch (rx_filter) {
1507 case HWTSTAMP_FILTER_NONE:
1508 case HWTSTAMP_FILTER_ALL:
1509 case HWTSTAMP_FILTER_SOME:
1510 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1513 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1514 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1515 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1516 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1518 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1519 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1520 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1521 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1522 rx_filter_valid = 1;
1523 break;
1524 }
1525
1526 if (!tx_type_valid || !rx_filter_valid)
1527 return -ERANGE;
1528
1529 return 0;
1530}
1531
1477static inline bool is_skb_forwardable(struct net_device *dev, 1532static inline bool is_skb_forwardable(struct net_device *dev,
1478 struct sk_buff *skb) 1533 struct sk_buff *skb)
1479{ 1534{
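
net_hwtstamp_validate() above screens SIOCSHWTSTAMP requests in the core so drivers never see reserved flags or unknown tx/rx enumerators. For orientation, a hedged userspace sketch of the request being validated; struct hwtstamp_config and the enumerators are the standard linux/net_tstamp.h ABI, while the socket and interface name are placeholders:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Returns 0 on success; on a config the kernel rejects, ioctl() fails
 * with errno set (EINVAL for nonzero flags, ERANGE for bad enums). */
static int enable_hw_timestamps(int sock_fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = 0;			/* reserved, must be zero */
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}
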
@@ -1955,9 +2010,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1955#ifdef CONFIG_HIGHMEM 2010#ifdef CONFIG_HIGHMEM
1956 int i; 2011 int i;
1957 if (!(dev->features & NETIF_F_HIGHDMA)) { 2012 if (!(dev->features & NETIF_F_HIGHDMA)) {
1958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2013 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1959 if (PageHighMem(skb_shinfo(skb)->frags[i].page)) 2014 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2015 if (PageHighMem(skb_frag_page(frag)))
1960 return 1; 2016 return 1;
2017 }
1961 } 2018 }
1962 2019
1963 if (PCI_DMA_BUS_IS_PHYS) { 2020 if (PCI_DMA_BUS_IS_PHYS) {
@@ -1966,7 +2023,8 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1966 if (!pdev) 2023 if (!pdev)
1967 return 0; 2024 return 0;
1968 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1969 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); 2026 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2027 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
1970 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2028 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1971 return 1; 2029 return 1;
1972 } 2030 }
@@ -2527,25 +2585,31 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2527 2585
2528/* 2586/*
2529 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses 2587 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2530 * and src/dst port numbers. Returns a non-zero hash number on success 2588 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2531 * and 0 on failure. 2589 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2590 * if hash is a canonical 4-tuple hash over transport ports.
2532 */ 2591 */
2533__u32 __skb_get_rxhash(struct sk_buff *skb) 2592void __skb_get_rxhash(struct sk_buff *skb)
2534{ 2593{
2535 int nhoff, hash = 0, poff; 2594 int nhoff, hash = 0, poff;
2536 const struct ipv6hdr *ip6; 2595 const struct ipv6hdr *ip6;
2537 const struct iphdr *ip; 2596 const struct iphdr *ip;
2597 const struct vlan_hdr *vlan;
2538 u8 ip_proto; 2598 u8 ip_proto;
2539 u32 addr1, addr2, ihl; 2599 u32 addr1, addr2;
2600 u16 proto;
2540 union { 2601 union {
2541 u32 v32; 2602 u32 v32;
2542 u16 v16[2]; 2603 u16 v16[2];
2543 } ports; 2604 } ports;
2544 2605
2545 nhoff = skb_network_offset(skb); 2606 nhoff = skb_network_offset(skb);
2607 proto = skb->protocol;
2546 2608
2547 switch (skb->protocol) { 2609again:
2610 switch (proto) {
2548 case __constant_htons(ETH_P_IP): 2611 case __constant_htons(ETH_P_IP):
2612ip:
2549 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2613 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2550 goto done; 2614 goto done;
2551 2615
@@ -2556,9 +2620,10 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2556 ip_proto = ip->protocol; 2620 ip_proto = ip->protocol;
2557 addr1 = (__force u32) ip->saddr; 2621 addr1 = (__force u32) ip->saddr;
2558 addr2 = (__force u32) ip->daddr; 2622 addr2 = (__force u32) ip->daddr;
2559 ihl = ip->ihl; 2623 nhoff += ip->ihl * 4;
2560 break; 2624 break;
2561 case __constant_htons(ETH_P_IPV6): 2625 case __constant_htons(ETH_P_IPV6):
2626ipv6:
2562 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2627 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2563 goto done; 2628 goto done;
2564 2629
@@ -2566,20 +2631,71 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2566 ip_proto = ip6->nexthdr; 2631 ip_proto = ip6->nexthdr;
2567 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2632 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2568 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2633 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2569 ihl = (40 >> 2); 2634 nhoff += 40;
2570 break; 2635 break;
2636 case __constant_htons(ETH_P_8021Q):
2637 if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
2638 goto done;
2639 vlan = (const struct vlan_hdr *) (skb->data + nhoff);
2640 proto = vlan->h_vlan_encapsulated_proto;
2641 nhoff += sizeof(*vlan);
2642 goto again;
2643 case __constant_htons(ETH_P_PPP_SES):
2644 if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
2645 goto done;
2646 proto = *((__be16 *) (skb->data + nhoff +
2647 sizeof(struct pppoe_hdr)));
2648 nhoff += PPPOE_SES_HLEN;
2649 switch (proto) {
2650 case __constant_htons(PPP_IP):
2651 goto ip;
2652 case __constant_htons(PPP_IPV6):
2653 goto ipv6;
2654 default:
2655 goto done;
2656 }
2571 default: 2657 default:
2572 goto done; 2658 goto done;
2573 } 2659 }
2574 2660
2661 switch (ip_proto) {
2662 case IPPROTO_GRE:
2663 if (pskb_may_pull(skb, nhoff + 16)) {
2664 u8 *h = skb->data + nhoff;
2665 __be16 flags = *(__be16 *)h;
2666
2667 /*
2668 * Only look inside GRE if version zero and no
2669 * routing
2670 */
2671 if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
2672 proto = *(__be16 *)(h + 2);
2673 nhoff += 4;
2674 if (flags & GRE_CSUM)
2675 nhoff += 4;
2676 if (flags & GRE_KEY)
2677 nhoff += 4;
2678 if (flags & GRE_SEQ)
2679 nhoff += 4;
2680 goto again;
2681 }
2682 }
2683 break;
2684 case IPPROTO_IPIP:
2685 goto again;
2686 default:
2687 break;
2688 }
2689
2575 ports.v32 = 0; 2690 ports.v32 = 0;
2576 poff = proto_ports_offset(ip_proto); 2691 poff = proto_ports_offset(ip_proto);
2577 if (poff >= 0) { 2692 if (poff >= 0) {
2578 nhoff += ihl * 4 + poff; 2693 nhoff += poff;
2579 if (pskb_may_pull(skb, nhoff + 4)) { 2694 if (pskb_may_pull(skb, nhoff + 4)) {
2580 ports.v32 = * (__force u32 *) (skb->data + nhoff); 2695 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2581 if (ports.v16[1] < ports.v16[0]) 2696 if (ports.v16[1] < ports.v16[0])
2582 swap(ports.v16[0], ports.v16[1]); 2697 swap(ports.v16[0], ports.v16[1]);
2698 skb->l4_rxhash = 1;
2583 } 2699 }
2584 } 2700 }
2585 2701
@@ -2592,7 +2708,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2592 hash = 1; 2708 hash = 1;
2593 2709
2594done: 2710done:
2595 return hash; 2711 skb->rxhash = hash;
2596} 2712}
2597EXPORT_SYMBOL(__skb_get_rxhash); 2713EXPORT_SYMBOL(__skb_get_rxhash);
2598 2714
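
With __skb_get_rxhash() now returning void and storing its result in the skb itself, callers compute lazily and then read the fields. A hedged sketch of the caller-side wrapper this implies (the real tree keeps an equivalent inline in skbuff.h; this copy is illustrative):

#include <linux/skbuff.h>

/* Lazy rxhash read under the new void-return contract. */
static inline __u32 rxhash_of(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);	/* sets skb->rxhash, maybe l4_rxhash */
	return skb->rxhash;		/* 0 still means "no valid hash" */
}
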
@@ -2606,10 +2722,7 @@ static struct rps_dev_flow *
2606set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2722set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2607 struct rps_dev_flow *rflow, u16 next_cpu) 2723 struct rps_dev_flow *rflow, u16 next_cpu)
2608{ 2724{
2609 u16 tcpu; 2725 if (next_cpu != RPS_NO_CPU) {
2610
2611 tcpu = rflow->cpu = next_cpu;
2612 if (tcpu != RPS_NO_CPU) {
2613#ifdef CONFIG_RFS_ACCEL 2726#ifdef CONFIG_RFS_ACCEL
2614 struct netdev_rx_queue *rxqueue; 2727 struct netdev_rx_queue *rxqueue;
2615 struct rps_dev_flow_table *flow_table; 2728 struct rps_dev_flow_table *flow_table;
@@ -2637,16 +2750,16 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2637 goto out; 2750 goto out;
2638 old_rflow = rflow; 2751 old_rflow = rflow;
2639 rflow = &flow_table->flows[flow_id]; 2752 rflow = &flow_table->flows[flow_id];
2640 rflow->cpu = next_cpu;
2641 rflow->filter = rc; 2753 rflow->filter = rc;
2642 if (old_rflow->filter == rflow->filter) 2754 if (old_rflow->filter == rflow->filter)
2643 old_rflow->filter = RPS_NO_FILTER; 2755 old_rflow->filter = RPS_NO_FILTER;
2644 out: 2756 out:
2645#endif 2757#endif
2646 rflow->last_qtail = 2758 rflow->last_qtail =
2647 per_cpu(softnet_data, tcpu).input_queue_head; 2759 per_cpu(softnet_data, next_cpu).input_queue_head;
2648 } 2760 }
2649 2761
2762 rflow->cpu = next_cpu;
2650 return rflow; 2763 return rflow;
2651} 2764}
2652 2765
@@ -2681,13 +2794,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2681 map = rcu_dereference(rxqueue->rps_map); 2794 map = rcu_dereference(rxqueue->rps_map);
2682 if (map) { 2795 if (map) {
2683 if (map->len == 1 && 2796 if (map->len == 1 &&
2684 !rcu_dereference_raw(rxqueue->rps_flow_table)) { 2797 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2685 tcpu = map->cpus[0]; 2798 tcpu = map->cpus[0];
2686 if (cpu_online(tcpu)) 2799 if (cpu_online(tcpu))
2687 cpu = tcpu; 2800 cpu = tcpu;
2688 goto done; 2801 goto done;
2689 } 2802 }
2690 } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { 2803 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2691 goto done; 2804 goto done;
2692 } 2805 }
2693 2806
@@ -3102,8 +3215,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3102{ 3215{
3103 3216
3104 ASSERT_RTNL(); 3217 ASSERT_RTNL();
3105 rcu_assign_pointer(dev->rx_handler, NULL); 3218 RCU_INIT_POINTER(dev->rx_handler, NULL);
3106 rcu_assign_pointer(dev->rx_handler_data, NULL); 3219 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3107} 3220}
3108EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3221EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3109 3222
@@ -3171,6 +3284,17 @@ ncls:
3171#endif 3284#endif
3172 3285
3173 rx_handler = rcu_dereference(skb->dev->rx_handler); 3286 rx_handler = rcu_dereference(skb->dev->rx_handler);
3287 if (vlan_tx_tag_present(skb)) {
3288 if (pt_prev) {
3289 ret = deliver_skb(skb, pt_prev, orig_dev);
3290 pt_prev = NULL;
3291 }
3292 if (vlan_do_receive(&skb, !rx_handler))
3293 goto another_round;
3294 else if (unlikely(!skb))
3295 goto out;
3296 }
3297
3174 if (rx_handler) { 3298 if (rx_handler) {
3175 if (pt_prev) { 3299 if (pt_prev) {
3176 ret = deliver_skb(skb, pt_prev, orig_dev); 3300 ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3190,18 +3314,6 @@ ncls:
3190 } 3314 }
3191 } 3315 }
3192 3316
3193 if (vlan_tx_tag_present(skb)) {
3194 if (pt_prev) {
3195 ret = deliver_skb(skb, pt_prev, orig_dev);
3196 pt_prev = NULL;
3197 }
3198 if (vlan_do_receive(&skb)) {
3199 ret = __netif_receive_skb(skb);
3200 goto out;
3201 } else if (unlikely(!skb))
3202 goto out;
3203 }
3204
3205 /* deliver only exact match when indicated */ 3317 /* deliver only exact match when indicated */
3206 null_or_dev = deliver_exact ? skb->dev : NULL; 3318 null_or_dev = deliver_exact ? skb->dev : NULL;
3207 3319
@@ -3429,10 +3541,10 @@ pull:
3429 skb->data_len -= grow; 3541 skb->data_len -= grow;
3430 3542
3431 skb_shinfo(skb)->frags[0].page_offset += grow; 3543 skb_shinfo(skb)->frags[0].page_offset += grow;
3432 skb_shinfo(skb)->frags[0].size -= grow; 3544 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3433 3545
3434 if (unlikely(!skb_shinfo(skb)->frags[0].size)) { 3546 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3435 put_page(skb_shinfo(skb)->frags[0].page); 3547 skb_frag_unref(skb, 0);
3436 memmove(skb_shinfo(skb)->frags, 3548 memmove(skb_shinfo(skb)->frags,
3437 skb_shinfo(skb)->frags + 1, 3549 skb_shinfo(skb)->frags + 1,
3438 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3550 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -3496,11 +3608,10 @@ void skb_gro_reset_offset(struct sk_buff *skb)
3496 NAPI_GRO_CB(skb)->frag0_len = 0; 3608 NAPI_GRO_CB(skb)->frag0_len = 0;
3497 3609
3498 if (skb->mac_header == skb->tail && 3610 if (skb->mac_header == skb->tail &&
3499 !PageHighMem(skb_shinfo(skb)->frags[0].page)) { 3611 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3500 NAPI_GRO_CB(skb)->frag0 = 3612 NAPI_GRO_CB(skb)->frag0 =
3501 page_address(skb_shinfo(skb)->frags[0].page) + 3613 skb_frag_address(&skb_shinfo(skb)->frags[0]);
3502 skb_shinfo(skb)->frags[0].page_offset; 3614 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3503 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3504 } 3615 }
3505} 3616}
3506EXPORT_SYMBOL(skb_gro_reset_offset); 3617EXPORT_SYMBOL(skb_gro_reset_offset);
@@ -3982,6 +4093,60 @@ static int dev_ifconf(struct net *net, char __user *arg)
3982} 4093}
3983 4094
3984#ifdef CONFIG_PROC_FS 4095#ifdef CONFIG_PROC_FS
4096
4097#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
4098
4099struct dev_iter_state {
4100 struct seq_net_private p;
4101 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4102};
4103
4104#define get_bucket(x) ((x) >> BUCKET_SPACE)
4105#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4106#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4107
4108static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
4109{
4110 struct dev_iter_state *state = seq->private;
4111 struct net *net = seq_file_net(seq);
4112 struct net_device *dev;
4113 struct hlist_node *p;
4114 struct hlist_head *h;
4115 unsigned int count, bucket, offset;
4116
4117 bucket = get_bucket(state->pos);
4118 offset = get_offset(state->pos);
4119 h = &net->dev_name_head[bucket];
4120 count = 0;
4121 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4122 if (count++ == offset) {
4123 state->pos = set_bucket_offset(bucket, count);
4124 return dev;
4125 }
4126 }
4127
4128 return NULL;
4129}
4130
4131static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4132{
4133 struct dev_iter_state *state = seq->private;
4134 struct net_device *dev;
4135 unsigned int bucket;
4136
4137 bucket = get_bucket(state->pos);
4138 do {
4139 dev = dev_from_same_bucket(seq);
4140 if (dev)
4141 return dev;
4142
4143 bucket++;
4144 state->pos = set_bucket_offset(bucket, 0);
4145 } while (bucket < NETDEV_HASHENTRIES);
4146
4147 return NULL;
4148}
4149
3985/* 4150/*
3986 * This is invoked by the /proc filesystem handler to display a device 4151 * This is invoked by the /proc filesystem handler to display a device
3987 * in detail. 4152 * in detail.
@@ -3989,33 +4154,33 @@ static int dev_ifconf(struct net *net, char __user *arg)
3989void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4154void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3990 __acquires(RCU) 4155 __acquires(RCU)
3991{ 4156{
3992 struct net *net = seq_file_net(seq); 4157 struct dev_iter_state *state = seq->private;
3993 loff_t off;
3994 struct net_device *dev;
3995 4158
3996 rcu_read_lock(); 4159 rcu_read_lock();
3997 if (!*pos) 4160 if (!*pos)
3998 return SEQ_START_TOKEN; 4161 return SEQ_START_TOKEN;
3999 4162
4000 off = 1; 4163 /* check for end of the hash */
4001 for_each_netdev_rcu(net, dev) 4164 if (state->pos == 0 && *pos > 1)
4002 if (off++ == *pos) 4165 return NULL;
4003 return dev;
4004 4166
4005 return NULL; 4167 return dev_from_new_bucket(seq);
4006} 4168}
4007 4169
4008void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4170void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4009{ 4171{
4010 struct net_device *dev = v; 4172 struct net_device *dev;
4173
4174 ++*pos;
4011 4175
4012 if (v == SEQ_START_TOKEN) 4176 if (v == SEQ_START_TOKEN)
4013 dev = first_net_device_rcu(seq_file_net(seq)); 4177 return dev_from_new_bucket(seq);
4014 else
4015 dev = next_net_device_rcu(dev);
4016 4178
4017 ++*pos; 4179 dev = dev_from_same_bucket(seq);
4018 return dev; 4180 if (dev)
4181 return dev;
4182
4183 return dev_from_new_bucket(seq);
4019} 4184}
4020 4185
4021void dev_seq_stop(struct seq_file *seq, void *v) 4186void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4114,7 +4279,7 @@ static const struct seq_operations dev_seq_ops = {
4114static int dev_seq_open(struct inode *inode, struct file *file) 4279static int dev_seq_open(struct inode *inode, struct file *file)
4115{ 4280{
4116 return seq_open_net(inode, file, &dev_seq_ops, 4281 return seq_open_net(inode, file, &dev_seq_ops,
4117 sizeof(struct seq_net_private)); 4282 sizeof(struct dev_iter_state));
4118} 4283}
4119 4284
4120static const struct file_operations dev_seq_fops = { 4285static const struct file_operations dev_seq_fops = {
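
The new /proc iterator above folds its position into a single unsigned int: the high bits name the hash bucket, the low BUCKET_SPACE bits the offset inside it, so a resumed read re-walks at most one bucket instead of the whole device list. A round-trip sanity sketch, assuming the macros above are in scope and NETDEV_HASHBITS is 8 as in this era's dev.c (so BUCKET_SPACE is 24):

#include <linux/bug.h>

/* Illustrative check of the bucket/offset cursor encoding. */
static void cursor_roundtrip(void)
{
	unsigned int pos = set_bucket_offset(3, 7);	/* (3 << 24) | 7 */

	BUG_ON(get_bucket(pos) != 3);
	BUG_ON(get_offset(pos) != 7);
}
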
@@ -4497,9 +4662,7 @@ void __dev_set_rx_mode(struct net_device *dev)
4497 if (!netif_device_present(dev)) 4662 if (!netif_device_present(dev))
4498 return; 4663 return;
4499 4664
4500 if (ops->ndo_set_rx_mode) 4665 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4501 ops->ndo_set_rx_mode(dev);
4502 else {
4503 /* Unicast addresses changes may only happen under the rtnl, 4666 /* Unicast addresses changes may only happen under the rtnl,
4504 * therefore calling __dev_set_promiscuity here is safe. 4667 * therefore calling __dev_set_promiscuity here is safe.
4505 */ 4668 */
@@ -4510,10 +4673,10 @@ void __dev_set_rx_mode(struct net_device *dev)
4510 __dev_set_promiscuity(dev, -1); 4673 __dev_set_promiscuity(dev, -1);
4511 dev->uc_promisc = false; 4674 dev->uc_promisc = false;
4512 } 4675 }
4513
4514 if (ops->ndo_set_multicast_list)
4515 ops->ndo_set_multicast_list(dev);
4516 } 4676 }
4677
4678 if (ops->ndo_set_rx_mode)
4679 ops->ndo_set_rx_mode(dev);
4517} 4680}
4518 4681
4519void dev_set_rx_mode(struct net_device *dev) 4682void dev_set_rx_mode(struct net_device *dev)
@@ -4524,30 +4687,6 @@ void dev_set_rx_mode(struct net_device *dev)
4524} 4687}
4525 4688
4526/** 4689/**
4527 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4528 * @dev: device
4529 * @cmd: memory area for ethtool_ops::get_settings() result
4530 *
4531 * The cmd arg is initialized properly (cleared and
4532 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4533 *
4534 * Return device's ethtool_ops::get_settings() result value or
4535 * -EOPNOTSUPP when device doesn't expose
4536 * ethtool_ops::get_settings() operation.
4537 */
4538int dev_ethtool_get_settings(struct net_device *dev,
4539 struct ethtool_cmd *cmd)
4540{
4541 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4542 return -EOPNOTSUPP;
4543
4544 memset(cmd, 0, sizeof(struct ethtool_cmd));
4545 cmd->cmd = ETHTOOL_GSET;
4546 return dev->ethtool_ops->get_settings(dev, cmd);
4547}
4548EXPORT_SYMBOL(dev_ethtool_get_settings);
4549
4550/**
4551 * dev_get_flags - get flags reported to userspace 4690 * dev_get_flags - get flags reported to userspace
4552 * @dev: device 4691 * @dev: device
4553 * 4692 *
@@ -4863,7 +5002,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4863 return -EOPNOTSUPP; 5002 return -EOPNOTSUPP;
4864 5003
4865 case SIOCADDMULTI: 5004 case SIOCADDMULTI:
4866 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 5005 if (!ops->ndo_set_rx_mode ||
4867 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5006 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4868 return -EINVAL; 5007 return -EINVAL;
4869 if (!netif_device_present(dev)) 5008 if (!netif_device_present(dev))
@@ -4871,7 +5010,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4871 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); 5010 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4872 5011
4873 case SIOCDELMULTI: 5012 case SIOCDELMULTI:
4874 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 5013 if (!ops->ndo_set_rx_mode ||
4875 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5014 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4876 return -EINVAL; 5015 return -EINVAL;
4877 if (!netif_device_present(dev)) 5016 if (!netif_device_present(dev))
@@ -4888,6 +5027,12 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4888 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 5027 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4889 return dev_change_name(dev, ifr->ifr_newname); 5028 return dev_change_name(dev, ifr->ifr_newname);
4890 5029
5030 case SIOCSHWTSTAMP:
5031 err = net_hwtstamp_validate(ifr);
5032 if (err)
5033 return err;
5034 /* fall through */
5035
4891 /* 5036 /*
4892 * Unknown or private ioctl 5037 * Unknown or private ioctl
4893 */ 5038 */
@@ -5202,7 +5347,7 @@ static void rollback_registered_many(struct list_head *head)
5202 dev = list_first_entry(head, struct net_device, unreg_list); 5347 dev = list_first_entry(head, struct net_device, unreg_list);
5203 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 5348 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5204 5349
5205 rcu_barrier(); 5350 synchronize_net();
5206 5351
5207 list_for_each_entry(dev, head, unreg_list) 5352 list_for_each_entry(dev, head, unreg_list)
5208 dev_put(dev); 5353 dev_put(dev);
@@ -5715,6 +5860,12 @@ void netdev_run_todo(void)
5715 5860
5716 __rtnl_unlock(); 5861 __rtnl_unlock();
5717 5862
5863 /* Wait for rcu callbacks to finish before attempting to drain
5864 * the device list. This usually avoids a 250ms wait.
5865 */
5866 if (!list_empty(&list))
5867 rcu_barrier();
5868
5718 while (!list_empty(&list)) { 5869 while (!list_empty(&list)) {
5719 struct net_device *dev 5870 struct net_device *dev
5720 = list_first_entry(&list, struct net_device, todo_list); 5871 = list_first_entry(&list, struct net_device, todo_list);
@@ -5735,8 +5886,8 @@ void netdev_run_todo(void)
5735 5886
5736 /* paranoia */ 5887 /* paranoia */
5737 BUG_ON(netdev_refcnt_read(dev)); 5888 BUG_ON(netdev_refcnt_read(dev));
5738 WARN_ON(rcu_dereference_raw(dev->ip_ptr)); 5889 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5739 WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); 5890 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5740 WARN_ON(dev->dn_ptr); 5891 WARN_ON(dev->dn_ptr);
5741 5892
5742 if (dev->destructor) 5893 if (dev->destructor)
@@ -5940,7 +6091,7 @@ void free_netdev(struct net_device *dev)
5940 kfree(dev->_rx); 6091 kfree(dev->_rx);
5941#endif 6092#endif
5942 6093
5943 kfree(rcu_dereference_raw(dev->ingress_queue)); 6094 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5944 6095
5945 /* Flush device addresses */ 6096 /* Flush device addresses */
5946 dev_addr_flush(dev); 6097 dev_addr_flush(dev);
@@ -6115,6 +6266,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6115 */ 6266 */
6116 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6267 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6117 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6268 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6269 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6118 6270
6119 /* 6271 /*
6120 * Flush the unicast and multicast chains 6272 * Flush the unicast and multicast chains
@@ -6298,7 +6450,7 @@ const char *netdev_drivername(const struct net_device *dev)
6298 return empty; 6450 return empty;
6299} 6451}
6300 6452
6301static int __netdev_printk(const char *level, const struct net_device *dev, 6453int __netdev_printk(const char *level, const struct net_device *dev,
6302 struct va_format *vaf) 6454 struct va_format *vaf)
6303{ 6455{
6304 int r; 6456 int r;
@@ -6313,6 +6465,7 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
6313 6465
6314 return r; 6466 return r;
6315} 6467}
6468EXPORT_SYMBOL(__netdev_printk);
6316 6469
6317int netdev_printk(const char *level, const struct net_device *dev, 6470int netdev_printk(const char *level, const struct net_device *dev,
6318 const char *format, ...) 6471 const char *format, ...)
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e66939ed00..277faef9148d 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/export.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
18 19
@@ -591,8 +592,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
591 * addresses that have no users left. The source device must be 592 * addresses that have no users left. The source device must be
592 * locked by netif_tx_lock_bh. 593 * locked by netif_tx_lock_bh.
593 * 594 *
594 * This function is intended to be called from the dev->set_multicast_list 595 * This function is intended to be called from the ndo_set_rx_mode
595 * or dev->set_rx_mode function of layered software devices. 596 * function of layered software devices.
596 */ 597 */
597int dev_mc_sync(struct net_device *to, struct net_device *from) 598int dev_mc_sync(struct net_device *to, struct net_device *from)
598{ 599{
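
The comment fix above tracks the dev.c consolidation earlier in this diff: stacked drivers now have exactly one hook, ndo_set_rx_mode, and that is where address syncing belongs. A hedged sketch of such a hook, modeled on what a VLAN-style device does; lower_dev() is a hypothetical accessor standing in for the driver's priv lookup:

#include <linux/netdevice.h>

static struct net_device *lower_dev(struct net_device *dev);	/* hypothetical */

/* Hypothetical stacked device: push both address lists down. */
static void stacked_set_rx_mode(struct net_device *dev)
{
	dev_uc_sync(lower_dev(dev), dev);
	dev_mc_sync(lower_dev(dev), dev);
}
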
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf0733..d5e2c4c09107 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
171 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 dst->_neighbour = NULL; 174 RCU_INIT_POINTER(dst->_neighbour, NULL);
175#ifdef CONFIG_XFRM 175#ifdef CONFIG_XFRM
176 dst->xfrm = NULL; 176 dst->xfrm = NULL;
177#endif 177#endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
229 smp_rmb(); 229 smp_rmb();
230 230
231again: 231again:
232 neigh = dst->_neighbour; 232 neigh = rcu_dereference_protected(dst->_neighbour, 1);
233 child = dst->child; 233 child = dst->child;
234 234
235 if (neigh) { 235 if (neigh) {
236 dst->_neighbour = NULL; 236 RCU_INIT_POINTER(dst->_neighbour, NULL);
237 neigh_release(neigh); 237 neigh_release(neigh);
238 } 238 }
239 239
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
360 if (!unregister) { 360 if (!unregister) {
361 dst->input = dst->output = dst_discard; 361 dst->input = dst->output = dst_discard;
362 } else { 362 } else {
363 struct neighbour *neigh;
364
363 dst->dev = dev_net(dst->dev)->loopback_dev; 365 dst->dev = dev_net(dst->dev)->loopback_dev;
364 dev_hold(dst->dev); 366 dev_hold(dst->dev);
365 dev_put(dev); 367 dev_put(dev);
366 if (dst->_neighbour && dst->_neighbour->dev == dev) { 368 rcu_read_lock();
367 dst->_neighbour->dev = dst->dev; 369 neigh = dst_get_neighbour(dst);
370 if (neigh && neigh->dev == dev) {
371 neigh->dev = dst->dev;
368 dev_hold(dst->dev); 372 dev_hold(dst->dev);
369 dev_put(dev); 373 dev_put(dev);
370 } 374 }
375 rcu_read_unlock();
371 } 376 }
372} 377}
373 378
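
dst->_neighbour is now RCU-managed, so readers take rcu_read_lock(), fetch via dst_get_neighbour(), and stop using the pointer before unlocking, exactly as the dst_ifdown() hunk above does. A minimal reader sketch (illustrative):

#include <net/dst.h>

/* RCU reader pattern for the converted dst->_neighbour. */
static int dst_neigh_is_on(struct dst_entry *dst, struct net_device *dev)
{
	struct neighbour *neigh;
	int match = 0;

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);	/* rcu_dereference() inside */
	if (neigh && neigh->dev == dev)
		match = 1;
	rcu_read_unlock();		/* 'neigh' must not be used past here */

	return match;
}
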
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6cdba5fc2bed..f44481707124 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -569,15 +569,25 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
569 return 0; 569 return 0;
570} 570}
571 571
572static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 572int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
573{ 573{
574 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; 574 ASSERT_RTNL();
575 int err;
576 575
577 if (!dev->ethtool_ops->get_settings) 576 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
578 return -EOPNOTSUPP; 577 return -EOPNOTSUPP;
579 578
580 err = dev->ethtool_ops->get_settings(dev, &cmd); 579 memset(cmd, 0, sizeof(struct ethtool_cmd));
580 cmd->cmd = ETHTOOL_GSET;
581 return dev->ethtool_ops->get_settings(dev, cmd);
582}
583EXPORT_SYMBOL(__ethtool_get_settings);
584
585static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
586{
587 int err;
588 struct ethtool_cmd cmd;
589
590 err = __ethtool_get_settings(dev, &cmd);
581 if (err < 0) 591 if (err < 0)
582 return err; 592 return err;
583 593
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 27071ee2a4e1..c02e63c908da 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/module.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/sock.h> 17#include <net/sock.h>
17#include <net/fib_rules.h> 18#include <net/fib_rules.h>
@@ -490,7 +491,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
490 if (ops->nr_goto_rules > 0) { 491 if (ops->nr_goto_rules > 0) {
491 list_for_each_entry(tmp, &ops->rules_list, list) { 492 list_for_each_entry(tmp, &ops->rules_list, list) {
492 if (rtnl_dereference(tmp->ctarget) == rule) { 493 if (rtnl_dereference(tmp->ctarget) == rule) {
493 rcu_assign_pointer(tmp->ctarget, NULL); 494 RCU_INIT_POINTER(tmp->ctarget, NULL);
494 ops->unresolved_rules++; 495 ops->unresolved_rules++;
495 } 496 }
496 } 497 }
@@ -548,7 +549,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
548 frh->flags = rule->flags; 549 frh->flags = rule->flags;
549 550
550 if (rule->action == FR_ACT_GOTO && 551 if (rule->action == FR_ACT_GOTO &&
551 rcu_dereference_raw(rule->ctarget) == NULL) 552 rcu_access_pointer(rule->ctarget) == NULL)
552 frh->flags |= FIB_RULE_UNRESOLVED; 553 frh->flags |= FIB_RULE_UNRESOLVED;
553 554
554 if (rule->iifname[0]) { 555 if (rule->iifname[0]) {
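
fib_rules.c repeats a conversion running through this whole series: rcu_assign_pointer(p, NULL) becomes RCU_INIT_POINTER(p, NULL), since storing NULL publishes no object and needs no memory barrier, and NULL-tests that never dereference use rcu_access_pointer(), which skips the lockdep checks. The rule, sketched as standard RCU practice rather than anything specific to this diff:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct item {
	int val;
};

static struct item __rcu *head;

static void publish(struct item *obj)
{
	obj->val = 42;
	rcu_assign_pointer(head, obj);	/* barrier orders init before publish */
}

static void unpublish(void)
{
	RCU_INIT_POINTER(head, NULL);	/* NULL: nothing to order, no barrier */
}

static bool head_empty(void)
{
	return rcu_access_pointer(head) == NULL;	/* test only, no deref */
}
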
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975fa87cb..5dea45279215 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -436,7 +436,7 @@ error:
436 * 436 *
437 * Returns 0 if the rule set is legal or -EINVAL if not. 437 * Returns 0 if the rule set is legal or -EINVAL if not.
438 */ 438 */
439int sk_chk_filter(struct sock_filter *filter, int flen) 439int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
440{ 440{
441 /* 441 /*
442 * Valid instructions are initialized to non-0. 442 * Valid instructions are initialized to non-0.
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
645 filter = rcu_dereference_protected(sk->sk_filter, 645 filter = rcu_dereference_protected(sk->sk_filter,
646 sock_owned_by_user(sk)); 646 sock_owned_by_user(sk));
647 if (filter) { 647 if (filter) {
648 rcu_assign_pointer(sk->sk_filter, NULL); 648 RCU_INIT_POINTER(sk->sk_filter, NULL);
649 sk_filter_uncharge(sk, filter); 649 sk_filter_uncharge(sk, filter);
650 ret = 0; 650 ret = 0;
651 } 651 }
diff --git a/net/core/flow.c b/net/core/flow.c
index 555a456efb07..8ae42de9c79e 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -413,7 +413,7 @@ static int __init flow_cache_init(struct flow_cache *fc)
413 413
414 for_each_online_cpu(i) { 414 for_each_online_cpu(i) {
415 if (flow_cache_cpu_prepare(fc, i)) 415 if (flow_cache_cpu_prepare(fc, i))
416 return -ENOMEM; 416 goto err;
417 } 417 }
418 fc->hotcpu_notifier = (struct notifier_block){ 418 fc->hotcpu_notifier = (struct notifier_block){
419 .notifier_call = flow_cache_cpu, 419 .notifier_call = flow_cache_cpu,
@@ -426,6 +426,18 @@ static int __init flow_cache_init(struct flow_cache *fc)
426 add_timer(&fc->rnd_timer); 426 add_timer(&fc->rnd_timer);
427 427
428 return 0; 428 return 0;
429
430err:
431 for_each_possible_cpu(i) {
432 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
433 kfree(fcp->hash_table);
434 fcp->hash_table = NULL;
435 }
436
437 free_percpu(fc->percpu);
438 fc->percpu = NULL;
439
440 return -ENOMEM;
429} 441}
430 442
431static int __init flow_cache_init_global(void) 443static int __init flow_cache_init_global(void)
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
index 283c2b993fb8..81e1ed7c8383 100644
--- a/net/core/kmap_skb.h
+++ b/net/core/kmap_skb.h
@@ -7,7 +7,7 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
7 7
8 local_bh_disable(); 8 local_bh_disable();
9#endif 9#endif
10 return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ); 10 return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
11} 11}
12 12
13static inline void kunmap_skb_frag(void *vaddr) 13static inline void kunmap_skb_frag(void *vaddr)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 357bd4ee4baa..c3519c6d1b16 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -78,8 +78,13 @@ static void rfc2863_policy(struct net_device *dev)
78 78
79static bool linkwatch_urgent_event(struct net_device *dev) 79static bool linkwatch_urgent_event(struct net_device *dev)
80{ 80{
81 return netif_running(dev) && netif_carrier_ok(dev) && 81 if (!netif_running(dev))
82 qdisc_tx_changing(dev); 82 return false;
83
84 if (dev->ifindex != dev->iflink)
85 return true;
86
87 return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
83} 88}
84 89
85 90
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1334d7e56f02..039d51e6c284 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
844 skb_queue_purge(&neigh->arp_queue); 844 skb_queue_purge(&neigh->arp_queue);
845} 845}
846 846
847static void neigh_probe(struct neighbour *neigh)
848 __releases(neigh->lock)
849{
850 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
851 /* keep skb alive even if arp_queue overflows */
852 if (skb)
853 skb = skb_copy(skb, GFP_ATOMIC);
854 write_unlock(&neigh->lock);
855 neigh->ops->solicit(neigh, skb);
856 atomic_inc(&neigh->probes);
857 kfree_skb(skb);
858}
859
847/* Called when a timer expires for a neighbour entry. */ 860/* Called when a timer expires for a neighbour entry. */
848 861
849static void neigh_timer_handler(unsigned long arg) 862static void neigh_timer_handler(unsigned long arg)
@@ -859,12 +872,8 @@ static void neigh_timer_handler(unsigned long arg)
859 now = jiffies; 872 now = jiffies;
860 next = now + HZ; 873 next = now + HZ;
861 874
862 if (!(state & NUD_IN_TIMER)) { 875 if (!(state & NUD_IN_TIMER))
863#ifndef CONFIG_SMP
864 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
865#endif
866 goto out; 876 goto out;
867 }
868 877
869 if (state & NUD_REACHABLE) { 878 if (state & NUD_REACHABLE) {
870 if (time_before_eq(now, 879 if (time_before_eq(now,
@@ -920,14 +929,7 @@ static void neigh_timer_handler(unsigned long arg)
920 neigh_hold(neigh); 929 neigh_hold(neigh);
921 } 930 }
922 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 931 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
923 struct sk_buff *skb = skb_peek(&neigh->arp_queue); 932 neigh_probe(neigh);
924 /* keep skb alive even if arp_queue overflows */
925 if (skb)
926 skb = skb_copy(skb, GFP_ATOMIC);
927 write_unlock(&neigh->lock);
928 neigh->ops->solicit(neigh, skb);
929 atomic_inc(&neigh->probes);
930 kfree_skb(skb);
931 } else { 933 } else {
932out: 934out:
933 write_unlock(&neigh->lock); 935 write_unlock(&neigh->lock);
@@ -942,7 +944,7 @@ out:
942int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) 944int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
943{ 945{
944 int rc; 946 int rc;
945 unsigned long now; 947 bool immediate_probe = false;
946 948
947 write_lock_bh(&neigh->lock); 949 write_lock_bh(&neigh->lock);
948 950
@@ -950,14 +952,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
950 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) 952 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
951 goto out_unlock_bh; 953 goto out_unlock_bh;
952 954
953 now = jiffies;
954
955 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { 955 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
956 if (neigh->parms->mcast_probes + neigh->parms->app_probes) { 956 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
957 unsigned long next, now = jiffies;
958
957 atomic_set(&neigh->probes, neigh->parms->ucast_probes); 959 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
958 neigh->nud_state = NUD_INCOMPLETE; 960 neigh->nud_state = NUD_INCOMPLETE;
959 neigh->updated = jiffies; 961 neigh->updated = now;
960 neigh_add_timer(neigh, now + 1); 962 next = now + max(neigh->parms->retrans_time, HZ/2);
963 neigh_add_timer(neigh, next);
964 immediate_probe = true;
961 } else { 965 } else {
962 neigh->nud_state = NUD_FAILED; 966 neigh->nud_state = NUD_FAILED;
963 neigh->updated = jiffies; 967 neigh->updated = jiffies;
@@ -989,7 +993,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
989 rc = 1; 993 rc = 1;
990 } 994 }
991out_unlock_bh: 995out_unlock_bh:
992 write_unlock_bh(&neigh->lock); 996 if (immediate_probe)
997 neigh_probe(neigh);
998 else
999 write_unlock(&neigh->lock);
1000 local_bh_enable();
993 return rc; 1001 return rc;
994} 1002}
995EXPORT_SYMBOL(__neigh_event_send); 1003EXPORT_SYMBOL(__neigh_event_send);
@@ -1156,10 +1164,14 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1156 struct dst_entry *dst = skb_dst(skb); 1164 struct dst_entry *dst = skb_dst(skb);
1157 struct neighbour *n2, *n1 = neigh; 1165 struct neighbour *n2, *n1 = neigh;
1158 write_unlock_bh(&neigh->lock); 1166 write_unlock_bh(&neigh->lock);
1167
1168 rcu_read_lock();
1159 /* On shaper/eql skb->dst->neighbour != neigh :( */ 1169 /* On shaper/eql skb->dst->neighbour != neigh :( */
1160 if (dst && (n2 = dst_get_neighbour(dst)) != NULL) 1170 if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1161 n1 = n2; 1171 n1 = n2;
1162 n1->output(n1, skb); 1172 n1->output(n1, skb);
1173 rcu_read_unlock();
1174
1163 write_lock_bh(&neigh->lock); 1175 write_lock_bh(&neigh->lock);
1164 } 1176 }
1165 skb_queue_purge(&neigh->arp_queue); 1177 skb_queue_purge(&neigh->arp_queue);
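Taken together, the neighbour.c hunks keep the locking balanced even though neigh_probe() now runs from two call sites: the helper is annotated __releases(neigh->lock) and drops the lock itself, so __neigh_event_send() must not unlock again on the immediate-probe path. The merged control flow, as a comment-style sketch:

/*
 *	write_lock_bh(&neigh->lock);
 *	...
 * out_unlock_bh:
 *	if (immediate_probe)
 *		neigh_probe(neigh);		<- drops neigh->lock itself
 *	else
 *		write_unlock(&neigh->lock);
 *	local_bh_enable();			<- pairs with write_lock_bh()
 */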
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5db2f27..c71c434a4c05 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -20,6 +20,7 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/export.h>
23#include <net/wext.h> 24#include <net/wext.h>
24 25
25#include "net-sysfs.h" 26#include "net-sysfs.h"
@@ -147,7 +148,7 @@ static ssize_t show_speed(struct device *dev,
147 148
148 if (netif_running(netdev)) { 149 if (netif_running(netdev)) {
149 struct ethtool_cmd cmd; 150 struct ethtool_cmd cmd;
150 if (!dev_ethtool_get_settings(netdev, &cmd)) 151 if (!__ethtool_get_settings(netdev, &cmd))
151 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); 152 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
152 } 153 }
153 rtnl_unlock(); 154 rtnl_unlock();
@@ -165,7 +166,7 @@ static ssize_t show_duplex(struct device *dev,
165 166
166 if (netif_running(netdev)) { 167 if (netif_running(netdev)) {
167 struct ethtool_cmd cmd; 168 struct ethtool_cmd cmd;
168 if (!dev_ethtool_get_settings(netdev, &cmd)) 169 if (!__ethtool_get_settings(netdev, &cmd))
169 ret = sprintf(buf, "%s\n", 170 ret = sprintf(buf, "%s\n",
170 cmd.duplex ? "full" : "half"); 171 cmd.duplex ? "full" : "half");
171 } 172 }
@@ -712,13 +713,13 @@ static void rx_queue_release(struct kobject *kobj)
712 struct rps_dev_flow_table *flow_table; 713 struct rps_dev_flow_table *flow_table;
713 714
714 715
715 map = rcu_dereference_raw(queue->rps_map); 716 map = rcu_dereference_protected(queue->rps_map, 1);
716 if (map) { 717 if (map) {
717 RCU_INIT_POINTER(queue->rps_map, NULL); 718 RCU_INIT_POINTER(queue->rps_map, NULL);
718 kfree_rcu(map, rcu); 719 kfree_rcu(map, rcu);
719 } 720 }
720 721
721 flow_table = rcu_dereference_raw(queue->rps_flow_table); 722 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
722 if (flow_table) { 723 if (flow_table) {
723 RCU_INIT_POINTER(queue->rps_flow_table, NULL); 724 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
724 call_rcu(&flow_table->rcu, rps_dev_flow_table_release); 725 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
@@ -987,10 +988,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
987 } 988 }
988 989
989 if (nonempty) 990 if (nonempty)
990 rcu_assign_pointer(dev->xps_maps, new_dev_maps); 991 RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
991 else { 992 else {
992 kfree(new_dev_maps); 993 kfree(new_dev_maps);
993 rcu_assign_pointer(dev->xps_maps, NULL); 994 RCU_INIT_POINTER(dev->xps_maps, NULL);
994 } 995 }
995 996
996 if (dev_maps) 997 if (dev_maps)
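The rcu_assign_pointer() to RCU_INIT_POINTER() conversions above are legitimate because the write barrier in rcu_assign_pointer() only matters when publishing a freshly initialised object to concurrent readers; storing NULL, or storing into a structure readers cannot reach yet, needs no ordering. A hedged sketch of the two cases:

/* Publishing an initialised object: readers may dereference it as
 * soon as the pointer lands, so order the stores. */
obj->val = 42;
rcu_assign_pointer(gp, obj);

/* Tearing down, or initialising before publication: nothing for a
 * reader to observe half-built, so a plain store suffices. */
RCU_INIT_POINTER(gp, NULL);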
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 52380b1d552a..ba3c0120786c 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -11,6 +11,7 @@
11#include <linux/inetdevice.h> 11#include <linux/inetdevice.h>
12#include <linux/inet.h> 12#include <linux/inet.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/export.h>
14#include <linux/netpoll.h> 15#include <linux/netpoll.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 5bbdbf0d3664..aefcd7acbffa 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -10,6 +10,7 @@
10#include <linux/nsproxy.h> 10#include <linux/nsproxy.h>
11#include <linux/proc_fs.h> 11#include <linux/proc_fs.h>
12#include <linux/file.h> 12#include <linux/file.h>
13#include <linux/export.h>
13#include <net/net_namespace.h> 14#include <net/net_namespace.h>
14#include <net/netns/generic.h> 15#include <net/netns/generic.h>
15 16
diff --git a/net/core/netevent.c b/net/core/netevent.c
index 865f0ceb81fb..f17ccd291d39 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/export.h>
18#include <net/netevent.h> 19#include <net/netevent.h>
19 20
20static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); 21static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 52622517e0d8..cf64c1ffa4cd 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -23,6 +23,7 @@
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/export.h>
26#include <net/tcp.h> 27#include <net/tcp.h>
27#include <net/udp.h> 28#include <net/udp.h>
28#include <asm/unaligned.h> 29#include <asm/unaligned.h>
@@ -762,7 +763,7 @@ int __netpoll_setup(struct netpoll *np)
762 } 763 }
763 764
764 /* last thing to do is link it to the net device structure */ 765 /* last thing to do is link it to the net device structure */
765 rcu_assign_pointer(ndev->npinfo, npinfo); 766 RCU_INIT_POINTER(ndev->npinfo, npinfo);
766 767
767 return 0; 768 return 0;
768 769
@@ -903,7 +904,7 @@ void __netpoll_cleanup(struct netpoll *np)
903 if (ops->ndo_netpoll_cleanup) 904 if (ops->ndo_netpoll_cleanup)
904 ops->ndo_netpoll_cleanup(np->dev); 905 ops->ndo_netpoll_cleanup(np->dev);
905 906
906 rcu_assign_pointer(np->dev->npinfo, NULL); 907 RCU_INIT_POINTER(np->dev->npinfo, NULL);
907 908
908 /* avoid racing with NAPI reading npinfo */ 909 /* avoid racing with NAPI reading npinfo */
909 synchronize_rcu_bh(); 910 synchronize_rcu_bh();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index e35a6fbb8110..0001c243b35c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2145,9 +2145,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2145 } 2145 }
2146 2146
2147 start_time = ktime_now(); 2147 start_time = ktime_now();
2148 if (remaining < 100000) 2148 if (remaining < 100000) {
2149 ndelay(remaining); /* really small just spin */ 2149 /* for small delays (<100us), just loop until limit is reached */
2150 else { 2150 do {
2151 end_time = ktime_now();
2152 } while (ktime_lt(end_time, spin_until));
2153 } else {
2151 /* see do_nanosleep */ 2154 /* see do_nanosleep */
2152 hrtimer_init_sleeper(&t, current); 2155 hrtimer_init_sleeper(&t, current);
2153 do { 2156 do {
@@ -2162,8 +2165,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2162 hrtimer_cancel(&t.timer); 2165 hrtimer_cancel(&t.timer);
2163 } while (t.task && pkt_dev->running && !signal_pending(current)); 2166 } while (t.task && pkt_dev->running && !signal_pending(current));
2164 __set_current_state(TASK_RUNNING); 2167 __set_current_state(TASK_RUNNING);
2168 end_time = ktime_now();
2165 } 2169 }
2166 end_time = ktime_now();
2167 2170
2168 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); 2171 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2169 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); 2172 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
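With ndelay(), the short-delay path waited the requested delta no matter how late spin() was entered, so errors could accumulate from packet to packet; the replacement loop re-reads the clock until the absolute deadline spin_until has passed. The same idea in a self-contained user-space sketch (spin_until_abs() is a hypothetical name, not kernel code):

#include <time.h>

/* Busy-wait until an absolute CLOCK_MONOTONIC deadline. */
static void spin_until_abs(const struct timespec *deadline)
{
	struct timespec now;

	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec < deadline->tv_sec ||
		 (now.tv_sec == deadline->tv_sec &&
		  now.tv_nsec < deadline->tv_nsec));
}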
@@ -2602,18 +2605,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2602 if (!pkt_dev->page) 2605 if (!pkt_dev->page)
2603 break; 2606 break;
2604 } 2607 }
2605 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2606 get_page(pkt_dev->page); 2608 get_page(pkt_dev->page);
2609 skb_frag_set_page(skb, i, pkt_dev->page);
2607 skb_shinfo(skb)->frags[i].page_offset = 0; 2610 skb_shinfo(skb)->frags[i].page_offset = 0;
2608 /*last fragment, fill rest of data*/ 2611 /*last fragment, fill rest of data*/
2609 if (i == (frags - 1)) 2612 if (i == (frags - 1))
2610 skb_shinfo(skb)->frags[i].size = 2613 skb_frag_size_set(&skb_shinfo(skb)->frags[i],
2611 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2614 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
2612 else 2615 else
2613 skb_shinfo(skb)->frags[i].size = frag_len; 2616 skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
2614 datalen -= skb_shinfo(skb)->frags[i].size; 2617 datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
2615 skb->len += skb_shinfo(skb)->frags[i].size; 2618 skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2616 skb->data_len += skb_shinfo(skb)->frags[i].size; 2619 skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2617 i++; 2620 i++;
2618 skb_shinfo(skb)->nr_frags = i; 2621 skb_shinfo(skb)->nr_frags = i;
2619 } 2622 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e953fe39..9083e82bdae5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -731,7 +731,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
731 size += num_vfs * 731 size += num_vfs *
732 (nla_total_size(sizeof(struct ifla_vf_mac)) + 732 (nla_total_size(sizeof(struct ifla_vf_mac)) +
733 nla_total_size(sizeof(struct ifla_vf_vlan)) + 733 nla_total_size(sizeof(struct ifla_vf_vlan)) +
734 nla_total_size(sizeof(struct ifla_vf_tx_rate))); 734 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
735 nla_total_size(sizeof(struct ifla_vf_spoofchk)));
735 return size; 736 return size;
736 } else 737 } else
737 return 0; 738 return 0;
@@ -954,13 +955,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
954 struct ifla_vf_mac vf_mac; 955 struct ifla_vf_mac vf_mac;
955 struct ifla_vf_vlan vf_vlan; 956 struct ifla_vf_vlan vf_vlan;
956 struct ifla_vf_tx_rate vf_tx_rate; 957 struct ifla_vf_tx_rate vf_tx_rate;
958 struct ifla_vf_spoofchk vf_spoofchk;
959
960 /*
961 * Not all SR-IOV capable drivers support the
962 * spoofcheck query. Preset to -1 so the user
963 * space tool can detect that the driver didn't
964 * report anything.
965 */
966 ivi.spoofchk = -1;
957 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) 967 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
958 break; 968 break;
959 vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; 969 vf_mac.vf =
970 vf_vlan.vf =
971 vf_tx_rate.vf =
972 vf_spoofchk.vf = ivi.vf;
973
960 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 974 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
961 vf_vlan.vlan = ivi.vlan; 975 vf_vlan.vlan = ivi.vlan;
962 vf_vlan.qos = ivi.qos; 976 vf_vlan.qos = ivi.qos;
963 vf_tx_rate.rate = ivi.tx_rate; 977 vf_tx_rate.rate = ivi.tx_rate;
978 vf_spoofchk.setting = ivi.spoofchk;
964 vf = nla_nest_start(skb, IFLA_VF_INFO); 979 vf = nla_nest_start(skb, IFLA_VF_INFO);
965 if (!vf) { 980 if (!vf) {
966 nla_nest_cancel(skb, vfinfo); 981 nla_nest_cancel(skb, vfinfo);
@@ -968,7 +983,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
968 } 983 }
969 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); 984 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
970 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); 985 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
971 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); 986 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
987 &vf_tx_rate);
988 NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
989 &vf_spoofchk);
972 nla_nest_end(skb, vf); 990 nla_nest_end(skb, vf);
973 } 991 }
974 nla_nest_end(skb, vfinfo); 992 nla_nest_end(skb, vfinfo);
@@ -1202,6 +1220,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
1202 ivt->rate); 1220 ivt->rate);
1203 break; 1221 break;
1204 } 1222 }
1223 case IFLA_VF_SPOOFCHK: {
1224 struct ifla_vf_spoofchk *ivs;
1225 ivs = nla_data(vf);
1226 err = -EOPNOTSUPP;
1227 if (ops->ndo_set_vf_spoofchk)
1228 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1229 ivs->setting);
1230 break;
1231 }
1205 default: 1232 default:
1206 err = -EINVAL; 1233 err = -EINVAL;
1207 break; 1234 break;
@@ -1604,7 +1631,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1604 dev_net_set(dev, net); 1631 dev_net_set(dev, net);
1605 dev->rtnl_link_ops = ops; 1632 dev->rtnl_link_ops = ops;
1606 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1633 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1607 dev->real_num_tx_queues = real_num_queues;
1608 1634
1609 if (tb[IFLA_MTU]) 1635 if (tb[IFLA_MTU])
1610 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1636 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
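Because ivi.spoofchk is preset to -1 before ndo_get_vf_config() runs, user space can tell "driver reported disabled" (0) apart from "driver reported nothing" (-1). A hedged sketch of the consuming side (hypothetical helper, not taken from iproute2):

#include <stdio.h>
#include <linux/if_link.h>

/* 'svf' is assumed to have been parsed from an IFLA_VF_SPOOFCHK
 * attribute inside an IFLA_VF_INFO nest. */
static void print_spoofchk(const struct ifla_vf_spoofchk *svf)
{
	if ((int)svf->setting == -1)
		printf("vf %u: spoofchk not reported\n", svf->vf);
	else
		printf("vf %u: spoofchk %s\n", svf->vf,
		       svf->setting ? "on" : "off");
}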
diff --git a/net/core/scm.c b/net/core/scm.c
index 811b53fb330e..ff52ad0a5150 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -173,7 +173,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
173 if (err) 173 if (err)
174 goto error; 174 goto error;
175 175
176 if (pid_vnr(p->pid) != p->creds.pid) { 176 if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
177 struct pid *pid; 177 struct pid *pid;
178 err = -ESRCH; 178 err = -ESRCH;
179 pid = find_get_pid(p->creds.pid); 179 pid = find_get_pid(p->creds.pid);
@@ -183,8 +183,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
183 p->pid = pid; 183 p->pid = pid;
184 } 184 }
185 185
186 if ((p->cred->euid != p->creds.uid) || 186 if (!p->cred ||
187 (p->cred->egid != p->creds.gid)) { 187 (p->cred->euid != p->creds.uid) ||
188 (p->cred->egid != p->creds.gid)) {
188 struct cred *cred; 189 struct cred *cred;
189 err = -ENOMEM; 190 err = -ENOMEM;
190 cred = prepare_creds(); 191 cred = prepare_creds();
@@ -193,7 +194,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
193 194
194 cred->uid = cred->euid = p->creds.uid; 195 cred->uid = cred->euid = p->creds.uid;
195 cred->gid = cred->egid = p->creds.gid; 196 cred->gid = cred->egid = p->creds.gid;
196 put_cred(p->cred); 197 if (p->cred)
198 put_cred(p->cred);
197 p->cred = cred; 199 p->cred = cred;
198 } 200 }
199 break; 201 break;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 45329d7c9dd9..025233de25f9 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq)
35} 35}
36 36
37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
38__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 38__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
39 __be16 sport, __be16 dport) 39 __be16 sport, __be16 dport)
40{ 40{
41 u32 secret[MD5_MESSAGE_BYTES / 4]; 41 u32 secret[MD5_MESSAGE_BYTES / 4];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f56fce..18a3cebb753d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,21 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
184 goto out; 184 goto out;
185 prefetchw(skb); 185 prefetchw(skb);
186 186
187 /* We do our best to align skb_shared_info on a separate cache
188 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
189 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
190 * Both skb->head and skb_shared_info are cache line aligned.
191 */
187 size = SKB_DATA_ALIGN(size); 192 size = SKB_DATA_ALIGN(size);
188 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), 193 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
189 gfp_mask, node); 194 data = kmalloc_node_track_caller(size, gfp_mask, node);
190 if (!data) 195 if (!data)
191 goto nodata; 196 goto nodata;
197 /* kmalloc(size) might give us more room than requested.
198 * Put skb_shared_info exactly at the end of allocated zone,
199 * to allow max possible filling before reallocation.
200 */
201 size = SKB_WITH_OVERHEAD(ksize(data));
192 prefetchw(data + size); 202 prefetchw(data + size);
193 203
194 /* 204 /*
@@ -197,7 +207,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
197 * the tail pointer in struct sk_buff! 207 * the tail pointer in struct sk_buff!
198 */ 208 */
199 memset(skb, 0, offsetof(struct sk_buff, tail)); 209 memset(skb, 0, offsetof(struct sk_buff, tail));
200 skb->truesize = size + sizeof(struct sk_buff); 210 /* Account for allocated memory : skb + skb->head */
211 skb->truesize = SKB_TRUESIZE(size);
201 atomic_set(&skb->users, 1); 212 atomic_set(&skb->users, 1);
202 skb->head = data; 213 skb->head = data;
203 skb->data = data; 214 skb->data = data;
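The two __alloc_skb() hunks lean on three size helpers from include/linux/skbuff.h; as of this merge they expand, give or take formatting, to:

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_TRUESIZE(X)	((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

So the allocation reserves an aligned shared-info block up front, and the ksize() feedback then lets the head grow into whatever slack kmalloc() actually handed back.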
@@ -326,7 +337,7 @@ static void skb_release_data(struct sk_buff *skb)
326 if (skb_shinfo(skb)->nr_frags) { 337 if (skb_shinfo(skb)->nr_frags) {
327 int i; 338 int i;
328 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 339 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
329 put_page(skb_shinfo(skb)->frags[i].page); 340 skb_frag_unref(skb, i);
330 } 341 }
331 342
332 /* 343 /*
@@ -475,6 +486,30 @@ void consume_skb(struct sk_buff *skb)
475EXPORT_SYMBOL(consume_skb); 486EXPORT_SYMBOL(consume_skb);
476 487
477/** 488/**
489 * skb_recycle - clean up an skb for reuse
490 * @skb: buffer
491 *
492 * Recycles the skb to be reused as a receive buffer. This
493 * function does any necessary reference count dropping, and
494 * cleans up the skbuff as if it just came from __alloc_skb().
495 */
496void skb_recycle(struct sk_buff *skb)
497{
498 struct skb_shared_info *shinfo;
499
500 skb_release_head_state(skb);
501
502 shinfo = skb_shinfo(skb);
503 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
504 atomic_set(&shinfo->dataref, 1);
505
506 memset(skb, 0, offsetof(struct sk_buff, tail));
507 skb->data = skb->head + NET_SKB_PAD;
508 skb_reset_tail_pointer(skb);
509}
510EXPORT_SYMBOL(skb_recycle);
511
512/**
478 * skb_recycle_check - check if skb can be reused for receive 513 * skb_recycle_check - check if skb can be reused for receive
479 * @skb: buffer 514 * @skb: buffer
480 * @skb_size: minimum receive buffer size 515 * @skb_size: minimum receive buffer size
@@ -488,33 +523,10 @@ EXPORT_SYMBOL(consume_skb);
488 */ 523 */
489bool skb_recycle_check(struct sk_buff *skb, int skb_size) 524bool skb_recycle_check(struct sk_buff *skb, int skb_size)
490{ 525{
491 struct skb_shared_info *shinfo; 526 if (!skb_is_recycleable(skb, skb_size))
492
493 if (irqs_disabled())
494 return false;
495
496 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
497 return false;
498
499 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
500 return false; 527 return false;
501 528
502 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 529 skb_recycle(skb);
503 if (skb_end_pointer(skb) - skb->head < skb_size)
504 return false;
505
506 if (skb_shared(skb) || skb_cloned(skb))
507 return false;
508
509 skb_release_head_state(skb);
510
511 shinfo = skb_shinfo(skb);
512 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
513 atomic_set(&shinfo->dataref, 1);
514
515 memset(skb, 0, offsetof(struct sk_buff, tail));
516 skb->data = skb->head + NET_SKB_PAD;
517 skb_reset_tail_pointer(skb);
518 530
519 return true; 531 return true;
520} 532}
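The checks deleted above were not dropped; they moved into a new skb_is_recycleable() helper in include/linux/skbuff.h so callers can test without committing to the recycle. Reconstructed from the removed lines, the helper reads approximately:

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}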
@@ -529,6 +541,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
529 new->mac_header = old->mac_header; 541 new->mac_header = old->mac_header;
530 skb_dst_copy(new, old); 542 skb_dst_copy(new, old);
531 new->rxhash = old->rxhash; 543 new->rxhash = old->rxhash;
544 new->ooo_okay = old->ooo_okay;
545 new->l4_rxhash = old->l4_rxhash;
532#ifdef CONFIG_XFRM 546#ifdef CONFIG_XFRM
533 new->sp = secpath_get(old->sp); 547 new->sp = secpath_get(old->sp);
534#endif 548#endif
@@ -647,7 +661,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
647 } 661 }
648 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 662 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
649 memcpy(page_address(page), 663 memcpy(page_address(page),
650 vaddr + f->page_offset, f->size); 664 vaddr + f->page_offset, skb_frag_size(f));
651 kunmap_skb_frag(vaddr); 665 kunmap_skb_frag(vaddr);
652 page->private = (unsigned long)head; 666 page->private = (unsigned long)head;
653 head = page; 667 head = page;
@@ -655,14 +669,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
655 669
656 /* skb frags release userspace buffers */ 670 /* skb frags release userspace buffers */
657 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
658 put_page(skb_shinfo(skb)->frags[i].page); 672 skb_frag_unref(skb, i);
659 673
660 uarg->callback(uarg); 674 uarg->callback(uarg);
661 675
662 /* skb frags point to kernel buffers */ 676 /* skb frags point to kernel buffers */
663 for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) { 677 for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
664 skb_shinfo(skb)->frags[i - 1].page_offset = 0; 678 __skb_fill_page_desc(skb, i-1, head, 0,
665 skb_shinfo(skb)->frags[i - 1].page = head; 679 skb_shinfo(skb)->frags[i - 1].size);
666 head = (struct page *)head->private; 680 head = (struct page *)head->private;
667 } 681 }
668 682
@@ -820,7 +834,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
820 } 834 }
821 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 835 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
822 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 836 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
823 get_page(skb_shinfo(n)->frags[i].page); 837 skb_frag_ref(skb, i);
824 } 838 }
825 skb_shinfo(n)->nr_frags = i; 839 skb_shinfo(n)->nr_frags = i;
826 } 840 }
@@ -911,7 +925,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
911 goto nofrags; 925 goto nofrags;
912 } 926 }
913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
914 get_page(skb_shinfo(skb)->frags[i].page); 928 skb_frag_ref(skb, i);
915 929
916 if (skb_has_frag_list(skb)) 930 if (skb_has_frag_list(skb))
917 skb_clone_fraglist(skb); 931 skb_clone_fraglist(skb);
@@ -1178,20 +1192,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1178 goto drop_pages; 1192 goto drop_pages;
1179 1193
1180 for (; i < nfrags; i++) { 1194 for (; i < nfrags; i++) {
1181 int end = offset + skb_shinfo(skb)->frags[i].size; 1195 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1182 1196
1183 if (end < len) { 1197 if (end < len) {
1184 offset = end; 1198 offset = end;
1185 continue; 1199 continue;
1186 } 1200 }
1187 1201
1188 skb_shinfo(skb)->frags[i++].size = len - offset; 1202 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1189 1203
1190drop_pages: 1204drop_pages:
1191 skb_shinfo(skb)->nr_frags = i; 1205 skb_shinfo(skb)->nr_frags = i;
1192 1206
1193 for (; i < nfrags; i++) 1207 for (; i < nfrags; i++)
1194 put_page(skb_shinfo(skb)->frags[i].page); 1208 skb_frag_unref(skb, i);
1195 1209
1196 if (skb_has_frag_list(skb)) 1210 if (skb_has_frag_list(skb))
1197 skb_drop_fraglist(skb); 1211 skb_drop_fraglist(skb);
@@ -1294,9 +1308,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1294 /* Estimate size of pulled pages. */ 1308 /* Estimate size of pulled pages. */
1295 eat = delta; 1309 eat = delta;
1296 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1310 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1297 if (skb_shinfo(skb)->frags[i].size >= eat) 1311 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1312
1313 if (size >= eat)
1298 goto pull_pages; 1314 goto pull_pages;
1299 eat -= skb_shinfo(skb)->frags[i].size; 1315 eat -= size;
1300 } 1316 }
1301 1317
1302 /* If we need update frag list, we are in troubles. 1318 /* If we need update frag list, we are in troubles.
@@ -1359,14 +1375,16 @@ pull_pages:
1359 eat = delta; 1375 eat = delta;
1360 k = 0; 1376 k = 0;
1361 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1377 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1362 if (skb_shinfo(skb)->frags[i].size <= eat) { 1378 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1363 put_page(skb_shinfo(skb)->frags[i].page); 1379
1364 eat -= skb_shinfo(skb)->frags[i].size; 1380 if (size <= eat) {
1381 skb_frag_unref(skb, i);
1382 eat -= size;
1365 } else { 1383 } else {
1366 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1384 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1367 if (eat) { 1385 if (eat) {
1368 skb_shinfo(skb)->frags[k].page_offset += eat; 1386 skb_shinfo(skb)->frags[k].page_offset += eat;
1369 skb_shinfo(skb)->frags[k].size -= eat; 1387 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1370 eat = 0; 1388 eat = 0;
1371 } 1389 }
1372 k++; 1390 k++;
@@ -1421,7 +1439,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1421 1439
1422 WARN_ON(start > offset + len); 1440 WARN_ON(start > offset + len);
1423 1441
1424 end = start + skb_shinfo(skb)->frags[i].size; 1442 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1425 if ((copy = end - offset) > 0) { 1443 if ((copy = end - offset) > 0) {
1426 u8 *vaddr; 1444 u8 *vaddr;
1427 1445
@@ -1619,7 +1637,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1619 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1637 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1620 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1638 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1621 1639
1622 if (__splice_segment(f->page, f->page_offset, f->size, 1640 if (__splice_segment(skb_frag_page(f),
1641 f->page_offset, skb_frag_size(f),
1623 offset, len, skb, spd, 0, sk, pipe)) 1642 offset, len, skb, spd, 0, sk, pipe))
1624 return 1; 1643 return 1;
1625 } 1644 }
@@ -1729,7 +1748,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1729 1748
1730 WARN_ON(start > offset + len); 1749 WARN_ON(start > offset + len);
1731 1750
1732 end = start + frag->size; 1751 end = start + skb_frag_size(frag);
1733 if ((copy = end - offset) > 0) { 1752 if ((copy = end - offset) > 0) {
1734 u8 *vaddr; 1753 u8 *vaddr;
1735 1754
@@ -1802,7 +1821,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1802 1821
1803 WARN_ON(start > offset + len); 1822 WARN_ON(start > offset + len);
1804 1823
1805 end = start + skb_shinfo(skb)->frags[i].size; 1824 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1806 if ((copy = end - offset) > 0) { 1825 if ((copy = end - offset) > 0) {
1807 __wsum csum2; 1826 __wsum csum2;
1808 u8 *vaddr; 1827 u8 *vaddr;
@@ -1877,7 +1896,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1877 1896
1878 WARN_ON(start > offset + len); 1897 WARN_ON(start > offset + len);
1879 1898
1880 end = start + skb_shinfo(skb)->frags[i].size; 1899 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1881 if ((copy = end - offset) > 0) { 1900 if ((copy = end - offset) > 0) {
1882 __wsum csum2; 1901 __wsum csum2;
1883 u8 *vaddr; 1902 u8 *vaddr;
@@ -2150,7 +2169,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
2150 skb->data_len = len - pos; 2169 skb->data_len = len - pos;
2151 2170
2152 for (i = 0; i < nfrags; i++) { 2171 for (i = 0; i < nfrags; i++) {
2153 int size = skb_shinfo(skb)->frags[i].size; 2172 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2154 2173
2155 if (pos + size > len) { 2174 if (pos + size > len) {
2156 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2175 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2183,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
2164 * where splitting is expensive. 2183 * where splitting is expensive.
2165 * 2. Split is accurately. We make this. 2184 * 2. Split is accurately. We make this.
2166 */ 2185 */
2167 get_page(skb_shinfo(skb)->frags[i].page); 2186 skb_frag_ref(skb, i);
2168 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2187 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2169 skb_shinfo(skb1)->frags[0].size -= len - pos; 2188 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2170 skb_shinfo(skb)->frags[i].size = len - pos; 2189 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2171 skb_shinfo(skb)->nr_frags++; 2190 skb_shinfo(skb)->nr_frags++;
2172 } 2191 }
2173 k++; 2192 k++;
@@ -2239,12 +2258,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2239 * commit all, so that we don't have to undo partial changes 2258 * commit all, so that we don't have to undo partial changes
2240 */ 2259 */
2241 if (!to || 2260 if (!to ||
2242 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) { 2261 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2262 fragfrom->page_offset)) {
2243 merge = -1; 2263 merge = -1;
2244 } else { 2264 } else {
2245 merge = to - 1; 2265 merge = to - 1;
2246 2266
2247 todo -= fragfrom->size; 2267 todo -= skb_frag_size(fragfrom);
2248 if (todo < 0) { 2268 if (todo < 0) {
2249 if (skb_prepare_for_shift(skb) || 2269 if (skb_prepare_for_shift(skb) ||
2250 skb_prepare_for_shift(tgt)) 2270 skb_prepare_for_shift(tgt))
@@ -2254,8 +2274,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2254 fragfrom = &skb_shinfo(skb)->frags[from]; 2274 fragfrom = &skb_shinfo(skb)->frags[from];
2255 fragto = &skb_shinfo(tgt)->frags[merge]; 2275 fragto = &skb_shinfo(tgt)->frags[merge];
2256 2276
2257 fragto->size += shiftlen; 2277 skb_frag_size_add(fragto, shiftlen);
2258 fragfrom->size -= shiftlen; 2278 skb_frag_size_sub(fragfrom, shiftlen);
2259 fragfrom->page_offset += shiftlen; 2279 fragfrom->page_offset += shiftlen;
2260 2280
2261 goto onlymerged; 2281 goto onlymerged;
@@ -2279,20 +2299,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2279 fragfrom = &skb_shinfo(skb)->frags[from]; 2299 fragfrom = &skb_shinfo(skb)->frags[from];
2280 fragto = &skb_shinfo(tgt)->frags[to]; 2300 fragto = &skb_shinfo(tgt)->frags[to];
2281 2301
2282 if (todo >= fragfrom->size) { 2302 if (todo >= skb_frag_size(fragfrom)) {
2283 *fragto = *fragfrom; 2303 *fragto = *fragfrom;
2284 todo -= fragfrom->size; 2304 todo -= skb_frag_size(fragfrom);
2285 from++; 2305 from++;
2286 to++; 2306 to++;
2287 2307
2288 } else { 2308 } else {
2289 get_page(fragfrom->page); 2309 __skb_frag_ref(fragfrom);
2290 fragto->page = fragfrom->page; 2310 fragto->page = fragfrom->page;
2291 fragto->page_offset = fragfrom->page_offset; 2311 fragto->page_offset = fragfrom->page_offset;
2292 fragto->size = todo; 2312 skb_frag_size_set(fragto, todo);
2293 2313
2294 fragfrom->page_offset += todo; 2314 fragfrom->page_offset += todo;
2295 fragfrom->size -= todo; 2315 skb_frag_size_sub(fragfrom, todo);
2296 todo = 0; 2316 todo = 0;
2297 2317
2298 to++; 2318 to++;
@@ -2307,8 +2327,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2307 fragfrom = &skb_shinfo(skb)->frags[0]; 2327 fragfrom = &skb_shinfo(skb)->frags[0];
2308 fragto = &skb_shinfo(tgt)->frags[merge]; 2328 fragto = &skb_shinfo(tgt)->frags[merge];
2309 2329
2310 fragto->size += fragfrom->size; 2330 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2311 put_page(fragfrom->page); 2331 __skb_frag_unref(fragfrom);
2312 } 2332 }
2313 2333
2314 /* Reposition in the original skb */ 2334 /* Reposition in the original skb */
@@ -2405,7 +2425,7 @@ next_skb:
2405 2425
2406 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2426 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2407 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2427 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2408 block_limit = frag->size + st->stepped_offset; 2428 block_limit = skb_frag_size(frag) + st->stepped_offset;
2409 2429
2410 if (abs_offset < block_limit) { 2430 if (abs_offset < block_limit) {
2411 if (!st->frag_data) 2431 if (!st->frag_data)
@@ -2423,7 +2443,7 @@ next_skb:
2423 } 2443 }
2424 2444
2425 st->frag_idx++; 2445 st->frag_idx++;
2426 st->stepped_offset += frag->size; 2446 st->stepped_offset += skb_frag_size(frag);
2427 } 2447 }
2428 2448
2429 if (st->frag_data) { 2449 if (st->frag_data) {
@@ -2553,14 +2573,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2553 left = PAGE_SIZE - frag->page_offset; 2573 left = PAGE_SIZE - frag->page_offset;
2554 copy = (length > left)? left : length; 2574 copy = (length > left)? left : length;
2555 2575
2556 ret = getfrag(from, (page_address(frag->page) + 2576 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2557 frag->page_offset + frag->size),
2558 offset, copy, 0, skb); 2577 offset, copy, 0, skb);
2559 if (ret < 0) 2578 if (ret < 0)
2560 return -EFAULT; 2579 return -EFAULT;
2561 2580
2562 /* copy was successful so update the size parameters */ 2581 /* copy was successful so update the size parameters */
2563 frag->size += copy; 2582 skb_frag_size_add(frag, copy);
2564 skb->len += copy; 2583 skb->len += copy;
2565 skb->data_len += copy; 2584 skb->data_len += copy;
2566 offset += copy; 2585 offset += copy;
@@ -2706,12 +2725,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
2706 2725
2707 while (pos < offset + len && i < nfrags) { 2726 while (pos < offset + len && i < nfrags) {
2708 *frag = skb_shinfo(skb)->frags[i]; 2727 *frag = skb_shinfo(skb)->frags[i];
2709 get_page(frag->page); 2728 __skb_frag_ref(frag);
2710 size = frag->size; 2729 size = skb_frag_size(frag);
2711 2730
2712 if (pos < offset) { 2731 if (pos < offset) {
2713 frag->page_offset += offset - pos; 2732 frag->page_offset += offset - pos;
2714 frag->size -= offset - pos; 2733 skb_frag_size_sub(frag, offset - pos);
2715 } 2734 }
2716 2735
2717 skb_shinfo(nskb)->nr_frags++; 2736 skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2739,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
2720 i++; 2739 i++;
2721 pos += size; 2740 pos += size;
2722 } else { 2741 } else {
2723 frag->size -= pos + size - (offset + len); 2742 skb_frag_size_sub(frag, pos + size - (offset + len));
2724 goto skip_fraglist; 2743 goto skip_fraglist;
2725 } 2744 }
2726 2745
@@ -2800,7 +2819,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2800 } while (--i); 2819 } while (--i);
2801 2820
2802 frag->page_offset += offset; 2821 frag->page_offset += offset;
2803 frag->size -= offset; 2822 skb_frag_size_sub(frag, offset);
2804 2823
2805 skb->truesize -= skb->data_len; 2824 skb->truesize -= skb->data_len;
2806 skb->len -= skb->data_len; 2825 skb->len -= skb->data_len;
@@ -2852,7 +2871,7 @@ merge:
2852 unsigned int eat = offset - headlen; 2871 unsigned int eat = offset - headlen;
2853 2872
2854 skbinfo->frags[0].page_offset += eat; 2873 skbinfo->frags[0].page_offset += eat;
2855 skbinfo->frags[0].size -= eat; 2874 skb_frag_size_sub(&skbinfo->frags[0], eat);
2856 skb->data_len -= eat; 2875 skb->data_len -= eat;
2857 skb->len -= eat; 2876 skb->len -= eat;
2858 offset = headlen; 2877 offset = headlen;
@@ -2923,13 +2942,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2923 2942
2924 WARN_ON(start > offset + len); 2943 WARN_ON(start > offset + len);
2925 2944
2926 end = start + skb_shinfo(skb)->frags[i].size; 2945 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2927 if ((copy = end - offset) > 0) { 2946 if ((copy = end - offset) > 0) {
2928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2947 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2929 2948
2930 if (copy > len) 2949 if (copy > len)
2931 copy = len; 2950 copy = len;
2932 sg_set_page(&sg[elt], frag->page, copy, 2951 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
2933 frag->page_offset+offset-start); 2952 frag->page_offset+offset-start);
2934 elt++; 2953 elt++;
2935 if (!(len -= copy)) 2954 if (!(len -= copy))
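Every frags[i].size access in this file now goes through the size accessors introduced alongside skb_frag_page(); at this point in the tree they are thin wrappers:

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

The indirection buys the same thing as skb_frag_page(): the field can change representation later without another tree-wide sweep.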
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d00ea4d..4ed7b1d12f5e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -207,7 +207,7 @@ static struct lock_class_key af_callback_keys[AF_MAX];
207 * not depend upon such differences. 207 * not depend upon such differences.
208 */ 208 */
209#define _SK_MEM_PACKETS 256 209#define _SK_MEM_PACKETS 256
210#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256) 210#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
211#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 211#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
212#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) 212#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
213 213
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
387 387
388 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 388 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
389 sk_tx_queue_clear(sk); 389 sk_tx_queue_clear(sk);
390 rcu_assign_pointer(sk->sk_dst_cache, NULL); 390 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
391 dst_release(dst); 391 dst_release(dst);
392 return NULL; 392 return NULL;
393 } 393 }
@@ -738,10 +738,7 @@ set_rcvbuf:
738 /* We implement the SO_SNDLOWAT etc to 738 /* We implement the SO_SNDLOWAT etc to
739 not be settable (1003.1g 5.3) */ 739 not be settable (1003.1g 5.3) */
740 case SO_RXQ_OVFL: 740 case SO_RXQ_OVFL:
741 if (valbool) 741 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
742 sock_set_flag(sk, SOCK_RXQ_OVFL);
743 else
744 sock_reset_flag(sk, SOCK_RXQ_OVFL);
745 break; 742 break;
746 default: 743 default:
747 ret = -ENOPROTOOPT; 744 ret = -ENOPROTOOPT;
@@ -1158,7 +1155,7 @@ static void __sk_free(struct sock *sk)
1158 atomic_read(&sk->sk_wmem_alloc) == 0); 1155 atomic_read(&sk->sk_wmem_alloc) == 0);
1159 if (filter) { 1156 if (filter) {
1160 sk_filter_uncharge(sk, filter); 1157 sk_filter_uncharge(sk, filter);
1161 rcu_assign_pointer(sk->sk_filter, NULL); 1158 RCU_INIT_POINTER(sk->sk_filter, NULL);
1162 } 1159 }
1163 1160
1164 sock_disable_timestamp(sk, SOCK_TIMESTAMP); 1161 sock_disable_timestamp(sk, SOCK_TIMESTAMP);
@@ -1260,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1260 /* It is still raw copy of parent, so invalidate 1257 /* It is still raw copy of parent, so invalidate
1261 * destructor and make plain sk_free() */ 1258 * destructor and make plain sk_free() */
1262 newsk->sk_destruct = NULL; 1259 newsk->sk_destruct = NULL;
1260 bh_unlock_sock(newsk);
1263 sk_free(newsk); 1261 sk_free(newsk);
1264 newsk = NULL; 1262 newsk = NULL;
1265 goto out; 1263 goto out;
@@ -1533,7 +1531,6 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1533 skb_shinfo(skb)->nr_frags = npages; 1531 skb_shinfo(skb)->nr_frags = npages;
1534 for (i = 0; i < npages; i++) { 1532 for (i = 0; i < npages; i++) {
1535 struct page *page; 1533 struct page *page;
1536 skb_frag_t *frag;
1537 1534
1538 page = alloc_pages(sk->sk_allocation, 0); 1535 page = alloc_pages(sk->sk_allocation, 0);
1539 if (!page) { 1536 if (!page) {
@@ -1543,12 +1540,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1543 goto failure; 1540 goto failure;
1544 } 1541 }
1545 1542
1546 frag = &skb_shinfo(skb)->frags[i]; 1543 __skb_fill_page_desc(skb, i,
1547 frag->page = page; 1544 page, 0,
1548 frag->page_offset = 0; 1545 (data_len >= PAGE_SIZE ?
1549 frag->size = (data_len >= PAGE_SIZE ? 1546 PAGE_SIZE :
1550 PAGE_SIZE : 1547 data_len));
1551 data_len);
1552 data_len -= PAGE_SIZE; 1548 data_len -= PAGE_SIZE;
1553 } 1549 }
1554 1550
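__skb_fill_page_desc(), now used in sock_alloc_send_pskb() in place of three open-coded member stores, fills frag slot i without taking a page reference, which is correct here because the reference obtained from alloc_pages() transfers to the skb. Roughly, per include/linux/skbuff.h at this point (same layout caveat as the earlier skb_frag_page() sketch):

static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}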
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 98a52640e7cd..661b5a40ec10 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -21,6 +21,7 @@
21#include <linux/phy.h> 21#include <linux/phy.h>
22#include <linux/ptp_classify.h> 22#include <linux/ptp_classify.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/export.h>
24 25
25static struct sock_filter ptp_filter[] = { 26static struct sock_filter ptp_filter[] = {
26 PTP_FILTER 27 PTP_FILTER
@@ -57,9 +58,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
57 case PTP_CLASS_V2_VLAN: 58 case PTP_CLASS_V2_VLAN:
58 phydev = skb->dev->phydev; 59 phydev = skb->dev->phydev;
59 if (likely(phydev->drv->txtstamp)) { 60 if (likely(phydev->drv->txtstamp)) {
61 if (!atomic_inc_not_zero(&sk->sk_refcnt))
62 return;
60 clone = skb_clone(skb, GFP_ATOMIC); 63 clone = skb_clone(skb, GFP_ATOMIC);
61 if (!clone) 64 if (!clone) {
65 sock_put(sk);
62 return; 66 return;
67 }
63 clone->sk = sk; 68 clone->sk = sk;
64 phydev->drv->txtstamp(phydev, clone, type); 69 phydev->drv->txtstamp(phydev, clone, type);
65 } 70 }
@@ -77,8 +82,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
77 struct sock_exterr_skb *serr; 82 struct sock_exterr_skb *serr;
78 int err; 83 int err;
79 84
80 if (!hwtstamps) 85 if (!hwtstamps) {
86 sock_put(sk);
87 kfree_skb(skb);
81 return; 88 return;
89 }
82 90
83 *skb_hwtstamps(skb) = *hwtstamps; 91 *skb_hwtstamps(skb) = *hwtstamps;
84 serr = SKB_EXT_ERR(skb); 92 serr = SKB_EXT_ERR(skb);
@@ -87,6 +95,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
87 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 95 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
88 skb->sk = NULL; 96 skb->sk = NULL;
89 err = sock_queue_err_skb(sk, skb); 97 err = sock_queue_err_skb(sk, skb);
98 sock_put(sk);
90 if (err) 99 if (err)
91 kfree_skb(skb); 100 kfree_skb(skb);
92} 101}
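The timestamping hunks are a try-get fix: the clone borrows skb->sk, so a reference has to be taken before the clone exists and released on every path that stops using it, including the early-error ones. The ownership rule, as a sketch:

/*
 *	if (!atomic_inc_not_zero(&sk->sk_refcnt))
 *		return;			socket already being torn down
 *	clone = skb_clone(skb, GFP_ATOMIC);
 *	if (!clone) {
 *		sock_put(sk);		undo the reference
 *		return;
 *	}
 *	clone->sk = sk;			the reference rides with the clone
 *	...
 *	sock_put(sk);			completion path drops it again
 */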
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 25d717ebc92e..1b5fefdb8198 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/dmaengine.h> 28#include <linux/dmaengine.h>
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/export.h>
30#include <net/tcp.h> 31#include <net/tcp.h>
31#include <net/netdma.h> 32#include <net/netdma.h>
32 33
@@ -71,14 +72,14 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
71 /* Copy paged appendix. Hmm... why does this look so complicated? */ 72 /* Copy paged appendix. Hmm... why does this look so complicated? */
72 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 73 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
73 int end; 74 int end;
75 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
74 76
75 WARN_ON(start > offset + len); 77 WARN_ON(start > offset + len);
76 78
77 end = start + skb_shinfo(skb)->frags[i].size; 79 end = start + skb_frag_size(frag);
78 copy = end - offset; 80 copy = end - offset;
79 if (copy > 0) { 81 if (copy > 0) {
80 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 82 struct page *page = skb_frag_page(frag);
81 struct page *page = frag->page;
82 83
83 if (copy > len) 84 if (copy > len)
84 copy = len; 85 copy = len;
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
index 665a8802105a..1d9eb7c60a68 100644
--- a/net/dcb/dcbevent.c
+++ b/net/dcb/dcbevent.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/notifier.h> 21#include <linux/notifier.h>
22#include <linux/export.h>
22 23
23static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain); 24static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
24 25
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3cb56af4e13c..d86053002c16 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -25,6 +25,7 @@
25#include <linux/dcbnl.h> 25#include <linux/dcbnl.h>
26#include <net/dcbevent.h> 26#include <net/dcbevent.h>
27#include <linux/rtnetlink.h> 27#include <linux/rtnetlink.h>
28#include <linux/module.h>
28#include <net/sock.h> 29#include <net/sock.h>
29 30
30/** 31/**
@@ -1255,7 +1256,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1255 1256
1256 spin_lock(&dcb_lock); 1257 spin_lock(&dcb_lock);
1257 list_for_each_entry(itr, &dcb_app_list, list) { 1258 list_for_each_entry(itr, &dcb_app_list, list) {
1258 if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) { 1259 if (itr->ifindex == netdev->ifindex) {
1259 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), 1260 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1260 &itr->app); 1261 &itr->app);
1261 if (err) { 1262 if (err) {
@@ -1412,7 +1413,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1412 goto dcb_unlock; 1413 goto dcb_unlock;
1413 1414
1414 list_for_each_entry(itr, &dcb_app_list, list) { 1415 list_for_each_entry(itr, &dcb_app_list, list) {
1415 if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) { 1416 if (itr->ifindex == netdev->ifindex) {
1416 struct nlattr *app_nest = nla_nest_start(skb, 1417 struct nlattr *app_nest = nla_nest_start(skb,
1417 DCB_ATTR_APP); 1418 DCB_ATTR_APP);
1418 if (!app_nest) 1419 if (!app_nest)
@@ -2050,7 +2051,7 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2050 list_for_each_entry(itr, &dcb_app_list, list) { 2051 list_for_each_entry(itr, &dcb_app_list, list) {
2051 if (itr->app.selector == app->selector && 2052 if (itr->app.selector == app->selector &&
2052 itr->app.protocol == app->protocol && 2053 itr->app.protocol == app->protocol &&
2053 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2054 itr->ifindex == dev->ifindex) {
2054 prio = itr->app.priority; 2055 prio = itr->app.priority;
2055 break; 2056 break;
2056 } 2057 }
@@ -2073,15 +2074,17 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2073 struct dcb_app_type *itr; 2074 struct dcb_app_type *itr;
2074 struct dcb_app_type event; 2075 struct dcb_app_type event;
2075 2076
2076 memcpy(&event.name, dev->name, sizeof(event.name)); 2077 event.ifindex = dev->ifindex;
2077 memcpy(&event.app, new, sizeof(event.app)); 2078 memcpy(&event.app, new, sizeof(event.app));
2079 if (dev->dcbnl_ops->getdcbx)
2080 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2078 2081
2079 spin_lock(&dcb_lock); 2082 spin_lock(&dcb_lock);
2080 /* Search for existing match and replace */ 2083 /* Search for existing match and replace */
2081 list_for_each_entry(itr, &dcb_app_list, list) { 2084 list_for_each_entry(itr, &dcb_app_list, list) {
2082 if (itr->app.selector == new->selector && 2085 if (itr->app.selector == new->selector &&
2083 itr->app.protocol == new->protocol && 2086 itr->app.protocol == new->protocol &&
2084 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2087 itr->ifindex == dev->ifindex) {
2085 if (new->priority) 2088 if (new->priority)
2086 itr->app.priority = new->priority; 2089 itr->app.priority = new->priority;
2087 else { 2090 else {
@@ -2101,7 +2104,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2101 } 2104 }
2102 2105
2103 memcpy(&entry->app, new, sizeof(*new)); 2106 memcpy(&entry->app, new, sizeof(*new));
2104 strncpy(entry->name, dev->name, IFNAMSIZ); 2107 entry->ifindex = dev->ifindex;
2105 list_add(&entry->list, &dcb_app_list); 2108 list_add(&entry->list, &dcb_app_list);
2106 } 2109 }
2107out: 2110out:
@@ -2127,7 +2130,7 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2127 list_for_each_entry(itr, &dcb_app_list, list) { 2130 list_for_each_entry(itr, &dcb_app_list, list) {
2128 if (itr->app.selector == app->selector && 2131 if (itr->app.selector == app->selector &&
2129 itr->app.protocol == app->protocol && 2132 itr->app.protocol == app->protocol &&
2130 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2133 itr->ifindex == dev->ifindex) {
2131 prio |= 1 << itr->app.priority; 2134 prio |= 1 << itr->app.priority;
2132 } 2135 }
2133 } 2136 }
@@ -2150,8 +2153,10 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2150 struct dcb_app_type event; 2153 struct dcb_app_type event;
2151 int err = 0; 2154 int err = 0;
2152 2155
2153 memcpy(&event.name, dev->name, sizeof(event.name)); 2156 event.ifindex = dev->ifindex;
2154 memcpy(&event.app, new, sizeof(event.app)); 2157 memcpy(&event.app, new, sizeof(event.app));
2158 if (dev->dcbnl_ops->getdcbx)
2159 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2155 2160
2156 spin_lock(&dcb_lock); 2161 spin_lock(&dcb_lock);
2157 /* Search for existing match and abort if found */ 2162 /* Search for existing match and abort if found */
@@ -2159,7 +2164,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2159 if (itr->app.selector == new->selector && 2164 if (itr->app.selector == new->selector &&
2160 itr->app.protocol == new->protocol && 2165 itr->app.protocol == new->protocol &&
2161 itr->app.priority == new->priority && 2166 itr->app.priority == new->priority &&
2162 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2167 itr->ifindex == dev->ifindex) {
2163 err = -EEXIST; 2168 err = -EEXIST;
2164 goto out; 2169 goto out;
2165 } 2170 }
@@ -2173,7 +2178,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2173 } 2178 }
2174 2179
2175 memcpy(&entry->app, new, sizeof(*new)); 2180 memcpy(&entry->app, new, sizeof(*new));
2176 strncpy(entry->name, dev->name, IFNAMSIZ); 2181 entry->ifindex = dev->ifindex;
2177 list_add(&entry->list, &dcb_app_list); 2182 list_add(&entry->list, &dcb_app_list);
2178out: 2183out:
2179 spin_unlock(&dcb_lock); 2184 spin_unlock(&dcb_lock);
@@ -2194,8 +2199,10 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2194 struct dcb_app_type event; 2199 struct dcb_app_type event;
2195 int err = -ENOENT; 2200 int err = -ENOENT;
2196 2201
2197 memcpy(&event.name, dev->name, sizeof(event.name)); 2202 event.ifindex = dev->ifindex;
2198 memcpy(&event.app, del, sizeof(event.app)); 2203 memcpy(&event.app, del, sizeof(event.app));
2204 if (dev->dcbnl_ops->getdcbx)
2205 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2199 2206
2200 spin_lock(&dcb_lock); 2207 spin_lock(&dcb_lock);
2201 /* Search for existing match and remove it. */ 2208 /* Search for existing match and remove it. */
@@ -2203,7 +2210,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2203 if (itr->app.selector == del->selector && 2210 if (itr->app.selector == del->selector &&
2204 itr->app.protocol == del->protocol && 2211 itr->app.protocol == del->protocol &&
2205 itr->app.priority == del->priority && 2212 itr->app.priority == del->priority &&
2206 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 2213 itr->ifindex == dev->ifindex) {
2207 list_del(&itr->list); 2214 list_del(&itr->list);
2208 kfree(itr); 2215 kfree(itr);
2209 err = 0; 2216 err = 0;
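Keying dcb_app_list entries by ifindex instead of by name lets them survive interface renames: an entry recorded while the device was called eth0 still matches after ip link set eth0 name lan0, because the ifindex stays with the device. As a before/after sketch:

/* old: breaks if the device is renamed while the entry is listed */
if (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)

/* new: stable for the lifetime of the device */
if (itr->ifindex == dev->ifindex)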
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 25b7a8d1ad58..ba07824af4c0 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -12,6 +12,7 @@
12#include "dccp.h" 12#include "dccp.h"
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/export.h>
15 16
16static struct kmem_cache *dccp_ackvec_slab; 17static struct kmem_cache *dccp_ackvec_slab;
17static struct kmem_cache *dccp_ackvec_record_slab; 18static struct kmem_cache *dccp_ackvec_record_slab;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040fc818..67164bb6ae4d 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
85 85
86static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) 86static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
87{ 87{
88 struct dccp_sock *dp = dccp_sk(sk);
89 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); 88 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
90 89
91 /* 90 /*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
98 DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); 97 DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
99 val = max_ratio; 98 val = max_ratio;
100 } 99 }
-        if (val > DCCPF_ACK_RATIO_MAX)
-                val = DCCPF_ACK_RATIO_MAX;
+        dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+                                   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
 
-        if (val == dp->dccps_l_ack_ratio)
-                return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 
-        ccid2_pr_debug("changing local ack ratio to %u\n", val);
-        dp->dccps_l_ack_ratio = val;
+        /*
+         * After a loss, idle period, application limited period, or RTO we
+         * need to check that the ack ratio is still less than the congestion
+         * window. Otherwise, we will send an entire congestion window of
+         * packets and get no response because we haven't sent ack ratio
+         * packets yet.
+         * If the ack ratio does need to be reduced, we reduce it to half of
+         * the congestion window (or 1 if that's zero) instead of to the
+         * congestion window. This prevents problems if one ack is lost.
+         */
+        if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+                ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+        dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+                                   clamp_val(val, DCCPF_SEQ_WMIN,
+                                                  DCCPF_SEQ_WMAX));
 }
 
 static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
 	}
 	hc->tx_cwnd_used  = 0;
 	hc->tx_cwnd_stamp = now;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 /* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used  = 0;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
 			  unsigned int *maxincr)
 {
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
-	if (hc->tx_cwnd < hc->tx_ssthresh) {
-		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
+	struct dccp_sock *dp = dccp_sk(sk);
+	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
+
+	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+	    r_seq_used < dp->dccps_r_seq_win) {
+		if (hc->tx_cwnd < hc->tx_ssthresh) {
+			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+				hc->tx_cwnd += 1;
+				*maxincr    -= 1;
+				hc->tx_packets_acked = 0;
+			}
+		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
 			hc->tx_cwnd += 1;
-			*maxincr    -= 1;
 			hc->tx_packets_acked = 0;
 		}
-	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
-		hc->tx_cwnd += 1;
-		hc->tx_packets_acked = 0;
 	}
+
+	/*
+	 * Adjust the local sequence window and the ack ratio to allow about
+	 * 5 times the number of packets in the network (RFC 4340 7.5.2)
+	 */
+	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
 	/*
 	 * FIXME: RTT is sampled several times per acknowledgment (for each
 	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
 	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
 
-	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
-	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
-		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		if (hc->tx_rpdupack >= NUMDUPACK) {
 			hc->tx_rpdupack = -1; /* XXX lame */
 			hc->tx_rpseq    = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+			/*
+			 * FIXME: Ack Congestion Control is broken; in
+			 * the current state instabilities occurred with
+			 * Ack Ratios greater than 1, causing hang-ups
+			 * and long RTO timeouts. This needs to be fixed
+			 * before opening up dynamic changes. -- gerrit
+			 */
 			ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
 		}
 	}
 }
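
The new ccid2_check_l_ack_ratio() restores the invariant that the Ack Ratio never exceeds the congestion window after a loss, an idle or application-limited period, or an RTO. A minimal userspace sketch of the clamping arithmetic (the function name and the test values are hypothetical; `x/2 ?: 1U` in the kernel code is the GNU `?:` shorthand for the same floor-at-one division):

/* Sketch, not kernel code: clamp an Ack Ratio against a shrunken cwnd. */
#include <stdio.h>

static unsigned int clamp_ack_ratio(unsigned int ack_ratio, unsigned int cwnd)
{
        if (ack_ratio > cwnd)                    /* invariant violated */
                return cwnd / 2 ? cwnd / 2 : 1U; /* half the window, min 1 */
        return ack_ratio;
}

int main(void)
{
        printf("%u\n", clamp_ack_ratio(4, 2)); /* 1: reduced to 2/2        */
        printf("%u\n", clamp_ack_ratio(4, 1)); /* 1: 1/2 == 0, floored at 1 */
        printf("%u\n", clamp_ack_ratio(2, 8)); /* 2: already <= cwnd       */
        return 0;
}
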
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index f585d330e1e5..18c97543e522 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
 #define CCID2_SEQBUF_LEN 1024
 #define CCID2_SEQBUF_MAX  128
 
+/*
+ * Multiple of congestion window to keep the sequence window at
+ * (RFC 4340 7.5.2)
+ */
+#define CCID2_WIN_CHANGE_FACTOR 5
+
 /**
  * struct ccid2_hc_tx_sock - CCID2 TX half connection
  * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
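
CCID2_WIN_CHANGE_FACTOR is the constant behind the doubling/halving logic added to ccid2_new_ack(): the sequence window is kept at roughly five times the packets in flight, per RFC 4340 7.5.2. A standalone sketch of that decision (names and sample values are hypothetical):

/* Sketch, not kernel code: adapt the sequence window to the cwnd. */
#include <stdint.h>
#include <stdio.h>

#define CCID2_WIN_CHANGE_FACTOR 5

static uint64_t adapt_seq_window(uint64_t seq_win, uint64_t cwnd)
{
        if (cwnd * CCID2_WIN_CHANGE_FACTOR >= seq_win)
                return seq_win * 2;     /* window too small: double it */
        else if (cwnd * CCID2_WIN_CHANGE_FACTOR < seq_win / 2)
                return seq_win / 2;     /* window oversized: halve it  */
        return seq_win;                 /* within bounds: keep it      */
}

int main(void)
{
        /* cwnd = 30 packets: 5 * 30 = 150 >= 100, so the window doubles */
        printf("%llu\n", (unsigned long long)adapt_seq_window(100, 30));
        return 0;
}
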
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 4902029854d8..1f94b7e01d39 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -4,6 +4,7 @@
  * Copyright (c) 2007   The University of Aberdeen, Scotland, UK
  * Copyright (c) 2007   Arnaldo Carvalho de Melo <acme@redhat.com>
  */
+#include <linux/moduleparam.h>
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb07229017..583490aaf56f 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
 	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
+extern int  dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
 extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
 extern int  dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
 extern int  dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def952722..23cea0ee3101 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
  * -----------
  * o Feature negotiation is coordinated with connection setup (as in TCP), wild
  *   changes of parameters of an established connection are not supported.
+ * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
  * o All currently known SP features have 1-byte quantities. If in the future
  *   extensions of RFCs 4340..42 define features with item lengths larger than
  *   one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
 	return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
 }
 
+/**
+ * dccp_feat_activate - Activate feature value on socket
+ * @sk: fully connected DCCP socket (after handshake is complete)
+ * @feat_num: feature to activate, one of %dccp_feature_numbers
+ * @local: whether local (1) or remote (0) @feat_num is meant
+ * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ * For general use this function is preferable over __dccp_feat_activate().
+ */
+static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
+                              dccp_feat_val const *fval)
+{
+        return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
+}
+
 /* Test for "Req'd" feature (RFC 4340, 6.4) */
 static inline int dccp_feat_must_be_understood(u8 feat_num)
 {
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
 			return -1;
 		if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
 			return -1;
-		/*
-		 * Enter CHANGING after transmitting the Change option (6.6.2).
-		 */
-		if (pos->state == FEAT_INITIALISING)
-			pos->state = FEAT_CHANGING;
+
+		if (skb->sk->sk_state == DCCP_OPEN &&
+		    (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
+			/*
+			 * Confirms don't get retransmitted (6.6.3) once the
+			 * connection is in state OPEN
+			 */
+			dccp_feat_list_pop(pos);
+		} else {
+			/*
+			 * Enter CHANGING after transmitting the Change
+			 * option (6.6.2).
+			 */
+			if (pos->state == FEAT_INITIALISING)
+				pos->state = FEAT_CHANGING;
+		}
 	}
 	return 0;
 }
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
 			     0, list, len);
 }
 
+/**
+ * dccp_feat_nn_get - Query current/pending value of NN feature
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * For a known NN feature, returns value currently being negotiated, or
+ * current (confirmed) value if no negotiation is going on.
+ */
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
+{
+        if (dccp_feat_type(feat) == FEAT_NN) {
+                struct dccp_sock *dp = dccp_sk(sk);
+                struct dccp_feat_entry *entry;
+
+                entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
+                if (entry != NULL)
+                        return entry->val.nn;
+
+                switch (feat) {
+                case DCCPF_ACK_RATIO:
+                        return dp->dccps_l_ack_ratio;
+                case DCCPF_SEQUENCE_WINDOW:
+                        return dp->dccps_l_seq_win;
+                }
+        }
+        DCCP_BUG("attempt to look up unsupported feature %u", feat);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
+
+/**
+ * dccp_feat_signal_nn_change - Update NN values for an established connection
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * @nn_val: the new value to use
+ * This function is used to communicate NN updates out-of-band.
+ */
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
+{
+        struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+        dccp_feat_val fval = { .nn = nn_val };
+        struct dccp_feat_entry *entry;
+
+        if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
+                return 0;
+
+        if (dccp_feat_type(feat) != FEAT_NN ||
+            !dccp_feat_is_valid_nn_val(feat, nn_val))
+                return -EINVAL;
+
+        if (nn_val == dccp_feat_nn_get(sk, feat))
+                return 0;       /* already set or negotiation under way */
+
+        entry = dccp_feat_list_lookup(fn, feat, 1);
+        if (entry != NULL) {
+                dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
+                              (unsigned long long)entry->val.nn,
+                              (unsigned long long)nn_val);
+                dccp_feat_list_pop(entry);
+        }
+
+        inet_csk_schedule_ack(sk);
+        return dccp_feat_push_change(fn, feat, 1, 0, &fval);
+}
+EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
 
 /*
  * Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
 }
 
 /**
+ * dccp_feat_handle_nn_established - Fast-path reception of NN options
+ * @sk: socket of an established DCCP connection
+ * @mandatory: whether @opt was preceded by a Mandatory option
+ * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
+ * @feat: NN number, one of %dccp_feature_numbers
+ * @val: NN value
+ * @len: length of @val in bytes
+ * This function combines the functionality of change_recv/confirm_recv, with
+ * the following differences (reset codes are the same):
+ *  - cleanup after receiving the Confirm;
+ *  - values are directly activated after successful parsing;
+ *  - deliberately restricted to NN features.
+ * The restriction to NN features is essential since SP features can have non-
+ * predictable outcomes (depending on the remote configuration), and are inter-
+ * dependent (CCIDs for instance cause further dependencies).
+ */
+static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
+                                          u8 feat, u8 *val, u8 len)
+{
+        struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+        const bool local = (opt == DCCPO_CONFIRM_R);
+        struct dccp_feat_entry *entry;
+        u8 type = dccp_feat_type(feat);
+        dccp_feat_val fval;
+
+        dccp_feat_print_opt(opt, feat, val, len, mandatory);
+
+        /* Ignore non-mandatory unknown and non-NN features */
+        if (type == FEAT_UNKNOWN) {
+                if (local && !mandatory)
+                        return 0;
+                goto fast_path_unknown;
+        } else if (type != FEAT_NN) {
+                return 0;
+        }
+
+        /*
+         * We don't accept empty Confirms, since in fast-path feature
+         * negotiation the values are enabled immediately after sending
+         * the Change option.
+         * Empty Changes on the other hand are invalid (RFC 4340, 6.1).
+         */
+        if (len == 0 || len > sizeof(fval.nn))
+                goto fast_path_unknown;
+
+        if (opt == DCCPO_CHANGE_L) {
+                fval.nn = dccp_decode_value_var(val, len);
+                if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
+                        goto fast_path_unknown;
+
+                if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
+                    dccp_feat_activate(sk, feat, local, &fval))
+                        return DCCP_RESET_CODE_TOO_BUSY;
+
+                /* set the `Ack Pending' flag to piggyback a Confirm */
+                inet_csk_schedule_ack(sk);
+
+        } else if (opt == DCCPO_CONFIRM_R) {
+                entry = dccp_feat_list_lookup(fn, feat, local);
+                if (entry == NULL || entry->state != FEAT_CHANGING)
+                        return 0;
+
+                fval.nn = dccp_decode_value_var(val, len);
+                /*
+                 * Just ignore a value that doesn't match our current value.
+                 * If the option changes twice within two RTTs, then at least
+                 * one CONFIRM will be received for the old value after a
+                 * new CHANGE was sent.
+                 */
+                if (fval.nn != entry->val.nn)
+                        return 0;
+
+                /* Only activate after receiving the Confirm option (6.6.1). */
+                dccp_feat_activate(sk, feat, local, &fval);
+
+                /* It has been confirmed - so remove the entry */
+                dccp_feat_list_pop(entry);
+
+        } else {
+                DCCP_WARN("Received illegal option %u\n", opt);
+                goto fast_path_failed;
+        }
+        return 0;
+
+fast_path_unknown:
+        if (!mandatory)
+                return dccp_push_empty_confirm(fn, feat, local);
+
+fast_path_failed:
+        return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
+                         : DCCP_RESET_CODE_OPTION_ERROR;
+}
+
+/**
  * dccp_feat_parse_options - Process Feature-Negotiation Options
  * @sk: for general use and used by the client during connection setup
  * @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
 			return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
 						      val, len, server);
 		}
+		break;
+	/*
+	 *	Support for exchanging NN options on an established connection.
+	 */
+	case DCCP_OPEN:
+	case DCCP_PARTOPEN:
+		return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
+						       val, len);
 	}
 	return 0;	/* ignore FN options in all other states */
 }
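
Taken together, dccp_feat_signal_nn_change() and dccp_feat_handle_nn_established() implement an out-of-band Change/Confirm exchange for NN features on a live connection. A hedged trace of one Ack Ratio renegotiation, reconstructed from the functions above rather than taken from any commit message:

/*
 * Host A (initiator)                      Host B (peer)
 * ------------------                      -------------
 * dccp_feat_signal_nn_change(sk,
 *         DCCPF_ACK_RATIO, 2)
 *   -> clobbers any pending entry,
 *      queues Change L(Ack Ratio, 2),
 *      schedules an Ack  ------------>    dccp_feat_handle_nn_established()
 *                                           -> validates the NN value,
 *                                              activates it immediately,
 *                                              queues Confirm R(Ack Ratio, 2)
 * dccp_feat_handle_nn_established() <----    (Confirms are not retransmitted
 *   -> activates only if the value           once the connection is OPEN;
 *      still matches the pending change,     see dccp_feat_insert_opts())
 *      then pops the negotiation entry
 */
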
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5e634e..90b957d34d26 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
 
 extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
 extern u64  dccp_decode_value_var(const u8 *bf, const u8 len);
+extern u64  dccp_feat_nn_get(struct sock *sk, u8 feat);
 
 extern int  dccp_insert_option_mandatory(struct sk_buff *skb);
 extern int  dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 332639b56f4d..90a919afbed7 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -433,6 +433,7 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	bh_unlock_sock(newsk);
 	sock_put(newsk);
 	goto exit;
 }
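
The one-line ipv4.c fix balances a lock: the child socket reaching put_and_exit is still bh-locked (sk_clone() hands back a locked socket), so dropping the last reference without unlocking would leak the lock. A fragment sketch of the pattern; setup_failed() is a hypothetical placeholder for the failure condition, not a kernel function:

/* Sketch, not kernel code: error paths must mirror the lock state. */
        newsk = sk_clone(sk, GFP_ATOMIC);       /* child comes back locked */
        if (newsk == NULL)
                return NULL;
        if (setup_failed(newsk)) {              /* hypothetical condition  */
                bh_unlock_sock(newsk);          /* the call this fix adds  */
                sock_put(newsk);
                return NULL;
        }
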
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b74f76117dcf..17ee85ce148d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -271,7 +271,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 							      &ireq6->loc_addr,
 							      &ireq6->rmt_addr);
 		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl6, opt);
+		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
 
@@ -326,7 +326,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
-		ip6_xmit(ctl_sk, skb, &fl6, NULL);
+		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 		return;
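
Both ipv6.c call sites track an interface change: ip6_xmit() now takes the traffic class explicitly instead of digging it out of the socket, so connected sockets pass np->tclass and stateless control packets (like this reset) pass 0. As far as can be inferred from the callers above, the signature at this point in the tree is:

/* ip6_xmit() with the added tclass parameter (inferred from the call sites) */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass);
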
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d942d9..e742f90a6858 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
 	dp->dccps_rate_last = jiffies;
 	dp->dccps_role = DCCP_ROLE_UNDEFINED;
 	dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
-	dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
 	dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
 
 	dccp_init_xmit_timers(sk);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 7587870b7040..16f0b223102e 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -12,6 +12,7 @@
 
 #include <linux/dccp.h>
 #include <linux/skbuff.h>
+#include <linux/export.h>
 
 #include "dccp.h"
 
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ba4faceec405..2ab16e12520c 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 	}
 
 	ifa->ifa_next = dn_db->ifa_list;
-	rcu_assign_pointer(dn_db->ifa_list, ifa);
+	RCU_INIT_POINTER(dn_db->ifa_list, ifa);
 
 	dn_ifaddr_notify(RTM_NEWADDR, ifa);
 	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
 	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
 
-	rcu_assign_pointer(dev->dn_ptr, dn_db);
+	RCU_INIT_POINTER(dev->dn_ptr, dn_db);
 	dn_db->dev = dev;
 	init_timer(&dn_db->timer);
 
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
 	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
 	if (!dn_db->neigh_parms) {
-		rcu_assign_pointer(dev->dn_ptr, NULL);
+		RCU_INIT_POINTER(dev->dn_ptr, NULL);
 		kfree(dn_db);
 		return NULL;
 	}
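
The three decnet conversions are part of a tree-wide sweep: rcu_assign_pointer() embeds a write barrier so a structure is fully initialized before it is published, while RCU_INIT_POINTER() skips the barrier and is the documented choice when storing NULL or when no reader can yet observe the pointer. A fragment sketch of the distinction; gp and the field are hypothetical names, not from this diff:

/* Sketch, not kernel code: publishing vs. initializing an RCU pointer. */
        p = kmalloc(sizeof(*p), GFP_KERNEL);
        p->val = 42;                    /* readers must never see this      */
        rcu_assign_pointer(gp, p);      /* half-done, so publication needs  */
                                        /* the write barrier                */

        RCU_INIT_POINTER(gp, NULL);     /* NULL (or a not-yet-visible ptr)  */
                                        /* carries nothing to order         */
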
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 43450c100226..a77d16158eb6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -77,6 +77,7 @@
 #include <linux/netfilter_decnet.h>
 #include <linux/rcupdate.h>
 #include <linux/times.h>
+#include <linux/export.h>
 #include <asm/errno.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f0efb0ccfeca..f65c9ddaee41 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/rcupdate.h>
+#include <linux/export.h>
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/flow.h>
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 3fb14b7c13cf..0dc1589343c3 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <net/dsa.h>
 #include "dsa_priv.h"
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c37038..56cf9b8e1c7c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
 	.ndo_start_xmit		= dsa_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
 	.ndo_start_xmit		= edsa_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
 	.ndo_start_xmit		= trailer_xmit,
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
-	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 };
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
new file mode 100644
index 000000000000..19d6aefe97d4
--- /dev/null
+++ b/net/ieee802154/6lowpan.c
@@ -0,0 +1,891 @@
1/*
2 * Copyright 2011, Siemens AG
3 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4 */
5
6/*
7 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24/* Jon's code is based on 6lowpan implementation for Contiki which is:
25 * Copyright (c) 2008, Swedish Institute of Computer Science.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. Neither the name of the Institute nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#define DEBUG
54
55#include <linux/bitops.h>
56#include <linux/if_arp.h>
57#include <linux/module.h>
58#include <linux/moduleparam.h>
59#include <linux/netdevice.h>
60#include <net/af_ieee802154.h>
61#include <net/ieee802154.h>
62#include <net/ieee802154_netdev.h>
63#include <net/ipv6.h>
64
65#include "6lowpan.h"
66
67/* TTL uncompression values */
68static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
69
70static LIST_HEAD(lowpan_devices);
71
72/*
73 * Uncompression of linklocal:
74 * 0 -> 16 bytes from packet
75 * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
76 * 2 -> 2 bytes from prefix - zeroes + 2 from packet
77 * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
78 *
79 * NOTE: => the uncompress function does change 0xf to 0x10
80 * NOTE: 0x00 => no-autoconfig => unspecified
81 */
82static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
83
84/*
85 * Uncompression of ctx-based:
86 * 0 -> 0 bits from packet [unspecified / reserved]
87 * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
88 * 2 -> 8 bytes from prefix - zeroes + 2 from packet
89 * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
90 */
91static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
92
93/*
94 * Uncompression of ctx-base
95 * 0 -> 0 bits from packet
96 * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
97 * 2 -> 2 bytes from prefix - zeroes + 3 from packet
98 * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
99 */
100static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
101
102/* Link local prefix */
103static const u8 lowpan_llprefix[] = {0xfe, 0x80};
104
105/* private device info */
106struct lowpan_dev_info {
107 struct net_device *real_dev; /* real WPAN device ptr */
108 struct mutex dev_list_mtx; /* mutex for list ops */
109};
110
111struct lowpan_dev_record {
112 struct net_device *ldev;
113 struct list_head list;
114};
115
116static inline struct
117lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
118{
119 return netdev_priv(dev);
120}
121
122static inline void lowpan_address_flip(u8 *src, u8 *dest)
123{
124 int i;
125 for (i = 0; i < IEEE802154_ADDR_LEN; i++)
126 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
127}
128
129/* list of all 6lowpan devices, used for packet delivery */
130/* print data in line */
131static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
132 unsigned char *buf, int len)
133{
134#ifdef DEBUG
135 if (msg)
136 pr_debug("(%s) %s: ", caller, msg);
137 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
138 16, 1, buf, len, false);
139#endif /* DEBUG */
140}
141
142/*
143 * print data in a table format:
144 *
145 * addr: xx xx xx xx xx xx
146 * addr: xx xx xx xx xx xx
147 * ...
148 */
149static inline void lowpan_raw_dump_table(const char *caller, char *msg,
150 unsigned char *buf, int len)
151{
152#ifdef DEBUG
153 if (msg)
154 pr_debug("(%s) %s:\n", caller, msg);
155 print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
156 16, 1, buf, len, false);
157#endif /* DEBUG */
158}
159
160static u8
161lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
162 const unsigned char *lladdr)
163{
164 u8 val = 0;
165
166 if (is_addr_mac_addr_based(ipaddr, lladdr))
167 val = 3; /* 0-bits */
168 else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
169 /* compress IID to 16 bits xxxx::XXXX */
170 memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
171 *hc06_ptr += 2;
172 val = 2; /* 16-bits */
173 } else {
174 /* do not compress IID => xxxx::IID */
175 memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
176 *hc06_ptr += 8;
177 val = 1; /* 64-bits */
178 }
179
180 return rol8(val, shift);
181}
182
183static void
184lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
185{
186 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
187 /* second bit-flip (Universal/Local) is done according to RFC 2464 */
188 ipaddr->s6_addr[8] ^= 0x02;
189}
190
191/*
192 * Uncompress addresses based on a prefix and a postfix with zeroes in
193 * between. If the postfix is zero in length it will use the link address
194 * to configure the IP address (autoconf style).
195 * pref_post_count takes a byte where the first nibble specify prefix count
196 * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
197 */
198static int
199lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
200 u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
201{
202 u8 prefcount = pref_post_count >> 4;
203 u8 postcount = pref_post_count & 0x0f;
204
205 /* full nibble 15 => 16 */
206 prefcount = (prefcount == 15 ? 16 : prefcount);
207 postcount = (postcount == 15 ? 16 : postcount);
208
209 if (lladdr)
210 lowpan_raw_dump_inline(__func__, "linklocal address",
211 lladdr, IEEE802154_ALEN);
212 if (prefcount > 0)
213 memcpy(ipaddr, prefix, prefcount);
214
215 if (prefcount + postcount < 16)
216 memset(&ipaddr->s6_addr[prefcount], 0,
217 16 - (prefcount + postcount));
218
219 if (postcount > 0) {
220 memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
221 skb_pull(skb, postcount);
222 } else if (prefcount > 0) {
223 if (lladdr == NULL)
224 return -EINVAL;
225
226 /* no IID based configuration if no prefix and no data */
227 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
228 }
229
230 pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
231 postcount);
232 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
233
234 return 0;
235}
236
237static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
238{
239 u8 ret;
240
241 ret = skb->data[0];
242 skb_pull(skb, 1);
243
244 return ret;
245}
246
247static int lowpan_header_create(struct sk_buff *skb,
248 struct net_device *dev,
249 unsigned short type, const void *_daddr,
250 const void *_saddr, unsigned len)
251{
252 u8 tmp, iphc0, iphc1, *hc06_ptr;
253 struct ipv6hdr *hdr;
254 const u8 *saddr = _saddr;
255 const u8 *daddr = _daddr;
256 u8 *head;
257 struct ieee802154_addr sa, da;
258
259 if (type != ETH_P_IPV6)
260 return 0;
261 /* TODO:
262 * if this packet isn't an IPv6 one, where should it be routed?
263 */
264 head = kzalloc(100, GFP_KERNEL);
265 if (head == NULL)
266 return -ENOMEM;
267
268 hdr = ipv6_hdr(skb);
269 hc06_ptr = head + 2;
270
271 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
272 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
273 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
274 hdr->hop_limit);
275
276 lowpan_raw_dump_table(__func__, "raw skb network header dump",
277 skb_network_header(skb), sizeof(struct ipv6hdr));
278
279 if (!saddr)
280 saddr = dev->dev_addr;
281
282 lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
283
284 /*
285 * As we copy some bit-length fields, in the IPHC encoding bytes,
286 * we sometimes use |=
287 * If the field is 0, and the current bit value in memory is 1,
288 * this does not work. We therefore reset the IPHC encoding here
289 */
290 iphc0 = LOWPAN_DISPATCH_IPHC;
291 iphc1 = 0;
292
293 /* TODO: context lookup */
294
295 lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
296
297 /*
298 * Traffic class, flow label
299 * If flow label is 0, compress it. If traffic class is 0, compress it
300 * We have to process both in the same time as the offset of traffic
301 * class depends on the presence of version and flow label
302 */
303
304 /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
305 tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
306 tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
307
308 if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
309 (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
310 /* flow label can be compressed */
311 iphc0 |= LOWPAN_IPHC_FL_C;
312 if ((hdr->priority == 0) &&
313 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
314 /* compress (elide) all */
315 iphc0 |= LOWPAN_IPHC_TC_C;
316 } else {
317 /* compress only the flow label */
318 *hc06_ptr = tmp;
319 hc06_ptr += 1;
320 }
321 } else {
322 /* Flow label cannot be compressed */
323 if ((hdr->priority == 0) &&
324 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
325 /* compress only traffic class */
326 iphc0 |= LOWPAN_IPHC_TC_C;
327 *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
328 memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
329 hc06_ptr += 3;
330 } else {
331 /* compress nothing */
332 memcpy(hc06_ptr, &hdr, 4);
333 /* replace the top byte with new ECN | DSCP format */
334 *hc06_ptr = tmp;
335 hc06_ptr += 4;
336 }
337 }
338
339 /* NOTE: payload length is always compressed */
340
341 /* Next Header is compress if UDP */
342 if (hdr->nexthdr == UIP_PROTO_UDP)
343 iphc0 |= LOWPAN_IPHC_NH_C;
344
345/* TODO: next header compression */
346
347 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
348 *hc06_ptr = hdr->nexthdr;
349 hc06_ptr += 1;
350 }
351
352 /*
353 * Hop limit
354 * if 1: compress, encoding is 01
355 * if 64: compress, encoding is 10
356 * if 255: compress, encoding is 11
357 * else do not compress
358 */
359 switch (hdr->hop_limit) {
360 case 1:
361 iphc0 |= LOWPAN_IPHC_TTL_1;
362 break;
363 case 64:
364 iphc0 |= LOWPAN_IPHC_TTL_64;
365 break;
366 case 255:
367 iphc0 |= LOWPAN_IPHC_TTL_255;
368 break;
369 default:
370 *hc06_ptr = hdr->hop_limit;
371 break;
372 }
373
374 /* source address compression */
375 if (is_addr_unspecified(&hdr->saddr)) {
376 pr_debug("(%s): source address is unspecified, setting SAC\n",
377 __func__);
378 iphc1 |= LOWPAN_IPHC_SAC;
379 /* TODO: context lookup */
380 } else if (is_addr_link_local(&hdr->saddr)) {
381 pr_debug("(%s): source address is link-local\n", __func__);
382 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
383 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
384 } else {
385 pr_debug("(%s): send the full source address\n", __func__);
386 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
387 hc06_ptr += 16;
388 }
389
390 /* destination address compression */
391 if (is_addr_mcast(&hdr->daddr)) {
392 pr_debug("(%s): destination address is multicast", __func__);
393 iphc1 |= LOWPAN_IPHC_M;
394 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
395 pr_debug("compressed to 1 octet\n");
396 iphc1 |= LOWPAN_IPHC_DAM_11;
397 /* use last byte */
398 *hc06_ptr = hdr->daddr.s6_addr[15];
399 hc06_ptr += 1;
400 } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
401 pr_debug("compressed to 4 octets\n");
402 iphc1 |= LOWPAN_IPHC_DAM_10;
403 /* second byte + the last three */
404 *hc06_ptr = hdr->daddr.s6_addr[1];
405 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
406 hc06_ptr += 4;
407 } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
408 pr_debug("compressed to 6 octets\n");
409 iphc1 |= LOWPAN_IPHC_DAM_01;
410 /* second byte + the last five */
411 *hc06_ptr = hdr->daddr.s6_addr[1];
412 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
413 hc06_ptr += 6;
414 } else {
415 pr_debug("using full address\n");
416 iphc1 |= LOWPAN_IPHC_DAM_00;
417 memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
418 hc06_ptr += 16;
419 }
420 } else {
421 pr_debug("(%s): destination address is unicast: ", __func__);
422 /* TODO: context lookup */
423 if (is_addr_link_local(&hdr->daddr)) {
424 pr_debug("destination address is link-local\n");
425 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
426 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
427 } else {
428 pr_debug("using full address\n");
429 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
430 hc06_ptr += 16;
431 }
432 }
433
434 /* TODO: UDP header compression */
435 /* TODO: Next Header compression */
436
437 head[0] = iphc0;
438 head[1] = iphc1;
439
440 skb_pull(skb, sizeof(struct ipv6hdr));
441 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
442
443 kfree(head);
444
445 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
446 skb->len);
447
448 /*
449 * NOTE1: I'm still unsure about the fact that compression and WPAN
450 * header are created here and not later in the xmit. So wait for
451 * an opinion of net maintainers.
452 */
453 /*
454 * NOTE2: to be absolutely correct, we must derive PANid information
455 * from MAC subif of the 'dev' and 'real_dev' network devices, but
456 * this isn't implemented in mainline yet, so currently we assign 0xff
457 */
458 {
459 /* prepare wpan address data */
460 sa.addr_type = IEEE802154_ADDR_LONG;
461 sa.pan_id = 0xff;
462
463 da.addr_type = IEEE802154_ADDR_LONG;
464 da.pan_id = 0xff;
465
466 memcpy(&(da.hwaddr), daddr, 8);
467 memcpy(&(sa.hwaddr), saddr, 8);
468
469 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
470 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
471 type, (void *)&da, (void *)&sa, skb->len);
472 }
473}
474
475static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
476{
477 struct sk_buff *new;
478 struct lowpan_dev_record *entry;
479 int stat = NET_RX_SUCCESS;
480
481 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
482 GFP_ATOMIC);
483 kfree_skb(skb);
484
485 if (!new)
486 return -ENOMEM;
487
488 skb_push(new, sizeof(struct ipv6hdr));
489 skb_reset_network_header(new);
490 skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
491
492 new->protocol = htons(ETH_P_IPV6);
493 new->pkt_type = PACKET_HOST;
494
495 rcu_read_lock();
496 list_for_each_entry_rcu(entry, &lowpan_devices, list)
497 if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
498 skb = skb_copy(new, GFP_ATOMIC);
499 if (!skb) {
500 stat = -ENOMEM;
501 break;
502 }
503
504 skb->dev = entry->ldev;
505 stat = netif_rx(skb);
506 }
507 rcu_read_unlock();
508
509 kfree_skb(new);
510
511 return stat;
512}
513
514static int
515lowpan_process_data(struct sk_buff *skb)
516{
517 struct ipv6hdr hdr;
518 u8 tmp, iphc0, iphc1, num_context = 0;
519 u8 *_saddr, *_daddr;
520 int err;
521
522 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
523 skb->len);
524 /* at least two bytes will be used for the encoding */
525 if (skb->len < 2)
526 goto drop;
527 iphc0 = lowpan_fetch_skb_u8(skb);
528 iphc1 = lowpan_fetch_skb_u8(skb);
529
530 _saddr = mac_cb(skb)->sa.hwaddr;
531 _daddr = mac_cb(skb)->da.hwaddr;
532
533 pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
534
535 /* one more byte follows if the CID flag is set */
536 if (iphc1 & LOWPAN_IPHC_CID) {
537 pr_debug("(%s): CID flag is set, increase header with one\n",
538 __func__);
539 if (!skb->len)
540 goto drop;
541 num_context = lowpan_fetch_skb_u8(skb);
542 }
543
544 hdr.version = 6;
545
546 /* Traffic Class and Flow Label */
547 switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
548 /*
549 * Traffic Class and FLow Label carried in-line
550 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
551 */
552 case 0: /* 00b */
553 if (!skb->len)
554 goto drop;
555 tmp = lowpan_fetch_skb_u8(skb);
556 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
557 skb_pull(skb, 3);
558 hdr.priority = ((tmp >> 2) & 0x0f);
559 hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
560 (hdr.flow_lbl[0] & 0x0f);
561 break;
562 /*
563 * Traffic class carried in-line
564 * ECN + DSCP (1 byte), Flow Label is elided
565 */
566 case 1: /* 10b */
567 if (!skb->len)
568 goto drop;
569 tmp = lowpan_fetch_skb_u8(skb);
570 hdr.priority = ((tmp >> 2) & 0x0f);
571 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
572 hdr.flow_lbl[1] = 0;
573 hdr.flow_lbl[2] = 0;
574 break;
575 /*
576 * Flow Label carried in-line
577 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
578 */
579 case 2: /* 01b */
580 if (!skb->len)
581 goto drop;
582 tmp = lowpan_fetch_skb_u8(skb);
583 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
584 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
585 skb_pull(skb, 2);
586 break;
587 /* Traffic Class and Flow Label are elided */
588 case 3: /* 11b */
589 hdr.priority = 0;
590 hdr.flow_lbl[0] = 0;
591 hdr.flow_lbl[1] = 0;
592 hdr.flow_lbl[2] = 0;
593 break;
594 default:
595 break;
596 }
597
598 /* Next Header */
599 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
600 /* Next header is carried inline */
601 if (!skb->len)
602 goto drop;
603 hdr.nexthdr = lowpan_fetch_skb_u8(skb);
604 pr_debug("(%s): NH flag is set, next header is carried "
605 "inline: %02x\n", __func__, hdr.nexthdr);
606 }
607
608 /* Hop Limit */
609 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
610 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
611 else {
612 if (!skb->len)
613 goto drop;
614 hdr.hop_limit = lowpan_fetch_skb_u8(skb);
615 }
616
617 /* Extract SAM to the tmp variable */
618 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
619
620 /* Source address uncompression */
621 pr_debug("(%s): source address stateless compression\n", __func__);
622 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
623 lowpan_unc_llconf[tmp], skb->data);
624 if (err)
625 goto drop;
626
627 /* Extract DAM to the tmp variable */
628 tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
629
630 /* check for Multicast Compression */
631 if (iphc1 & LOWPAN_IPHC_M) {
632 if (iphc1 & LOWPAN_IPHC_DAC) {
633 pr_debug("(%s): destination address context-based "
634 "multicast compression\n", __func__);
635 /* TODO: implement this */
636 } else {
637 u8 prefix[] = {0xff, 0x02};
638
639 pr_debug("(%s): destination address non-context-based"
640 " multicast compression\n", __func__);
641 if (0 < tmp && tmp < 3) {
642 if (!skb->len)
643 goto drop;
644 else
645 prefix[1] = lowpan_fetch_skb_u8(skb);
646 }
647
648 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
649 lowpan_unc_mxconf[tmp], NULL);
650 if (err)
651 goto drop;
652 }
653 } else {
654 pr_debug("(%s): destination address stateless compression\n",
655 __func__);
656 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
657 lowpan_unc_llconf[tmp], skb->data);
658 if (err)
659 goto drop;
660 }
661
662 /* TODO: UDP header parse */
663
664 /* Not a fragmented packet */
665 hdr.payload_len = htons(skb->len);
666
667 pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
668 skb_headroom(skb), skb->len);
669
670 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
671 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
672 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
673
674 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
675 sizeof(hdr));
676 return lowpan_skb_deliver(skb, &hdr);
677drop:
678 kfree_skb(skb);
679 return -EINVAL;
680}
681
682static int lowpan_set_address(struct net_device *dev, void *p)
683{
684 struct sockaddr *sa = p;
685
686 if (netif_running(dev))
687 return -EBUSY;
688
689 /* TODO: validate addr */
690 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
691
692 return 0;
693}
694
695static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
696{
697 int err = 0;
698
699 pr_debug("(%s): package xmit\n", __func__);
700
701 skb->dev = lowpan_dev_info(dev)->real_dev;
702 if (skb->dev == NULL) {
703 pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
704 dev_kfree_skb(skb);
705 } else
706 err = dev_queue_xmit(skb);
707
708 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
709}
710
711static void lowpan_dev_free(struct net_device *dev)
712{
713 dev_put(lowpan_dev_info(dev)->real_dev);
714 free_netdev(dev);
715}
716
717static struct header_ops lowpan_header_ops = {
718 .create = lowpan_header_create,
719};
720
721static const struct net_device_ops lowpan_netdev_ops = {
722 .ndo_start_xmit = lowpan_xmit,
723 .ndo_set_mac_address = lowpan_set_address,
724};
725
726static void lowpan_setup(struct net_device *dev)
727{
728 pr_debug("(%s)\n", __func__);
729
730 dev->addr_len = IEEE802154_ADDR_LEN;
731 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
732 dev->type = ARPHRD_IEEE802154;
733 dev->features = NETIF_F_NO_CSUM;
734 /* Frame Control + Sequence Number + Address fields + Security Header */
735 dev->hard_header_len = 2 + 1 + 20 + 14;
736 dev->needed_tailroom = 2; /* FCS */
737 dev->mtu = 1281;
738 dev->tx_queue_len = 0;
739 dev->flags = IFF_NOARP | IFF_BROADCAST;
740 dev->watchdog_timeo = 0;
741
742 dev->netdev_ops = &lowpan_netdev_ops;
743 dev->header_ops = &lowpan_header_ops;
744 dev->destructor = lowpan_dev_free;
745}
746
747static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
748{
749 pr_debug("(%s)\n", __func__);
750
751 if (tb[IFLA_ADDRESS]) {
752 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
753 return -EINVAL;
754 }
755 return 0;
756}
757
758static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
759 struct packet_type *pt, struct net_device *orig_dev)
760{
761 if (!netif_running(dev))
762 goto drop;
763
764 if (dev->type != ARPHRD_IEEE802154)
765 goto drop;
766
767 /* check that it's our buffer */
768 if ((skb->data[0] & 0xe0) == 0x60)
769 lowpan_process_data(skb);
770
771 return NET_RX_SUCCESS;
772
773drop:
774 kfree_skb(skb);
775 return NET_RX_DROP;
776}
777
778static int lowpan_newlink(struct net *src_net, struct net_device *dev,
779 struct nlattr *tb[], struct nlattr *data[])
780{
781 struct net_device *real_dev;
782 struct lowpan_dev_record *entry;
783
784 pr_debug("(%s)\n", __func__);
785
786 if (!tb[IFLA_LINK])
787 return -EINVAL;
788 /* find and hold real wpan device */
789 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
790 if (!real_dev)
791 return -ENODEV;
792
793 lowpan_dev_info(dev)->real_dev = real_dev;
794 mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
795
796 entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
797 if (!entry) {
798 dev_put(real_dev);
799 lowpan_dev_info(dev)->real_dev = NULL;
800 return -ENOMEM;
801 }
802
803 entry->ldev = dev;
804
805 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
806 INIT_LIST_HEAD(&entry->list);
807 list_add_tail(&entry->list, &lowpan_devices);
808 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
809
810 register_netdevice(dev);
811
812 return 0;
813}
814
815static void lowpan_dellink(struct net_device *dev, struct list_head *head)
816{
817 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
818 struct net_device *real_dev = lowpan_dev->real_dev;
819 struct lowpan_dev_record *entry;
820 struct lowpan_dev_record *tmp;
821
822 ASSERT_RTNL();
823
824 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
825 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
826 if (entry->ldev == dev) {
827 list_del(&entry->list);
828 kfree(entry);
829 }
830 }
831 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
832
833 mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
834
835 unregister_netdevice_queue(dev, head);
836
837 dev_put(real_dev);
838}
839
840static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
841 .kind = "lowpan",
842 .priv_size = sizeof(struct lowpan_dev_info),
843 .setup = lowpan_setup,
844 .newlink = lowpan_newlink,
845 .dellink = lowpan_dellink,
846 .validate = lowpan_validate,
847};
848
849static inline int __init lowpan_netlink_init(void)
850{
851 return rtnl_link_register(&lowpan_link_ops);
852}
853
854static inline void __init lowpan_netlink_fini(void)
855{
856 rtnl_link_unregister(&lowpan_link_ops);
857}
858
859static struct packet_type lowpan_packet_type = {
860 .type = __constant_htons(ETH_P_IEEE802154),
861 .func = lowpan_rcv,
862};
863
864static int __init lowpan_init_module(void)
865{
866 int err = 0;
867
868 pr_debug("(%s)\n", __func__);
869
870 err = lowpan_netlink_init();
871 if (err < 0)
872 goto out;
873
874 dev_add_pack(&lowpan_packet_type);
875out:
876 return err;
877}
878
879static void __exit lowpan_cleanup_module(void)
880{
881 pr_debug("(%s)\n", __func__);
882
883 lowpan_netlink_fini();
884
885 dev_remove_pack(&lowpan_packet_type);
886}
887
888module_init(lowpan_init_module);
889module_exit(lowpan_cleanup_module);
890MODULE_LICENSE("GPL");
891MODULE_ALIAS_RTNL_LINK("lowpan");
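
As a reading aid for the header-compression logic above: IPHC spends only two bits on the hop limit, covering the three values IPv6 stacks actually use (1, 64, 255) and falling back to carrying the byte inline otherwise; lowpan_ttl_values[] is the decode-side inverse of the LOWPAN_IPHC_TTL_* encode switch. A standalone sketch (the table mirrors the driver, the program around it is illustrative only):

/* Sketch, not kernel code: the 2-bit hop-limit codes used by IPHC. */
#include <stdint.h>
#include <stdio.h>

static const uint8_t lowpan_ttl_values[] = { 0, 1, 64, 255 };

int main(void)
{
        for (unsigned int code = 0; code < 4; code++) {
                if (code == 0)  /* LOWPAN_IPHC_TTL_I: value carried inline */
                        printf("code %u: hop limit carried inline\n", code);
                else
                        printf("code %u: hop limit %u\n", code,
                               lowpan_ttl_values[code]);
        }
        return 0;
}
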
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
new file mode 100644
index 000000000000..5d8cf80b930d
--- /dev/null
+++ b/net/ieee802154/6lowpan.h
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2011, Siemens AG
3 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4 */
5
6/*
7 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24/* Jon's code is based on 6lowpan implementation for Contiki which is:
25 * Copyright (c) 2008, Swedish Institute of Computer Science.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. Neither the name of the Institute nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#ifndef __6LOWPAN_H__
54#define __6LOWPAN_H__
55
56/* need to know address length to manipulate with it */
57#define IEEE802154_ALEN 8
58
59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
61#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
62#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
63
64/*
65 * ipv6 address based on mac
66 * second bit-flip (Universal/Local) is done according to RFC 2464
67 */
68#define is_addr_mac_addr_based(a, m) \
69 ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
70 (((a)->s6_addr[9]) == (m)[1]) && \
71 (((a)->s6_addr[10]) == (m)[2]) && \
72 (((a)->s6_addr[11]) == (m)[3]) && \
73 (((a)->s6_addr[12]) == (m)[4]) && \
74 (((a)->s6_addr[13]) == (m)[5]) && \
75 (((a)->s6_addr[14]) == (m)[6]) && \
76 (((a)->s6_addr[15]) == (m)[7]))
77
78/* ipv6 address is unspecified */
79#define is_addr_unspecified(a) \
80 ((((a)->s6_addr32[0]) == 0) && \
81 (((a)->s6_addr32[1]) == 0) && \
82 (((a)->s6_addr32[2]) == 0) && \
83 (((a)->s6_addr32[3]) == 0))
84
85/* compare ipv6 addresses prefixes */
86#define ipaddr_prefixcmp(addr1, addr2, length) \
87 (memcmp(addr1, addr2, length >> 3) == 0)
88
89/* local link, i.e. FE80::/10 */
90#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
91
92/*
93 * check whether we can compress the IID to 16 bits,
94 * this is possible only for unicast addresses whose first 49 IID bits are zero.
95 */
96#define lowpan_is_iid_16_bit_compressable(a) \
97 ((((a)->s6_addr16[4]) == 0) && \
98 (((a)->s6_addr16[5]) == 0) && \
99 (((a)->s6_addr16[6]) == 0) && \
100 ((((a)->s6_addr[14]) & 0x80) == 0))
101
102/* multicast address */
103#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
104
105/* check whether the 112-bit gid of the multicast address is mappable to: */
106
107/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
108#define lowpan_is_mcast_addr_compressable(a) \
109 ((((a)->s6_addr16[1]) == 0) && \
110 (((a)->s6_addr16[2]) == 0) && \
111 (((a)->s6_addr16[3]) == 0) && \
112 (((a)->s6_addr16[4]) == 0) && \
113 (((a)->s6_addr16[5]) == 0) && \
114 (((a)->s6_addr16[6]) == 0) && \
115 (((a)->s6_addr[14]) == 0) && \
116 ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
117
118/* 48 bits, FFXX::00XX:XXXX:XXXX */
119#define lowpan_is_mcast_addr_compressable48(a) \
120 ((((a)->s6_addr16[1]) == 0) && \
121 (((a)->s6_addr16[2]) == 0) && \
122 (((a)->s6_addr16[3]) == 0) && \
123 (((a)->s6_addr16[4]) == 0) && \
124 (((a)->s6_addr[10]) == 0))
125
126/* 32 bits, FFXX::00XX:XXXX */
127#define lowpan_is_mcast_addr_compressable32(a) \
128 ((((a)->s6_addr16[1]) == 0) && \
129 (((a)->s6_addr16[2]) == 0) && \
130 (((a)->s6_addr16[3]) == 0) && \
131 (((a)->s6_addr16[4]) == 0) && \
132 (((a)->s6_addr16[5]) == 0) && \
133 (((a)->s6_addr[12]) == 0))
134
135/* 8 bits, FF02::00XX */
136#define lowpan_is_mcast_addr_compressable8(a) \
137 ((((a)->s6_addr[1]) == 2) && \
138 (((a)->s6_addr16[1]) == 0) && \
139 (((a)->s6_addr16[2]) == 0) && \
140 (((a)->s6_addr16[3]) == 0) && \
141 (((a)->s6_addr16[4]) == 0) && \
142 (((a)->s6_addr16[5]) == 0) && \
143 (((a)->s6_addr16[6]) == 0) && \
144 (((a)->s6_addr[14]) == 0))
145
146#define lowpan_is_addr_broadcast(a) \
147 ((((a)[0]) == 0xFF) && \
148 (((a)[1]) == 0xFF) && \
149 (((a)[2]) == 0xFF) && \
150 (((a)[3]) == 0xFF) && \
151 (((a)[4]) == 0xFF) && \
152 (((a)[5]) == 0xFF) && \
153 (((a)[6]) == 0xFF) && \
154 (((a)[7]) == 0xFF))
155
156#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
157#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
158#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
159#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
160#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
161
162/*
163 * Values of fields within the IPHC encoding first byte
164 * (C stands for compressed and I for inline)
165 */
166#define LOWPAN_IPHC_TF 0x18
167
168#define LOWPAN_IPHC_FL_C 0x10
169#define LOWPAN_IPHC_TC_C 0x08
170#define LOWPAN_IPHC_NH_C 0x04
171#define LOWPAN_IPHC_TTL_1 0x01
172#define LOWPAN_IPHC_TTL_64 0x02
173#define LOWPAN_IPHC_TTL_255 0x03
174#define LOWPAN_IPHC_TTL_I 0x00
175
176
177/* Values of fields within the IPHC encoding second byte */
178#define LOWPAN_IPHC_CID 0x80
179
180#define LOWPAN_IPHC_SAC 0x40
181#define LOWPAN_IPHC_SAM_00 0x00
182#define LOWPAN_IPHC_SAM_01 0x10
183#define LOWPAN_IPHC_SAM_10 0x20
184#define LOWPAN_IPHC_SAM 0x30
185
186#define LOWPAN_IPHC_SAM_BIT 4
187
188#define LOWPAN_IPHC_M 0x08
189#define LOWPAN_IPHC_DAC 0x04
190#define LOWPAN_IPHC_DAM_00 0x00
191#define LOWPAN_IPHC_DAM_01 0x01
192#define LOWPAN_IPHC_DAM_10 0x02
193#define LOWPAN_IPHC_DAM_11 0x03
194
195#define LOWPAN_IPHC_DAM_BIT 0
196/*
197 * LOWPAN_UDP encoding (works together with IPHC)
198 */
199#define LOWPAN_NHC_UDP_MASK 0xF8
200#define LOWPAN_NHC_UDP_ID 0xF0
201#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
202#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
203
204/* values for port compression, _with checksum_, i.e. bit 5 set to 0 */
205#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
206#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
207 dest = 0xF0 + 8 bit inline */
208#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
209 dest = 16 bit inline */
210#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
211
212#endif /* __6LOWPAN_H__ */
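
The dispatch values above select what follows in the first byte of a 6LoWPAN frame: two exact single-byte codes and three prefix patterns. A minimal user-space sketch of how those patterns classify a dispatch byte; the helper name and the copies of the constants are illustrative, not part of the header:

#include <stdint.h>
#include <stdio.h>

/* user-space copies of the dispatch values defined above */
#define DISPATCH_IPV6  0x41
#define DISPATCH_HC1   0x42
#define DISPATCH_IPHC  0x60	/* 011xxxxx */
#define DISPATCH_FRAG1 0xc0	/* 11000xxx */
#define DISPATCH_FRAGN 0xe0	/* 11100xxx */

/* illustrative helper: name the encoding selected by a dispatch byte */
static const char *dispatch_name(uint8_t b)
{
	if (b == DISPATCH_IPV6)
		return "uncompressed IPv6";
	if (b == DISPATCH_HC1)
		return "HC1";
	if ((b & 0xe0) == DISPATCH_IPHC)
		return "IPHC";
	if ((b & 0xf8) == DISPATCH_FRAG1)
		return "FRAG1";
	if ((b & 0xf8) == DISPATCH_FRAGN)
		return "FRAGN";
	return "unknown/reserved";
}

int main(void)
{
	printf("%s\n", dispatch_name(0x78));	/* 01111000 -> IPHC */
	return 0;
}
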
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1c1de97d264a..7dee65052925 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -10,3 +10,9 @@ config IEEE802154
10 10
11 Say Y here to compile LR-WPAN support into the kernel or say M to 11 Say Y here to compile LR-WPAN support into the kernel or say M to
12 compile it as modules. 12 compile it as modules.
13
14config IEEE802154_6LOWPAN
15 tristate "6lowpan support over IEEE 802.15.4"
16 depends on IEEE802154 && IPV6
17 ---help---
18 IPv6 header compression (6LoWPAN) over IEEE 802.15.4.
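
Assuming the stated dependencies are satisfied, the new option can be enabled with a .config fragment like the following sketch; =m builds 6lowpan.o as a module per the Makefile change below:

CONFIG_IPV6=y
CONFIG_IEEE802154=m
CONFIG_IEEE802154_6LOWPAN=m
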
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 5761185f884e..d7716d64c6bb 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,3 +1,5 @@
1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 2obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3
4ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
5af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 71ee1108d4f8..adaf46214905 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -30,6 +30,7 @@
30#include <net/genetlink.h> 30#include <net/genetlink.h>
31#include <net/sock.h> 31#include <net/sock.h>
32#include <linux/nl802154.h> 32#include <linux/nl802154.h>
33#include <linux/export.h>
33#include <net/af_ieee802154.h> 34#include <net/af_ieee802154.h>
34#include <net/nl802154.h> 35#include <net/nl802154.h>
35#include <net/ieee802154.h> 36#include <net/ieee802154.h>
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd2b9478ddd1..1b5096a9875a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -893,7 +893,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
893EXPORT_SYMBOL(inet_ioctl); 893EXPORT_SYMBOL(inet_ioctl);
894 894
895#ifdef CONFIG_COMPAT 895#ifdef CONFIG_COMPAT
896int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 896static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
897{ 897{
898 struct sock *sk = sock->sk; 898 struct sock *sk = sock->sk;
899 int err = -ENOIOCTLCMD; 899 int err = -ENOIOCTLCMD;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2c2a98e402e7..86f3b885b4f3 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -476,7 +476,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
476 doi = doi_def->doi; 476 doi = doi_def->doi;
477 doi_type = doi_def->type; 477 doi_type = doi_def->type;
478 478
479 if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) 479 if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
480 goto doi_add_return; 480 goto doi_add_return;
481 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { 481 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
482 switch (doi_def->tags[iter]) { 482 switch (doi_def->tags[iter]) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd06dd00..c6b5092f29a1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
258 ip_mc_up(in_dev); 258 ip_mc_up(in_dev);
259 259
260 /* we can receive as soon as ip_ptr is set -- do this last */ 260 /* we can receive as soon as ip_ptr is set -- do this last */
261 rcu_assign_pointer(dev->ip_ptr, in_dev); 261 RCU_INIT_POINTER(dev->ip_ptr, in_dev);
262out: 262out:
263 return in_dev; 263 return in_dev;
264out_kfree: 264out_kfree:
@@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev)
291 inet_free_ifa(ifa); 291 inet_free_ifa(ifa);
292 } 292 }
293 293
294 rcu_assign_pointer(dev->ip_ptr, NULL); 294 RCU_INIT_POINTER(dev->ip_ptr, NULL);
295 295
296 devinet_sysctl_unregister(in_dev); 296 devinet_sysctl_unregister(in_dev);
297 neigh_parms_release(&arp_tbl, in_dev->arp_parms); 297 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1175 switch (event) { 1175 switch (event) {
1176 case NETDEV_REGISTER: 1176 case NETDEV_REGISTER:
1177 printk(KERN_DEBUG "inetdev_event: bug\n"); 1177 printk(KERN_DEBUG "inetdev_event: bug\n");
1178 rcu_assign_pointer(dev->ip_ptr, NULL); 1178 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1179 break; 1179 break;
1180 case NETDEV_UP: 1180 case NETDEV_UP:
1181 if (!inetdev_valid_mtu(dev->mtu)) 1181 if (!inetdev_valid_mtu(dev->mtu))
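
The substitution recurring through this series swaps rcu_assign_pointer() for RCU_INIT_POINTER() at sites that store NULL or initialize a pointer before any reader can reach it: the former implies a release barrier so the pointed-to data is visible before the pointer, the latter is a plain store. A simplified user-space model of the distinction, using C11 atomics rather than the kernel definitions:

#include <stdatomic.h>
#include <stddef.h>

struct item { int value; };

static _Atomic(struct item *) head;

static void publish(struct item *it)
{
	/* like rcu_assign_pointer(): the release store orders the
	 * initialization of *it before the pointer becomes visible */
	atomic_store_explicit(&head, it, memory_order_release);
}

static void retract(void)
{
	/* like RCU_INIT_POINTER(head, NULL): publishing NULL exposes
	 * no newly initialized memory, so a plain store suffices */
	atomic_store_explicit(&head, (struct item *)NULL, memory_order_relaxed);
}

int main(void)
{
	static struct item it = { 42 };

	publish(&it);
	retract();
	return 0;
}
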
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index a53bb1b5b118..46339ba7a2d3 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -26,6 +26,7 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/rcupdate.h> 28#include <linux/rcupdate.h>
29#include <linux/export.h>
29#include <net/ip.h> 30#include <net/ip.h>
30#include <net/route.h> 31#include <net/route.h>
31#include <net/tcp.h> 32#include <net/tcp.h>
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index de9e2978476f..37b671185c81 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -73,6 +73,7 @@
73#include <linux/list.h> 73#include <linux/list.h>
74#include <linux/slab.h> 74#include <linux/slab.h>
75#include <linux/prefetch.h> 75#include <linux/prefetch.h>
76#include <linux/export.h>
76#include <net/net_namespace.h> 77#include <net/net_namespace.h>
77#include <net/ip.h> 78#include <net/ip.h>
78#include <net/protocol.h> 79#include <net/protocol.h>
@@ -204,7 +205,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
204 return (struct tnode *)(parent & ~NODE_TYPE_MASK); 205 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
205} 206}
206 207
207/* Same as rcu_assign_pointer 208/* Same as RCU_INIT_POINTER
208 * but that macro() assumes that value is a pointer. 209 * but that macro() assumes that value is a pointer.
209 */ 210 */
210static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr) 211static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -528,7 +529,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
528 if (n) 529 if (n)
529 node_set_parent(n, tn); 530 node_set_parent(n, tn);
530 531
531 rcu_assign_pointer(tn->child[i], n); 532 RCU_INIT_POINTER(tn->child[i], n);
532} 533}
533 534
534#define MAX_WORK 10 535#define MAX_WORK 10
@@ -1014,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1014 1015
1015 tp = node_parent((struct rt_trie_node *) tn); 1016 tp = node_parent((struct rt_trie_node *) tn);
1016 if (!tp) 1017 if (!tp)
1017 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1018 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1018 1019
1019 tnode_free_flush(); 1020 tnode_free_flush();
1020 if (!tp) 1021 if (!tp)
@@ -1026,7 +1027,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1026 if (IS_TNODE(tn)) 1027 if (IS_TNODE(tn))
1027 tn = (struct tnode *)resize(t, (struct tnode *)tn); 1028 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1028 1029
1029 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1030 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1030 tnode_free_flush(); 1031 tnode_free_flush();
1031} 1032}
1032 1033
@@ -1163,7 +1164,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1163 put_child(t, (struct tnode *)tp, cindex, 1164 put_child(t, (struct tnode *)tp, cindex,
1164 (struct rt_trie_node *)tn); 1165 (struct rt_trie_node *)tn);
1165 } else { 1166 } else {
1166 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1167 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1167 tp = tn; 1168 tp = tn;
1168 } 1169 }
1169 } 1170 }
@@ -1621,7 +1622,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
1621 put_child(t, (struct tnode *)tp, cindex, NULL); 1622 put_child(t, (struct tnode *)tp, cindex, NULL);
1622 trie_rebalance(t, tp); 1623 trie_rebalance(t, tp);
1623 } else 1624 } else
1624 rcu_assign_pointer(t->trie, NULL); 1625 RCU_INIT_POINTER(t->trie, NULL);
1625 1626
1626 free_leaf(l); 1627 free_leaf(l);
1627} 1628}
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index dbfc21de3479..8cb1ebb7cd74 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version)
34 if (gre_proto[version]) 34 if (gre_proto[version])
35 goto err_out_unlock; 35 goto err_out_unlock;
36 36
37 rcu_assign_pointer(gre_proto[version], proto); 37 RCU_INIT_POINTER(gre_proto[version], proto);
38 spin_unlock(&gre_proto_lock); 38 spin_unlock(&gre_proto_lock);
39 return 0; 39 return 0;
40 40
@@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
54 if (rcu_dereference_protected(gre_proto[version], 54 if (rcu_dereference_protected(gre_proto[version],
55 lockdep_is_held(&gre_proto_lock)) != proto) 55 lockdep_is_held(&gre_proto_lock)) != proto)
56 goto err_out_unlock; 56 goto err_out_unlock;
57 rcu_assign_pointer(gre_proto[version], NULL); 57 RCU_INIT_POINTER(gre_proto[version], NULL);
58 spin_unlock(&gre_proto_lock); 58 spin_unlock(&gre_proto_lock);
59 synchronize_rcu(); 59 synchronize_rcu();
60 return 0; 60 return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 23ef31baa1af..ab188ae12fd9 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1152,10 +1152,9 @@ static int __net_init icmp_sk_init(struct net *net)
1152 net->ipv4.icmp_sk[i] = sk; 1152 net->ipv4.icmp_sk[i] = sk;
1153 1153
1154 /* Enough space for 2 64K ICMP packets, including 1154 /* Enough space for 2 64K ICMP packets, including
1155 * sk_buff struct overhead. 1155 * sk_buff/skb_shared_info struct overhead.
1156 */ 1156 */
1157 sk->sk_sndbuf = 1157 sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
1158 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
1159 1158
1160 /* 1159 /*
1161 * Speedup sock_wfree() 1160 * Speedup sock_wfree()
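
SKB_TRUESIZE() charges the aligned sk_buff and skb_shared_info overhead on top of the data length, so the new sndbuf is a slightly larger, more honest bound than the old 64K + sizeof(struct sk_buff). A back-of-envelope sketch with illustrative sizes; the real values depend on architecture and config:

#include <stdio.h>

#define CACHE_LINE	64	/* illustrative SMP_CACHE_BYTES */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define SKB_SZ		232	/* illustrative sizeof(struct sk_buff) */
#define SHINFO_SZ	320	/* illustrative sizeof(struct skb_shared_info) */
#define SKB_TRUESIZE(x)	((x) + ALIGN_UP(SKB_SZ, CACHE_LINE) + \
			 ALIGN_UP(SHINFO_SZ, CACHE_LINE))

int main(void)
{
	printf("old: %d\n", 2 * ((64 * 1024) + SKB_SZ));	/* skb only */
	printf("new: %d\n", 2 * SKB_TRUESIZE(64 * 1024));	/* skb + shinfo */
	return 0;
}
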
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d577199eabd5..c7472eff2d51 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
1009 1009
1010 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG. 1010 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
1011 We will get multicast token leakage, when IFF_MULTICAST 1011 We will get multicast token leakage, when IFF_MULTICAST
1012 is changed. This check should be done in dev->set_multicast_list 1012 is changed. This check should be done in ndo_set_rx_mode
1013 routine. Something sort of: 1013 routine. Something sort of:
1014 if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; } 1014 if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
1015 --ANK 1015 --ANK
@@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1242 1242
1243 im->next_rcu = in_dev->mc_list; 1243 im->next_rcu = in_dev->mc_list;
1244 in_dev->mc_count++; 1244 in_dev->mc_count++;
1245 rcu_assign_pointer(in_dev->mc_list, im); 1245 RCU_INIT_POINTER(in_dev->mc_list, im);
1246 1246
1247#ifdef CONFIG_IP_MULTICAST 1247#ifdef CONFIG_IP_MULTICAST
1248 igmpv3_del_delrec(in_dev, im->multiaddr); 1248 igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1813 iml->next_rcu = inet->mc_list; 1813 iml->next_rcu = inet->mc_list;
1814 iml->sflist = NULL; 1814 iml->sflist = NULL;
1815 iml->sfmode = MCAST_EXCLUDE; 1815 iml->sfmode = MCAST_EXCLUDE;
1816 rcu_assign_pointer(inet->mc_list, iml); 1816 RCU_INIT_POINTER(inet->mc_list, iml);
1817 ip_mc_inc_group(in_dev, addr); 1817 ip_mc_inc_group(in_dev, addr);
1818 err = 0; 1818 err = 0;
1819done: 1819done:
@@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1835 } 1835 }
1836 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1836 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1837 iml->sfmode, psf->sl_count, psf->sl_addr, 0); 1837 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1838 rcu_assign_pointer(iml->sflist, NULL); 1838 RCU_INIT_POINTER(iml->sflist, NULL);
1839 /* decrease mem now to avoid the memleak warning */ 1839 /* decrease mem now to avoid the memleak warning */
1840 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); 1840 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1841 kfree_rcu(psf, rcu); 1841 kfree_rcu(psf, rcu);
@@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
2000 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); 2000 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2001 kfree_rcu(psl, rcu); 2001 kfree_rcu(psl, rcu);
2002 } 2002 }
2003 rcu_assign_pointer(pmc->sflist, newpsl); 2003 RCU_INIT_POINTER(pmc->sflist, newpsl);
2004 psl = newpsl; 2004 psl = newpsl;
2005 } 2005 }
2006 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2006 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2103 } else 2103 } else
2104 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2104 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2105 0, NULL, 0); 2105 0, NULL, 0);
2106 rcu_assign_pointer(pmc->sflist, newpsl); 2106 RCU_INIT_POINTER(pmc->sflist, newpsl);
2107 pmc->sfmode = msf->imsf_fmode; 2107 pmc->sfmode = msf->imsf_fmode;
2108 err = 0; 2108 err = 0;
2109done: 2109done:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 389a2e6a17fd..f5e2bdaef949 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,9 @@ static int inet_csk_diag_fill(struct sock *sk,
108 icsk->icsk_ca_ops->name); 108 icsk->icsk_ca_ops->name);
109 } 109 }
110 110
111 if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
112 RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
113
111 r->idiag_family = sk->sk_family; 114 r->idiag_family = sk->sk_family;
112 r->idiag_state = sk->sk_state; 115 r->idiag_state = sk->sk_state;
113 r->idiag_timer = 0; 116 r->idiag_timer = 0;
@@ -130,6 +133,8 @@ static int inet_csk_diag_fill(struct sock *sk,
130 &np->rcv_saddr); 133 &np->rcv_saddr);
131 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 134 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
132 &np->daddr); 135 &np->daddr);
136 if (ext & (1 << (INET_DIAG_TOS - 1)))
137 RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
133 } 138 }
134#endif 139#endif
135 140
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ef7ae6049a51..cc280a3f4f96 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -244,11 +244,11 @@ static void lro_add_frags(struct net_lro_desc *lro_desc,
244 skb->truesize += truesize; 244 skb->truesize += truesize;
245 245
246 skb_frags[0].page_offset += hlen; 246 skb_frags[0].page_offset += hlen;
247 skb_frags[0].size -= hlen; 247 skb_frag_size_sub(&skb_frags[0], hlen);
248 248
249 while (tcp_data_len > 0) { 249 while (tcp_data_len > 0) {
250 *(lro_desc->next_frag) = *skb_frags; 250 *(lro_desc->next_frag) = *skb_frags;
251 tcp_data_len -= skb_frags->size; 251 tcp_data_len -= skb_frag_size(skb_frags);
252 lro_desc->next_frag++; 252 lro_desc->next_frag++;
253 skb_frags++; 253 skb_frags++;
254 skb_shinfo(skb)->nr_frags++; 254 skb_shinfo(skb)->nr_frags++;
@@ -400,14 +400,14 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
400 skb_frags = skb_shinfo(skb)->frags; 400 skb_frags = skb_shinfo(skb)->frags;
401 while (data_len > 0) { 401 while (data_len > 0) {
402 *skb_frags = *frags; 402 *skb_frags = *frags;
403 data_len -= frags->size; 403 data_len -= skb_frag_size(frags);
404 skb_frags++; 404 skb_frags++;
405 frags++; 405 frags++;
406 skb_shinfo(skb)->nr_frags++; 406 skb_shinfo(skb)->nr_frags++;
407 } 407 }
408 408
409 skb_shinfo(skb)->frags[0].page_offset += hdr_len; 409 skb_shinfo(skb)->frags[0].page_offset += hdr_len;
410 skb_shinfo(skb)->frags[0].size -= hdr_len; 410 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
411 411
412 skb->ip_summed = ip_summed; 412 skb->ip_summed = ip_summed;
413 skb->csum = sum; 413 skb->csum = sum;
@@ -433,7 +433,7 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
433 if (!lro_mgr->get_frag_header || 433 if (!lro_mgr->get_frag_header ||
434 lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph, 434 lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
435 (void *)&tcph, &flags, priv)) { 435 (void *)&tcph, &flags, priv)) {
436 mac_hdr = page_address(frags->page) + frags->page_offset; 436 mac_hdr = skb_frag_address(frags);
437 goto out1; 437 goto out1;
438 } 438 }
439 439
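
These hunks belong to the tree-wide move away from poking skb fragment fields directly (frag->size, page_address(frag->page) + frag->page_offset) toward accessor helpers, so the fragment layout can change without touching every user. A simplified model of the accessor pattern; the struct and helpers are stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

struct frag {				/* stand-in for skb_frag_t */
	uint8_t		*page_base;
	unsigned int	 page_offset;
	unsigned int	 size;
};

static unsigned int frag_size(const struct frag *f)	  { return f->size; }
static void frag_size_sub(struct frag *f, unsigned int d) { f->size -= d; }
static void *frag_address(const struct frag *f)
{
	return f->page_base + f->page_offset;	/* like skb_frag_address() */
}

int main(void)
{
	uint8_t page[4096];
	struct frag f = { page, 128, 1400 };

	frag_size_sub(&f, 54);		/* strip a header, say */
	printf("%u bytes at %p\n", frag_size(&f), frag_address(&f));
	return 0;
}
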
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 3c8dfa16614d..89168c6351ff 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemcheck.h> 12#include <linux/kmemcheck.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h>
14#include <net/inet_hashtables.h> 15#include <net/inet_hashtables.h>
15#include <net/inet_timewait_sock.h> 16#include <net/inet_timewait_sock.h>
16#include <net/ip.h> 17#include <net/ip.h>
@@ -183,6 +184,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
183 tw->tw_daddr = inet->inet_daddr; 184 tw->tw_daddr = inet->inet_daddr;
184 tw->tw_rcv_saddr = inet->inet_rcv_saddr; 185 tw->tw_rcv_saddr = inet->inet_rcv_saddr;
185 tw->tw_bound_dev_if = sk->sk_bound_dev_if; 186 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
187 tw->tw_tos = inet->tos;
186 tw->tw_num = inet->inet_num; 188 tw->tw_num = inet->inet_num;
187 tw->tw_state = TCP_TIME_WAIT; 189 tw->tw_state = TCP_TIME_WAIT;
188 tw->tw_substate = state; 190 tw->tw_substate = state;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 0e0ab98abc6f..fdaabf2f2b68 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,8 +599,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
599 head->next = clone; 599 head->next = clone;
600 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 600 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
601 skb_frag_list_init(head); 601 skb_frag_list_init(head);
602 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 602 for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
603 plen += skb_shinfo(head)->frags[i].size; 603 plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
604 clone->len = clone->data_len = head->data_len - plen; 604 clone->len = clone->data_len = head->data_len - plen;
605 head->data_len -= clone->len; 605 head->data_len -= clone->len;
606 head->len -= clone->len; 606 head->len -= clone->len;
@@ -682,6 +682,42 @@ int ip_defrag(struct sk_buff *skb, u32 user)
682} 682}
683EXPORT_SYMBOL(ip_defrag); 683EXPORT_SYMBOL(ip_defrag);
684 684
685struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
686{
687 const struct iphdr *iph;
688 u32 len;
689
690 if (skb->protocol != htons(ETH_P_IP))
691 return skb;
692
693 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
694 return skb;
695
696 iph = ip_hdr(skb);
697 if (iph->ihl < 5 || iph->version != 4)
698 return skb;
699 if (!pskb_may_pull(skb, iph->ihl*4))
700 return skb;
701 iph = ip_hdr(skb);
702 len = ntohs(iph->tot_len);
703 if (skb->len < len || len < (iph->ihl * 4))
704 return skb;
705
706 if (ip_is_fragment(ip_hdr(skb))) {
707 skb = skb_share_check(skb, GFP_ATOMIC);
708 if (skb) {
709 if (pskb_trim_rcsum(skb, len))
710 return skb;
711 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
712 if (ip_defrag(skb, user))
713 return NULL;
714 skb->rxhash = 0;
715 }
716 }
717 return skb;
718}
719EXPORT_SYMBOL(ip_check_defrag);
720
685#ifdef CONFIG_SYSCTL 721#ifdef CONFIG_SYSCTL
686static int zero; 722static int zero;
687 723
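
The new ip_check_defrag() gives callers outside the normal input path a way to feed possibly-fragmented packets through reassembly: a non-NULL return is a complete (or unfragmented) datagram, NULL means the fragment was queued pending the rest. A kernel-context sketch of the calling convention; the defrag user id shown is an assumption, pick whichever enum ip_defrag_users value fits the caller:

/* kernel-context sketch, not compilable stand-alone */
#include <linux/skbuff.h>
#include <net/ip.h>

static struct sk_buff *rx_maybe_defrag(struct sk_buff *skb)
{
	skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); /* assumed user id */
	if (!skb)
		return NULL;	/* fragment queued, more to come */
	return skb;		/* whole datagram, safe to parse */
}
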
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7bb94c48345..d55110e93120 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -835,8 +835,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
835 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 835 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
836 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 836 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
837 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 837 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
838 if (max_headroom > dev->needed_headroom)
839 dev->needed_headroom = max_headroom;
840 if (!new_skb) { 838 if (!new_skb) {
841 ip_rt_put(rt); 839 ip_rt_put(rt);
842 dev->stats.tx_dropped++; 840 dev->stats.tx_dropped++;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8c6563361ab5..0bc95f3977d2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -989,13 +989,13 @@ alloc_new_skb:
989 if (page && (left = PAGE_SIZE - off) > 0) { 989 if (page && (left = PAGE_SIZE - off) > 0) {
990 if (copy >= left) 990 if (copy >= left)
991 copy = left; 991 copy = left;
992 if (page != frag->page) { 992 if (page != skb_frag_page(frag)) {
993 if (i == MAX_SKB_FRAGS) { 993 if (i == MAX_SKB_FRAGS) {
994 err = -EMSGSIZE; 994 err = -EMSGSIZE;
995 goto error; 995 goto error;
996 } 996 }
997 get_page(page);
998 skb_fill_page_desc(skb, i, page, off, 0); 997 skb_fill_page_desc(skb, i, page, off, 0);
998 skb_frag_ref(skb, i);
999 frag = &skb_shinfo(skb)->frags[i]; 999 frag = &skb_shinfo(skb)->frags[i];
1000 } 1000 }
1001 } else if (i < MAX_SKB_FRAGS) { 1001 } else if (i < MAX_SKB_FRAGS) {
@@ -1015,12 +1015,13 @@ alloc_new_skb:
1015 err = -EMSGSIZE; 1015 err = -EMSGSIZE;
1016 goto error; 1016 goto error;
1017 } 1017 }
1018 if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) { 1018 if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
1019 offset, copy, skb->len, skb) < 0) {
1019 err = -EFAULT; 1020 err = -EFAULT;
1020 goto error; 1021 goto error;
1021 } 1022 }
1022 cork->off += copy; 1023 cork->off += copy;
1023 frag->size += copy; 1024 skb_frag_size_add(frag, copy);
1024 skb->len += copy; 1025 skb->len += copy;
1025 skb->data_len += copy; 1026 skb->data_len += copy;
1026 skb->truesize += copy; 1027 skb->truesize += copy;
@@ -1229,7 +1230,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1229 if (len > size) 1230 if (len > size)
1230 len = size; 1231 len = size;
1231 if (skb_can_coalesce(skb, i, page, offset)) { 1232 if (skb_can_coalesce(skb, i, page, offset)) {
1232 skb_shinfo(skb)->frags[i-1].size += len; 1233 skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
1233 } else if (i < MAX_SKB_FRAGS) { 1234 } else if (i < MAX_SKB_FRAGS) {
1234 get_page(page); 1235 get_page(page);
1235 skb_fill_page_desc(skb, i, page, offset, len); 1236 skb_fill_page_desc(skb, i, page, offset, len);
@@ -1465,7 +1466,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1465 * structure to pass arguments. 1466 * structure to pass arguments.
1466 */ 1467 */
1467void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, 1468void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1468 struct ip_reply_arg *arg, unsigned int len) 1469 const struct ip_reply_arg *arg, unsigned int len)
1469{ 1470{
1470 struct inet_sock *inet = inet_sk(sk); 1471 struct inet_sock *inet = inet_sk(sk);
1471 struct ip_options_data replyopts; 1472 struct ip_options_data replyopts;
@@ -1488,7 +1489,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1488 } 1489 }
1489 1490
1490 flowi4_init_output(&fl4, arg->bound_dev_if, 0, 1491 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1491 RT_TOS(ip_hdr(skb)->tos), 1492 RT_TOS(arg->tos),
1492 RT_SCOPE_UNIVERSE, sk->sk_protocol, 1493 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1493 ip_reply_arg_flowi_flags(arg), 1494 ip_reply_arg_flowi_flags(arg),
1494 daddr, rt->rt_spec_dst, 1495 daddr, rt->rt_spec_dst,
@@ -1505,7 +1506,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1505 with locally disabled BH and that sk cannot be already spinlocked. 1506 with locally disabled BH and that sk cannot be already spinlocked.
1506 */ 1507 */
1507 bh_lock_sock(sk); 1508 bh_lock_sock(sk);
1508 inet->tos = ip_hdr(skb)->tos; 1509 inet->tos = arg->tos;
1509 sk->sk_priority = skb->priority; 1510 sk->sk_priority = skb->priority;
1510 sk->sk_protocol = ip_hdr(skb)->protocol; 1511 sk->sk_protocol = ip_hdr(skb)->protocol;
1511 sk->sk_bound_dev_if = arg->bound_dev_if; 1512 sk->sk_bound_dev_if = arg->bound_dev_if;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8905e92f896a..09ff51bf16a4 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -33,6 +33,7 @@
33#include <linux/netfilter.h> 33#include <linux/netfilter.h>
34#include <linux/route.h> 34#include <linux/route.h>
35#include <linux/mroute.h> 35#include <linux/mroute.h>
36#include <net/inet_ecn.h>
36#include <net/route.h> 37#include <net/route.h>
37#include <net/xfrm.h> 38#include <net/xfrm.h>
38#include <net/compat.h> 39#include <net/compat.h>
@@ -578,8 +579,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
578 break; 579 break;
579 case IP_TOS: /* This sets both TOS and Precedence */ 580 case IP_TOS: /* This sets both TOS and Precedence */
580 if (sk->sk_type == SOCK_STREAM) { 581 if (sk->sk_type == SOCK_STREAM) {
581 val &= ~3; 582 val &= ~INET_ECN_MASK;
582 val |= inet->tos & 3; 583 val |= inet->tos & INET_ECN_MASK;
583 } 584 }
584 if (inet->tos != val) { 585 if (inet->tos != val) {
585 inet->tos = val; 586 inet->tos = val;
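
Replacing the bare mask 3 with INET_ECN_MASK makes explicit that, on TCP sockets, the low two ECN bits of the TOS byte belong to the stack and must survive an IP_TOS setsockopt. The arithmetic, in a minimal sketch:

#include <stdio.h>

#define INET_ECN_MASK 3	/* the low two bits of the TOS byte carry ECN */

int main(void)
{
	unsigned int tos = 0x2a;	/* current inet->tos, ECN bits = 10 */
	unsigned int val = 0x11;	/* TOS requested via setsockopt() */

	val &= ~INET_ECN_MASK;		/* discard the caller's ECN bits */
	val |= tos & INET_ECN_MASK;	/* keep the stack-owned ECN bits */
	printf("0x%02x\n", val);	/* prints 0x12 */
	return 0;
}
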
@@ -961,7 +962,7 @@ mc_msf_out:
961 break; 962 break;
962 963
963 case IP_TRANSPARENT: 964 case IP_TRANSPARENT:
964 if (!capable(CAP_NET_ADMIN)) { 965 if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) {
965 err = -EPERM; 966 err = -EPERM;
966 break; 967 break;
967 } 968 }
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 472a8c4f1dc0..0da2afc97f32 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -54,6 +54,7 @@
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <linux/nfs_fs.h> 55#include <linux/nfs_fs.h>
56#include <linux/slab.h> 56#include <linux/slab.h>
57#include <linux/export.h>
57#include <net/net_namespace.h> 58#include <net/net_namespace.h>
58#include <net/arp.h> 59#include <net/arp.h>
59#include <net/ip.h> 60#include <net/ip.h>
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b7ca6e..065effd8349a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
231 (iter = rtnl_dereference(*tp)) != NULL; 231 (iter = rtnl_dereference(*tp)) != NULL;
232 tp = &iter->next) { 232 tp = &iter->next) {
233 if (t == iter) { 233 if (t == iter) {
234 rcu_assign_pointer(*tp, t->next); 234 RCU_INIT_POINTER(*tp, t->next);
235 break; 235 break;
236 } 236 }
237 } 237 }
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
241{ 241{
242 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t); 242 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
243 243
244 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 244 RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
245 rcu_assign_pointer(*tp, t); 245 RCU_INIT_POINTER(*tp, t);
246} 246}
247 247
248static struct ip_tunnel * ipip_tunnel_locate(struct net *net, 248static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
301 struct ipip_net *ipn = net_generic(net, ipip_net_id); 301 struct ipip_net *ipn = net_generic(net, ipip_net_id);
302 302
303 if (dev == ipn->fb_tunnel_dev) 303 if (dev == ipn->fb_tunnel_dev)
304 rcu_assign_pointer(ipn->tunnels_wc[0], NULL); 304 RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
305 else 305 else
306 ipip_tunnel_unlink(ipn, netdev_priv(dev)); 306 ipip_tunnel_unlink(ipn, netdev_priv(dev));
307 dev_put(dev); 307 dev_put(dev);
@@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
791 return -ENOMEM; 791 return -ENOMEM;
792 792
793 dev_hold(dev); 793 dev_hold(dev);
794 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel); 794 RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
795 return 0; 795 return 0;
796} 796}
797 797
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 58e879157976..76a7f07b38b6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -61,6 +61,7 @@
61#include <linux/if_arp.h> 61#include <linux/if_arp.h>
62#include <linux/netfilter_ipv4.h> 62#include <linux/netfilter_ipv4.h>
63#include <linux/compat.h> 63#include <linux/compat.h>
64#include <linux/export.h>
64#include <net/ipip.h> 65#include <net/ipip.h>
65#include <net/checksum.h> 66#include <net/checksum.h>
66#include <net/netlink.h> 67#include <net/netlink.h>
@@ -1176,7 +1177,7 @@ static void mrtsock_destruct(struct sock *sk)
1176 ipmr_for_each_table(mrt, net) { 1177 ipmr_for_each_table(mrt, net) {
1177 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1178 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1178 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1179 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1179 rcu_assign_pointer(mrt->mroute_sk, NULL); 1180 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1180 mroute_clean_tables(mrt); 1181 mroute_clean_tables(mrt);
1181 } 1182 }
1182 } 1183 }
@@ -1203,7 +1204,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1203 return -ENOENT; 1204 return -ENOENT;
1204 1205
1205 if (optname != MRT_INIT) { 1206 if (optname != MRT_INIT) {
1206 if (sk != rcu_dereference_raw(mrt->mroute_sk) && 1207 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1207 !capable(CAP_NET_ADMIN)) 1208 !capable(CAP_NET_ADMIN))
1208 return -EACCES; 1209 return -EACCES;
1209 } 1210 }
@@ -1224,13 +1225,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1224 1225
1225 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1226 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1226 if (ret == 0) { 1227 if (ret == 0) {
1227 rcu_assign_pointer(mrt->mroute_sk, sk); 1228 RCU_INIT_POINTER(mrt->mroute_sk, sk);
1228 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1229 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1229 } 1230 }
1230 rtnl_unlock(); 1231 rtnl_unlock();
1231 return ret; 1232 return ret;
1232 case MRT_DONE: 1233 case MRT_DONE:
1233 if (sk != rcu_dereference_raw(mrt->mroute_sk)) 1234 if (sk != rcu_access_pointer(mrt->mroute_sk))
1234 return -EACCES; 1235 return -EACCES;
1235 return ip_ra_control(sk, 0, NULL); 1236 return ip_ra_control(sk, 0, NULL);
1236 case MRT_ADD_VIF: 1237 case MRT_ADD_VIF:
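
Here rcu_access_pointer() replaces rcu_dereference_raw() where the pointer's value is only compared, never followed: it documents the intent and requires no RCU read-side critical section. A kernel-context sketch of the idiom; the names are illustrative:

/* kernel-context sketch, not compilable stand-alone */
#include <linux/rcupdate.h>

struct owner;
static struct owner __rcu *active_owner;

static bool owner_is_active(const struct owner *o)
{
	/* identity check only: no dereference, no rcu_read_lock() needed */
	return rcu_access_pointer(active_owner) == o;
}
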
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 929b27bdeb79..9899619ab9b8 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -5,6 +5,7 @@
5#include <linux/ip.h> 5#include <linux/ip.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/gfp.h> 7#include <linux/gfp.h>
8#include <linux/export.h>
8#include <net/route.h> 9#include <net/route.h>
9#include <net/xfrm.h> 10#include <net/xfrm.h>
10#include <net/ip.h> 11#include <net/ip.h>
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index db8d22db425f..a639967eb727 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -395,7 +395,6 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
395 config = clusterip_config_init(cipinfo, 395 config = clusterip_config_init(cipinfo,
396 e->ip.dst.s_addr, dev); 396 e->ip.dst.s_addr, dev);
397 if (!config) { 397 if (!config) {
398 pr_info("cannot allocate config\n");
399 dev_put(dev); 398 dev_put(dev);
400 return -ENOMEM; 399 return -ENOMEM;
401 } 400 }
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 446e0f467a17..b5508151e547 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -135,10 +135,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
135 * due to slab allocator restrictions */ 135 * due to slab allocator restrictions */
136 136
137 n = max(size, nlbufsiz); 137 n = max(size, nlbufsiz);
138 skb = alloc_skb(n, GFP_ATOMIC); 138 skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
139 if (!skb) { 139 if (!skb) {
140 pr_debug("cannot alloc whole buffer %ub!\n", n);
141
142 if (n > size) { 140 if (n > size) {
143 /* try to allocate only as much as we need for 141 /* try to allocate only as much as we need for
144 * current packet */ 142 * current packet */
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 5585980fce2e..9682b36df38c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -21,6 +21,7 @@
21#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
22#include <net/netfilter/nf_conntrack_acct.h> 22#include <net/netfilter/nf_conntrack_acct.h>
23#include <linux/rculist_nulls.h> 23#include <linux/rculist_nulls.h>
24#include <linux/export.h>
24 25
25struct ct_iter_state { 26struct ct_iter_state {
26 struct seq_net_private p; 27 struct seq_net_private p;
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 703f366fd235..7b22382ff0e9 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb,
70 70
71static void __exit nf_nat_amanda_fini(void) 71static void __exit nf_nat_amanda_fini(void)
72{ 72{
73 rcu_assign_pointer(nf_nat_amanda_hook, NULL); 73 RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
74 synchronize_rcu(); 74 synchronize_rcu();
75} 75}
76 76
77static int __init nf_nat_amanda_init(void) 77static int __init nf_nat_amanda_init(void)
78{ 78{
79 BUG_ON(nf_nat_amanda_hook != NULL); 79 BUG_ON(nf_nat_amanda_hook != NULL);
80 rcu_assign_pointer(nf_nat_amanda_hook, help); 80 RCU_INIT_POINTER(nf_nat_amanda_hook, help);
81 return 0; 81 return 0;
82} 82}
83 83
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3346de5d94d0..447bc5cfdc6c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
514 ret = -EBUSY; 514 ret = -EBUSY;
515 goto out; 515 goto out;
516 } 516 }
517 rcu_assign_pointer(nf_nat_protos[proto->protonum], proto); 517 RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
518 out: 518 out:
519 spin_unlock_bh(&nf_nat_lock); 519 spin_unlock_bh(&nf_nat_lock);
520 return ret; 520 return ret;
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) 525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
526{ 526{
527 spin_lock_bh(&nf_nat_lock); 527 spin_lock_bh(&nf_nat_lock);
528 rcu_assign_pointer(nf_nat_protos[proto->protonum], 528 RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
529 &nf_nat_unknown_protocol); 529 &nf_nat_unknown_protocol);
530 spin_unlock_bh(&nf_nat_lock); 530 spin_unlock_bh(&nf_nat_lock);
531 synchronize_rcu(); 531 synchronize_rcu();
@@ -736,10 +736,10 @@ static int __init nf_nat_init(void)
736 /* Sew in builtin protocols. */ 736 /* Sew in builtin protocols. */
737 spin_lock_bh(&nf_nat_lock); 737 spin_lock_bh(&nf_nat_lock);
738 for (i = 0; i < MAX_IP_NAT_PROTO; i++) 738 for (i = 0; i < MAX_IP_NAT_PROTO; i++)
739 rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol); 739 RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
740 rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp); 740 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
741 rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp); 741 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
742 rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp); 742 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
743 spin_unlock_bh(&nf_nat_lock); 743 spin_unlock_bh(&nf_nat_lock);
744 744
745 /* Initialize fake conntrack so that NAT will skip it */ 745 /* Initialize fake conntrack so that NAT will skip it */
@@ -748,12 +748,12 @@ static int __init nf_nat_init(void)
748 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 748 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
749 749
750 BUG_ON(nf_nat_seq_adjust_hook != NULL); 750 BUG_ON(nf_nat_seq_adjust_hook != NULL);
751 rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); 751 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
752 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 752 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
753 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, 753 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
754 nfnetlink_parse_nat_setup); 754 nfnetlink_parse_nat_setup);
755 BUG_ON(nf_ct_nat_offset != NULL); 755 BUG_ON(nf_ct_nat_offset != NULL);
756 rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset); 756 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
757 return 0; 757 return 0;
758 758
759 cleanup_extend: 759 cleanup_extend:
@@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void)
766 unregister_pernet_subsys(&nf_nat_net_ops); 766 unregister_pernet_subsys(&nf_nat_net_ops);
767 nf_ct_l3proto_put(l3proto); 767 nf_ct_l3proto_put(l3proto);
768 nf_ct_extend_unregister(&nat_extend); 768 nf_ct_extend_unregister(&nat_extend);
769 rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL); 769 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
770 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL); 770 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
771 rcu_assign_pointer(nf_ct_nat_offset, NULL); 771 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
772 synchronize_net(); 772 synchronize_net();
773} 773}
774 774
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index dc73abb3fe27..e462a957d080 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -113,14 +113,14 @@ out:
113 113
114static void __exit nf_nat_ftp_fini(void) 114static void __exit nf_nat_ftp_fini(void)
115{ 115{
116 rcu_assign_pointer(nf_nat_ftp_hook, NULL); 116 RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
117 synchronize_rcu(); 117 synchronize_rcu();
118} 118}
119 119
120static int __init nf_nat_ftp_init(void) 120static int __init nf_nat_ftp_init(void)
121{ 121{
122 BUG_ON(nf_nat_ftp_hook != NULL); 122 BUG_ON(nf_nat_ftp_hook != NULL);
123 rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); 123 RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
124 return 0; 124 return 0;
125} 125}
126 126
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 790f3160e012..b9a1136addbd 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -581,30 +581,30 @@ static int __init init(void)
581 BUG_ON(nat_callforwarding_hook != NULL); 581 BUG_ON(nat_callforwarding_hook != NULL);
582 BUG_ON(nat_q931_hook != NULL); 582 BUG_ON(nat_q931_hook != NULL);
583 583
584 rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); 584 RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
585 rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); 585 RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
586 rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); 586 RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
587 rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); 587 RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
588 rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); 588 RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
589 rcu_assign_pointer(nat_t120_hook, nat_t120); 589 RCU_INIT_POINTER(nat_t120_hook, nat_t120);
590 rcu_assign_pointer(nat_h245_hook, nat_h245); 590 RCU_INIT_POINTER(nat_h245_hook, nat_h245);
591 rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); 591 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
592 rcu_assign_pointer(nat_q931_hook, nat_q931); 592 RCU_INIT_POINTER(nat_q931_hook, nat_q931);
593 return 0; 593 return 0;
594} 594}
595 595
596/****************************************************************************/ 596/****************************************************************************/
597static void __exit fini(void) 597static void __exit fini(void)
598{ 598{
599 rcu_assign_pointer(set_h245_addr_hook, NULL); 599 RCU_INIT_POINTER(set_h245_addr_hook, NULL);
600 rcu_assign_pointer(set_h225_addr_hook, NULL); 600 RCU_INIT_POINTER(set_h225_addr_hook, NULL);
601 rcu_assign_pointer(set_sig_addr_hook, NULL); 601 RCU_INIT_POINTER(set_sig_addr_hook, NULL);
602 rcu_assign_pointer(set_ras_addr_hook, NULL); 602 RCU_INIT_POINTER(set_ras_addr_hook, NULL);
603 rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); 603 RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
604 rcu_assign_pointer(nat_t120_hook, NULL); 604 RCU_INIT_POINTER(nat_t120_hook, NULL);
605 rcu_assign_pointer(nat_h245_hook, NULL); 605 RCU_INIT_POINTER(nat_h245_hook, NULL);
606 rcu_assign_pointer(nat_callforwarding_hook, NULL); 606 RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
607 rcu_assign_pointer(nat_q931_hook, NULL); 607 RCU_INIT_POINTER(nat_q931_hook, NULL);
608 synchronize_rcu(); 608 synchronize_rcu();
609} 609}
610 610
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 535e1a802356..979ae165f4ef 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb,
75 75
76static void __exit nf_nat_irc_fini(void) 76static void __exit nf_nat_irc_fini(void)
77{ 77{
78 rcu_assign_pointer(nf_nat_irc_hook, NULL); 78 RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
79 synchronize_rcu(); 79 synchronize_rcu();
80} 80}
81 81
82static int __init nf_nat_irc_init(void) 82static int __init nf_nat_irc_init(void)
83{ 83{
84 BUG_ON(nf_nat_irc_hook != NULL); 84 BUG_ON(nf_nat_irc_hook != NULL);
85 rcu_assign_pointer(nf_nat_irc_hook, help); 85 RCU_INIT_POINTER(nf_nat_irc_hook, help);
86 return 0; 86 return 0;
87} 87}
88 88
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 4c060038d29f..3e8284ba46b8 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void)
282 nf_nat_need_gre(); 282 nf_nat_need_gre();
283 283
284 BUG_ON(nf_nat_pptp_hook_outbound != NULL); 284 BUG_ON(nf_nat_pptp_hook_outbound != NULL);
285 rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); 285 RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
286 286
287 BUG_ON(nf_nat_pptp_hook_inbound != NULL); 287 BUG_ON(nf_nat_pptp_hook_inbound != NULL);
288 rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); 288 RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
289 289
290 BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); 290 BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
291 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); 291 RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
292 292
293 BUG_ON(nf_nat_pptp_hook_expectfn != NULL); 293 BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
294 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); 294 RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
295 return 0; 295 return 0;
296} 296}
297 297
298static void __exit nf_nat_helper_pptp_fini(void) 298static void __exit nf_nat_helper_pptp_fini(void)
299{ 299{
300 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); 300 RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
301 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); 301 RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
302 rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); 302 RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
303 rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); 303 RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
304 synchronize_rcu(); 304 synchronize_rcu();
305} 305}
306 306
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index f52d41ea0690..a3d997618602 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
12#include <linux/ip.h> 12#include <linux/ip.h>
13 13
14#include <linux/netfilter.h> 14#include <linux/netfilter.h>
15#include <linux/export.h>
15#include <net/secure_seq.h> 16#include <net/secure_seq.h>
16#include <net/netfilter/nf_nat.h> 17#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_core.h> 18#include <net/netfilter/nf_nat_core.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 5744c3ec847c..9f4dc1235dc7 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/export.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/icmp.h> 13#include <linux/icmp.h>
13 14
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 756331d42661..bd5a80a62a5b 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/ip.h> 11#include <linux/ip.h>
12#include <linux/sctp.h> 12#include <linux/sctp.h>
13#include <linux/module.h>
13#include <net/sctp/checksum.h> 14#include <net/sctp/checksum.h>
14 15
15#include <net/netfilter/nf_nat_protocol.h> 16#include <net/netfilter/nf_nat_protocol.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index aa460a595d5d..0d67bb80130f 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/export.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/tcp.h> 13#include <linux/tcp.h>
13 14
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index dfe65c7e2925..0b1b8601cba7 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/export.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/udp.h> 13#include <linux/udp.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index 3cc8c8af39ef..f83ef23e2ab7 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -13,6 +13,7 @@
13#include <linux/udp.h> 13#include <linux/udp.h>
14 14
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <linux/module.h>
16#include <net/netfilter/nf_nat.h> 17#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_protocol.h>
18 19
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf7816fdb..78844d9208f1 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -528,13 +528,13 @@ err1:
528 528
529static void __exit nf_nat_sip_fini(void) 529static void __exit nf_nat_sip_fini(void)
530{ 530{
531 rcu_assign_pointer(nf_nat_sip_hook, NULL); 531 RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
532 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL); 532 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
533 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); 533 RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
534 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); 534 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
535 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); 535 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
536 rcu_assign_pointer(nf_nat_sdp_session_hook, NULL); 536 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
537 rcu_assign_pointer(nf_nat_sdp_media_hook, NULL); 537 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
538 synchronize_rcu(); 538 synchronize_rcu();
539} 539}
540 540
@@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void)
547 BUG_ON(nf_nat_sdp_port_hook != NULL); 547 BUG_ON(nf_nat_sdp_port_hook != NULL);
548 BUG_ON(nf_nat_sdp_session_hook != NULL); 548 BUG_ON(nf_nat_sdp_session_hook != NULL);
549 BUG_ON(nf_nat_sdp_media_hook != NULL); 549 BUG_ON(nf_nat_sdp_media_hook != NULL);
550 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 550 RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
551 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); 551 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
552 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); 552 RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
553 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 553 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
554 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); 554 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
555 rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session); 555 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
556 rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media); 556 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
557 return 0; 557 return 0;
558} 558}
559 559
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 076b7c8c4aa4..2133c30a4a5f 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -400,11 +400,8 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
400 *len = 0; 400 *len = 0;
401 401
402 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); 402 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
403 if (*octets == NULL) { 403 if (*octets == NULL)
404 if (net_ratelimit())
405 pr_notice("OOM in bsalg (%d)\n", __LINE__);
406 return 0; 404 return 0;
407 }
408 405
409 ptr = *octets; 406 ptr = *octets;
410 while (ctx->pointer < eoc) { 407 while (ctx->pointer < eoc) {
@@ -451,11 +448,8 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
451 return 0; 448 return 0;
452 449
453 *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); 450 *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
454 if (*oid == NULL) { 451 if (*oid == NULL)
455 if (net_ratelimit())
456 pr_notice("OOM in bsalg (%d)\n", __LINE__);
457 return 0; 452 return 0;
458 }
459 453
460 optr = *oid; 454 optr = *oid;
461 455
@@ -728,8 +722,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
728 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 722 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
729 if (*obj == NULL) { 723 if (*obj == NULL) {
730 kfree(id); 724 kfree(id);
731 if (net_ratelimit())
732 pr_notice("OOM in bsalg (%d)\n", __LINE__);
733 return 0; 725 return 0;
734 } 726 }
735 (*obj)->syntax.l[0] = l; 727 (*obj)->syntax.l[0] = l;
@@ -744,8 +736,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
744 if (*obj == NULL) { 736 if (*obj == NULL) {
745 kfree(p); 737 kfree(p);
746 kfree(id); 738 kfree(id);
747 if (net_ratelimit())
748 pr_notice("OOM in bsalg (%d)\n", __LINE__);
749 return 0; 739 return 0;
750 } 740 }
751 memcpy((*obj)->syntax.c, p, len); 741 memcpy((*obj)->syntax.c, p, len);
@@ -759,8 +749,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
759 *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); 749 *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
760 if (*obj == NULL) { 750 if (*obj == NULL) {
761 kfree(id); 751 kfree(id);
762 if (net_ratelimit())
763 pr_notice("OOM in bsalg (%d)\n", __LINE__);
764 return 0; 752 return 0;
765 } 753 }
766 if (!asn1_null_decode(ctx, end)) { 754 if (!asn1_null_decode(ctx, end)) {
@@ -780,8 +768,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
780 if (*obj == NULL) { 768 if (*obj == NULL) {
781 kfree(lp); 769 kfree(lp);
782 kfree(id); 770 kfree(id);
783 if (net_ratelimit())
784 pr_notice("OOM in bsalg (%d)\n", __LINE__);
785 return 0; 771 return 0;
786 } 772 }
787 memcpy((*obj)->syntax.ul, lp, len); 773 memcpy((*obj)->syntax.ul, lp, len);
@@ -801,8 +787,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
801 if (*obj == NULL) { 787 if (*obj == NULL) {
802 kfree(p); 788 kfree(p);
803 kfree(id); 789 kfree(id);
804 if (net_ratelimit())
805 pr_notice("OOM in bsalg (%d)\n", __LINE__);
806 return 0; 790 return 0;
807 } 791 }
808 memcpy((*obj)->syntax.uc, p, len); 792 memcpy((*obj)->syntax.uc, p, len);
@@ -819,8 +803,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
819 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 803 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
820 if (*obj == NULL) { 804 if (*obj == NULL) {
821 kfree(id); 805 kfree(id);
822 if (net_ratelimit())
823 pr_notice("OOM in bsalg (%d)\n", __LINE__);
824 return 0; 806 return 0;
825 } 807 }
826 (*obj)->syntax.ul[0] = ul; 808 (*obj)->syntax.ul[0] = ul;
@@ -1310,7 +1292,7 @@ static int __init nf_nat_snmp_basic_init(void)
1310 int ret = 0; 1292 int ret = 0;
1311 1293
1312 BUG_ON(nf_nat_snmp_hook != NULL); 1294 BUG_ON(nf_nat_snmp_hook != NULL);
1313 rcu_assign_pointer(nf_nat_snmp_hook, help); 1295 RCU_INIT_POINTER(nf_nat_snmp_hook, help);
1314 1296
1315 ret = nf_conntrack_helper_register(&snmp_trap_helper); 1297 ret = nf_conntrack_helper_register(&snmp_trap_helper);
1316 if (ret < 0) { 1298 if (ret < 0) {
@@ -1322,7 +1304,7 @@ static int __init nf_nat_snmp_basic_init(void)
1322 1304
1323static void __exit nf_nat_snmp_basic_fini(void) 1305static void __exit nf_nat_snmp_basic_fini(void)
1324{ 1306{
1325 rcu_assign_pointer(nf_nat_snmp_hook, NULL); 1307 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
1326 nf_conntrack_helper_unregister(&snmp_trap_helper); 1308 nf_conntrack_helper_unregister(&snmp_trap_helper);
1327} 1309}
1328 1310
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index a6e606e84820..92900482edea 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void)
284 284
285#ifdef CONFIG_XFRM 285#ifdef CONFIG_XFRM
286 BUG_ON(ip_nat_decode_session != NULL); 286 BUG_ON(ip_nat_decode_session != NULL);
287 rcu_assign_pointer(ip_nat_decode_session, nat_decode_session); 287 RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
288#endif 288#endif
289 ret = nf_nat_rule_init(); 289 ret = nf_nat_rule_init();
290 if (ret < 0) { 290 if (ret < 0) {
@@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void)
302 nf_nat_rule_cleanup(); 302 nf_nat_rule_cleanup();
303 cleanup_decode_session: 303 cleanup_decode_session:
304#ifdef CONFIG_XFRM 304#ifdef CONFIG_XFRM
305 rcu_assign_pointer(ip_nat_decode_session, NULL); 305 RCU_INIT_POINTER(ip_nat_decode_session, NULL);
306 synchronize_net(); 306 synchronize_net();
307#endif 307#endif
308 return ret; 308 return ret;
@@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void)
313 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); 313 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
314 nf_nat_rule_cleanup(); 314 nf_nat_rule_cleanup();
315#ifdef CONFIG_XFRM 315#ifdef CONFIG_XFRM
316 rcu_assign_pointer(ip_nat_decode_session, NULL); 316 RCU_INIT_POINTER(ip_nat_decode_session, NULL);
317 synchronize_net(); 317 synchronize_net();
318#endif 318#endif
319 /* Conntrack caches are unregistered in nf_conntrack_cleanup */ 319 /* Conntrack caches are unregistered in nf_conntrack_cleanup */
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 7274a43c7a12..a2901bf829c0 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb,
36 36
37static void __exit nf_nat_tftp_fini(void) 37static void __exit nf_nat_tftp_fini(void)
38{ 38{
39 rcu_assign_pointer(nf_nat_tftp_hook, NULL); 39 RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
40 synchronize_rcu(); 40 synchronize_rcu();
41} 41}
42 42
43static int __init nf_nat_tftp_init(void) 43static int __init nf_nat_tftp_init(void)
44{ 44{
45 BUG_ON(nf_nat_tftp_hook != NULL); 45 BUG_ON(nf_nat_tftp_hook != NULL);
46 rcu_assign_pointer(nf_nat_tftp_hook, help); 46 RCU_INIT_POINTER(nf_nat_tftp_hook, help);
47 return 0; 47 return 0;
48} 48}
49 49
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 39b403f854c6..a06f73fdb3c0 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -39,6 +39,7 @@
39#include <net/protocol.h> 39#include <net/protocol.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
42#include <linux/export.h>
42#include <net/sock.h> 43#include <net/sock.h>
43#include <net/ping.h> 44#include <net/ping.h>
44#include <net/udp.h> 45#include <net/udp.h>
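
The new <linux/export.h> include here (and in proc.c, raw.c and syncookies.c below) comes from the module.h disentanglement: files that only need EXPORT_SYMBOL() now include the small header explicitly instead of inheriting it indirectly. The shape of the pattern, with a hypothetical function:

    #include <linux/export.h>

    int demo_lookup(int key)        /* hypothetical exported helper */
    {
            return key;
    }
    EXPORT_SYMBOL(demo_lookup);
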
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4bfad5da94f4..466ea8bb7a4d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -42,6 +42,7 @@
42#include <linux/inetdevice.h> 42#include <linux/inetdevice.h>
43#include <linux/proc_fs.h> 43#include <linux/proc_fs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45#include <linux/export.h>
45#include <net/sock.h> 46#include <net/sock.h>
46#include <net/raw.h> 47#include <net/raw.h>
47 48
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 61714bd52925..007e2eb769d3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -48,6 +48,7 @@
48#include <linux/errno.h> 48#include <linux/errno.h>
49#include <linux/aio.h> 49#include <linux/aio.h>
50#include <linux/kernel.h> 50#include <linux/kernel.h>
51#include <linux/export.h>
51#include <linux/spinlock.h> 52#include <linux/spinlock.h>
52#include <linux/sockios.h> 53#include <linux/sockios.h>
53#include <linux/socket.h> 54#include <linux/socket.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 075212e41b83..155138d8ec8b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -120,7 +120,6 @@
120 120
121static int ip_rt_max_size; 121static int ip_rt_max_size;
122static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 122static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
123static int ip_rt_gc_interval __read_mostly = 60 * HZ;
124static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 123static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
125static int ip_rt_redirect_number __read_mostly = 9; 124static int ip_rt_redirect_number __read_mostly = 9;
126static int ip_rt_redirect_load __read_mostly = HZ / 50; 125static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -324,7 +323,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
324 struct rtable *r = NULL; 323 struct rtable *r = NULL;
325 324
326 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 325 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
327 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)) 326 if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
328 continue; 327 continue;
329 rcu_read_lock_bh(); 328 rcu_read_lock_bh();
330 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); 329 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -350,7 +349,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
350 do { 349 do {
351 if (--st->bucket < 0) 350 if (--st->bucket < 0)
352 return NULL; 351 return NULL;
353 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)); 352 } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
354 rcu_read_lock_bh(); 353 rcu_read_lock_bh();
355 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); 354 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
356 } 355 }
@@ -761,7 +760,7 @@ static void rt_do_flush(struct net *net, int process_context)
761 760
762 if (process_context && need_resched()) 761 if (process_context && need_resched())
763 cond_resched(); 762 cond_resched();
764 rth = rcu_dereference_raw(rt_hash_table[i].chain); 763 rth = rcu_access_pointer(rt_hash_table[i].chain);
765 if (!rth) 764 if (!rth)
766 continue; 765 continue;
767 766
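
The three hunks above switch rcu_dereference_raw() to rcu_access_pointer() at sites that only test the chain head against NULL and never dereference it, which is exactly the case rcu_access_pointer() covers: it carries no lockdep or ordering obligations. A reduced sketch with simplified, invented types:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct bucket_node {
            struct bucket_node __rcu *next;
    };

    static struct bucket_node __rcu *bucket_chain;

    static bool bucket_empty(void)
    {
            /* The value is only compared, never dereferenced, so no
             * rcu_read_lock() is required around the access. */
            return rcu_access_pointer(bucket_chain) == NULL;
    }
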
@@ -1309,7 +1308,12 @@ static void rt_del(unsigned hash, struct rtable *rt)
1309void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, 1308void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1310 __be32 saddr, struct net_device *dev) 1309 __be32 saddr, struct net_device *dev)
1311{ 1310{
1311 int s, i;
1312 struct in_device *in_dev = __in_dev_get_rcu(dev); 1312 struct in_device *in_dev = __in_dev_get_rcu(dev);
1313 struct rtable *rt;
1314 __be32 skeys[2] = { saddr, 0 };
1315 int ikeys[2] = { dev->ifindex, 0 };
1316 struct flowi4 fl4;
1313 struct inet_peer *peer; 1317 struct inet_peer *peer;
1314 struct net *net; 1318 struct net *net;
1315 1319
@@ -1332,13 +1336,34 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1332 goto reject_redirect; 1336 goto reject_redirect;
1333 } 1337 }
1334 1338
1335 peer = inet_getpeer_v4(daddr, 1); 1339 memset(&fl4, 0, sizeof(fl4));
1336 if (peer) { 1340 fl4.daddr = daddr;
1337 peer->redirect_learned.a4 = new_gw; 1341 for (s = 0; s < 2; s++) {
1342 for (i = 0; i < 2; i++) {
1343 fl4.flowi4_oif = ikeys[i];
1344 fl4.saddr = skeys[s];
1345 rt = __ip_route_output_key(net, &fl4);
1346 if (IS_ERR(rt))
1347 continue;
1338 1348
1339 inet_putpeer(peer); 1349 if (rt->dst.error || rt->dst.dev != dev ||
1350 rt->rt_gateway != old_gw) {
1351 ip_rt_put(rt);
1352 continue;
1353 }
1354
1355 if (!rt->peer)
1356 rt_bind_peer(rt, rt->rt_dst, 1);
1357
1358 peer = rt->peer;
1359 if (peer) {
1360 peer->redirect_learned.a4 = new_gw;
1361 atomic_inc(&__rt_peer_genid);
1362 }
1340 1363
1341 atomic_inc(&__rt_peer_genid); 1364 ip_rt_put(rt);
1365 return;
1366 }
1342 } 1367 }
1343 return; 1368 return;
1344 1369
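
The rewritten ip_rt_redirect() no longer updates the inet_peer for daddr unconditionally; it tries an output route lookup for each combination of {saddr, 0} source keys and {dev->ifindex, 0} interfaces, and only learns the new gateway when the route actually in use goes out the advertising device via the old gateway (the RFC 1122 rule that a redirect must come from the current first hop). The acceptance test in isolation, as a standalone sketch with invented types:

    #include <stdbool.h>
    #include <stdint.h>

    struct found_route {
            uint32_t gateway;       /* current first-hop gateway */
            int      ifindex;       /* device the route uses */
    };

    /* Honour a redirect only if the looked-up route still goes through
     * the old gateway on the device the redirect arrived on. */
    static bool redirect_acceptable(const struct found_route *rt,
                                    uint32_t old_gw, int in_ifindex)
    {
            return rt && rt->ifindex == in_ifindex && rt->gateway == old_gw;
    }
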
@@ -1568,11 +1593,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1568 est_mtu = mtu; 1593 est_mtu = mtu;
1569 peer->pmtu_learned = mtu; 1594 peer->pmtu_learned = mtu;
1570 peer->pmtu_expires = pmtu_expires; 1595 peer->pmtu_expires = pmtu_expires;
1596 atomic_inc(&__rt_peer_genid);
1571 } 1597 }
1572 1598
1573 inet_putpeer(peer); 1599 inet_putpeer(peer);
1574
1575 atomic_inc(&__rt_peer_genid);
1576 } 1600 }
1577 return est_mtu ? : new_mtu; 1601 return est_mtu ? : new_mtu;
1578} 1602}
@@ -3121,13 +3145,6 @@ static ctl_table ipv4_route_table[] = {
3121 .proc_handler = proc_dointvec_jiffies, 3145 .proc_handler = proc_dointvec_jiffies,
3122 }, 3146 },
3123 { 3147 {
3124 .procname = "gc_interval",
3125 .data = &ip_rt_gc_interval,
3126 .maxlen = sizeof(int),
3127 .mode = 0644,
3128 .proc_handler = proc_dointvec_jiffies,
3129 },
3130 {
3131 .procname = "redirect_load", 3148 .procname = "redirect_load",
3132 .data = &ip_rt_redirect_load, 3149 .data = &ip_rt_redirect_load,
3133 .maxlen = sizeof(int), 3150 .maxlen = sizeof(int),
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3bc5c8f7c71b..90f6544c13e2 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -15,6 +15,7 @@
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/cryptohash.h> 16#include <linux/cryptohash.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/export.h>
18#include <net/tcp.h> 19#include <net/tcp.h>
19#include <net/route.h> 20#include <net/route.h>
20 21
@@ -265,7 +266,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
265 struct ip_options *opt) 266 struct ip_options *opt)
266{ 267{
267 struct tcp_options_received tcp_opt; 268 struct tcp_options_received tcp_opt;
268 u8 *hash_location; 269 const u8 *hash_location;
269 struct inet_request_sock *ireq; 270 struct inet_request_sock *ireq;
270 struct tcp_request_sock *treq; 271 struct tcp_request_sock *treq;
271 struct tcp_sock *tp = tcp_sk(sk); 272 struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb729..34f5db1e1c8b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -374,7 +374,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
374{ 374{
375 unsigned int mask; 375 unsigned int mask;
376 struct sock *sk = sock->sk; 376 struct sock *sk = sock->sk;
377 struct tcp_sock *tp = tcp_sk(sk); 377 const struct tcp_sock *tp = tcp_sk(sk);
378 378
379 sock_poll_wait(file, sk_sleep(sk), wait); 379 sock_poll_wait(file, sk_sleep(sk), wait);
380 if (sk->sk_state == TCP_LISTEN) 380 if (sk->sk_state == TCP_LISTEN)
@@ -524,11 +524,11 @@ EXPORT_SYMBOL(tcp_ioctl);
524 524
525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
526{ 526{
527 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 527 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
528 tp->pushed_seq = tp->write_seq; 528 tp->pushed_seq = tp->write_seq;
529} 529}
530 530
531static inline int forced_push(struct tcp_sock *tp) 531static inline int forced_push(const struct tcp_sock *tp)
532{ 532{
533 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 533 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
534} 534}
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
540 540
541 skb->csum = 0; 541 skb->csum = 0;
542 tcb->seq = tcb->end_seq = tp->write_seq; 542 tcb->seq = tcb->end_seq = tp->write_seq;
543 tcb->flags = TCPHDR_ACK; 543 tcb->tcp_flags = TCPHDR_ACK;
544 tcb->sacked = 0; 544 tcb->sacked = 0;
545 skb_header_release(skb); 545 skb_header_release(skb);
546 tcp_add_write_queue_tail(sk, skb); 546 tcp_add_write_queue_tail(sk, skb);
@@ -813,7 +813,7 @@ new_segment:
813 goto wait_for_memory; 813 goto wait_for_memory;
814 814
815 if (can_coalesce) { 815 if (can_coalesce) {
816 skb_shinfo(skb)->frags[i - 1].size += copy; 816 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
817 } else { 817 } else {
818 get_page(page); 818 get_page(page);
819 skb_fill_page_desc(skb, i, page, offset, copy); 819 skb_fill_page_desc(skb, i, page, offset, copy);
@@ -830,7 +830,7 @@ new_segment:
830 skb_shinfo(skb)->gso_segs = 0; 830 skb_shinfo(skb)->gso_segs = 0;
831 831
832 if (!copied) 832 if (!copied)
833 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; 833 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
834 834
835 copied += copy; 835 copied += copy;
836 poffset += copy; 836 poffset += copy;
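
The frags[i - 1].size += copy sites in tcp_sendpage() and tcp_sendmsg() become skb_frag_size_add() calls; fragment sizes are now read and written through the skb_frag_size*() accessors, so skb_frag_t's layout can change without touching every caller. The accessor pattern in brief (a sketch, assuming the caller guarantees at least one fragment):

    #include <linux/skbuff.h>

    /* Grow the last page fragment of skb by "copy" bytes through the
     * accessor instead of poking skb_frag_t's size field directly. */
    static void grow_tail_frag(struct sk_buff *skb, int copy)
    {
            int i = skb_shinfo(skb)->nr_frags;

            skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
    }
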
@@ -891,9 +891,9 @@ EXPORT_SYMBOL(tcp_sendpage);
891#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 891#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
892#define TCP_OFF(sk) (sk->sk_sndmsg_off) 892#define TCP_OFF(sk) (sk->sk_sndmsg_off)
893 893
894static inline int select_size(struct sock *sk, int sg) 894static inline int select_size(const struct sock *sk, int sg)
895{ 895{
896 struct tcp_sock *tp = tcp_sk(sk); 896 const struct tcp_sock *tp = tcp_sk(sk);
897 int tmp = tp->mss_cache; 897 int tmp = tp->mss_cache;
898 898
899 if (sg) { 899 if (sg) {
@@ -1058,8 +1058,7 @@ new_segment:
1058 1058
1059 /* Update the skb. */ 1059 /* Update the skb. */
1060 if (merge) { 1060 if (merge) {
1061 skb_shinfo(skb)->frags[i - 1].size += 1061 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1062 copy;
1063 } else { 1062 } else {
1064 skb_fill_page_desc(skb, i, page, off, copy); 1063 skb_fill_page_desc(skb, i, page, off, copy);
1065 if (TCP_PAGE(sk)) { 1064 if (TCP_PAGE(sk)) {
@@ -1074,7 +1073,7 @@ new_segment:
1074 } 1073 }
1075 1074
1076 if (!copied) 1075 if (!copied)
1077 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; 1076 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1078 1077
1079 tp->write_seq += copy; 1078 tp->write_seq += copy;
1080 TCP_SKB_CB(skb)->end_seq += copy; 1079 TCP_SKB_CB(skb)->end_seq += copy;
@@ -1194,13 +1193,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1194 struct tcp_sock *tp = tcp_sk(sk); 1193 struct tcp_sock *tp = tcp_sk(sk);
1195 int time_to_ack = 0; 1194 int time_to_ack = 0;
1196 1195
1197#if TCP_DEBUG
1198 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1196 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1199 1197
1200 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1198 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1201 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1199 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1202 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1200 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1203#endif
1204 1201
1205 if (inet_csk_ack_scheduled(sk)) { 1202 if (inet_csk_ack_scheduled(sk)) {
1206 const struct inet_connection_sock *icsk = inet_csk(sk); 1203 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2409,7 +2406,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2409int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2406int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2410 unsigned int optlen) 2407 unsigned int optlen)
2411{ 2408{
2412 struct inet_connection_sock *icsk = inet_csk(sk); 2409 const struct inet_connection_sock *icsk = inet_csk(sk);
2413 2410
2414 if (level != SOL_TCP) 2411 if (level != SOL_TCP)
2415 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 2412 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
@@ -2431,9 +2428,9 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
2431#endif 2428#endif
2432 2429
2433/* Return information about state of tcp endpoint in API format. */ 2430/* Return information about state of tcp endpoint in API format. */
2434void tcp_get_info(struct sock *sk, struct tcp_info *info) 2431void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2435{ 2432{
2436 struct tcp_sock *tp = tcp_sk(sk); 2433 const struct tcp_sock *tp = tcp_sk(sk);
2437 const struct inet_connection_sock *icsk = inet_csk(sk); 2434 const struct inet_connection_sock *icsk = inet_csk(sk);
2438 u32 now = tcp_time_stamp; 2435 u32 now = tcp_time_stamp;
2439 2436
@@ -2455,8 +2452,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2455 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2452 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2456 } 2453 }
2457 2454
2458 if (tp->ecn_flags&TCP_ECN_OK) 2455 if (tp->ecn_flags & TCP_ECN_OK)
2459 info->tcpi_options |= TCPI_OPT_ECN; 2456 info->tcpi_options |= TCPI_OPT_ECN;
2457 if (tp->ecn_flags & TCP_ECN_SEEN)
2458 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2460 2459
2461 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2460 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2462 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2461 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2857,26 +2856,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
2857 2856
2858#ifdef CONFIG_TCP_MD5SIG 2857#ifdef CONFIG_TCP_MD5SIG
2859static unsigned long tcp_md5sig_users; 2858static unsigned long tcp_md5sig_users;
2860static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool; 2859static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
2861static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2860static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2862 2861
2863static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) 2862static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2864{ 2863{
2865 int cpu; 2864 int cpu;
2865
2866 for_each_possible_cpu(cpu) { 2866 for_each_possible_cpu(cpu) {
2867 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); 2867 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2868 if (p) { 2868
2869 if (p->md5_desc.tfm) 2869 if (p->md5_desc.tfm)
2870 crypto_free_hash(p->md5_desc.tfm); 2870 crypto_free_hash(p->md5_desc.tfm);
2871 kfree(p);
2872 }
2873 } 2871 }
2874 free_percpu(pool); 2872 free_percpu(pool);
2875} 2873}
2876 2874
2877void tcp_free_md5sig_pool(void) 2875void tcp_free_md5sig_pool(void)
2878{ 2876{
2879 struct tcp_md5sig_pool * __percpu *pool = NULL; 2877 struct tcp_md5sig_pool __percpu *pool = NULL;
2880 2878
2881 spin_lock_bh(&tcp_md5sig_pool_lock); 2879 spin_lock_bh(&tcp_md5sig_pool_lock);
2882 if (--tcp_md5sig_users == 0) { 2880 if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2887,24 @@ void tcp_free_md5sig_pool(void)
2889} 2887}
2890EXPORT_SYMBOL(tcp_free_md5sig_pool); 2888EXPORT_SYMBOL(tcp_free_md5sig_pool);
2891 2889
2892static struct tcp_md5sig_pool * __percpu * 2890static struct tcp_md5sig_pool __percpu *
2893__tcp_alloc_md5sig_pool(struct sock *sk) 2891__tcp_alloc_md5sig_pool(struct sock *sk)
2894{ 2892{
2895 int cpu; 2893 int cpu;
2896 struct tcp_md5sig_pool * __percpu *pool; 2894 struct tcp_md5sig_pool __percpu *pool;
2897 2895
2898 pool = alloc_percpu(struct tcp_md5sig_pool *); 2896 pool = alloc_percpu(struct tcp_md5sig_pool);
2899 if (!pool) 2897 if (!pool)
2900 return NULL; 2898 return NULL;
2901 2899
2902 for_each_possible_cpu(cpu) { 2900 for_each_possible_cpu(cpu) {
2903 struct tcp_md5sig_pool *p;
2904 struct crypto_hash *hash; 2901 struct crypto_hash *hash;
2905 2902
2906 p = kzalloc(sizeof(*p), sk->sk_allocation);
2907 if (!p)
2908 goto out_free;
2909 *per_cpu_ptr(pool, cpu) = p;
2910
2911 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2903 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2912 if (!hash || IS_ERR(hash)) 2904 if (!hash || IS_ERR(hash))
2913 goto out_free; 2905 goto out_free;
2914 2906
2915 p->md5_desc.tfm = hash; 2907 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2916 } 2908 }
2917 return pool; 2909 return pool;
2918out_free: 2910out_free:
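
The MD5 pool refactor above replaces a per-cpu array of pointers (alloc_percpu(struct tcp_md5sig_pool *) plus one kzalloc() per CPU) with per-cpu storage of the struct itself, so a single alloc_percpu() covers every CPU and the *per_cpu_ptr(...) double hop disappears. The allocation shape, reduced to a sketch in which struct demo_pool and make_tfm() are invented stand-ins:

    #include <linux/percpu.h>

    struct demo_pool {
            void *tfm;
    };

    void *make_tfm(void);   /* stand-in for crypto_alloc_hash() */

    static struct demo_pool __percpu *demo_pool_alloc(void)
    {
            struct demo_pool __percpu *pool;
            int cpu;

            pool = alloc_percpu(struct demo_pool);  /* one allocation, all cpus */
            if (!pool)
                    return NULL;

            for_each_possible_cpu(cpu)
                    per_cpu_ptr(pool, cpu)->tfm = make_tfm();  /* init in place */

            return pool;
    }
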
@@ -2920,9 +2912,9 @@ out_free:
2920 return NULL; 2912 return NULL;
2921} 2913}
2922 2914
2923struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 2915struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2924{ 2916{
2925 struct tcp_md5sig_pool * __percpu *pool; 2917 struct tcp_md5sig_pool __percpu *pool;
2926 int alloc = 0; 2918 int alloc = 0;
2927 2919
2928retry: 2920retry:
@@ -2941,7 +2933,7 @@ retry:
2941 2933
2942 if (alloc) { 2934 if (alloc) {
2943 /* we cannot hold spinlock here because this may sleep. */ 2935 /* we cannot hold spinlock here because this may sleep. */
2944 struct tcp_md5sig_pool * __percpu *p; 2936 struct tcp_md5sig_pool __percpu *p;
2945 2937
2946 p = __tcp_alloc_md5sig_pool(sk); 2938 p = __tcp_alloc_md5sig_pool(sk);
2947 spin_lock_bh(&tcp_md5sig_pool_lock); 2939 spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2966,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2974 */ 2966 */
2975struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 2967struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2976{ 2968{
2977 struct tcp_md5sig_pool * __percpu *p; 2969 struct tcp_md5sig_pool __percpu *p;
2978 2970
2979 local_bh_disable(); 2971 local_bh_disable();
2980 2972
@@ -2985,7 +2977,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2985 spin_unlock(&tcp_md5sig_pool_lock); 2977 spin_unlock(&tcp_md5sig_pool_lock);
2986 2978
2987 if (p) 2979 if (p)
2988 return *this_cpu_ptr(p); 2980 return this_cpu_ptr(p);
2989 2981
2990 local_bh_enable(); 2982 local_bh_enable();
2991 return NULL; 2983 return NULL;
@@ -3000,23 +2992,25 @@ void tcp_put_md5sig_pool(void)
3000EXPORT_SYMBOL(tcp_put_md5sig_pool); 2992EXPORT_SYMBOL(tcp_put_md5sig_pool);
3001 2993
3002int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, 2994int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3003 struct tcphdr *th) 2995 const struct tcphdr *th)
3004{ 2996{
3005 struct scatterlist sg; 2997 struct scatterlist sg;
2998 struct tcphdr hdr;
3006 int err; 2999 int err;
3007 3000
3008 __sum16 old_checksum = th->check; 3001 /* We are not allowed to change tcphdr, make a local copy */
3009 th->check = 0; 3002 memcpy(&hdr, th, sizeof(hdr));
3003 hdr.check = 0;
3004
3010 /* options aren't included in the hash */ 3005 /* options aren't included in the hash */
3011 sg_init_one(&sg, th, sizeof(struct tcphdr)); 3006 sg_init_one(&sg, &hdr, sizeof(hdr));
3012 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr)); 3007 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3013 th->check = old_checksum;
3014 return err; 3008 return err;
3015} 3009}
3016EXPORT_SYMBOL(tcp_md5_hash_header); 3010EXPORT_SYMBOL(tcp_md5_hash_header);
3017 3011
3018int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3012int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3019 struct sk_buff *skb, unsigned header_len) 3013 const struct sk_buff *skb, unsigned int header_len)
3020{ 3014{
3021 struct scatterlist sg; 3015 struct scatterlist sg;
3022 const struct tcphdr *tp = tcp_hdr(skb); 3016 const struct tcphdr *tp = tcp_hdr(skb);
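
With th now const, tcp_md5_hash_header() can no longer zero th->check in place and restore it afterwards; it copies the header to the stack and hashes the copy, which also stops the hash path from briefly corrupting a header other contexts may be reading. The copy-before-mutate pattern as a standalone sketch (struct demo_hdr and hash_bytes() are invented):

    #include <string.h>

    struct demo_hdr {               /* stand-in for struct tcphdr */
            unsigned short check;
            unsigned char  rest[18];
    };

    void hash_bytes(const void *buf, size_t len);   /* hypothetical hash call */

    static void hash_header(const struct demo_hdr *th)
    {
            struct demo_hdr hdr;

            memcpy(&hdr, th, sizeof(hdr));  /* never scribble on const input */
            hdr.check = 0;                  /* checksum is excluded from hash */
            hash_bytes(&hdr, sizeof(hdr));
    }
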
@@ -3035,8 +3029,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3035 3029
3036 for (i = 0; i < shi->nr_frags; ++i) { 3030 for (i = 0; i < shi->nr_frags; ++i) {
3037 const struct skb_frag_struct *f = &shi->frags[i]; 3031 const struct skb_frag_struct *f = &shi->frags[i];
3038 sg_set_page(&sg, f->page, f->size, f->page_offset); 3032 struct page *page = skb_frag_page(f);
3039 if (crypto_hash_update(desc, &sg, f->size)) 3033 sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
3034 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3040 return 1; 3035 return 1;
3041 } 3036 }
3042 3037
@@ -3048,7 +3043,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3048} 3043}
3049EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3044EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3050 3045
3051int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) 3046int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3052{ 3047{
3053 struct scatterlist sg; 3048 struct scatterlist sg;
3054 3049
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d73aab3fbfc0..52b5c2d0ecd0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -206,7 +206,7 @@ static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
206 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; 206 tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
207} 207}
208 208
209static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb) 209static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
210{ 210{
211 if (tcp_hdr(skb)->cwr) 211 if (tcp_hdr(skb)->cwr)
212 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 212 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
@@ -217,32 +217,41 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
217 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 217 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
218} 218}
219 219
220static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb) 220static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
221{ 221{
222 if (tp->ecn_flags & TCP_ECN_OK) { 222 if (!(tp->ecn_flags & TCP_ECN_OK))
223 if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags)) 223 return;
224 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; 224
225 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
226 case INET_ECN_NOT_ECT:
225 /* Funny extension: if ECT is not set on a segment, 227 /* Funny extension: if ECT is not set on a segment,
226 * it is surely retransmit. It is not in ECN RFC, 228 * and we already seen ECT on a previous segment,
227 * but Linux follows this rule. */ 229 * it is probably a retransmit.
228 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) 230 */
231 if (tp->ecn_flags & TCP_ECN_SEEN)
229 tcp_enter_quickack_mode((struct sock *)tp); 232 tcp_enter_quickack_mode((struct sock *)tp);
233 break;
234 case INET_ECN_CE:
235 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 236 /* fall through */
237 default:
238 tp->ecn_flags |= TCP_ECN_SEEN;
230 } 239 }
231} 240}
232 241
233static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th) 242static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
234{ 243{
235 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) 244 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
236 tp->ecn_flags &= ~TCP_ECN_OK; 245 tp->ecn_flags &= ~TCP_ECN_OK;
237} 246}
238 247
239static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th) 248static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
240{ 249{
241 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) 250 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
242 tp->ecn_flags &= ~TCP_ECN_OK; 251 tp->ecn_flags &= ~TCP_ECN_OK;
243} 252}
244 253
245static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th) 254static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
246{ 255{
247 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) 256 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
248 return 1; 257 return 1;
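
TCP_ECN_check_ce() becomes a switch over the ECN bits of the stored IP dsfield, and a new TCP_ECN_SEEN flag records that an ECT codepoint has been observed; quickack mode is now forced for a not-ECT segment only after ECT has been seen on the flow, so a path that strips ECN does not degrade every ACK. The decision table as a standalone rendering (flag values are illustrative, not the kernel's):

    #define ECN_NOT_ECT 0   /* INET_ECN_NOT_ECT */
    #define ECN_ECT_1   1
    #define ECN_ECT_0   2
    #define ECN_CE      3   /* INET_ECN_CE */

    #define F_ECN_OK         0x1
    #define F_ECN_DEMAND_CWR 0x2
    #define F_ECN_SEEN       0x4

    static void check_ce(unsigned int *flags, unsigned int dsfield,
                         void (*enter_quickack)(void))
    {
            if (!(*flags & F_ECN_OK))
                    return;

            switch (dsfield & 0x3) {
            case ECN_NOT_ECT:
                    /* Not-ECT after ECT was seen: probably a retransmit. */
                    if (*flags & F_ECN_SEEN)
                            enter_quickack();
                    break;
            case ECN_CE:
                    *flags |= F_ECN_DEMAND_CWR;
                    /* fall through: CE also proves the path carries ECT */
            default:
                    *flags |= F_ECN_SEEN;
            }
    }
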
@@ -256,14 +265,11 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
256 265
257static void tcp_fixup_sndbuf(struct sock *sk) 266static void tcp_fixup_sndbuf(struct sock *sk)
258{ 267{
259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + 268 int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
260 sizeof(struct sk_buff);
261 269
262 if (sk->sk_sndbuf < 3 * sndmem) { 270 sndmem *= TCP_INIT_CWND;
263 sk->sk_sndbuf = 3 * sndmem; 271 if (sk->sk_sndbuf < sndmem)
264 if (sk->sk_sndbuf > sysctl_tcp_wmem[2]) 272 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
265 sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 }
267} 273}
268 274
269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 275/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -309,7 +315,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
309 return 0; 315 return 0;
310} 316}
311 317
312static void tcp_grow_window(struct sock *sk, struct sk_buff *skb) 318static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
313{ 319{
314 struct tcp_sock *tp = tcp_sk(sk); 320 struct tcp_sock *tp = tcp_sk(sk);
315 321
@@ -339,17 +345,24 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
339 345
340static void tcp_fixup_rcvbuf(struct sock *sk) 346static void tcp_fixup_rcvbuf(struct sock *sk)
341{ 347{
342 struct tcp_sock *tp = tcp_sk(sk); 348 u32 mss = tcp_sk(sk)->advmss;
343 int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); 349 u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
350 int rcvmem;
344 351
345 /* Try to select rcvbuf so that 4 mss-sized segments 352 /* Limit to 10 segments if mss <= 1460,
346 * will fit to window and corresponding skbs will fit to our rcvbuf. 353 * or 14600/mss segments, with a minimum of two segments.
347 * (was 3; 4 is minimum to allow fast retransmit to work.)
348 */ 354 */
349 while (tcp_win_from_space(rcvmem) < tp->advmss) 355 if (mss > 1460)
356 icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
357
358 rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
359 while (tcp_win_from_space(rcvmem) < mss)
350 rcvmem += 128; 360 rcvmem += 128;
351 if (sk->sk_rcvbuf < 4 * rcvmem) 361
352 sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]); 362 rcvmem *= icwnd;
363
364 if (sk->sk_rcvbuf < rcvmem)
365 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
353} 366}
354 367
355/* 4. Try to fixup all. It is made immediately after connection enters 368/* 4. Try to fixup all. It is made immediately after connection enters
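
tcp_fixup_rcvbuf() now provisions the receive buffer for the peer's likely initial congestion window: TCP_DEFAULT_INIT_RCVWND (10) segments when mss <= 1460, otherwise 14600/mss segments with a floor of two. The clamp in isolation:

    /* Initial receive-window segment count used by the new
     * tcp_fixup_rcvbuf(): 10 segments at a 1460-byte MSS, scaled
     * down (minimum 2) for larger MSS values. */
    static unsigned int init_rcvwnd_segs(unsigned int mss)
    {
            unsigned int icwnd = 10;        /* TCP_DEFAULT_INIT_RCVWND */

            if (mss > 1460) {
                    icwnd = (1460 * 10) / mss;
                    if (icwnd < 2)
                            icwnd = 2;      /* e.g. mss 8940 -> 2 segments */
            }
            return icwnd;
    }
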
@@ -416,7 +429,7 @@ static void tcp_clamp_window(struct sock *sk)
416 */ 429 */
417void tcp_initialize_rcv_mss(struct sock *sk) 430void tcp_initialize_rcv_mss(struct sock *sk)
418{ 431{
419 struct tcp_sock *tp = tcp_sk(sk); 432 const struct tcp_sock *tp = tcp_sk(sk);
420 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 433 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
421 434
422 hint = min(hint, tp->rcv_wnd / 2); 435 hint = min(hint, tp->rcv_wnd / 2);
@@ -531,8 +544,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
531 space /= tp->advmss; 544 space /= tp->advmss;
532 if (!space) 545 if (!space)
533 space = 1; 546 space = 1;
534 rcvmem = (tp->advmss + MAX_TCP_HEADER + 547 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
535 16 + sizeof(struct sk_buff));
536 while (tcp_win_from_space(rcvmem) < tp->advmss) 548 while (tcp_win_from_space(rcvmem) < tp->advmss)
537 rcvmem += 128; 549 rcvmem += 128;
538 space *= rcvmem; 550 space *= rcvmem;
@@ -812,7 +824,7 @@ void tcp_update_metrics(struct sock *sk)
812 } 824 }
813} 825}
814 826
815__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) 827__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
816{ 828{
817 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 829 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
818 830
@@ -1204,7 +1216,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1204 tp->lost_retrans_low = new_low_seq; 1216 tp->lost_retrans_low = new_low_seq;
1205} 1217}
1206 1218
1207static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, 1219static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1208 struct tcp_sack_block_wire *sp, int num_sacks, 1220 struct tcp_sack_block_wire *sp, int num_sacks,
1209 u32 prior_snd_una) 1221 u32 prior_snd_una)
1210{ 1222{
@@ -1298,7 +1310,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1298 return in_sack; 1310 return in_sack;
1299} 1311}
1300 1312
1301static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, 1313static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1302 struct tcp_sacktag_state *state, 1314 struct tcp_sacktag_state *state,
1303 int dup_sack, int pcount) 1315 int dup_sack, int pcount)
1304{ 1316{
@@ -1438,7 +1450,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1438 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1450 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1439 } 1451 }
1440 1452
1441 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags; 1453 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
1442 if (skb == tcp_highest_sack(sk)) 1454 if (skb == tcp_highest_sack(sk))
1443 tcp_advance_highest_sack(sk, skb); 1455 tcp_advance_highest_sack(sk, skb);
1444 1456
@@ -1453,13 +1465,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1453/* I wish gso_size would have a bit more sane initialization than 1465/* I wish gso_size would have a bit more sane initialization than
1454 * something-or-zero which complicates things 1466 * something-or-zero which complicates things
1455 */ 1467 */
1456static int tcp_skb_seglen(struct sk_buff *skb) 1468static int tcp_skb_seglen(const struct sk_buff *skb)
1457{ 1469{
1458 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); 1470 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1459} 1471}
1460 1472
1461/* Shifting pages past head area doesn't work */ 1473/* Shifting pages past head area doesn't work */
1462static int skb_can_shift(struct sk_buff *skb) 1474static int skb_can_shift(const struct sk_buff *skb)
1463{ 1475{
1464 return !skb_headlen(skb) && skb_is_nonlinear(skb); 1476 return !skb_headlen(skb) && skb_is_nonlinear(skb);
1465} 1477}
@@ -1708,19 +1720,19 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1708 return skb; 1720 return skb;
1709} 1721}
1710 1722
1711static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache) 1723static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
1712{ 1724{
1713 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1725 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1714} 1726}
1715 1727
1716static int 1728static int
1717tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, 1729tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1718 u32 prior_snd_una) 1730 u32 prior_snd_una)
1719{ 1731{
1720 const struct inet_connection_sock *icsk = inet_csk(sk); 1732 const struct inet_connection_sock *icsk = inet_csk(sk);
1721 struct tcp_sock *tp = tcp_sk(sk); 1733 struct tcp_sock *tp = tcp_sk(sk);
1722 unsigned char *ptr = (skb_transport_header(ack_skb) + 1734 const unsigned char *ptr = (skb_transport_header(ack_skb) +
1723 TCP_SKB_CB(ack_skb)->sacked); 1735 TCP_SKB_CB(ack_skb)->sacked);
1724 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1736 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
1725 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1737 struct tcp_sack_block sp[TCP_NUM_SACKS];
1726 struct tcp_sack_block *cache; 1738 struct tcp_sack_block *cache;
@@ -2284,7 +2296,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
2284 return 0; 2296 return 0;
2285} 2297}
2286 2298
2287static inline int tcp_fackets_out(struct tcp_sock *tp) 2299static inline int tcp_fackets_out(const struct tcp_sock *tp)
2288{ 2300{
2289 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 2301 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
2290} 2302}
@@ -2304,19 +2316,20 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
2304 * they differ. Since neither occurs due to loss, TCP should really 2316 * they differ. Since neither occurs due to loss, TCP should really
2305 * ignore them. 2317 * ignore them.
2306 */ 2318 */
2307static inline int tcp_dupack_heuristics(struct tcp_sock *tp) 2319static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2308{ 2320{
2309 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2321 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
2310} 2322}
2311 2323
2312static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 2324static inline int tcp_skb_timedout(const struct sock *sk,
2325 const struct sk_buff *skb)
2313{ 2326{
2314 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; 2327 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
2315} 2328}
2316 2329
2317static inline int tcp_head_timedout(struct sock *sk) 2330static inline int tcp_head_timedout(const struct sock *sk)
2318{ 2331{
2319 struct tcp_sock *tp = tcp_sk(sk); 2332 const struct tcp_sock *tp = tcp_sk(sk);
2320 2333
2321 return tp->packets_out && 2334 return tp->packets_out &&
2322 tcp_skb_timedout(sk, tcp_write_queue_head(sk)); 2335 tcp_skb_timedout(sk, tcp_write_queue_head(sk));
@@ -2627,7 +2640,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
2627/* Nothing was retransmitted or returned timestamp is less 2640/* Nothing was retransmitted or returned timestamp is less
2628 * than timestamp of the first retransmission. 2641 * than timestamp of the first retransmission.
2629 */ 2642 */
2630static inline int tcp_packet_delayed(struct tcp_sock *tp) 2643static inline int tcp_packet_delayed(const struct tcp_sock *tp)
2631{ 2644{
2632 return !tp->retrans_stamp || 2645 return !tp->retrans_stamp ||
2633 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2646 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2688,7 +2701,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
2688 tp->snd_cwnd_stamp = tcp_time_stamp; 2701 tp->snd_cwnd_stamp = tcp_time_stamp;
2689} 2702}
2690 2703
2691static inline int tcp_may_undo(struct tcp_sock *tp) 2704static inline int tcp_may_undo(const struct tcp_sock *tp)
2692{ 2705{
2693 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2706 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2694} 2707}
@@ -2752,9 +2765,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
2752 * that successive retransmissions of a segment must not advance 2765 * that successive retransmissions of a segment must not advance
2753 * retrans_stamp under any conditions. 2766 * retrans_stamp under any conditions.
2754 */ 2767 */
2755static int tcp_any_retrans_done(struct sock *sk) 2768static int tcp_any_retrans_done(const struct sock *sk)
2756{ 2769{
2757 struct tcp_sock *tp = tcp_sk(sk); 2770 const struct tcp_sock *tp = tcp_sk(sk);
2758 struct sk_buff *skb; 2771 struct sk_buff *skb;
2759 2772
2760 if (tp->retrans_out) 2773 if (tp->retrans_out)
@@ -2828,9 +2841,13 @@ static int tcp_try_undo_loss(struct sock *sk)
2828static inline void tcp_complete_cwr(struct sock *sk) 2841static inline void tcp_complete_cwr(struct sock *sk)
2829{ 2842{
2830 struct tcp_sock *tp = tcp_sk(sk); 2843 struct tcp_sock *tp = tcp_sk(sk);
2831 /* Do not moderate cwnd if it's already undone in cwr or recovery */ 2844
2832 if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) { 2845 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2833 tp->snd_cwnd = tp->snd_ssthresh; 2846 if (tp->undo_marker) {
2847 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
2848 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2849 else /* PRR */
2850 tp->snd_cwnd = tp->snd_ssthresh;
2834 tp->snd_cwnd_stamp = tcp_time_stamp; 2851 tp->snd_cwnd_stamp = tcp_time_stamp;
2835 } 2852 }
2836 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2853 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2948,6 +2965,38 @@ void tcp_simple_retransmit(struct sock *sk)
2948} 2965}
2949EXPORT_SYMBOL(tcp_simple_retransmit); 2966EXPORT_SYMBOL(tcp_simple_retransmit);
2950 2967
 2968/* This function implements the PRR algorithm, specifically the PRR-SSRB
2969 * (proportional rate reduction with slow start reduction bound) as described in
2970 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
2971 * It computes the number of packets to send (sndcnt) based on packets newly
2972 * delivered:
 2973 * 1) If the number of packets in flight is larger than ssthresh, PRR spreads the
 2974 * cwnd reductions across a full RTT.
 2975 * 2) If the number of packets in flight is lower than ssthresh (such as due to excess
2976 * losses and/or application stalls), do not perform any further cwnd
2977 * reductions, but instead slow start up to ssthresh.
2978 */
2979static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
2980 int fast_rexmit, int flag)
2981{
2982 struct tcp_sock *tp = tcp_sk(sk);
2983 int sndcnt = 0;
2984 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2985
2986 if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
2987 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2988 tp->prior_cwnd - 1;
2989 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2990 } else {
2991 sndcnt = min_t(int, delta,
2992 max_t(int, tp->prr_delivered - tp->prr_out,
2993 newly_acked_sacked) + 1);
2994 }
2995
2996 sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
2997 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
2998}
2999
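
In the proportional phase (in_flight > ssthresh) the sender paces out ssthresh/prior_cwnd of each delivery, spreading the cwnd reduction over one round trip; below ssthresh it stops reducing and slow-starts back up, never past ssthresh, and a fast retransmit is always allowed at least one segment. The same arithmetic as a standalone transcription, with the tcp_sock state passed as explicit parameters:

    #include <stdint.h>

    static int prr_sndcnt(uint32_t ssthresh, uint32_t prior_cwnd,
                          uint32_t in_flight, uint32_t prr_delivered,
                          uint32_t prr_out, int newly_acked_sacked,
                          int fast_rexmit)
    {
            int sndcnt;

            if (in_flight > ssthresh) {
                    /* Proportional phase: release ssthresh/prior_cwnd of
                     * what has been delivered, rounding up. */
                    uint64_t dividend = (uint64_t)ssthresh * prr_delivered +
                                        prior_cwnd - 1;
                    sndcnt = (int)(dividend / prior_cwnd) - (int)prr_out;
            } else {
                    /* Slow-start reduction bound: grow back to ssthresh. */
                    int delta  = (int)(ssthresh - in_flight);
                    int budget = (int)(prr_delivered - prr_out);

                    if (budget < newly_acked_sacked)
                            budget = newly_acked_sacked;
                    sndcnt = budget + 1;
                    if (sndcnt > delta)
                            sndcnt = delta;
            }

            if (sndcnt < (fast_rexmit ? 1 : 0))
                    sndcnt = fast_rexmit ? 1 : 0;
            return sndcnt;                  /* new cwnd = in_flight + sndcnt */
    }
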
2951/* Process an event, which can update packets-in-flight not trivially. 3000/* Process an event, which can update packets-in-flight not trivially.
2952 * Main goal of this function is to calculate new estimate for left_out, 3001 * Main goal of this function is to calculate new estimate for left_out,
2953 * taking into account both packets sitting in receiver's buffer and 3002 * taking into account both packets sitting in receiver's buffer and
@@ -2959,7 +3008,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit);
2959 * It does _not_ decide what to send, it is made in function 3008 * It does _not_ decide what to send, it is made in function
2960 * tcp_xmit_retransmit_queue(). 3009 * tcp_xmit_retransmit_queue().
2961 */ 3010 */
2962static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) 3011static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3012 int newly_acked_sacked, int flag)
2963{ 3013{
2964 struct inet_connection_sock *icsk = inet_csk(sk); 3014 struct inet_connection_sock *icsk = inet_csk(sk);
2965 struct tcp_sock *tp = tcp_sk(sk); 3015 struct tcp_sock *tp = tcp_sk(sk);
@@ -3109,13 +3159,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
3109 3159
3110 tp->bytes_acked = 0; 3160 tp->bytes_acked = 0;
3111 tp->snd_cwnd_cnt = 0; 3161 tp->snd_cwnd_cnt = 0;
3162 tp->prior_cwnd = tp->snd_cwnd;
3163 tp->prr_delivered = 0;
3164 tp->prr_out = 0;
3112 tcp_set_ca_state(sk, TCP_CA_Recovery); 3165 tcp_set_ca_state(sk, TCP_CA_Recovery);
3113 fast_rexmit = 1; 3166 fast_rexmit = 1;
3114 } 3167 }
3115 3168
3116 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 3169 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
3117 tcp_update_scoreboard(sk, fast_rexmit); 3170 tcp_update_scoreboard(sk, fast_rexmit);
3118 tcp_cwnd_down(sk, flag); 3171 tp->prr_delivered += newly_acked_sacked;
3172 tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
3119 tcp_xmit_retransmit_queue(sk); 3173 tcp_xmit_retransmit_queue(sk);
3120} 3174}
3121 3175
@@ -3192,7 +3246,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
3192 */ 3246 */
3193static void tcp_rearm_rto(struct sock *sk) 3247static void tcp_rearm_rto(struct sock *sk)
3194{ 3248{
3195 struct tcp_sock *tp = tcp_sk(sk); 3249 const struct tcp_sock *tp = tcp_sk(sk);
3196 3250
3197 if (!tp->packets_out) { 3251 if (!tp->packets_out) {
3198 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3252 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -3296,7 +3350,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3296 * connection startup slow start one packet too 3350 * connection startup slow start one packet too
3297 * quickly. This is severely frowned upon behavior. 3351 * quickly. This is severely frowned upon behavior.
3298 */ 3352 */
3299 if (!(scb->flags & TCPHDR_SYN)) { 3353 if (!(scb->tcp_flags & TCPHDR_SYN)) {
3300 flag |= FLAG_DATA_ACKED; 3354 flag |= FLAG_DATA_ACKED;
3301 } else { 3355 } else {
3302 flag |= FLAG_SYN_ACKED; 3356 flag |= FLAG_SYN_ACKED;
@@ -3444,7 +3498,7 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
3444 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3498 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
3445 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 3499 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
3446 */ 3500 */
3447static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, 3501static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3448 u32 ack_seq) 3502 u32 ack_seq)
3449{ 3503{
3450 struct tcp_sock *tp = tcp_sk(sk); 3504 struct tcp_sock *tp = tcp_sk(sk);
@@ -3620,7 +3674,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
3620} 3674}
3621 3675
3622/* This routine deals with incoming acks, but not outgoing ones. */ 3676/* This routine deals with incoming acks, but not outgoing ones. */
3623static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 3677static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3624{ 3678{
3625 struct inet_connection_sock *icsk = inet_csk(sk); 3679 struct inet_connection_sock *icsk = inet_csk(sk);
3626 struct tcp_sock *tp = tcp_sk(sk); 3680 struct tcp_sock *tp = tcp_sk(sk);
@@ -3630,6 +3684,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3630 u32 prior_in_flight; 3684 u32 prior_in_flight;
3631 u32 prior_fackets; 3685 u32 prior_fackets;
3632 int prior_packets; 3686 int prior_packets;
3687 int prior_sacked = tp->sacked_out;
3688 int newly_acked_sacked = 0;
3633 int frto_cwnd = 0; 3689 int frto_cwnd = 0;
3634 3690
3635 /* If the ack is older than previous acks 3691 /* If the ack is older than previous acks
@@ -3701,6 +3757,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3701 /* See if we can take anything off of the retransmit queue. */ 3757 /* See if we can take anything off of the retransmit queue. */
3702 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3758 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
3703 3759
3760 newly_acked_sacked = (prior_packets - prior_sacked) -
3761 (tp->packets_out - tp->sacked_out);
3762
3704 if (tp->frto_counter) 3763 if (tp->frto_counter)
3705 frto_cwnd = tcp_process_frto(sk, flag); 3764 frto_cwnd = tcp_process_frto(sk, flag);
3706 /* Guarantee sacktag reordering detection against wrap-arounds */ 3765 /* Guarantee sacktag reordering detection against wrap-arounds */
@@ -3713,7 +3772,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3713 tcp_may_raise_cwnd(sk, flag)) 3772 tcp_may_raise_cwnd(sk, flag))
3714 tcp_cong_avoid(sk, ack, prior_in_flight); 3773 tcp_cong_avoid(sk, ack, prior_in_flight);
3715 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, 3774 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
3716 flag); 3775 newly_acked_sacked, flag);
3717 } else { 3776 } else {
3718 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3777 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
3719 tcp_cong_avoid(sk, ack, prior_in_flight); 3778 tcp_cong_avoid(sk, ack, prior_in_flight);
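
tcp_fastretrans_alert() now receives newly_acked_sacked, the count of packets taken off the network by this ACK whether cumulatively acked or newly SACKed; this is the delivery signal PRR accumulates into prr_delivered. Its computation, spelled out:

    /* Packets newly delivered by one ACK: the drop in outstanding,
     * not-yet-sacked packets across processing of that ACK. */
    static int newly_delivered(int prior_packets, int prior_sacked,
                               int packets_out, int sacked_out)
    {
            return (prior_packets - prior_sacked) -
                   (packets_out - sacked_out);
    }
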
@@ -3752,14 +3811,14 @@ old_ack:
3752 * But, this can also be called on packets in the established flow when 3811 * But, this can also be called on packets in the established flow when
3753 * the fast version below fails. 3812 * the fast version below fails.
3754 */ 3813 */
3755void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3814void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
3756 u8 **hvpp, int estab) 3815 const u8 **hvpp, int estab)
3757{ 3816{
3758 unsigned char *ptr; 3817 const unsigned char *ptr;
3759 struct tcphdr *th = tcp_hdr(skb); 3818 const struct tcphdr *th = tcp_hdr(skb);
3760 int length = (th->doff * 4) - sizeof(struct tcphdr); 3819 int length = (th->doff * 4) - sizeof(struct tcphdr);
3761 3820
3762 ptr = (unsigned char *)(th + 1); 3821 ptr = (const unsigned char *)(th + 1);
3763 opt_rx->saw_tstamp = 0; 3822 opt_rx->saw_tstamp = 0;
3764 3823
3765 while (length > 0) { 3824 while (length > 0) {
@@ -3870,9 +3929,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3870} 3929}
3871EXPORT_SYMBOL(tcp_parse_options); 3930EXPORT_SYMBOL(tcp_parse_options);
3872 3931
3873static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) 3932static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
3874{ 3933{
3875 __be32 *ptr = (__be32 *)(th + 1); 3934 const __be32 *ptr = (const __be32 *)(th + 1);
3876 3935
3877 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3936 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3878 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 3937 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
@@ -3889,8 +3948,9 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3889/* Fast parse options. This hopes to only see timestamps. 3948/* Fast parse options. This hopes to only see timestamps.
3890 * If it is wrong it falls back on tcp_parse_options(). 3949 * If it is wrong it falls back on tcp_parse_options().
3891 */ 3950 */
3892static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 3951static int tcp_fast_parse_options(const struct sk_buff *skb,
3893 struct tcp_sock *tp, u8 **hvpp) 3952 const struct tcphdr *th,
3953 struct tcp_sock *tp, const u8 **hvpp)
3894{ 3954{
3895 /* In the spirit of fast parsing, compare doff directly to constant 3955 /* In the spirit of fast parsing, compare doff directly to constant
3896 * values. Because equality is used, short doff can be ignored here. 3956 * values. Because equality is used, short doff can be ignored here.
@@ -3911,10 +3971,10 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3911/* 3971/*
3912 * Parse MD5 Signature option 3972 * Parse MD5 Signature option
3913 */ 3973 */
3914u8 *tcp_parse_md5sig_option(struct tcphdr *th) 3974const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3915{ 3975{
3916 int length = (th->doff << 2) - sizeof (*th); 3976 int length = (th->doff << 2) - sizeof(*th);
3917 u8 *ptr = (u8*)(th + 1); 3977 const u8 *ptr = (const u8 *)(th + 1);
3918 3978
3919 /* If the TCP option is too short, we can short cut */ 3979 /* If the TCP option is too short, we can short cut */
3920 if (length < TCPOLEN_MD5SIG) 3980 if (length < TCPOLEN_MD5SIG)
@@ -3991,8 +4051,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3991 4051
3992static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 4052static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
3993{ 4053{
3994 struct tcp_sock *tp = tcp_sk(sk); 4054 const struct tcp_sock *tp = tcp_sk(sk);
3995 struct tcphdr *th = tcp_hdr(skb); 4055 const struct tcphdr *th = tcp_hdr(skb);
3996 u32 seq = TCP_SKB_CB(skb)->seq; 4056 u32 seq = TCP_SKB_CB(skb)->seq;
3997 u32 ack = TCP_SKB_CB(skb)->ack_seq; 4057 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3998 4058
@@ -4031,7 +4091,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
4031 * (borrowed from freebsd) 4091 * (borrowed from freebsd)
4032 */ 4092 */
4033 4093
4034static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) 4094static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
4035{ 4095{
4036 return !before(end_seq, tp->rcv_wup) && 4096 return !before(end_seq, tp->rcv_wup) &&
4037 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 4097 !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4076,7 +4136,7 @@ static void tcp_reset(struct sock *sk)
4076 * 4136 *
4077 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4137 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4078 */ 4138 */
4079static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) 4139static void tcp_fin(struct sock *sk)
4080{ 4140{
4081 struct tcp_sock *tp = tcp_sk(sk); 4141 struct tcp_sock *tp = tcp_sk(sk);
4082 4142
@@ -4188,7 +4248,7 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4188 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4248 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4189} 4249}
4190 4250
4191static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 4251static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
4192{ 4252{
4193 struct tcp_sock *tp = tcp_sk(sk); 4253 struct tcp_sock *tp = tcp_sk(sk);
4194 4254
@@ -4347,7 +4407,7 @@ static void tcp_ofo_queue(struct sock *sk)
4347 __skb_queue_tail(&sk->sk_receive_queue, skb); 4407 __skb_queue_tail(&sk->sk_receive_queue, skb);
4348 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4408 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4349 if (tcp_hdr(skb)->fin) 4409 if (tcp_hdr(skb)->fin)
4350 tcp_fin(skb, sk, tcp_hdr(skb)); 4410 tcp_fin(sk);
4351 } 4411 }
4352} 4412}
4353 4413
@@ -4375,7 +4435,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
4375 4435
4376static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4436static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4377{ 4437{
4378 struct tcphdr *th = tcp_hdr(skb); 4438 const struct tcphdr *th = tcp_hdr(skb);
4379 struct tcp_sock *tp = tcp_sk(sk); 4439 struct tcp_sock *tp = tcp_sk(sk);
4380 int eaten = -1; 4440 int eaten = -1;
4381 4441
@@ -4429,7 +4489,7 @@ queue_and_out:
4429 if (skb->len) 4489 if (skb->len)
4430 tcp_event_data_recv(sk, skb); 4490 tcp_event_data_recv(sk, skb);
4431 if (th->fin) 4491 if (th->fin)
4432 tcp_fin(skb, sk, th); 4492 tcp_fin(sk);
4433 4493
4434 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4494 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4435 tcp_ofo_queue(sk); 4495 tcp_ofo_queue(sk);
@@ -4859,9 +4919,9 @@ void tcp_cwnd_application_limited(struct sock *sk)
4859 tp->snd_cwnd_stamp = tcp_time_stamp; 4919 tp->snd_cwnd_stamp = tcp_time_stamp;
4860} 4920}
4861 4921
4862static int tcp_should_expand_sndbuf(struct sock *sk) 4922static int tcp_should_expand_sndbuf(const struct sock *sk)
4863{ 4923{
4864 struct tcp_sock *tp = tcp_sk(sk); 4924 const struct tcp_sock *tp = tcp_sk(sk);
4865 4925
4866 /* If the user specified a specific send buffer setting, do 4926 /* If the user specified a specific send buffer setting, do
4867 * not modify it. 4927 * not modify it.
@@ -4895,8 +4955,10 @@ static void tcp_new_space(struct sock *sk)
4895 struct tcp_sock *tp = tcp_sk(sk); 4955 struct tcp_sock *tp = tcp_sk(sk);
4896 4956
4897 if (tcp_should_expand_sndbuf(sk)) { 4957 if (tcp_should_expand_sndbuf(sk)) {
4898 int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 4958 int sndmem = SKB_TRUESIZE(max_t(u32,
4899 MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); 4959 tp->rx_opt.mss_clamp,
4960 tp->mss_cache) +
4961 MAX_TCP_HEADER);
4900 int demanded = max_t(unsigned int, tp->snd_cwnd, 4962 int demanded = max_t(unsigned int, tp->snd_cwnd,
4901 tp->reordering + 1); 4963 tp->reordering + 1);
4902 sndmem *= 2 * demanded; 4964 sndmem *= 2 * demanded;
@@ -4968,7 +5030,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
4968 * either form (or just set the sysctl tcp_stdurg). 5030 * either form (or just set the sysctl tcp_stdurg).
4969 */ 5031 */
4970 5032
4971static void tcp_check_urg(struct sock *sk, struct tcphdr *th) 5033static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
4972{ 5034{
4973 struct tcp_sock *tp = tcp_sk(sk); 5035 struct tcp_sock *tp = tcp_sk(sk);
4974 u32 ptr = ntohs(th->urg_ptr); 5036 u32 ptr = ntohs(th->urg_ptr);
@@ -5034,7 +5096,7 @@ static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
5034} 5096}
5035 5097
5036/* This is the 'fast' part of urgent handling. */ 5098/* This is the 'fast' part of urgent handling. */
5037static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) 5099static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
5038{ 5100{
5039 struct tcp_sock *tp = tcp_sk(sk); 5101 struct tcp_sock *tp = tcp_sk(sk);
5040 5102
@@ -5155,9 +5217,9 @@ out:
5155 * play significant role here. 5217 * play significant role here.
5156 */ 5218 */
5157static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5219static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5158 struct tcphdr *th, int syn_inerr) 5220 const struct tcphdr *th, int syn_inerr)
5159{ 5221{
5160 u8 *hash_location; 5222 const u8 *hash_location;
5161 struct tcp_sock *tp = tcp_sk(sk); 5223 struct tcp_sock *tp = tcp_sk(sk);
5162 5224
5163 /* RFC1323: H1. Apply PAWS check first. */ 5225 /* RFC1323: H1. Apply PAWS check first. */
@@ -5238,7 +5300,7 @@ discard:
5238 * tcp_data_queue when everything is OK. 5300 * tcp_data_queue when everything is OK.
5239 */ 5301 */
5240int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5302int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5241 struct tcphdr *th, unsigned len) 5303 const struct tcphdr *th, unsigned int len)
5242{ 5304{
5243 struct tcp_sock *tp = tcp_sk(sk); 5305 struct tcp_sock *tp = tcp_sk(sk);
5244 int res; 5306 int res;
@@ -5449,9 +5511,9 @@ discard:
5449EXPORT_SYMBOL(tcp_rcv_established); 5511EXPORT_SYMBOL(tcp_rcv_established);
5450 5512
5451static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5513static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5452 struct tcphdr *th, unsigned len) 5514 const struct tcphdr *th, unsigned int len)
5453{ 5515{
5454 u8 *hash_location; 5516 const u8 *hash_location;
5455 struct inet_connection_sock *icsk = inet_csk(sk); 5517 struct inet_connection_sock *icsk = inet_csk(sk);
5456 struct tcp_sock *tp = tcp_sk(sk); 5518 struct tcp_sock *tp = tcp_sk(sk);
5457 struct tcp_cookie_values *cvp = tp->cookie_values; 5519 struct tcp_cookie_values *cvp = tp->cookie_values;
@@ -5726,7 +5788,7 @@ reset_and_undo:
5726 */ 5788 */
5727 5789
5728int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 5790int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5729 struct tcphdr *th, unsigned len) 5791 const struct tcphdr *th, unsigned int len)
5730{ 5792{
5731 struct tcp_sock *tp = tcp_sk(sk); 5793 struct tcp_sock *tp = tcp_sk(sk);
5732 struct inet_connection_sock *icsk = inet_csk(sk); 5794 struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7963e03f1068..a7443159c400 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
92static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, 92static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
93 __be32 addr); 93 __be32 addr);
94static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 94static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, struct tcphdr *th); 95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96#else 96#else
97static inline 97static inline
98struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) 98struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -104,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
104struct inet_hashinfo tcp_hashinfo; 104struct inet_hashinfo tcp_hashinfo;
105EXPORT_SYMBOL(tcp_hashinfo); 105EXPORT_SYMBOL(tcp_hashinfo);
106 106
107static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) 107static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
108{ 108{
109 return secure_tcp_sequence_number(ip_hdr(skb)->daddr, 109 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
110 ip_hdr(skb)->saddr, 110 ip_hdr(skb)->saddr,
@@ -552,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
552/* This routine computes an IPv4 TCP checksum. */ 552/* This routine computes an IPv4 TCP checksum. */
553void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) 553void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
554{ 554{
555 struct inet_sock *inet = inet_sk(sk); 555 const struct inet_sock *inet = inet_sk(sk);
556 556
557 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); 557 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
558} 558}
@@ -590,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
590 590
591static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) 591static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
592{ 592{
593 struct tcphdr *th = tcp_hdr(skb); 593 const struct tcphdr *th = tcp_hdr(skb);
594 struct { 594 struct {
595 struct tcphdr th; 595 struct tcphdr th;
596#ifdef CONFIG_TCP_MD5SIG 596#ifdef CONFIG_TCP_MD5SIG
@@ -652,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
652 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 652 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
653 653
654 net = dev_net(skb_dst(skb)->dev); 654 net = dev_net(skb_dst(skb)->dev);
655 arg.tos = ip_hdr(skb)->tos;
655 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 656 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
656 &arg, arg.iov[0].iov_len); 657 &arg, arg.iov[0].iov_len);
657 658
@@ -666,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
666static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, 667static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
667 u32 win, u32 ts, int oif, 668 u32 win, u32 ts, int oif,
668 struct tcp_md5sig_key *key, 669 struct tcp_md5sig_key *key,
669 int reply_flags) 670 int reply_flags, u8 tos)
670{ 671{
671 struct tcphdr *th = tcp_hdr(skb); 672 const struct tcphdr *th = tcp_hdr(skb);
672 struct { 673 struct {
673 struct tcphdr th; 674 struct tcphdr th;
674 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) 675 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -726,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
726 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 727 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
727 if (oif) 728 if (oif)
728 arg.bound_dev_if = oif; 729 arg.bound_dev_if = oif;
729 730 arg.tos = tos;
730 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 731 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
731 &arg, arg.iov[0].iov_len); 732 &arg, arg.iov[0].iov_len);
732 733
@@ -743,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
743 tcptw->tw_ts_recent, 744 tcptw->tw_ts_recent,
744 tw->tw_bound_dev_if, 745 tw->tw_bound_dev_if,
745 tcp_twsk_md5_key(tcptw), 746 tcp_twsk_md5_key(tcptw),
746 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0 747 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
748 tw->tw_tos
747 ); 749 );
748 750
749 inet_twsk_put(tw); 751 inet_twsk_put(tw);
@@ -757,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
757 req->ts_recent, 759 req->ts_recent,
758 0, 760 0,
759 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr), 761 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
760 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0); 762 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
763 ip_hdr(skb)->tos);
761} 764}
762 765
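The hunks above thread the incoming packet's TOS through TCP's stateless replies: tcp_v4_send_reset() copies ip_hdr(skb)->tos into arg.tos, tcp_v4_send_ack() grows a tos parameter, and the timewait and request-sock callers pass tw->tw_tos and ip_hdr(skb)->tos respectively, so RSTs and ACKs leave with the DSCP marking of the flow they answer. A hedged user-space analogue using the standard IP_TOS socket option (the kernel path fills arg.tos directly rather than calling setsockopt):

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int tos = 0x10; /* pretend this was read from the received IP header */

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Every datagram sent on fd now carries the echoed TOS byte. */
        if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
                perror("setsockopt(IP_TOS)");
        close(fd);
        return 0;
}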
763/* 766/*
@@ -1090,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1090} 1093}
1091 1094
1092static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 1095static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1093 __be32 daddr, __be32 saddr, struct tcphdr *th) 1096 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1094{ 1097{
1095 struct tcp_md5sig_pool *hp; 1098 struct tcp_md5sig_pool *hp;
1096 struct hash_desc *desc; 1099 struct hash_desc *desc;
@@ -1122,12 +1125,12 @@ clear_hash_noput:
1122} 1125}
1123 1126
1124int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, 1127int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1125 struct sock *sk, struct request_sock *req, 1128 const struct sock *sk, const struct request_sock *req,
1126 struct sk_buff *skb) 1129 const struct sk_buff *skb)
1127{ 1130{
1128 struct tcp_md5sig_pool *hp; 1131 struct tcp_md5sig_pool *hp;
1129 struct hash_desc *desc; 1132 struct hash_desc *desc;
1130 struct tcphdr *th = tcp_hdr(skb); 1133 const struct tcphdr *th = tcp_hdr(skb);
1131 __be32 saddr, daddr; 1134 __be32 saddr, daddr;
1132 1135
1133 if (sk) { 1136 if (sk) {
@@ -1172,7 +1175,7 @@ clear_hash_noput:
1172} 1175}
1173EXPORT_SYMBOL(tcp_v4_md5_hash_skb); 1176EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1174 1177
1175static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) 1178static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1176{ 1179{
1177 /* 1180 /*
1178 * This gets called for each TCP segment that arrives 1181 * This gets called for each TCP segment that arrives
@@ -1182,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1182 * o MD5 hash and we're not expecting one. 1185 * o MD5 hash and we're not expecting one.
1183 * o MD5 hash and it's wrong. 1186 * o MD5 hash and it's wrong.
1184 */ 1187 */
1185 __u8 *hash_location = NULL; 1188 const __u8 *hash_location = NULL;
1186 struct tcp_md5sig_key *hash_expected; 1189 struct tcp_md5sig_key *hash_expected;
1187 const struct iphdr *iph = ip_hdr(skb); 1190 const struct iphdr *iph = ip_hdr(skb);
1188 struct tcphdr *th = tcp_hdr(skb); 1191 const struct tcphdr *th = tcp_hdr(skb);
1189 int genhash; 1192 int genhash;
1190 unsigned char newhash[16]; 1193 unsigned char newhash[16];
1191 1194
@@ -1248,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1248{ 1251{
1249 struct tcp_extend_values tmp_ext; 1252 struct tcp_extend_values tmp_ext;
1250 struct tcp_options_received tmp_opt; 1253 struct tcp_options_received tmp_opt;
1251 u8 *hash_location; 1254 const u8 *hash_location;
1252 struct request_sock *req; 1255 struct request_sock *req;
1253 struct inet_request_sock *ireq; 1256 struct inet_request_sock *ireq;
1254 struct tcp_sock *tp = tcp_sk(sk); 1257 struct tcp_sock *tp = tcp_sk(sk);
@@ -1507,6 +1510,7 @@ exit:
1507 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1510 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1508 return NULL; 1511 return NULL;
1509put_and_exit: 1512put_and_exit:
1513 bh_unlock_sock(newsk);
1510 sock_put(newsk); 1514 sock_put(newsk);
1511 goto exit; 1515 goto exit;
1512} 1516}
@@ -1588,7 +1592,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1588#endif 1592#endif
1589 1593
1590 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1594 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1591 sock_rps_save_rxhash(sk, skb->rxhash); 1595 sock_rps_save_rxhash(sk, skb);
1592 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { 1596 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1593 rsk = sk; 1597 rsk = sk;
1594 goto reset; 1598 goto reset;
@@ -1605,7 +1609,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1605 goto discard; 1609 goto discard;
1606 1610
1607 if (nsk != sk) { 1611 if (nsk != sk) {
1608 sock_rps_save_rxhash(nsk, skb->rxhash); 1612 sock_rps_save_rxhash(nsk, skb);
1609 if (tcp_child_process(sk, nsk, skb)) { 1613 if (tcp_child_process(sk, nsk, skb)) {
1610 rsk = nsk; 1614 rsk = nsk;
1611 goto reset; 1615 goto reset;
@@ -1613,7 +1617,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1613 return 0; 1617 return 0;
1614 } 1618 }
1615 } else 1619 } else
1616 sock_rps_save_rxhash(sk, skb->rxhash); 1620 sock_rps_save_rxhash(sk, skb);
1617 1621
1618 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { 1622 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1619 rsk = sk; 1623 rsk = sk;
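The three edits above change sock_rps_save_rxhash() to take the skb itself instead of a pre-extracted skb->rxhash, keeping the decision of what to record inside one helper; the companion udp.c hunk further down switches the disconnect path to an explicit sock_rps_reset_rxhash() rather than saving a literal 0. A sketch of that API shape (all names and types here are illustrative):

#include <stdio.h>

struct toy_skb {
        unsigned int rxhash;
};

struct toy_sock {
        unsigned int rxhash;
};

/* Taking the skb keeps the extraction logic in one place and lets the
 * helper grow (e.g. compute a hash when none is present) without
 * touching every caller. */
static void save_rxhash(struct toy_sock *sk, const struct toy_skb *skb)
{
        if (sk->rxhash != skb->rxhash)
                sk->rxhash = skb->rxhash;
}

static void reset_rxhash(struct toy_sock *sk)
{
        sk->rxhash = 0; /* explicit reset, not a save of "hash 0" */
}

int main(void)
{
        struct toy_sock sk = { 0 };
        struct toy_skb skb = { 0xabcd };

        save_rxhash(&sk, &skb);
        printf("%#x\n", sk.rxhash);
        reset_rxhash(&sk);
        printf("%#x\n", sk.rxhash);
        return 0;
}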
@@ -1645,7 +1649,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
1645int tcp_v4_rcv(struct sk_buff *skb) 1649int tcp_v4_rcv(struct sk_buff *skb)
1646{ 1650{
1647 const struct iphdr *iph; 1651 const struct iphdr *iph;
1648 struct tcphdr *th; 1652 const struct tcphdr *th;
1649 struct sock *sk; 1653 struct sock *sk;
1650 int ret; 1654 int ret;
1651 struct net *net = dev_net(skb->dev); 1655 struct net *net = dev_net(skb->dev);
@@ -1680,7 +1684,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
1680 skb->len - th->doff * 4); 1684 skb->len - th->doff * 4);
1681 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1685 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1682 TCP_SKB_CB(skb)->when = 0; 1686 TCP_SKB_CB(skb)->when = 0;
1683 TCP_SKB_CB(skb)->flags = iph->tos; 1687 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1684 TCP_SKB_CB(skb)->sacked = 0; 1688 TCP_SKB_CB(skb)->sacked = 0;
1685 1689
1686 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); 1690 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1809,7 +1813,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
1809 1813
1810void *tcp_v4_tw_get_peer(struct sock *sk) 1814void *tcp_v4_tw_get_peer(struct sock *sk)
1811{ 1815{
1812 struct inet_timewait_sock *tw = inet_twsk(sk); 1816 const struct inet_timewait_sock *tw = inet_twsk(sk);
1813 1817
1814 return inet_getpeer_v4(tw->tw_daddr, 1); 1818 return inet_getpeer_v4(tw->tw_daddr, 1);
1815} 1819}
@@ -2336,7 +2340,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2336 } 2340 }
2337} 2341}
2338 2342
2339static int tcp_seq_open(struct inode *inode, struct file *file) 2343int tcp_seq_open(struct inode *inode, struct file *file)
2340{ 2344{
2341 struct tcp_seq_afinfo *afinfo = PDE(inode)->data; 2345 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2342 struct tcp_iter_state *s; 2346 struct tcp_iter_state *s;
@@ -2352,23 +2356,19 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
2352 s->last_pos = 0; 2356 s->last_pos = 0;
2353 return 0; 2357 return 0;
2354} 2358}
2359EXPORT_SYMBOL(tcp_seq_open);
2355 2360
2356int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo) 2361int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2357{ 2362{
2358 int rc = 0; 2363 int rc = 0;
2359 struct proc_dir_entry *p; 2364 struct proc_dir_entry *p;
2360 2365
2361 afinfo->seq_fops.open = tcp_seq_open;
2362 afinfo->seq_fops.read = seq_read;
2363 afinfo->seq_fops.llseek = seq_lseek;
2364 afinfo->seq_fops.release = seq_release_net;
2365
2366 afinfo->seq_ops.start = tcp_seq_start; 2366 afinfo->seq_ops.start = tcp_seq_start;
2367 afinfo->seq_ops.next = tcp_seq_next; 2367 afinfo->seq_ops.next = tcp_seq_next;
2368 afinfo->seq_ops.stop = tcp_seq_stop; 2368 afinfo->seq_ops.stop = tcp_seq_stop;
2369 2369
2370 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, 2370 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2371 &afinfo->seq_fops, afinfo); 2371 afinfo->seq_fops, afinfo);
2372 if (!p) 2372 if (!p)
2373 rc = -ENOMEM; 2373 rc = -ENOMEM;
2374 return rc; 2374 return rc;
@@ -2381,7 +2381,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2381} 2381}
2382EXPORT_SYMBOL(tcp_proc_unregister); 2382EXPORT_SYMBOL(tcp_proc_unregister);
2383 2383
2384static void get_openreq4(struct sock *sk, struct request_sock *req, 2384static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2385 struct seq_file *f, int i, int uid, int *len) 2385 struct seq_file *f, int i, int uid, int *len)
2386{ 2386{
2387 const struct inet_request_sock *ireq = inet_rsk(req); 2387 const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2411,9 +2411,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2411{ 2411{
2412 int timer_active; 2412 int timer_active;
2413 unsigned long timer_expires; 2413 unsigned long timer_expires;
2414 struct tcp_sock *tp = tcp_sk(sk); 2414 const struct tcp_sock *tp = tcp_sk(sk);
2415 const struct inet_connection_sock *icsk = inet_csk(sk); 2415 const struct inet_connection_sock *icsk = inet_csk(sk);
2416 struct inet_sock *inet = inet_sk(sk); 2416 const struct inet_sock *inet = inet_sk(sk);
2417 __be32 dest = inet->inet_daddr; 2417 __be32 dest = inet->inet_daddr;
2418 __be32 src = inet->inet_rcv_saddr; 2418 __be32 src = inet->inet_rcv_saddr;
2419 __u16 destp = ntohs(inet->inet_dport); 2419 __u16 destp = ntohs(inet->inet_dport);
@@ -2462,7 +2462,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2462 len); 2462 len);
2463} 2463}
2464 2464
2465static void get_timewait4_sock(struct inet_timewait_sock *tw, 2465static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2466 struct seq_file *f, int i, int *len) 2466 struct seq_file *f, int i, int *len)
2467{ 2467{
2468 __be32 dest, src; 2468 __be32 dest, src;
@@ -2517,12 +2517,18 @@ out:
2517 return 0; 2517 return 0;
2518} 2518}
2519 2519
2520static const struct file_operations tcp_afinfo_seq_fops = {
2521 .owner = THIS_MODULE,
2522 .open = tcp_seq_open,
2523 .read = seq_read,
2524 .llseek = seq_lseek,
2525 .release = seq_release_net
2526};
2527
2520static struct tcp_seq_afinfo tcp4_seq_afinfo = { 2528static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2521 .name = "tcp", 2529 .name = "tcp",
2522 .family = AF_INET, 2530 .family = AF_INET,
2523 .seq_fops = { 2531 .seq_fops = &tcp_afinfo_seq_fops,
2524 .owner = THIS_MODULE,
2525 },
2526 .seq_ops = { 2532 .seq_ops = {
2527 .show = tcp4_seq_show, 2533 .show = tcp4_seq_show,
2528 }, 2534 },
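The /proc refactor above un-statics tcp_seq_open(), exports it, and replaces the writable file_operations embedded in every tcp_seq_afinfo (previously patched field-by-field inside tcp_proc_register()) with one shared static const table passed to proc_create_data(); the same conversion repeats below for udp and udplite. A sketch of the shared-const-ops pattern with illustrative types:

#include <stdio.h>

struct toy_ops {
        void (*show)(const char *name);
};

static void toy_show(const char *name)
{
        printf("%s\n", name);
}

/* Read-only and shared: lives in .rodata, cannot be patched at run time. */
static const struct toy_ops toy_afinfo_ops = {
        .show = toy_show,
};

struct toy_afinfo {
        const char *name;
        const struct toy_ops *ops; /* pointer, not an embedded writable copy */
};

int main(void)
{
        struct toy_afinfo tcp4 = { "tcp", &toy_afinfo_ops };
        struct toy_afinfo udp4 = { "udp", &toy_afinfo_ops };

        tcp4.ops->show(tcp4.name);
        udp4.ops->show(udp4.name);
        return 0;
}

Keeping one const table means it can sit in read-only memory, and registration no longer mutates per-protocol state.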
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0ce3d06dce60..66363b689ad6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -141,7 +141,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
141 const struct tcphdr *th) 141 const struct tcphdr *th)
142{ 142{
143 struct tcp_options_received tmp_opt; 143 struct tcp_options_received tmp_opt;
144 u8 *hash_location; 144 const u8 *hash_location;
145 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 145 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
146 int paws_reject = 0; 146 int paws_reject = 0;
147 147
@@ -345,6 +345,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
345 tw6 = inet6_twsk((struct sock *)tw); 345 tw6 = inet6_twsk((struct sock *)tw);
346 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); 346 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
347 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); 347 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
348 tw->tw_tclass = np->tclass;
348 tw->tw_ipv6only = np->ipv6only; 349 tw->tw_ipv6only = np->ipv6only;
349 } 350 }
350#endif 351#endif
@@ -567,7 +568,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
567 struct request_sock **prev) 568 struct request_sock **prev)
568{ 569{
569 struct tcp_options_received tmp_opt; 570 struct tcp_options_received tmp_opt;
570 u8 *hash_location; 571 const u8 *hash_location;
571 struct sock *child; 572 struct sock *child;
572 const struct tcphdr *th = tcp_hdr(skb); 573 const struct tcphdr *th = tcp_hdr(skb);
573 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 574 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 882e0b0964d0..980b98f6288c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
65 65
66 66
67/* Account for new data that has been sent to the network. */ 67/* Account for new data that has been sent to the network. */
68static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 68static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
69{ 69{
70 struct tcp_sock *tp = tcp_sk(sk); 70 struct tcp_sock *tp = tcp_sk(sk);
71 unsigned int prior_packets = tp->packets_out; 71 unsigned int prior_packets = tp->packets_out;
@@ -89,9 +89,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
89 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 89 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
90 * invalid. OK, let's make this for now: 90 * invalid. OK, let's make this for now:
91 */ 91 */
92static inline __u32 tcp_acceptable_seq(struct sock *sk) 92static inline __u32 tcp_acceptable_seq(const struct sock *sk)
93{ 93{
94 struct tcp_sock *tp = tcp_sk(sk); 94 const struct tcp_sock *tp = tcp_sk(sk);
95 95
96 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 96 if (!before(tcp_wnd_end(tp), tp->snd_nxt))
97 return tp->snd_nxt; 97 return tp->snd_nxt;
@@ -116,7 +116,7 @@ static inline __u32 tcp_acceptable_seq(struct sock *sk)
116static __u16 tcp_advertise_mss(struct sock *sk) 116static __u16 tcp_advertise_mss(struct sock *sk)
117{ 117{
118 struct tcp_sock *tp = tcp_sk(sk); 118 struct tcp_sock *tp = tcp_sk(sk);
119 struct dst_entry *dst = __sk_dst_get(sk); 119 const struct dst_entry *dst = __sk_dst_get(sk);
120 int mss = tp->advmss; 120 int mss = tp->advmss;
121 121
122 if (dst) { 122 if (dst) {
@@ -133,7 +133,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
133 133
134/* RFC2861. Reset CWND after idle period longer than RTO to "restart window". 134/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
135 * This is the first part of the cwnd validation mechanism. */ 135 * This is the first part of the cwnd validation mechanism. */
136static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) 136static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
137{ 137{
138 struct tcp_sock *tp = tcp_sk(sk); 138 struct tcp_sock *tp = tcp_sk(sk);
139 s32 delta = tcp_time_stamp - tp->lsndtime; 139 s32 delta = tcp_time_stamp - tp->lsndtime;
@@ -154,7 +154,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
154 154
155/* Congestion state accounting after a packet has been sent. */ 155/* Congestion state accounting after a packet has been sent. */
156static void tcp_event_data_sent(struct tcp_sock *tp, 156static void tcp_event_data_sent(struct tcp_sock *tp,
157 struct sk_buff *skb, struct sock *sk) 157 struct sock *sk)
158{ 158{
159 struct inet_connection_sock *icsk = inet_csk(sk); 159 struct inet_connection_sock *icsk = inet_csk(sk);
160 const u32 now = tcp_time_stamp; 160 const u32 now = tcp_time_stamp;
@@ -295,11 +295,11 @@ static u16 tcp_select_window(struct sock *sk)
295} 295}
296 296
297/* Packet ECN state for a SYN-ACK */ 297/* Packet ECN state for a SYN-ACK */
298static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) 298static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
299{ 299{
300 TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR; 300 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
301 if (!(tp->ecn_flags & TCP_ECN_OK)) 301 if (!(tp->ecn_flags & TCP_ECN_OK))
302 TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE; 302 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
303} 303}
304 304
305/* Packet ECN state for a SYN. */ 305/* Packet ECN state for a SYN. */
@@ -309,13 +309,13 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
309 309
310 tp->ecn_flags = 0; 310 tp->ecn_flags = 0;
311 if (sysctl_tcp_ecn == 1) { 311 if (sysctl_tcp_ecn == 1) {
312 TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR; 312 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
313 tp->ecn_flags = TCP_ECN_OK; 313 tp->ecn_flags = TCP_ECN_OK;
314 } 314 }
315} 315}
316 316
317static __inline__ void 317static __inline__ void
318TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) 318TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
319{ 319{
320 if (inet_rsk(req)->ecn_ok) 320 if (inet_rsk(req)->ecn_ok)
321 th->ece = 1; 321 th->ece = 1;
@@ -356,7 +356,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
356 skb->ip_summed = CHECKSUM_PARTIAL; 356 skb->ip_summed = CHECKSUM_PARTIAL;
357 skb->csum = 0; 357 skb->csum = 0;
358 358
359 TCP_SKB_CB(skb)->flags = flags; 359 TCP_SKB_CB(skb)->tcp_flags = flags;
360 TCP_SKB_CB(skb)->sacked = 0; 360 TCP_SKB_CB(skb)->sacked = 0;
361 361
362 skb_shinfo(skb)->gso_segs = 1; 362 skb_shinfo(skb)->gso_segs = 1;
@@ -565,7 +565,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
565 */ 565 */
566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
567 struct tcp_out_options *opts, 567 struct tcp_out_options *opts,
568 struct tcp_md5sig_key **md5) { 568 struct tcp_md5sig_key **md5)
569{
569 struct tcp_sock *tp = tcp_sk(sk); 570 struct tcp_sock *tp = tcp_sk(sk);
570 struct tcp_cookie_values *cvp = tp->cookie_values; 571 struct tcp_cookie_values *cvp = tp->cookie_values;
571 unsigned remaining = MAX_TCP_OPTION_SPACE; 572 unsigned remaining = MAX_TCP_OPTION_SPACE;
@@ -743,7 +744,8 @@ static unsigned tcp_synack_options(struct sock *sk,
743 */ 744 */
744static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 745static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
745 struct tcp_out_options *opts, 746 struct tcp_out_options *opts,
746 struct tcp_md5sig_key **md5) { 747 struct tcp_md5sig_key **md5)
748{
747 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 749 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
748 struct tcp_sock *tp = tcp_sk(sk); 750 struct tcp_sock *tp = tcp_sk(sk);
749 unsigned size = 0; 751 unsigned size = 0;
@@ -826,7 +828,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
826 tcb = TCP_SKB_CB(skb); 828 tcb = TCP_SKB_CB(skb);
827 memset(&opts, 0, sizeof(opts)); 829 memset(&opts, 0, sizeof(opts));
828 830
829 if (unlikely(tcb->flags & TCPHDR_SYN)) 831 if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
830 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 832 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
831 else 833 else
832 tcp_options_size = tcp_established_options(sk, skb, &opts, 834 tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -850,9 +852,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
850 th->seq = htonl(tcb->seq); 852 th->seq = htonl(tcb->seq);
851 th->ack_seq = htonl(tp->rcv_nxt); 853 th->ack_seq = htonl(tp->rcv_nxt);
852 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 854 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
853 tcb->flags); 855 tcb->tcp_flags);
854 856
855 if (unlikely(tcb->flags & TCPHDR_SYN)) { 857 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
856 /* RFC1323: The window in SYN & SYN/ACK segments 858 /* RFC1323: The window in SYN & SYN/ACK segments
857 * is never scaled. 859 * is never scaled.
858 */ 860 */
@@ -875,7 +877,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
875 } 877 }
876 878
877 tcp_options_write((__be32 *)(th + 1), tp, &opts); 879 tcp_options_write((__be32 *)(th + 1), tp, &opts);
878 if (likely((tcb->flags & TCPHDR_SYN) == 0)) 880 if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
879 TCP_ECN_send(sk, skb, tcp_header_size); 881 TCP_ECN_send(sk, skb, tcp_header_size);
880 882
881#ifdef CONFIG_TCP_MD5SIG 883#ifdef CONFIG_TCP_MD5SIG
@@ -889,11 +891,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
889 891
890 icsk->icsk_af_ops->send_check(sk, skb); 892 icsk->icsk_af_ops->send_check(sk, skb);
891 893
892 if (likely(tcb->flags & TCPHDR_ACK)) 894 if (likely(tcb->tcp_flags & TCPHDR_ACK))
893 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 895 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
894 896
895 if (skb->len != tcp_header_size) 897 if (skb->len != tcp_header_size)
896 tcp_event_data_sent(tp, skb, sk); 898 tcp_event_data_sent(tp, sk);
897 899
898 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 900 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 901 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
@@ -926,7 +928,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
926} 928}
927 929
928/* Initialize TSO segments for a packet. */ 930/* Initialize TSO segments for a packet. */
929static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, 931static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
930 unsigned int mss_now) 932 unsigned int mss_now)
931{ 933{
932 if (skb->len <= mss_now || !sk_can_gso(sk) || 934 if (skb->len <= mss_now || !sk_can_gso(sk) ||
@@ -947,7 +949,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
947/* When a modification to fackets out becomes necessary, we need to check 949/* When a modification to fackets out becomes necessary, we need to check
948 * skb is counted to fackets_out or not. 950 * skb is counted to fackets_out or not.
949 */ 951 */
950static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb, 952static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
951 int decr) 953 int decr)
952{ 954{
953 struct tcp_sock *tp = tcp_sk(sk); 955 struct tcp_sock *tp = tcp_sk(sk);
@@ -962,7 +964,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
962/* Pcount in the middle of the write queue got changed, we need to do various 964/* Pcount in the middle of the write queue got changed, we need to do various
963 * tweaks to fix counters 965 * tweaks to fix counters
964 */ 966 */
965static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr) 967static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
966{ 968{
967 struct tcp_sock *tp = tcp_sk(sk); 969 struct tcp_sock *tp = tcp_sk(sk);
968 970
@@ -1032,9 +1034,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1032 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1034 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1033 1035
1034 /* PSH and FIN should only be set in the second packet. */ 1036 /* PSH and FIN should only be set in the second packet. */
1035 flags = TCP_SKB_CB(skb)->flags; 1037 flags = TCP_SKB_CB(skb)->tcp_flags;
1036 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1038 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1037 TCP_SKB_CB(buff)->flags = flags; 1039 TCP_SKB_CB(buff)->tcp_flags = flags;
1038 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1040 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1039 1041
1040 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 1042 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -1094,14 +1096,16 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1094 eat = len; 1096 eat = len;
1095 k = 0; 1097 k = 0;
1096 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1098 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1097 if (skb_shinfo(skb)->frags[i].size <= eat) { 1099 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1098 put_page(skb_shinfo(skb)->frags[i].page); 1100
1099 eat -= skb_shinfo(skb)->frags[i].size; 1101 if (size <= eat) {
1102 skb_frag_unref(skb, i);
1103 eat -= size;
1100 } else { 1104 } else {
1101 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1105 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1102 if (eat) { 1106 if (eat) {
1103 skb_shinfo(skb)->frags[k].page_offset += eat; 1107 skb_shinfo(skb)->frags[k].page_offset += eat;
1104 skb_shinfo(skb)->frags[k].size -= eat; 1108 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1105 eat = 0; 1109 eat = 0;
1106 } 1110 }
1107 k++; 1111 k++;
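__pskb_trim_head() above stops poking frags[i].size and frags[i].page directly and instead goes through the skb_frag_size(), skb_frag_size_sub() and skb_frag_unref() helpers, so the fragment representation can change behind the accessors without editing every caller. A toy version of the pattern (names are illustrative):

#include <stdio.h>

struct toy_frag {
        unsigned int size; /* callers should not touch this directly */
};

static unsigned int toy_frag_size(const struct toy_frag *f)
{
        return f->size;
}

static void toy_frag_size_sub(struct toy_frag *f, unsigned int delta)
{
        f->size -= delta;
}

int main(void)
{
        struct toy_frag frag = { .size = 4096 };
        unsigned int eat = 100;

        if (toy_frag_size(&frag) > eat)
                toy_frag_size_sub(&frag, eat);
        printf("remaining=%u\n", toy_frag_size(&frag));
        return 0;
}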
@@ -1144,10 +1148,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1144} 1148}
1145 1149
1146/* Calculate MSS. Not accounting for SACKs here. */ 1150/* Calculate MSS. Not accounting for SACKs here. */
1147int tcp_mtu_to_mss(struct sock *sk, int pmtu) 1151int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
1148{ 1152{
1149 struct tcp_sock *tp = tcp_sk(sk); 1153 const struct tcp_sock *tp = tcp_sk(sk);
1150 struct inet_connection_sock *icsk = inet_csk(sk); 1154 const struct inet_connection_sock *icsk = inet_csk(sk);
1151 int mss_now; 1155 int mss_now;
1152 1156
1153 /* Calculate base mss without TCP options: 1157 /* Calculate base mss without TCP options:
@@ -1173,10 +1177,10 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1173} 1177}
1174 1178
1175/* Inverse of above */ 1179/* Inverse of above */
1176int tcp_mss_to_mtu(struct sock *sk, int mss) 1180int tcp_mss_to_mtu(const struct sock *sk, int mss)
1177{ 1181{
1178 struct tcp_sock *tp = tcp_sk(sk); 1182 const struct tcp_sock *tp = tcp_sk(sk);
1179 struct inet_connection_sock *icsk = inet_csk(sk); 1183 const struct inet_connection_sock *icsk = inet_csk(sk);
1180 int mtu; 1184 int mtu;
1181 1185
1182 mtu = mss + 1186 mtu = mss +
@@ -1250,8 +1254,8 @@ EXPORT_SYMBOL(tcp_sync_mss);
1250 */ 1254 */
1251unsigned int tcp_current_mss(struct sock *sk) 1255unsigned int tcp_current_mss(struct sock *sk)
1252{ 1256{
1253 struct tcp_sock *tp = tcp_sk(sk); 1257 const struct tcp_sock *tp = tcp_sk(sk);
1254 struct dst_entry *dst = __sk_dst_get(sk); 1258 const struct dst_entry *dst = __sk_dst_get(sk);
1255 u32 mss_now; 1259 u32 mss_now;
1256 unsigned header_len; 1260 unsigned header_len;
1257 struct tcp_out_options opts; 1261 struct tcp_out_options opts;
@@ -1311,10 +1315,10 @@ static void tcp_cwnd_validate(struct sock *sk)
1311 * modulo only when the receiver window alone is the limiting factor or 1315 * modulo only when the receiver window alone is the limiting factor or
1312 * when we would be allowed to send the split-due-to-Nagle skb fully. 1316 * when we would be allowed to send the split-due-to-Nagle skb fully.
1313 */ 1317 */
1314static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1318static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
1315 unsigned int mss_now, unsigned int cwnd) 1319 unsigned int mss_now, unsigned int cwnd)
1316{ 1320{
1317 struct tcp_sock *tp = tcp_sk(sk); 1321 const struct tcp_sock *tp = tcp_sk(sk);
1318 u32 needed, window, cwnd_len; 1322 u32 needed, window, cwnd_len;
1319 1323
1320 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1324 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -1334,13 +1338,14 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1334/* Can at least one segment of SKB be sent right now, according to the 1338/* Can at least one segment of SKB be sent right now, according to the
1335 * congestion window rules? If so, return how many segments are allowed. 1339 * congestion window rules? If so, return how many segments are allowed.
1336 */ 1340 */
1337static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1341static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1338 struct sk_buff *skb) 1342 const struct sk_buff *skb)
1339{ 1343{
1340 u32 in_flight, cwnd; 1344 u32 in_flight, cwnd;
1341 1345
1342 /* Don't be strict about the congestion window for the final FIN. */ 1346 /* Don't be strict about the congestion window for the final FIN. */
1343 if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1) 1347 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1348 tcp_skb_pcount(skb) == 1)
1344 return 1; 1349 return 1;
1345 1350
1346 in_flight = tcp_packets_in_flight(tp); 1351 in_flight = tcp_packets_in_flight(tp);
@@ -1355,7 +1360,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1355 * This must be invoked the first time we consider transmitting 1360 * This must be invoked the first time we consider transmitting
1356 * SKB onto the wire. 1361 * SKB onto the wire.
1357 */ 1362 */
1358static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, 1363static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1359 unsigned int mss_now) 1364 unsigned int mss_now)
1360{ 1365{
1361 int tso_segs = tcp_skb_pcount(skb); 1366 int tso_segs = tcp_skb_pcount(skb);
@@ -1393,7 +1398,7 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
1393/* Return non-zero if the Nagle test allows this packet to be 1398/* Return non-zero if the Nagle test allows this packet to be
1394 * sent now. 1399 * sent now.
1395 */ 1400 */
1396static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1401static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1397 unsigned int cur_mss, int nonagle) 1402 unsigned int cur_mss, int nonagle)
1398{ 1403{
1399 /* Nagle rule does not apply to frames, which sit in the middle of the 1404 /* Nagle rule does not apply to frames, which sit in the middle of the
@@ -1409,7 +1414,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1409 * Nagle can be ignored during F-RTO too (see RFC4138). 1414 * Nagle can be ignored during F-RTO too (see RFC4138).
1410 */ 1415 */
1411 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1416 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1412 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)) 1417 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1413 return 1; 1418 return 1;
1414 1419
1415 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1420 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1419,7 +1424,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1419} 1424}
1420 1425
1421/* Does at least the first segment of SKB fit into the send window? */ 1426/* Does at least the first segment of SKB fit into the send window? */
1422static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, 1427static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1423 unsigned int cur_mss) 1428 unsigned int cur_mss)
1424{ 1429{
1425 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1430 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1434,10 +1439,10 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
1434 * should be put on the wire right now. If so, it returns the number of 1439 * should be put on the wire right now. If so, it returns the number of
1435 * packets allowed by the congestion window. 1440 * packets allowed by the congestion window.
1436 */ 1441 */
1437static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 1442static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1438 unsigned int cur_mss, int nonagle) 1443 unsigned int cur_mss, int nonagle)
1439{ 1444{
1440 struct tcp_sock *tp = tcp_sk(sk); 1445 const struct tcp_sock *tp = tcp_sk(sk);
1441 unsigned int cwnd_quota; 1446 unsigned int cwnd_quota;
1442 1447
1443 tcp_init_tso_segs(sk, skb, cur_mss); 1448 tcp_init_tso_segs(sk, skb, cur_mss);
@@ -1455,7 +1460,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1455/* Test if sending is allowed right now. */ 1460/* Test if sending is allowed right now. */
1456int tcp_may_send_now(struct sock *sk) 1461int tcp_may_send_now(struct sock *sk)
1457{ 1462{
1458 struct tcp_sock *tp = tcp_sk(sk); 1463 const struct tcp_sock *tp = tcp_sk(sk);
1459 struct sk_buff *skb = tcp_send_head(sk); 1464 struct sk_buff *skb = tcp_send_head(sk);
1460 1465
1461 return skb && 1466 return skb &&
@@ -1497,9 +1502,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1497 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1502 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1498 1503
1499 /* PSH and FIN should only be set in the second packet. */ 1504 /* PSH and FIN should only be set in the second packet. */
1500 flags = TCP_SKB_CB(skb)->flags; 1505 flags = TCP_SKB_CB(skb)->tcp_flags;
1501 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1506 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1502 TCP_SKB_CB(buff)->flags = flags; 1507 TCP_SKB_CB(buff)->tcp_flags = flags;
1503 1508
1504 /* This packet was never sent out yet, so no SACK bits. */ 1509 /* This packet was never sent out yet, so no SACK bits. */
1505 TCP_SKB_CB(buff)->sacked = 0; 1510 TCP_SKB_CB(buff)->sacked = 0;
@@ -1530,7 +1535,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1530 u32 send_win, cong_win, limit, in_flight; 1535 u32 send_win, cong_win, limit, in_flight;
1531 int win_divisor; 1536 int win_divisor;
1532 1537
1533 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) 1538 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1534 goto send_now; 1539 goto send_now;
1535 1540
1536 if (icsk->icsk_ca_state != TCP_CA_Open) 1541 if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1657,7 +1662,7 @@ static int tcp_mtu_probe(struct sock *sk)
1657 1662
1658 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1663 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1659 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1664 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1660 TCP_SKB_CB(nskb)->flags = TCPHDR_ACK; 1665 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
1661 TCP_SKB_CB(nskb)->sacked = 0; 1666 TCP_SKB_CB(nskb)->sacked = 0;
1662 nskb->csum = 0; 1667 nskb->csum = 0;
1663 nskb->ip_summed = skb->ip_summed; 1668 nskb->ip_summed = skb->ip_summed;
@@ -1677,11 +1682,11 @@ static int tcp_mtu_probe(struct sock *sk)
1677 if (skb->len <= copy) { 1682 if (skb->len <= copy) {
1678 /* We've eaten all the data from this skb. 1683 /* We've eaten all the data from this skb.
1679 * Throw it away. */ 1684 * Throw it away. */
1680 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; 1685 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1681 tcp_unlink_write_queue(skb, sk); 1686 tcp_unlink_write_queue(skb, sk);
1682 sk_wmem_free_skb(sk, skb); 1687 sk_wmem_free_skb(sk, skb);
1683 } else { 1688 } else {
1684 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 1689 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1685 ~(TCPHDR_FIN|TCPHDR_PSH); 1690 ~(TCPHDR_FIN|TCPHDR_PSH);
1686 if (!skb_shinfo(skb)->nr_frags) { 1691 if (!skb_shinfo(skb)->nr_frags) {
1687 skb_pull(skb, copy); 1692 skb_pull(skb, copy);
@@ -1796,11 +1801,13 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1796 tcp_event_new_data_sent(sk, skb); 1801 tcp_event_new_data_sent(sk, skb);
1797 1802
1798 tcp_minshall_update(tp, mss_now, skb); 1803 tcp_minshall_update(tp, mss_now, skb);
1799 sent_pkts++; 1804 sent_pkts += tcp_skb_pcount(skb);
1800 1805
1801 if (push_one) 1806 if (push_one)
1802 break; 1807 break;
1803 } 1808 }
1809 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
1810 tp->prr_out += sent_pkts;
1804 1811
1805 if (likely(sent_pkts)) { 1812 if (likely(sent_pkts)) {
1806 tcp_cwnd_validate(sk); 1813 tcp_cwnd_validate(sk);
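Two accounting fixes land in tcp_write_xmit() above: sent_pkts now grows by tcp_skb_pcount(skb), because one GSO skb can carry several MSS-sized segments, and segments transmitted while the sender sits in TCP_CA_Recovery are accumulated into tp->prr_out, the counter used by proportional rate reduction (the retransmit path below gets the matching prr_out update). A sketch of the per-segment counting side (pcount is an illustrative stand-in for tcp_skb_pcount()):

#include <stdio.h>

struct toy_skb {
        unsigned int pcount; /* number of MSS-sized segments carried */
};

int main(void)
{
        struct toy_skb queue[] = { { 1 }, { 4 }, { 2 } };
        unsigned int sent_pkts = 0;
        unsigned int i;

        for (i = 0; i < 3; i++) {
                /* Per-skb counting would report 3 here; per-segment
                 * counting reports the 7 segments actually sent. */
                sent_pkts += queue[i].pcount;
        }
        printf("sent_pkts=%u\n", sent_pkts);
        return 0;
}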
@@ -1985,7 +1992,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1985 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 1992 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1986 1993
1987 /* Merge over control information. This moves PSH/FIN etc. over */ 1994 /* Merge over control information. This moves PSH/FIN etc. over */
1988 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags; 1995 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
1989 1996
1990 /* All done, get rid of second SKB and account for it so 1997 /* All done, get rid of second SKB and account for it so
1991 * packet counting does not break. 1998 * packet counting does not break.
@@ -2003,7 +2010,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2003} 2010}
2004 2011
2005/* Check if coalescing SKBs is legal. */ 2012/* Check if coalescing SKBs is legal. */
2006static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 2013static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2007{ 2014{
2008 if (tcp_skb_pcount(skb) > 1) 2015 if (tcp_skb_pcount(skb) > 1)
2009 return 0; 2016 return 0;
@@ -2033,7 +2040,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2033 2040
2034 if (!sysctl_tcp_retrans_collapse) 2041 if (!sysctl_tcp_retrans_collapse)
2035 return; 2042 return;
2036 if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN) 2043 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2037 return; 2044 return;
2038 2045
2039 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2046 tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2125,12 +2132,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2125 * since it is cheap to do so and saves bytes on the network. 2132 * since it is cheap to do so and saves bytes on the network.
2126 */ 2133 */
2127 if (skb->len > 0 && 2134 if (skb->len > 0 &&
2128 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && 2135 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2129 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 2136 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2130 if (!pskb_trim(skb, 0)) { 2137 if (!pskb_trim(skb, 0)) {
2131 /* Reuse, even though it does some unnecessary work */ 2138 /* Reuse, even though it does some unnecessary work */
2132 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 2139 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2133 TCP_SKB_CB(skb)->flags); 2140 TCP_SKB_CB(skb)->tcp_flags);
2134 skb->ip_summed = CHECKSUM_NONE; 2141 skb->ip_summed = CHECKSUM_NONE;
2135 } 2142 }
2136 } 2143 }
@@ -2179,7 +2186,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2179static int tcp_can_forward_retransmit(struct sock *sk) 2186static int tcp_can_forward_retransmit(struct sock *sk)
2180{ 2187{
2181 const struct inet_connection_sock *icsk = inet_csk(sk); 2188 const struct inet_connection_sock *icsk = inet_csk(sk);
2182 struct tcp_sock *tp = tcp_sk(sk); 2189 const struct tcp_sock *tp = tcp_sk(sk);
2183 2190
2184 /* Forward retransmissions are possible only during Recovery. */ 2191 /* Forward retransmissions are possible only during Recovery. */
2185 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2192 if (icsk->icsk_ca_state != TCP_CA_Recovery)
@@ -2294,6 +2301,9 @@ begin_fwd:
2294 return; 2301 return;
2295 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2302 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2296 2303
2304 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
2305 tp->prr_out += tcp_skb_pcount(skb);
2306
2297 if (skb == tcp_write_queue_head(sk)) 2307 if (skb == tcp_write_queue_head(sk))
2298 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2308 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2299 inet_csk(sk)->icsk_rto, 2309 inet_csk(sk)->icsk_rto,
@@ -2317,7 +2327,7 @@ void tcp_send_fin(struct sock *sk)
2317 mss_now = tcp_current_mss(sk); 2327 mss_now = tcp_current_mss(sk);
2318 2328
2319 if (tcp_send_head(sk) != NULL) { 2329 if (tcp_send_head(sk) != NULL) {
2320 TCP_SKB_CB(skb)->flags |= TCPHDR_FIN; 2330 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
2321 TCP_SKB_CB(skb)->end_seq++; 2331 TCP_SKB_CB(skb)->end_seq++;
2322 tp->write_seq++; 2332 tp->write_seq++;
2323 } else { 2333 } else {
@@ -2379,11 +2389,11 @@ int tcp_send_synack(struct sock *sk)
2379 struct sk_buff *skb; 2389 struct sk_buff *skb;
2380 2390
2381 skb = tcp_write_queue_head(sk); 2391 skb = tcp_write_queue_head(sk);
2382 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) { 2392 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2383 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2393 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2384 return -EFAULT; 2394 return -EFAULT;
2385 } 2395 }
2386 if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) { 2396 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
2387 if (skb_cloned(skb)) { 2397 if (skb_cloned(skb)) {
2388 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2398 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2389 if (nskb == NULL) 2399 if (nskb == NULL)
@@ -2397,7 +2407,7 @@ int tcp_send_synack(struct sock *sk)
2397 skb = nskb; 2407 skb = nskb;
2398 } 2408 }
2399 2409
2400 TCP_SKB_CB(skb)->flags |= TCPHDR_ACK; 2410 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2401 TCP_ECN_send_synack(tcp_sk(sk), skb); 2411 TCP_ECN_send_synack(tcp_sk(sk), skb);
2402 } 2412 }
2403 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2413 TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2542,7 +2552,7 @@ EXPORT_SYMBOL(tcp_make_synack);
2542/* Do all connect socket setups that can be done AF independent. */ 2552/* Do all connect socket setups that can be done AF independent. */
2543static void tcp_connect_init(struct sock *sk) 2553static void tcp_connect_init(struct sock *sk)
2544{ 2554{
2545 struct dst_entry *dst = __sk_dst_get(sk); 2555 const struct dst_entry *dst = __sk_dst_get(sk);
2546 struct tcp_sock *tp = tcp_sk(sk); 2556 struct tcp_sock *tp = tcp_sk(sk);
2547 __u8 rcv_wscale; 2557 __u8 rcv_wscale;
2548 2558
@@ -2794,13 +2804,13 @@ int tcp_write_wakeup(struct sock *sk)
2794 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2804 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2795 skb->len > mss) { 2805 skb->len > mss) {
2796 seg_size = min(seg_size, mss); 2806 seg_size = min(seg_size, mss);
2797 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 2807 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
2798 if (tcp_fragment(sk, skb, seg_size, mss)) 2808 if (tcp_fragment(sk, skb, seg_size, mss))
2799 return -1; 2809 return -1;
2800 } else if (!tcp_skb_pcount(skb)) 2810 } else if (!tcp_skb_pcount(skb))
2801 tcp_set_skb_tso_segs(sk, skb, mss); 2811 tcp_set_skb_tso_segs(sk, skb, mss);
2802 2812
2803 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 2813 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
2804 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2814 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2805 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2815 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2806 if (!err) 2816 if (!err)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ecd44b0c45f1..2e0f0af76c19 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -334,7 +334,6 @@ void tcp_retransmit_timer(struct sock *sk)
334 * connection. If the socket is an orphan, time it out, 334 * connection. If the socket is an orphan, time it out,
335 * we cannot allow such beasts to hang infinitely. 335 * we cannot allow such beasts to hang infinitely.
336 */ 336 */
337#ifdef TCP_DEBUG
338 struct inet_sock *inet = inet_sk(sk); 337 struct inet_sock *inet = inet_sk(sk);
339 if (sk->sk_family == AF_INET) { 338 if (sk->sk_family == AF_INET) {
340 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 339 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
@@ -349,7 +348,6 @@ void tcp_retransmit_timer(struct sock *sk)
349 inet->inet_num, tp->snd_una, tp->snd_nxt); 348 inet->inet_num, tp->snd_una, tp->snd_nxt);
350 } 349 }
351#endif 350#endif
352#endif
353 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 351 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
354 tcp_write_err(sk); 352 tcp_write_err(sk);
355 goto out; 353 goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b5a19340a95..ab0966df1e2a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags)
1267 sk->sk_state = TCP_CLOSE; 1267 sk->sk_state = TCP_CLOSE;
1268 inet->inet_daddr = 0; 1268 inet->inet_daddr = 0;
1269 inet->inet_dport = 0; 1269 inet->inet_dport = 0;
1270 sock_rps_save_rxhash(sk, 0); 1270 sock_rps_reset_rxhash(sk);
1271 sk->sk_bound_dev_if = 0; 1271 sk->sk_bound_dev_if = 0;
1272 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1272 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1273 inet_reset_saddr(sk); 1273 inet_reset_saddr(sk);
@@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1355 int rc; 1355 int rc;
1356 1356
1357 if (inet_sk(sk)->inet_daddr) 1357 if (inet_sk(sk)->inet_daddr)
1358 sock_rps_save_rxhash(sk, skb->rxhash); 1358 sock_rps_save_rxhash(sk, skb);
1359 1359
1360 rc = ip_queue_rcv_skb(sk, skb); 1360 rc = ip_queue_rcv_skb(sk, skb);
1361 if (rc < 0) { 1361 if (rc < 0) {
@@ -1397,6 +1397,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1397 nf_reset(skb); 1397 nf_reset(skb);
1398 1398
1399 if (up->encap_type) { 1399 if (up->encap_type) {
1400 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
1401
1400 /* 1402 /*
1401 * This is an encapsulation socket so pass the skb to 1403 * This is an encapsulation socket so pass the skb to
1402 * the socket's udp_encap_rcv() hook. Otherwise, just 1404 * the socket's udp_encap_rcv() hook. Otherwise, just
@@ -1409,11 +1411,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1409 */ 1411 */
1410 1412
1411 /* if we're overly short, let UDP handle it */ 1413 /* if we're overly short, let UDP handle it */
1412 if (skb->len > sizeof(struct udphdr) && 1414 encap_rcv = ACCESS_ONCE(up->encap_rcv);
1413 up->encap_rcv != NULL) { 1415 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
1414 int ret; 1416 int ret;
1415 1417
1416 ret = (*up->encap_rcv)(sk, skb); 1418 ret = encap_rcv(sk, skb);
1417 if (ret <= 0) { 1419 if (ret <= 0) {
1418 UDP_INC_STATS_BH(sock_net(sk), 1420 UDP_INC_STATS_BH(sock_net(sk),
1419 UDP_MIB_INDATAGRAMS, 1421 UDP_MIB_INDATAGRAMS,
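The udp_queue_rcv_skb() hunks above fix a test-then-use race on the encapsulation hook: the function pointer is snapshotted once with ACCESS_ONCE() into a local, and both the NULL check and the indirect call use that local copy, so a concurrent writer clearing up->encap_rcv cannot slip in between the two. A hedged C11 sketch of the same pattern (atomics stand in for the kernel's ACCESS_ONCE()):

#include <stdatomic.h>
#include <stdio.h>

typedef int (*hook_fn)(int);

static _Atomic(hook_fn) encap_hook; /* may be set/cleared concurrently */

static int demo_hook(int v)
{
        return v + 1;
}

static int deliver(int v)
{
        hook_fn hook = atomic_load(&encap_hook); /* single read */

        if (hook != NULL)
                return hook(v); /* calls the same pointer we just tested */
        return v;
}

int main(void)
{
        atomic_store(&encap_hook, demo_hook);
        printf("%d\n", deliver(41));
        return 0;
}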
@@ -1461,10 +1463,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1461 } 1463 }
1462 } 1464 }
1463 1465
1464 if (rcu_dereference_raw(sk->sk_filter)) { 1466 if (rcu_access_pointer(sk->sk_filter) &&
1465 if (udp_lib_checksum_complete(skb)) 1467 udp_lib_checksum_complete(skb))
1466 goto drop; 1468 goto drop;
1467 }
1468 1469
1469 1470
1470 if (sk_rcvqueues_full(sk, skb)) 1471 if (sk_rcvqueues_full(sk, skb))
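The sk_filter check above also switches from rcu_dereference_raw() to rcu_access_pointer(), the accessor meant for asking only whether an RCU-protected pointer is non-NULL, with no dereference and therefore no read-side critical section required. An illustrative C11 sketch of an existence-only probe (toy globals, not the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) filter; /* published by another thread, RCU-style */

/* Existence check only: nothing is dereferenced, so no dependency
 * ordering is needed; a relaxed load suffices (~ rcu_access_pointer()). */
static int filter_present(void)
{
        return atomic_load_explicit(&filter, memory_order_relaxed) != NULL;
}

int main(void)
{
        static int rule = 42;

        printf("%d\n", filter_present());
        atomic_store_explicit(&filter, &rule, memory_order_release);
        printf("%d\n", filter_present());
        return 0;
}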
@@ -2038,7 +2039,7 @@ static void udp_seq_stop(struct seq_file *seq, void *v)
2038 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); 2039 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2039} 2040}
2040 2041
2041static int udp_seq_open(struct inode *inode, struct file *file) 2042int udp_seq_open(struct inode *inode, struct file *file)
2042{ 2043{
2043 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 2044 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
2044 struct udp_iter_state *s; 2045 struct udp_iter_state *s;
@@ -2054,6 +2055,7 @@ static int udp_seq_open(struct inode *inode, struct file *file)
2054 s->udp_table = afinfo->udp_table; 2055 s->udp_table = afinfo->udp_table;
2055 return err; 2056 return err;
2056} 2057}
2058EXPORT_SYMBOL(udp_seq_open);
2057 2059
2058/* ------------------------------------------------------------------------ */ 2060/* ------------------------------------------------------------------------ */
2059int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) 2061int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
@@ -2061,17 +2063,12 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
2061 struct proc_dir_entry *p; 2063 struct proc_dir_entry *p;
2062 int rc = 0; 2064 int rc = 0;
2063 2065
2064 afinfo->seq_fops.open = udp_seq_open;
2065 afinfo->seq_fops.read = seq_read;
2066 afinfo->seq_fops.llseek = seq_lseek;
2067 afinfo->seq_fops.release = seq_release_net;
2068
2069 afinfo->seq_ops.start = udp_seq_start; 2066 afinfo->seq_ops.start = udp_seq_start;
2070 afinfo->seq_ops.next = udp_seq_next; 2067 afinfo->seq_ops.next = udp_seq_next;
2071 afinfo->seq_ops.stop = udp_seq_stop; 2068 afinfo->seq_ops.stop = udp_seq_stop;
2072 2069
2073 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, 2070 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2074 &afinfo->seq_fops, afinfo); 2071 afinfo->seq_fops, afinfo);
2075 if (!p) 2072 if (!p)
2076 rc = -ENOMEM; 2073 rc = -ENOMEM;
2077 return rc; 2074 return rc;
@@ -2121,14 +2118,20 @@ int udp4_seq_show(struct seq_file *seq, void *v)
2121 return 0; 2118 return 0;
2122} 2119}
2123 2120
2121static const struct file_operations udp_afinfo_seq_fops = {
2122 .owner = THIS_MODULE,
2123 .open = udp_seq_open,
2124 .read = seq_read,
2125 .llseek = seq_lseek,
2126 .release = seq_release_net
2127};
2128
2124/* ------------------------------------------------------------------------ */ 2129/* ------------------------------------------------------------------------ */
2125static struct udp_seq_afinfo udp4_seq_afinfo = { 2130static struct udp_seq_afinfo udp4_seq_afinfo = {
2126 .name = "udp", 2131 .name = "udp",
2127 .family = AF_INET, 2132 .family = AF_INET,
2128 .udp_table = &udp_table, 2133 .udp_table = &udp_table,
2129 .seq_fops = { 2134 .seq_fops = &udp_afinfo_seq_fops,
2130 .owner = THIS_MODULE,
2131 },
2132 .seq_ops = { 2135 .seq_ops = {
2133 .show = udp4_seq_show, 2136 .show = udp4_seq_show,
2134 }, 2137 },
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index aee9963f7f5a..12e9499a1a6c 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -10,6 +10,7 @@
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13#include <linux/export.h>
13#include "udp_impl.h" 14#include "udp_impl.h"
14 15
15struct udp_table udplite_table __read_mostly; 16struct udp_table udplite_table __read_mostly;
@@ -71,13 +72,20 @@ static struct inet_protosw udplite4_protosw = {
71}; 72};
72 73
73#ifdef CONFIG_PROC_FS 74#ifdef CONFIG_PROC_FS
75
76static const struct file_operations udplite_afinfo_seq_fops = {
77 .owner = THIS_MODULE,
78 .open = udp_seq_open,
79 .read = seq_read,
80 .llseek = seq_lseek,
81 .release = seq_release_net
82};
83
74static struct udp_seq_afinfo udplite4_seq_afinfo = { 84static struct udp_seq_afinfo udplite4_seq_afinfo = {
75 .name = "udplite", 85 .name = "udplite",
76 .family = AF_INET, 86 .family = AF_INET,
77 .udp_table = &udplite_table, 87 .udp_table = &udplite_table,
78 .seq_fops = { 88 .seq_fops = &udplite_afinfo_seq_fops,
79 .owner = THIS_MODULE,
80 },
81 .seq_ops = { 89 .seq_ops = {
82 .show = udp4_seq_show, 90 .show = udp4_seq_show,
83 }, 91 },
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fc5368ad2b0d..a0b4c5da8d43 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
79 struct rtable *rt = (struct rtable *)xdst->route; 79 struct rtable *rt = (struct rtable *)xdst->route;
80 const struct flowi4 *fl4 = &fl->u.ip4; 80 const struct flowi4 *fl4 = &fl->u.ip4;
81 81
82 rt->rt_key_dst = fl4->daddr; 82 xdst->u.rt.rt_key_dst = fl4->daddr;
83 rt->rt_key_src = fl4->saddr; 83 xdst->u.rt.rt_key_src = fl4->saddr;
84 rt->rt_key_tos = fl4->flowi4_tos; 84 xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
85 rt->rt_route_iif = fl4->flowi4_iif; 85 xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
86 rt->rt_iif = fl4->flowi4_iif; 86 xdst->u.rt.rt_iif = fl4->flowi4_iif;
87 rt->rt_oif = fl4->flowi4_oif; 87 xdst->u.rt.rt_oif = fl4->flowi4_oif;
88 rt->rt_mark = fl4->flowi4_mark; 88 xdst->u.rt.rt_mark = fl4->flowi4_mark;
89 89
90 xdst->u.dst.dev = dev; 90 xdst->u.dst.dev = dev;
91 dev_hold(dev); 91 dev_hold(dev);
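The xfrm4_fill_dst() change above redirects the flow-key writes away from rt, which aliases the wrapped xdst->route, and into the xfrm dst's own embedded rtable at xdst->u.rt, leaving the cached parent route untouched. A toy illustration of filling the wrapper rather than the object it wraps (structures are illustrative stand-ins):

#include <stdio.h>

struct route {
        unsigned int key_dst;
};

struct xfrm_dst {
        struct route rt;     /* the wrapper's own route view */
        struct route *route; /* the original route being wrapped */
};

static void fill_dst(struct xfrm_dst *xdst, unsigned int daddr)
{
        /* The buggy form mutated the shared parent:
         *     xdst->route->key_dst = daddr;
         * The fix fills the wrapper's own copy instead: */
        xdst->rt.key_dst = daddr;
}

int main(void)
{
        struct route parent = { .key_dst = 0 };
        struct xfrm_dst xdst = { .route = &parent };

        fill_dst(&xdst, 0x0a000001);
        printf("wrapper=%#x parent=%#x\n", xdst.rt.key_dst, parent.key_dst);
        return 0;
}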
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index d9ac0a0058b5..9258e751baba 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -12,6 +12,7 @@
12#include <linux/pfkeyv2.h> 12#include <linux/pfkeyv2.h>
13#include <linux/ipsec.h> 13#include <linux/ipsec.h>
14#include <linux/netfilter_ipv4.h> 14#include <linux/netfilter_ipv4.h>
15#include <linux/export.h>
15 16
16static int xfrm4_init_flags(struct xfrm_state *x) 17static int xfrm4_init_flags(struct xfrm_state *x)
17{ 18{
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12368c586068..cf88df82e2c2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -87,6 +87,7 @@
87 87
88#include <linux/proc_fs.h> 88#include <linux/proc_fs.h>
89#include <linux/seq_file.h> 89#include <linux/seq_file.h>
90#include <linux/export.h>
90 91
91/* Set to 3 to get tracing... */ 92/* Set to 3 to get tracing... */
92#define ACONF_DEBUG 2 93#define ACONF_DEBUG 2
@@ -428,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
428 ndev->tstamp = jiffies; 429 ndev->tstamp = jiffies;
429 addrconf_sysctl_register(ndev); 430 addrconf_sysctl_register(ndev);
430 /* protected by rtnl_lock */ 431 /* protected by rtnl_lock */
431 rcu_assign_pointer(dev->ip6_ptr, ndev); 432 RCU_INIT_POINTER(dev->ip6_ptr, ndev);
432 433
433 /* Join all-node multicast group */ 434 /* Join all-node multicast group */
434 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
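ipv6_add_dev() above trades rcu_assign_pointer() for RCU_INIT_POINTER(), dropping the publish barrier on the grounds that, per the retained comment, the assignment is already serialized by rtnl_lock. In C11 terms that is roughly a release store versus a relaxed one; a hedged sketch of the distinction (toy types, not the kernel primitives):

#include <stdatomic.h>
#include <stdlib.h>

struct dev_state {
        int ready;
};

static _Atomic(struct dev_state *) global_state;

static void publish_with_barrier(struct dev_state *s)
{
        s->ready = 1;
        /* ~ rcu_assign_pointer(): any reader that sees the pointer
         * also sees s->ready == 1. */
        atomic_store_explicit(&global_state, s, memory_order_release);
}

static void publish_under_lock(struct dev_state *s)
{
        s->ready = 1;
        /* ~ RCU_INIT_POINTER(): no barrier, relying on the caller
         * already holding the lock that readers must take first. */
        atomic_store_explicit(&global_state, s, memory_order_relaxed);
}

int main(void)
{
        struct dev_state *s = calloc(1, sizeof(*s));

        publish_with_barrier(s);
        publish_under_lock(s);
        free(atomic_load(&global_state));
        return 0;
}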
@@ -824,12 +825,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
824{ 825{
825 struct inet6_dev *idev = ifp->idev; 826 struct inet6_dev *idev = ifp->idev;
826 struct in6_addr addr, *tmpaddr; 827 struct in6_addr addr, *tmpaddr;
827 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; 828 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
828 unsigned long regen_advance; 829 unsigned long regen_advance;
829 int tmp_plen; 830 int tmp_plen;
830 int ret = 0; 831 int ret = 0;
831 int max_addresses; 832 int max_addresses;
832 u32 addr_flags; 833 u32 addr_flags;
834 unsigned long now = jiffies;
833 835
834 write_lock(&idev->lock); 836 write_lock(&idev->lock);
835 if (ift) { 837 if (ift) {
@@ -874,7 +876,7 @@ retry:
874 goto out; 876 goto out;
875 } 877 }
876 memcpy(&addr.s6_addr[8], idev->rndid, 8); 878 memcpy(&addr.s6_addr[8], idev->rndid, 8);
877 age = (jiffies - ifp->tstamp) / HZ; 879 age = (now - ifp->tstamp) / HZ;
878 tmp_valid_lft = min_t(__u32, 880 tmp_valid_lft = min_t(__u32,
879 ifp->valid_lft, 881 ifp->valid_lft,
880 idev->cnf.temp_valid_lft + age); 882 idev->cnf.temp_valid_lft + age);
@@ -884,7 +886,6 @@ retry:
884 idev->cnf.max_desync_factor); 886 idev->cnf.max_desync_factor);
885 tmp_plen = ifp->prefix_len; 887 tmp_plen = ifp->prefix_len;
886 max_addresses = idev->cnf.max_addresses; 888 max_addresses = idev->cnf.max_addresses;
887 tmp_cstamp = ifp->cstamp;
888 tmp_tstamp = ifp->tstamp; 889 tmp_tstamp = ifp->tstamp;
889 spin_unlock_bh(&ifp->lock); 890 spin_unlock_bh(&ifp->lock);
890 891
@@ -929,7 +930,7 @@ retry:
929 ift->ifpub = ifp; 930 ift->ifpub = ifp;
930 ift->valid_lft = tmp_valid_lft; 931 ift->valid_lft = tmp_valid_lft;
931 ift->prefered_lft = tmp_prefered_lft; 932 ift->prefered_lft = tmp_prefered_lft;
932 ift->cstamp = tmp_cstamp; 933 ift->cstamp = now;
933 ift->tstamp = tmp_tstamp; 934 ift->tstamp = tmp_tstamp;
934 spin_unlock_bh(&ift->lock); 935 spin_unlock_bh(&ift->lock);
935 936
@@ -1713,6 +1714,40 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 	ip6_route_add(&cfg);
 }
 
+
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+						  int plen,
+						  const struct net_device *dev,
+						  u32 flags, u32 noflags)
+{
+	struct fib6_node *fn;
+	struct rt6_info *rt = NULL;
+	struct fib6_table *table;
+
+	table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+	if (table == NULL)
+		return NULL;
+
+	write_lock_bh(&table->tb6_lock);
+	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
+	if (!fn)
+		goto out;
+	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+		if (rt->rt6i_dev->ifindex != dev->ifindex)
+			continue;
+		if ((rt->rt6i_flags & flags) != flags)
+			continue;
+		if ((noflags != 0) && ((rt->rt6i_flags & noflags) != 0))
+			continue;
+		dst_hold(&rt->dst);
+		break;
+	}
+out:
+	write_unlock_bh(&table->tb6_lock);
+	return rt;
+}
+
+
 /* Create "default" multicast route to the interface */
 
 static void addrconf_add_mroute(struct net_device *dev)
@@ -1842,10 +1877,13 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 	if (addrconf_finite_timeout(rt_expires))
 		rt_expires *= HZ;
 
-	rt = rt6_lookup(net, &pinfo->prefix, NULL,
-			dev->ifindex, 1);
+	rt = addrconf_get_prefix_route(&pinfo->prefix,
+				       pinfo->prefix_len,
+				       dev,
+				       RTF_ADDRCONF | RTF_PREFIX_RT,
+				       RTF_GATEWAY | RTF_DEFAULT);
 
-	if (rt && addrconf_is_prefix_route(rt)) {
+	if (rt) {
 		/* Autoconf prefix route */
 		if (valid_lft == 0) {
 			ip6_del_rt(rt);
@@ -1999,25 +2037,50 @@ ok:
 #ifdef CONFIG_IPV6_PRIVACY
 			read_lock_bh(&in6_dev->lock);
 			/* update all temporary addresses in the list */
-			list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
-				/*
-				 * When adjusting the lifetimes of an existing
-				 * temporary address, only lower the lifetimes.
-				 * Implementations must not increase the
-				 * lifetimes of an existing temporary address
-				 * when processing a Prefix Information Option.
-				 */
+			list_for_each_entry(ift, &in6_dev->tempaddr_list,
+					    tmp_list) {
+				int age, max_valid, max_prefered;
+
 				if (ifp != ift->ifpub)
 					continue;
 
+				/*
+				 * RFC 4941 section 3.3:
+				 * If a received option will extend the lifetime
+				 * of a public address, the lifetimes of
+				 * temporary addresses should be extended,
+				 * subject to the overall constraint that no
+				 * temporary addresses should ever remain
+				 * "valid" or "preferred" for a time longer than
+				 * (TEMP_VALID_LIFETIME) or
+				 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
+				 * respectively.
+				 */
+				age = (now - ift->cstamp) / HZ;
+				max_valid = in6_dev->cnf.temp_valid_lft - age;
+				if (max_valid < 0)
+					max_valid = 0;
+
+				max_prefered = in6_dev->cnf.temp_prefered_lft -
+					       in6_dev->cnf.max_desync_factor -
+					       age;
+				if (max_prefered < 0)
+					max_prefered = 0;
+
+				if (valid_lft > max_valid)
+					valid_lft = max_valid;
+
+				if (prefered_lft > max_prefered)
+					prefered_lft = max_prefered;
+
 				spin_lock(&ift->lock);
 				flags = ift->flags;
-				if (ift->valid_lft > valid_lft &&
-				    ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ)
-					ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ;
-				if (ift->prefered_lft > prefered_lft &&
-				    ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ)
-					ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ;
+				ift->valid_lft = valid_lft;
+				ift->prefered_lft = prefered_lft;
+				ift->tstamp = now;
+				if (prefered_lft > 0)
+					ift->flags &= ~IFA_F_DEPRECATED;
+
 				spin_unlock(&ift->lock);
 				if (!(flags&IFA_F_TENTATIVE))
 					ipv6_ifa_notify(0, ift);
@@ -2025,9 +2088,11 @@ ok:
 
 	if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
 		/*
-		 * When a new public address is created as described in [ADDRCONF],
-		 * also create a new temporary address. Also create a temporary
-		 * address if it's enabled but no temporary address currently exists.
+		 * When a new public address is created as
+		 * described in [ADDRCONF], also create a new
+		 * temporary address. Also create a temporary
+		 * address if it's enabled but no temporary
+		 * address currently exists.
 		 */
 		read_unlock_bh(&in6_dev->lock);
 		ipv6_create_tempaddr(ifp, NULL);
@@ -2706,7 +2771,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	idev->dead = 1;
 
 	/* protected by rtnl_lock */
-	rcu_assign_pointer(dev->ip6_ptr, NULL);
+	RCU_INIT_POINTER(dev->ip6_ptr, NULL);
 
 	/* Step 1.5: remove snmp6 entry */
 	snmp6_unregister_dev(idev);
@@ -2969,12 +3034,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 
 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
 
-	/* If added prefix is link local and forwarding is off,
-	   start sending router solicitations.
+	/* If added prefix is link local and we are prepared to process
+	   router advertisements, start sending router solicitations.
 	 */
 
-	if ((ifp->idev->cnf.forwarding == 0 ||
-	     ifp->idev->cnf.forwarding == 2) &&
+	if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
+	     ifp->idev->cnf.accept_ra == 2) &&
 	    ifp->idev->cnf.rtr_solicits > 0 &&
 	    (dev->flags&IFF_LOOPBACK) == 0 &&
 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
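
The rcu_assign_pointer() to RCU_INIT_POINTER() conversions that run through this
merge (addrconf.c above, and ip6_tunnel.c, raw.c and sit.c below) trade a write
barrier for a plain store. Stripped of the __rcu sparse annotations, the two
macros reduce to roughly the sketch below (a simplification, not the exact
in-tree text): RCU_INIT_POINTER() is only safe where readers cannot observe a
half-initialized object, i.e. when storing NULL or when the object is not yet
(or no longer) reachable by any RCU reader.

	/* simplified sketch; the real macros add sparse/type checking */
	#define RCU_INIT_POINTER(p, v)	((p) = (v))	/* plain store, no barrier */
	#define rcu_assign_pointer(p, v) \
		({ smp_wmb(); (p) = (v); })		/* order init before publish */
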
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 6b03826552e1..399287e595d7 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -3,6 +3,7 @@
 * not configured or static.
 */
 
+#include <linux/export.h>
 #include <net/ipv6.h>
 
 #define IPV6_ADDR_SCOPE_TYPE(scope)	((scope) << 16)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b46e9f88ce37..e2480691c220 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -297,10 +297,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 	ipv6_addr_copy(&iph->daddr, &fl6->daddr);
 
 	mtu_info = IP6CBMTU(skb);
-	if (!mtu_info) {
-		kfree_skb(skb);
-		return;
-	}
 
 	mtu_info->ip6m_mtu = mtu;
 	mtu_info->ip6m_addr.sin6_family = AF_INET6;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 79a485e8a700..bf22a225f422 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -30,6 +30,7 @@
 #include <linux/in6.h>
 #include <linux/icmpv6.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 
 #include <net/dst.h>
 #include <net/sock.h>
@@ -273,12 +274,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 	__u16 dstbuf;
 #endif
-	struct dst_entry *dst;
+	struct dst_entry *dst = skb_dst(skb);
 
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
 				 IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
@@ -289,9 +290,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 	dstbuf = opt->dst1;
 #endif
 
-	dst = dst_clone(skb_dst(skb));
 	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
-		dst_release(dst);
 		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
 		opt = IP6CB(skb);
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -304,7 +303,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 
 	IP6_INC_STATS_BH(dev_net(dst->dev),
 			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
-	dst_release(dst);
 	return -1;
 }
 
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 14ed0a955b56..37f548b7f6dc 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -2,6 +2,7 @@
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static.
 */
+#include <linux/export.h>
 #include <net/ipv6.h>
 
 /*
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 34d244df907d..295571576f83 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -14,6 +14,7 @@
 */
 
 #include <linux/netdevice.h>
+#include <linux/export.h>
 
 #include <net/fib_rules.h>
 #include <net/ipv6.h>
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 11900417b1cc..90868fb42757 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 		goto out_dst_release;
 	}
 
-	idev = in6_dev_get(skb->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(skb->dev);
 
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
 			      len + sizeof(struct icmp6hdr),
@@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
-		goto out_put;
+	} else {
+		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+						 len + sizeof(struct icmp6hdr));
 	}
-	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));
-
-out_put:
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 out_dst_release:
 	dst_release(dst);
 out:
 	icmpv6_xmit_unlock(sk);
 }
-
 EXPORT_SYMBOL(icmpv6_send);
 
 static void icmpv6_echo_reply(struct sk_buff *skb)
@@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (hlimit < 0)
 		hlimit = ip6_dst_hoplimit(dst);
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 
 	msg.skb = skb;
 	msg.offset = 0;
@@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
-		goto out_put;
+	} else {
+		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+						 skb->len + sizeof(struct icmp6hdr));
 	}
-	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
-
-out_put:
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
 	dst_release(dst);
 out:
 	icmpv6_xmit_unlock(sk);
@@ -840,8 +835,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff struct overhead.
 		 */
-		sk->sk_sndbuf =
-			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
+		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
 	}
 	return 0;
 
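
The icmpv6_sk_init() change above swaps an open-coded buffer estimate for the
SKB_TRUESIZE() helper, which also accounts for the shared-info block and
allocator alignment. Paraphrased from the <linux/skbuff.h> of this era (the
exact macro text here is from memory, so treat it as approximate):

	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
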
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a58e8cf6646..fee46d5a2f12 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	struct in6_addr *final_p, final;
+	int res;
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = sk->sk_protocol;
@@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 		__inet6_csk_dst_store(sk, dst, NULL, NULL);
 	}
 
-	skb_dst_set(skb, dst_clone(dst));
+	rcu_read_lock();
+	skb_dst_set_noref(skb, dst);
 
 	/* Restore final destination back after routing done */
 	ipv6_addr_copy(&fl6.daddr, &np->daddr);
 
-	return ip6_xmit(sk, skb, &fl6, np->opt);
+	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	rcu_read_unlock();
+	return res;
 }
-
 EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 320d91d20ad7..93718f3db79b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -28,10 +28,6 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 543039450193..4566dbd916d3 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -21,6 +21,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4c882cf4e8a1..84d0bd5cac93 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -180,7 +180,7 @@ int ip6_output(struct sk_buff *skb)
 */
 
 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt)
+	     struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -190,7 +190,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	u8 proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
 	int hlimit = -1;
-	int tclass = 0;
 	u32 mtu;
 
 	if (opt) {
@@ -228,10 +227,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	/*
 	 *	Fill in the IPv6 header
 	 */
-	if (np) {
-		tclass = np->tclass;
+	if (np)
 		hlimit = np->hop_limit;
-	}
 	if (hlimit < 0)
 		hlimit = ip6_dst_hoplimit(dst);
 
@@ -1126,7 +1123,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 			   hh_len + fragheaderlen + transhdrlen + 20,
 			   (flags & MSG_DONTWAIT), &err);
 	if (skb == NULL)
-		return -ENOMEM;
+		return err;
 
 	/* reserve space for Hardware header */
 	skb_reserve(skb, hh_len);
@@ -1193,6 +1190,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	struct sk_buff *skb;
 	unsigned int maxfraglen, fragheaderlen;
 	int exthdrlen;
+	int dst_exthdrlen;
 	int hh_len;
 	int mtu;
 	int copy;
@@ -1248,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		np->cork.hop_limit = hlimit;
 		np->cork.tclass = tclass;
 		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
 		if (np->frag_size < mtu) {
 			if (np->frag_size)
 				mtu = np->frag_size;
@@ -1259,16 +1257,17 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		cork->length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
-			    rt->rt6i_nfheader_len;
+		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
+		dst_exthdrlen = rt->dst.header_len;
 	} else {
 		rt = (struct rt6_info *)cork->dst;
 		fl6 = &inet->cork.fl.u.ip6;
 		opt = np->cork.opt;
 		transhdrlen = 0;
 		exthdrlen = 0;
+		dst_exthdrlen = 0;
 		mtu = cork->fragsize;
 	}
 
@@ -1368,6 +1367,8 @@ alloc_new_skb:
 			else
 				alloclen = datalen + fragheaderlen;
 
+			alloclen += dst_exthdrlen;
+
 			/*
 			 * The last fragment gets additional space at tail.
 			 * Note: we overallocate on fragments with MSG_MODE
@@ -1419,9 +1420,9 @@ alloc_new_skb:
 			/*
 			 *	Find where to start putting bytes
 			 */
-			data = skb_put(skb, fraglen);
-			skb_set_network_header(skb, exthdrlen);
-			data += fragheaderlen;
+			data = skb_put(skb, fraglen + dst_exthdrlen);
+			skb_set_network_header(skb, exthdrlen + dst_exthdrlen);
+			data += fragheaderlen + dst_exthdrlen;
 			skb->transport_header = (skb->network_header +
 						 fragheaderlen);
 			if (fraggap) {
@@ -1434,6 +1435,7 @@ alloc_new_skb:
 				pskb_trim_unique(skb_prev, maxfraglen);
 			}
 			copy = datalen - transhdrlen - fraggap;
+
 			if (copy < 0) {
 				err = -EINVAL;
 				kfree_skb(skb);
@@ -1448,6 +1450,7 @@ alloc_new_skb:
 			length -= datalen - fraggap;
 			transhdrlen = 0;
 			exthdrlen = 0;
+			dst_exthdrlen = 0;
 			csummode = CHECKSUM_NONE;
 
 			/*
@@ -1480,13 +1483,13 @@ alloc_new_skb:
 			if (page && (left = PAGE_SIZE - off) > 0) {
 				if (copy >= left)
 					copy = left;
-				if (page != frag->page) {
+				if (page != skb_frag_page(frag)) {
 					if (i == MAX_SKB_FRAGS) {
 						err = -EMSGSIZE;
 						goto error;
 					}
-					get_page(page);
 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+					skb_frag_ref(skb, i);
 					frag = &skb_shinfo(skb)->frags[i];
 				}
 			} else if(i < MAX_SKB_FRAGS) {
@@ -1506,12 +1509,14 @@ alloc_new_skb:
 				err = -EMSGSIZE;
 				goto error;
 			}
-			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+			if (getfrag(from,
+				    skb_frag_address(frag) + skb_frag_size(frag),
+				    offset, copy, skb->len, skb) < 0) {
 				err = -EFAULT;
 				goto error;
 			}
 			sk->sk_sndmsg_off += copy;
-			frag->size += copy;
+			skb_frag_size_add(frag, copy);
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0bc98886c383..bdc15c9003d7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
-	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
-	rcu_assign_pointer(*tp, t);
+	RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
+	RCU_INIT_POINTER(*tp, t);
 }
 
 /**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
 		if (t == iter) {
-			rcu_assign_pointer(*tp, t->next);
+			RCU_INIT_POINTER(*tp, t->next);
 			break;
 		}
 	}
@@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (dev == ip6n->fb_tnl_dev)
-		rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
 	else
 		ip6_tnl_unlink(ip6n, t);
 	ip6_tnl_dst_reset(t);
@@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &t->dev->stats;
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	struct ipv6_tel_txoption opt;
-	struct dst_entry *dst;
+	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
 	int mtu;
 	unsigned int max_headroom = sizeof(struct ipv6hdr);
@@ -897,19 +897,20 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	int err = -1;
 	int pkt_len;
 
-	if ((dst = ip6_tnl_dst_check(t)) != NULL)
-		dst_hold(dst);
-	else {
-		dst = ip6_route_output(net, NULL, fl6);
+	if (!fl6->flowi6_mark)
+		dst = ip6_tnl_dst_check(t);
+	if (!dst) {
+		ndst = ip6_route_output(net, NULL, fl6);
 
-		if (dst->error)
+		if (ndst->error)
 			goto tx_err_link_failure;
-		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
-		if (IS_ERR(dst)) {
-			err = PTR_ERR(dst);
-			dst = NULL;
+		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
+			ndst = NULL;
 			goto tx_err_link_failure;
 		}
+		dst = ndst;
 	}
 
 	tdev = dst->dev;
@@ -955,8 +956,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		skb = new_skb;
 	}
 	skb_dst_drop(skb);
-	skb_dst_set(skb, dst_clone(dst));
-
+	if (fl6->flowi6_mark) {
+		skb_dst_set(skb, dst);
+		ndst = NULL;
+	} else {
+		skb_dst_set_noref(skb, dst);
+	}
 	skb->transport_header = skb->network_header;
 
 	proto = fl6->flowi6_proto;
@@ -987,13 +992,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
 	}
-	ip6_tnl_dst_store(t, dst);
+	if (ndst)
+		ip6_tnl_dst_store(t, ndst);
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
-	dst_release(dst);
+	dst_release(ndst);
 	return err;
 }
 
@@ -1020,9 +1026,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dsfield = ipv4_get_dsfield(iph);
 
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
 		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
 				 & IPV6_TCLASS_MASK;
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
 
 	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
@@ -1069,10 +1077,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	fl6.flowi6_proto = IPPROTO_IPV6;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
 		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
 		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
 
 	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
@@ -1439,7 +1449,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
 
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
-	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+	RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
 	return 0;
 }
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index def0538e2413..449a9185b8f2 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,7 @@
 #include <linux/pim.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/export.h>
 #include <net/ip6_checksum.h>
 
 struct mr6_table {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2fbda5fc4cc4..c99e3ee9781f 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		break;
 
 	case IPV6_TRANSPARENT:
-		if (!capable(CAP_NET_ADMIN)) {
+		if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
 			retv = -EPERM;
 			break;
 		}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9da6e02eaaeb..44e5b7f2a6c1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -370,17 +370,14 @@ static int ndisc_constructor(struct neighbour *neigh)
 	struct neigh_parms *parms;
 	int is_multicast = ipv6_addr_is_multicast(addr);
 
-	rcu_read_lock();
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev == NULL) {
-		rcu_read_unlock();
 		return -EINVAL;
 	}
 
 	parms = in6_dev->nd_parms;
 	__neigh_parms_put(neigh->parms);
 	neigh->parms = neigh_parms_clone(parms);
-	rcu_read_unlock();
 
 	neigh->type = is_multicast ? RTN_MULTICAST : RTN_UNICAST;
 	if (!dev->header_ops) {
@@ -533,7 +530,8 @@ void ndisc_send_skb(struct sk_buff *skb,
 
 	skb_dst_set(skb, dst);
 
-	idev = in6_dev_get(dst->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(dst->dev);
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
@@ -543,8 +541,7 @@ void ndisc_send_skb(struct sk_buff *skb,
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
 	}
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 }
 
 EXPORT_SYMBOL(ndisc_send_skb);
@@ -1039,7 +1036,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 	if (skb->len < sizeof(*rs_msg))
 		return;
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 	if (!idev) {
 		if (net_ratelimit())
 			ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
@@ -1080,7 +1077,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 		neigh_release(neigh);
 	}
 out:
-	in6_dev_put(idev);
+	return;
 }
 
 static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1179,7 +1176,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	 *	set the RA_RECV flag in the interface
 	 */
 
-	in6_dev = in6_dev_get(skb->dev);
+	in6_dev = __in6_dev_get(skb->dev);
 	if (in6_dev == NULL) {
 		ND_PRINTK0(KERN_ERR
 			   "ICMPv6 RA: can't find inet6 device for %s.\n",
@@ -1188,7 +1185,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	}
 
 	if (!ndisc_parse_options(opt, optlen, &ndopts)) {
-		in6_dev_put(in6_dev);
 		ND_PRINTK2(KERN_WARNING
 			   "ICMP6 RA: invalid ND options\n");
 		return;
@@ -1225,6 +1221,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	if (!in6_dev->cnf.accept_ra_defrtr)
 		goto skip_defrtr;
 
+	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+		goto skip_defrtr;
+
 	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
@@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 		ND_PRINTK0(KERN_ERR
 			   "ICMPv6 RA: %s() failed to add default route.\n",
 			   __func__);
-		in6_dev_put(in6_dev);
 		return;
 	}
 
@@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 			   "ICMPv6 RA: %s() got default router without neighbour.\n",
 			   __func__);
 		dst_release(&rt->dst);
-		in6_dev_put(in6_dev);
 		return;
 	}
 	neigh->flags |= NTF_ROUTER;
@@ -1349,6 +1346,9 @@ skip_linkparms:
 		goto out;
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+		goto skip_routeinfo;
+
 	if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
 		struct nd_opt_hdr *p;
 		for (p = ndopts.nd_opts_ri;
@@ -1366,6 +1366,8 @@ skip_linkparms:
 					      &ipv6_hdr(skb)->saddr);
 		}
 	}
+
+skip_routeinfo:
 #endif
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1422,7 +1424,6 @@ out:
 		dst_release(&rt->dst);
 	else if (neigh)
 		neigh_release(neigh);
-	in6_dev_put(in6_dev);
 }
 
 static void ndisc_redirect_rcv(struct sk_buff *skb)
@@ -1481,13 +1482,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 		return;
 	}
 
-	in6_dev = in6_dev_get(skb->dev);
+	in6_dev = __in6_dev_get(skb->dev);
 	if (!in6_dev)
 		return;
-	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) {
-		in6_dev_put(in6_dev);
+	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
 		return;
-	}
 
 	/* RFC2461 8.1:
 	 *	The IP source address of the Redirect MUST be the same as the current
@@ -1497,7 +1496,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 	if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
 		ND_PRINTK2(KERN_WARNING
 			   "ICMPv6 Redirect: invalid ND options\n");
-		in6_dev_put(in6_dev);
 		return;
 	}
 	if (ndopts.nd_opts_tgt_lladdr) {
@@ -1506,7 +1504,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 		if (!lladdr) {
 			ND_PRINTK2(KERN_WARNING
 				   "ICMPv6 Redirect: invalid link-layer address length\n");
-			in6_dev_put(in6_dev);
 			return;
 		}
 	}
@@ -1518,7 +1515,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 			     on_link);
 		neigh_release(neigh);
 	}
-	in6_dev_put(in6_dev);
 }
 
 void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
@@ -1651,7 +1647,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 					     csum_partial(icmph, len, 0));
 
 	skb_dst_set(buff, dst);
-	idev = in6_dev_get(dst->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(dst->dev);
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
 		      dst_output);
@@ -1660,8 +1657,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
 	}
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 	return;
 
 release:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 30fcee465448..db31561cc8df 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -3,6 +3,7 @@
 #include <linux/ipv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/export.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -100,9 +101,16 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
 		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
 	};
 	const void *sk = strict ? &fake_sk : NULL;
-
-	*dst = ip6_route_output(net, sk, &fl->u.ip6);
-	return (*dst)->error;
+	struct dst_entry *result;
+	int err;
+
+	result = ip6_route_output(net, sk, &fl->u.ip6);
+	err = result->error;
+	if (err)
+		dst_release(result);
+	else
+		*dst = result;
+	return err;
 }
 
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 085727263812..38f00b0298d3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -182,7 +182,6 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
 	return container_of(q, struct nf_ct_frag6_queue, q);
 
 oom:
-	pr_debug("Can't alloc new queue\n");
 	return NULL;
 }
 
@@ -370,16 +369,16 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 	struct sk_buff *clone;
 	int i, plen = 0;
 
-	if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
-		pr_debug("Can't alloc skb\n");
+	clone = alloc_skb(0, GFP_ATOMIC);
+	if (clone == NULL)
 		goto out_oom;
-	}
+
 	clone->next = head->next;
 	head->next = clone;
 	skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 	skb_frag_list_init(head);
-	for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-		plen += skb_shinfo(head)->frags[i].size;
+	for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+		plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 	clone->len = clone->data_len = head->data_len - plen;
 	head->data_len -= clone->len;
 	head->len -= clone->len;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 18ff5df7ec02..1008ce94bc33 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -21,6 +21,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stddef.h>
+#include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/sock.h>
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 343852e5c703..331af3b882ac 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -61,6 +61,7 @@
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/export.h>
 
 static struct raw_hashinfo raw_v6_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
@@ -130,14 +131,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
 
 int rawv6_mh_filter_register(mh_filter_t filter)
 {
-	rcu_assign_pointer(mh_filter, filter);
+	RCU_INIT_POINTER(mh_filter, filter);
 	return 0;
 }
 EXPORT_SYMBOL(rawv6_mh_filter_register);
 
 int rawv6_mh_filter_unregister(mh_filter_t filter)
 {
-	rcu_assign_pointer(mh_filter, NULL);
+	RCU_INIT_POINTER(mh_filter, NULL);
 	synchronize_rcu();
 	return 0;
 }
@@ -372,9 +373,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 	read_unlock(&raw_v6_hashinfo.lock);
 }
 
-static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
+	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
 	    skb_checksum_complete(skb)) {
 		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
@@ -542,8 +543,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 		goto out;
 
 	offset = rp->offset;
-	total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
-						     skb->data);
+	total_len = inet_sk(sk)->cork.base.length;
 	if (offset >= total_len - 1) {
 		err = -EINVAL;
 		ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7b954e2539d0..dfb164e9051a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -42,6 +42,7 @@
 #include <linux/jhash.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 
 #include <net/sock.h>
 #include <net/snmp.h>
@@ -464,8 +465,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	head->next = clone;
 	skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 	skb_frag_list_init(head);
-	for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-		plen += skb_shinfo(head)->frags[i].size;
+	for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+		plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 	clone->len = clone->data_len = head->data_len - plen;
 	head->data_len -= clone->len;
 	head->len -= clone->len;
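
The frags[i].size -> skb_frag_size() rewrites here and in nf_conntrack_reasm.c
above come from the skb-frag accessor layer added in this merge window. The
helpers are equivalent one-liners, roughly (a sketch of the in-tree inlines):

	static inline unsigned int skb_frag_size(const skb_frag_t *frag)
	{
		return frag->size;
	}

	static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
	{
		frag->size += delta;
	}

Keeping callers behind accessors is what later lets the frag page/size
representation change without touching every user.
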
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb545edef6ea..8473016bba4a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -26,6 +26,7 @@
 
 #include <linux/capability.h>
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/times.h>
 #include <linux/socket.h>
@@ -1086,11 +1087,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->dst.output = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev = idev;
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 00b15ac7a702..a7a18602a046 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
 		if (t == iter) {
-			rcu_assign_pointer(*tp, t->next);
+			RCU_INIT_POINTER(*tp, t->next);
 			break;
 		}
 	}
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
 {
 	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
 
-	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
-	rcu_assign_pointer(*tp, t);
+	RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+	RCU_INIT_POINTER(*tp, t);
 }
 
 static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -391,7 +391,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
 	p->addr = a->addr;
 	p->flags = a->flags;
 	t->prl_count++;
-	rcu_assign_pointer(t->prl, p);
+	RCU_INIT_POINTER(t->prl, p);
 out:
 	return err;
 }
@@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
 	if (dev == sitn->fb_tunnel_dev) {
-		rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
+		RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
 	} else {
 		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
 		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
@@ -1176,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 	if (!dev->tstats)
 		return -ENOMEM;
 	dev_hold(dev);
-	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+	RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
 	return 0;
 }
 
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ac838965ff34..5a0d6648bbbc 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -115,7 +115,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
 		& COOKIEMASK;
 }
 
-__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 			     jiffies / (HZ * 60), mssind);
 }
 
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -152,7 +152,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_options_received tcp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct inet_request_sock *ireq;
 	struct inet6_request_sock *ireq6;
 	struct tcp_request_sock *treq;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 6dcf5e7d661b..166a57c47d39 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -10,6 +10,7 @@
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <net/ndisc.h>
 #include <net/ipv6.h>
 #include <net/addrconf.h>
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7b8fc5794352..36131d122a6f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -114,7 +114,7 @@ static __inline__ __sum16 tcp_v6_check(int len,
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 }
 
-static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 					    ipv6_hdr(skb)->saddr.s6_addr32,
@@ -513,7 +513,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl6, opt);
+		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
 
@@ -761,7 +761,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 			       const struct in6_addr *daddr, struct in6_addr *saddr,
-			       struct tcphdr *th)
+			       const struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
@@ -793,13 +793,14 @@ clear_hash_noput:
 }
 
 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			       struct sock *sk, struct request_sock *req,
-			       struct sk_buff *skb)
+			       const struct sock *sk,
+			       const struct request_sock *req,
+			       const struct sk_buff *skb)
 {
 	const struct in6_addr *saddr, *daddr;
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 
 	if (sk) {
 		saddr = &inet6_sk(sk)->saddr;
@@ -842,12 +843,12 @@ clear_hash_noput:
 	return 1;
 }
 
-static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
-	__u8 *hash_location = NULL;
+	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	u8 newhash[16];
 
@@ -978,9 +979,10 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 }
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-				 u32 ts, struct tcp_md5sig_key *key, int rst)
+				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 {
-	struct tcphdr *th = tcp_hdr(skb), *t1;
+	const struct tcphdr *th = tcp_hdr(skb);
+	struct tcphdr *t1;
 	struct sk_buff *buff;
 	struct flowi6 fl6;
 	struct net *net = dev_net(skb_dst(skb)->dev);
@@ -1058,7 +1060,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ip6_xmit(ctl_sk, buff, &fl6, NULL);
+		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1070,7 +1072,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 
 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = 0, ack_seq = 0;
 	struct tcp_md5sig_key *key = NULL;
 
@@ -1091,13 +1093,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 			  (th->doff << 2);
 
-	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
 }
 
 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
-			    struct tcp_md5sig_key *key)
+			    struct tcp_md5sig_key *key, u8 tclass)
 {
-	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
+	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1107,7 +1109,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1107 1109
1108 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1110 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1109 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1111 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1110 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw)); 1112 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1113 tw->tw_tclass);
1111 1114
1112 inet_twsk_put(tw); 1115 inet_twsk_put(tw);
1113} 1116}
@@ -1116,7 +1119,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1116 struct request_sock *req) 1119 struct request_sock *req)
1117{ 1120{
1118 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 1121 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1119 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr)); 1122 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1120} 1123}
1121 1124
1122 1125
@@ -1160,7 +1163,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1160{ 1163{
1161 struct tcp_extend_values tmp_ext; 1164 struct tcp_extend_values tmp_ext;
1162 struct tcp_options_received tmp_opt; 1165 struct tcp_options_received tmp_opt;
1163 u8 *hash_location; 1166 const u8 *hash_location;
1164 struct request_sock *req; 1167 struct request_sock *req;
1165 struct inet6_request_sock *treq; 1168 struct inet6_request_sock *treq;
1166 struct ipv6_pinfo *np = inet6_sk(sk); 1169 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1608,7 +1611,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1608 opt_skb = skb_clone(skb, GFP_ATOMIC); 1611 opt_skb = skb_clone(skb, GFP_ATOMIC);
1609 1612
1610 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1613 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1611 sock_rps_save_rxhash(sk, skb->rxhash); 1614 sock_rps_save_rxhash(sk, skb);
1612 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1615 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1613 goto reset; 1616 goto reset;
1614 if (opt_skb) 1617 if (opt_skb)
@@ -1630,7 +1633,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1630 * the new socket.. 1633 * the new socket..
1631 */ 1634 */
1632 if(nsk != sk) { 1635 if(nsk != sk) {
1633 sock_rps_save_rxhash(nsk, skb->rxhash); 1636 sock_rps_save_rxhash(nsk, skb);
1634 if (tcp_child_process(sk, nsk, skb)) 1637 if (tcp_child_process(sk, nsk, skb))
1635 goto reset; 1638 goto reset;
1636 if (opt_skb) 1639 if (opt_skb)
@@ -1638,7 +1641,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1638 return 0; 1641 return 0;
1639 } 1642 }
1640 } else 1643 } else
1641 sock_rps_save_rxhash(sk, skb->rxhash); 1644 sock_rps_save_rxhash(sk, skb);
1642 1645
1643 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1646 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1644 goto reset; 1647 goto reset;
@@ -1688,7 +1691,7 @@ ipv6_pktoptions:
1688 1691
1689static int tcp_v6_rcv(struct sk_buff *skb) 1692static int tcp_v6_rcv(struct sk_buff *skb)
1690{ 1693{
1691 struct tcphdr *th; 1694 const struct tcphdr *th;
1692 const struct ipv6hdr *hdr; 1695 const struct ipv6hdr *hdr;
1693 struct sock *sk; 1696 struct sock *sk;
1694 int ret; 1697 int ret;
@@ -1722,7 +1725,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1722 skb->len - th->doff*4); 1725 skb->len - th->doff*4);
1723 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1726 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1724 TCP_SKB_CB(skb)->when = 0; 1727 TCP_SKB_CB(skb)->when = 0;
1725 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr); 1728 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1726 TCP_SKB_CB(skb)->sacked = 0; 1729 TCP_SKB_CB(skb)->sacked = 0;
1727 1730
1728 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); 1731 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1856,8 +1859,8 @@ static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1856 1859
1857static void *tcp_v6_tw_get_peer(struct sock *sk) 1860static void *tcp_v6_tw_get_peer(struct sock *sk)
1858{ 1861{
1859 struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 1862 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1860 struct inet_timewait_sock *tw = inet_twsk(sk); 1863 const struct inet_timewait_sock *tw = inet_twsk(sk);
1861 1864
1862 if (tw->tw_family == AF_INET) 1865 if (tw->tw_family == AF_INET)
1863 return tcp_v4_tw_get_peer(sk); 1866 return tcp_v4_tw_get_peer(sk);
@@ -2012,7 +2015,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
2012#ifdef CONFIG_PROC_FS 2015#ifdef CONFIG_PROC_FS
2013/* Proc filesystem TCPv6 sock list dumping. */ 2016/* Proc filesystem TCPv6 sock list dumping. */
2014static void get_openreq6(struct seq_file *seq, 2017static void get_openreq6(struct seq_file *seq,
2015 struct sock *sk, struct request_sock *req, int i, int uid) 2018 const struct sock *sk, struct request_sock *req, int i, int uid)
2016{ 2019{
2017 int ttd = req->expires - jiffies; 2020 int ttd = req->expires - jiffies;
2018 const struct in6_addr *src = &inet6_rsk(req)->loc_addr; 2021 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -2048,10 +2051,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2048 __u16 destp, srcp; 2051 __u16 destp, srcp;
2049 int timer_active; 2052 int timer_active;
2050 unsigned long timer_expires; 2053 unsigned long timer_expires;
2051 struct inet_sock *inet = inet_sk(sp); 2054 const struct inet_sock *inet = inet_sk(sp);
2052 struct tcp_sock *tp = tcp_sk(sp); 2055 const struct tcp_sock *tp = tcp_sk(sp);
2053 const struct inet_connection_sock *icsk = inet_csk(sp); 2056 const struct inet_connection_sock *icsk = inet_csk(sp);
2054 struct ipv6_pinfo *np = inet6_sk(sp); 2057 const struct ipv6_pinfo *np = inet6_sk(sp);
2055 2058
2056 dest = &np->daddr; 2059 dest = &np->daddr;
2057 src = &np->rcv_saddr; 2060 src = &np->rcv_saddr;
@@ -2103,7 +2106,7 @@ static void get_timewait6_sock(struct seq_file *seq,
2103{ 2106{
2104 const struct in6_addr *dest, *src; 2107 const struct in6_addr *dest, *src;
2105 __u16 destp, srcp; 2108 __u16 destp, srcp;
2106 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2109 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2107 int ttd = tw->tw_ttd - jiffies; 2110 int ttd = tw->tw_ttd - jiffies;
2108 2111
2109 if (ttd < 0) 2112 if (ttd < 0)
@@ -2158,12 +2161,18 @@ out:
2158 return 0; 2161 return 0;
2159} 2162}
2160 2163
2164static const struct file_operations tcp6_afinfo_seq_fops = {
2165 .owner = THIS_MODULE,
2166 .open = tcp_seq_open,
2167 .read = seq_read,
2168 .llseek = seq_lseek,
2169 .release = seq_release_net
2170};
2171
2161static struct tcp_seq_afinfo tcp6_seq_afinfo = { 2172static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2162 .name = "tcp6", 2173 .name = "tcp6",
2163 .family = AF_INET6, 2174 .family = AF_INET6,
2164 .seq_fops = { 2175 .seq_fops = &tcp6_afinfo_seq_fops,
2165 .owner = THIS_MODULE,
2166 },
2167 .seq_ops = { 2176 .seq_ops = {
2168 .show = tcp6_seq_show, 2177 .show = tcp6_seq_show,
2169 }, 2178 },
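With this hunk, tcp6 (like udp6 and udplite6 below) stops embedding a partially initialized `struct file_operations` inside the per-family afinfo and instead points at a fully spelled-out `static const` instance. One shared const object needs no runtime fix-up and can live in read-only memory. A sketch of the assumed consumer side after the change (the registration shape of tcp_proc_register() is an assumption, not shown in this diff):

	/* Sketch: the afinfo now carries a pointer to the shared const
	 * fops, which the proc registration passes straight through. */
	struct proc_dir_entry *p;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);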
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bb95e8e1c6f9..846f4757eb8d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
509 int is_udplite = IS_UDPLITE(sk); 509 int is_udplite = IS_UDPLITE(sk);
510 510
511 if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) 511 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
512 sock_rps_save_rxhash(sk, skb->rxhash); 512 sock_rps_save_rxhash(sk, skb);
513 513
514 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 514 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
515 goto drop; 515 goto drop;
@@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
533 } 533 }
534 } 534 }
535 535
536 if (rcu_dereference_raw(sk->sk_filter)) { 536 if (rcu_access_pointer(sk->sk_filter)) {
537 if (udp_lib_checksum_complete(skb)) 537 if (udp_lib_checksum_complete(skb))
538 goto drop; 538 goto drop;
539 } 539 }
@@ -1424,13 +1424,19 @@ int udp6_seq_show(struct seq_file *seq, void *v)
1424 return 0; 1424 return 0;
1425} 1425}
1426 1426
1427static const struct file_operations udp6_afinfo_seq_fops = {
1428 .owner = THIS_MODULE,
1429 .open = udp_seq_open,
1430 .read = seq_read,
1431 .llseek = seq_lseek,
1432 .release = seq_release_net
1433};
1434
1427static struct udp_seq_afinfo udp6_seq_afinfo = { 1435static struct udp_seq_afinfo udp6_seq_afinfo = {
1428 .name = "udp6", 1436 .name = "udp6",
1429 .family = AF_INET6, 1437 .family = AF_INET6,
1430 .udp_table = &udp_table, 1438 .udp_table = &udp_table,
1431 .seq_fops = { 1439 .seq_fops = &udp6_afinfo_seq_fops,
1432 .owner = THIS_MODULE,
1433 },
1434 .seq_ops = { 1440 .seq_ops = {
1435 .show = udp6_seq_show, 1441 .show = udp6_seq_show,
1436 }, 1442 },
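The `rcu_dereference_raw(sk->sk_filter)` → `rcu_access_pointer(sk->sk_filter)` switch above matters because the value is only tested against NULL, never dereferenced; rcu_access_pointer() documents that intent and avoids the checks a full dereference implies. The idiom, mirroring the hunk:

	/* rcu_access_pointer() is for NULL-checks only; use
	 * rcu_dereference() when the pointed-to object is actually
	 * read under rcu_read_lock(). */
	if (rcu_access_pointer(sk->sk_filter)) {	/* existence check, no deref */
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}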
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 986c4de5292e..1d08e21d9f69 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -11,6 +11,7 @@
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14#include <linux/export.h>
14#include "udp_impl.h" 15#include "udp_impl.h"
15 16
16static int udplitev6_rcv(struct sk_buff *skb) 17static int udplitev6_rcv(struct sk_buff *skb)
@@ -93,13 +94,20 @@ void udplitev6_exit(void)
93} 94}
94 95
95#ifdef CONFIG_PROC_FS 96#ifdef CONFIG_PROC_FS
97
98static const struct file_operations udplite6_afinfo_seq_fops = {
99 .owner = THIS_MODULE,
100 .open = udp_seq_open,
101 .read = seq_read,
102 .llseek = seq_lseek,
103 .release = seq_release_net
104};
105
96static struct udp_seq_afinfo udplite6_seq_afinfo = { 106static struct udp_seq_afinfo udplite6_seq_afinfo = {
97 .name = "udplite6", 107 .name = "udplite6",
98 .family = AF_INET6, 108 .family = AF_INET6,
99 .udp_table = &udplite_table, 109 .udp_table = &udplite_table,
100 .seq_fops = { 110 .seq_fops = &udplite6_afinfo_seq_fops,
101 .owner = THIS_MODULE,
102 },
103 .seq_ops = { 111 .seq_ops = {
104 .show = udp6_seq_show, 112 .show = udp6_seq_show,
105 }, 113 },
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 49a91c5f5623..faae41737fca 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -28,6 +28,43 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
28 28
29EXPORT_SYMBOL(xfrm6_find_1stfragopt); 29EXPORT_SYMBOL(xfrm6_find_1stfragopt);
30 30
31static int xfrm6_local_dontfrag(struct sk_buff *skb)
32{
33 int proto;
34 struct sock *sk = skb->sk;
35
36 if (sk) {
37 proto = sk->sk_protocol;
38
39 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
40 return inet6_sk(sk)->dontfrag;
41 }
42
43 return 0;
44}
45
46static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
47{
48 struct flowi6 fl6;
49 struct sock *sk = skb->sk;
50
51 fl6.flowi6_oif = sk->sk_bound_dev_if;
52 ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
53
54 ipv6_local_rxpmtu(sk, &fl6, mtu);
55}
56
57static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
58{
59 struct flowi6 fl6;
60 struct sock *sk = skb->sk;
61
62 fl6.fl6_dport = inet_sk(sk)->inet_dport;
63 ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
64
65 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
66}
67
31static int xfrm6_tunnel_check_size(struct sk_buff *skb) 68static int xfrm6_tunnel_check_size(struct sk_buff *skb)
32{ 69{
33 int mtu, ret = 0; 70 int mtu, ret = 0;
@@ -39,7 +76,13 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
39 76
40 if (!skb->local_df && skb->len > mtu) { 77 if (!skb->local_df && skb->len > mtu) {
41 skb->dev = dst->dev; 78 skb->dev = dst->dev;
42 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 79
80 if (xfrm6_local_dontfrag(skb))
81 xfrm6_local_rxpmtu(skb, mtu);
82 else if (skb->sk)
83 xfrm6_local_error(skb, mtu);
84 else
85 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
43 ret = -EMSGSIZE; 86 ret = -EMSGSIZE;
44 } 87 }
45 88
@@ -93,9 +136,18 @@ static int __xfrm6_output(struct sk_buff *skb)
93{ 136{
94 struct dst_entry *dst = skb_dst(skb); 137 struct dst_entry *dst = skb_dst(skb);
95 struct xfrm_state *x = dst->xfrm; 138 struct xfrm_state *x = dst->xfrm;
139 int mtu = ip6_skb_dst_mtu(skb);
140
141 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
142 xfrm6_local_rxpmtu(skb, mtu);
143 return -EMSGSIZE;
144 } else if (!skb->local_df && skb->len > mtu && skb->sk) {
145 xfrm6_local_error(skb, mtu);
146 return -EMSGSIZE;
147 }
96 148
97 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 149 if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
98 ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 150 ((skb->len > mtu && !skb_is_gso(skb)) ||
99 dst_allfrag(skb_dst(skb)))) { 151 dst_allfrag(skb_dst(skb)))) {
100 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
101 } 153 }
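With these xfrm6_output.c hunks, an oversized locally generated packet on an IPsec route no longer unconditionally triggers icmpv6_send(): if the sending socket set IPV6_DONTFRAG the MTU is reported through the RXPMTU mechanism, and otherwise a local EMSGSIZE error is raised, matching plain IPv6 output behaviour. A hedged userspace sketch of the DONTFRAG side, assuming the standard IPV6_DONTFRAG/IPV6_RECVPATHMTU socket options and omitting error handling:

	#include <netinet/in.h>
	#include <sys/socket.h>

	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));
	/* After a too-big send, recvmsg() yields an IPV6_PATHMTU ancillary
	 * message carrying the reported MTU instead of on-wire fragmentation. */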
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 248f0b2a7ee9..f2d72b8a3faa 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -15,6 +15,7 @@
15#include <linux/pfkeyv2.h> 15#include <linux/pfkeyv2.h>
16#include <linux/ipsec.h> 16#include <linux/ipsec.h>
17#include <linux/netfilter_ipv6.h> 17#include <linux/netfilter_ipv6.h>
18#include <linux/export.h>
18#include <net/dsfield.h> 19#include <net/dsfield.h>
19#include <net/ipv6.h> 20#include <net/ipv6.h>
20#include <net/addrconf.h> 21#include <net/addrconf.h>
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 26b5bfcf1d03..f8ba30dfecae 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -9,6 +9,7 @@
9#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/seq_file.h> 11#include <linux/seq_file.h>
12#include <linux/export.h>
12#include <net/net_namespace.h> 13#include <net/net_namespace.h>
13#include <net/tcp_states.h> 14#include <net/tcp_states.h>
14#include <net/ipx.h> 15#include <net/ipx.h>
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 36c3f037f172..b0b56a339a83 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -35,6 +35,7 @@
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/export.h>
38 39
39#include <net/irda/irda.h> 40#include <net/irda/irda.h>
40#include <net/irda/irlmp.h> 41#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b3cc8b3989a9..253695d43fd9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -551,7 +551,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
551 */ 551 */
552 tty->closing = 1; 552 tty->closing = 1;
553 if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE) 553 if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE)
554 tty_wait_until_sent(tty, self->closing_wait); 554 tty_wait_until_sent_from_close(tty, self->closing_wait);
555 555
556 ircomm_tty_shutdown(self); 556 ircomm_tty_shutdown(self);
557 557
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 3eca35faf2a8..14653b8d664d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -42,6 +42,7 @@
42#include <linux/kmod.h> 42#include <linux/kmod.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/export.h>
45 46
46#include <asm/ioctls.h> 47#include <asm/ioctls.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index e8d5f4405d68..d14152e866d9 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = {
50 .ndo_open = irlan_eth_open, 50 .ndo_open = irlan_eth_open,
51 .ndo_stop = irlan_eth_close, 51 .ndo_stop = irlan_eth_close,
52 .ndo_start_xmit = irlan_eth_xmit, 52 .ndo_start_xmit = irlan_eth_xmit,
53 .ndo_set_multicast_list = irlan_eth_set_multicast_list, 53 .ndo_set_rx_mode = irlan_eth_set_multicast_list,
54 .ndo_change_mtu = eth_change_mtu, 54 .ndo_change_mtu = eth_change_mtu,
55 .ndo_validate_addr = eth_validate_addr, 55 .ndo_validate_addr = eth_validate_addr,
56}; 56};
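The `.ndo_set_multicast_list` → `.ndo_set_rx_mode` rename reflects the net_device_ops consolidation in which the separate multicast-list callback was folded into the more general RX-mode hook covering unicast/multicast filtering and promiscuity. A sketch of a driver's ops table after the consolidation (driver names are hypothetical):

	static void mydrv_set_rx_mode(struct net_device *dev)	/* hypothetical driver */
	{
		/* program unicast/multicast filters, honour IFF_PROMISC/IFF_ALLMULTI */
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		.ndo_set_rx_mode = mydrv_set_rx_mode,	/* was .ndo_set_multicast_list */
	};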
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 285ccd623ae5..32e3bb026110 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -29,6 +29,7 @@
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/export.h>
32 33
33#include <asm/byteorder.h> 34#include <asm/byteorder.h>
34#include <asm/unaligned.h> 35#include <asm/unaligned.h>
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 4369f7f41bcb..798ffd9a705e 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -30,6 +30,8 @@
30 * 30 *
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <linux/export.h>
34
33#include <asm/byteorder.h> 35#include <asm/byteorder.h>
34 36
35#include <net/irda/irda.h> 37#include <net/irda/irda.h>
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39e..497fbe732def 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
1config IUCV 1config IUCV
2 tristate "IUCV support (S390 - z/VM only)"
3 depends on S390 2 depends on S390
3 def_tristate y if S390
4 prompt "IUCV support (S390 - z/VM only)"
4 help 5 help
5 Select this option if you want to use inter-user communication 6 Select this option if you want to use inter-user communication
6 under VM or VIF. If you run on z/VM, say "Y" to enable a fast 7 under VM or VIF. If you run on z/VM, say "Y" to enable a fast
7 communication link between VM guests. 8 communication link between VM guests.
8 9
9config AFIUCV 10config AFIUCV
10 tristate "AF_IUCV support (S390 - z/VM only)" 11 depends on S390
11 depends on IUCV 12 def_tristate m if QETH_L3 || IUCV
13 prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
12 help 14 help
13 Select this option if you want to use inter-user communication under 15 Select this option if you want to use AF_IUCV socket applications
14 VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast 16 based on z/VM inter-user communication vehicle or based on
15 communication link between VM guests. 17 HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e434d03..274d150320c0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
27#include <asm/cpcmd.h> 27#include <asm/cpcmd.h>
28#include <linux/kmod.h> 28#include <linux/kmod.h>
29 29
30#include <net/iucv/iucv.h>
31#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
32 31
33#define VERSION "1.1" 32#define VERSION "1.2"
34 33
35static char iucv_userid[80]; 34static char iucv_userid[80];
36 35
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
42 .obj_size = sizeof(struct iucv_sock), 41 .obj_size = sizeof(struct iucv_sock),
43}; 42};
44 43
44static struct iucv_interface *pr_iucv;
45
45/* special AF_IUCV IPRM messages */ 46/* special AF_IUCV IPRM messages */
46static const u8 iprm_shutdown[8] = 47static const u8 iprm_shutdown[8] =
47 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 48 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
90static void iucv_sock_kill(struct sock *sk); 91static void iucv_sock_kill(struct sock *sk);
91static void iucv_sock_close(struct sock *sk); 92static void iucv_sock_close(struct sock *sk);
92 93
94static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev);
96static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
97 struct sk_buff *skb, u8 flags);
98static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
99
93/* Call Back functions */ 100/* Call Back functions */
94static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 101static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
95static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 102static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
165 case IUCV_CLOSING: 172 case IUCV_CLOSING:
166 case IUCV_CONNECTED: 173 case IUCV_CONNECTED:
167 if (iucv->path) { 174 if (iucv->path) {
168 err = iucv_path_sever(iucv->path, NULL); 175 err = pr_iucv->path_sever(iucv->path, NULL);
169 iucv_path_free(iucv->path); 176 iucv_path_free(iucv->path);
170 iucv->path = NULL; 177 iucv->path = NULL;
171 } 178 }
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
229static struct device_driver af_iucv_driver = { 236static struct device_driver af_iucv_driver = {
230 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
231 .name = "afiucv", 238 .name = "afiucv",
232 .bus = &iucv_bus, 239 .bus = NULL,
233 .pm = &afiucv_pm_ops, 240 .pm = &afiucv_pm_ops,
234}; 241};
235 242
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
294 301
295 if (sk->sk_state != IUCV_CONNECTED) 302 if (sk->sk_state != IUCV_CONNECTED)
296 return 1; 303 return 1;
297 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); 304 if (iucv->transport == AF_IUCV_TRANS_IUCV)
305 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
306 else
307 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
308 (atomic_read(&iucv->pendings) <= 0));
298} 309}
299 310
300/** 311/**
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
314 325
326/**
327 * afiucv_hs_send() - send a message through HiperSockets transport
328 */
329static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
330 struct sk_buff *skb, u8 flags)
331{
332 struct net *net = sock_net(sock);
333 struct iucv_sock *iucv = iucv_sk(sock);
334 struct af_iucv_trans_hdr *phs_hdr;
335 struct sk_buff *nskb;
336 int err, confirm_recv = 0;
337
338 memset(skb->head, 0, ETH_HLEN);
339 phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
340 sizeof(struct af_iucv_trans_hdr));
341 skb_reset_mac_header(skb);
342 skb_reset_network_header(skb);
343 skb_push(skb, ETH_HLEN);
344 skb_reset_mac_header(skb);
345 memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
346
347 phs_hdr->magic = ETH_P_AF_IUCV;
348 phs_hdr->version = 1;
349 phs_hdr->flags = flags;
350 if (flags == AF_IUCV_FLAG_SYN)
351 phs_hdr->window = iucv->msglimit;
352 else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
353 confirm_recv = atomic_read(&iucv->msg_recv);
354 phs_hdr->window = confirm_recv;
355 if (confirm_recv)
356 phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
357 }
358 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
359 memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
360 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
361 memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
362 ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
363 ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
364 ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
365 ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
366 if (imsg)
367 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
368
369 rcu_read_lock();
370 skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
371 rcu_read_unlock();
372 if (!skb->dev)
373 return -ENODEV;
374 if (!(skb->dev->flags & IFF_UP))
375 return -ENETDOWN;
376 if (skb->len > skb->dev->mtu) {
377 if (sock->sk_type == SOCK_SEQPACKET)
378 return -EMSGSIZE;
379 else
380 skb_trim(skb, skb->dev->mtu);
381 }
382 skb->protocol = ETH_P_AF_IUCV;
383 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
384 nskb = skb_clone(skb, GFP_ATOMIC);
385 if (!nskb)
386 return -ENOMEM;
387 skb_queue_tail(&iucv->send_skb_q, nskb);
388 err = dev_queue_xmit(skb);
389 if (err) {
390 skb_unlink(nskb, &iucv->send_skb_q);
391 kfree_skb(nskb);
392 } else {
393 atomic_sub(confirm_recv, &iucv->msg_recv);
394 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
395 }
396 return err;
397}
398
315/* Timers */ 399/* Timers */
316static void iucv_sock_timeout(unsigned long arg) 400static void iucv_sock_timeout(unsigned long arg)
317{ 401{
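afiucv_hs_send() above frames every message for the HiperSockets transport: an Ethernet header's worth of zeroed bytes, then an af_iucv_trans_hdr carrying magic/version/flags, a flow-control window, and the EBCDIC-converted source/destination user IDs and application names, optionally wrapping an embedded iucv_message. A layout sketch inferred only from the fields this hunk touches; the actual order, widths, and padding live in <net/iucv/af_iucv.h> and may differ:

	struct af_iucv_trans_hdr_sketch {
		u16 magic;			/* ETH_P_AF_IUCV */
		u8  version;			/* 1 */
		u8  flags;			/* AF_IUCV_FLAG_{SYN,ACK,FIN,WIN} */
		u16 window;			/* flow-control credits */
		char destUserID[8];		/* EBCDIC on the wire */
		char destAppName[8];		/* 8 bytes copied by this hunk */
		char srcUserID[8];
		char srcAppName[8];
		struct iucv_message iucv_hdr;	/* embedded IUCV message, if any */
	};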
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
380 unsigned char user_data[16]; 464 unsigned char user_data[16];
381 struct iucv_sock *iucv = iucv_sk(sk); 465 struct iucv_sock *iucv = iucv_sk(sk);
382 unsigned long timeo; 466 unsigned long timeo;
467 int err, blen;
468 struct sk_buff *skb;
383 469
384 iucv_sock_clear_timer(sk); 470 iucv_sock_clear_timer(sk);
385 lock_sock(sk); 471 lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
390 break; 476 break;
391 477
392 case IUCV_CONNECTED: 478 case IUCV_CONNECTED:
479 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
480 /* send fin */
481 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
482 skb = sock_alloc_send_skb(sk, blen, 1, &err);
483 if (skb) {
484 skb_reserve(skb,
485 sizeof(struct af_iucv_trans_hdr) +
486 ETH_HLEN);
487 err = afiucv_hs_send(NULL, sk, skb,
488 AF_IUCV_FLAG_FIN);
489 }
490 sk->sk_state = IUCV_DISCONN;
491 sk->sk_state_change(sk);
492 }
393 case IUCV_DISCONN: 493 case IUCV_DISCONN:
394 sk->sk_state = IUCV_CLOSING; 494 sk->sk_state = IUCV_CLOSING;
395 sk->sk_state_change(sk); 495 sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
412 low_nmcpy(user_data, iucv->src_name); 512 low_nmcpy(user_data, iucv->src_name);
413 high_nmcpy(user_data, iucv->dst_name); 513 high_nmcpy(user_data, iucv->dst_name);
414 ASCEBC(user_data, sizeof(user_data)); 514 ASCEBC(user_data, sizeof(user_data));
415 iucv_path_sever(iucv->path, user_data); 515 pr_iucv->path_sever(iucv->path, user_data);
416 iucv_path_free(iucv->path); 516 iucv_path_free(iucv->path);
417 iucv->path = NULL; 517 iucv->path = NULL;
418 } 518 }
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
444static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) 544static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
445{ 545{
446 struct sock *sk; 546 struct sock *sk;
547 struct iucv_sock *iucv;
447 548
448 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); 549 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
449 if (!sk) 550 if (!sk)
450 return NULL; 551 return NULL;
552 iucv = iucv_sk(sk);
451 553
452 sock_init_data(sock, sk); 554 sock_init_data(sock, sk);
453 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); 555 INIT_LIST_HEAD(&iucv->accept_q);
454 spin_lock_init(&iucv_sk(sk)->accept_q_lock); 556 spin_lock_init(&iucv->accept_q_lock);
455 skb_queue_head_init(&iucv_sk(sk)->send_skb_q); 557 skb_queue_head_init(&iucv->send_skb_q);
456 INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); 558 INIT_LIST_HEAD(&iucv->message_q.list);
457 spin_lock_init(&iucv_sk(sk)->message_q.lock); 559 spin_lock_init(&iucv->message_q.lock);
458 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 560 skb_queue_head_init(&iucv->backlog_skb_q);
459 iucv_sk(sk)->send_tag = 0; 561 iucv->send_tag = 0;
460 iucv_sk(sk)->flags = 0; 562 atomic_set(&iucv->pendings, 0);
461 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT; 563 iucv->flags = 0;
462 iucv_sk(sk)->path = NULL; 564 iucv->msglimit = 0;
463 memset(&iucv_sk(sk)->src_user_id , 0, 32); 565 atomic_set(&iucv->msg_sent, 0);
566 atomic_set(&iucv->msg_recv, 0);
567 iucv->path = NULL;
568 iucv->sk_txnotify = afiucv_hs_callback_txnotify;
569 memset(&iucv->src_user_id , 0, 32);
570 if (pr_iucv)
571 iucv->transport = AF_IUCV_TRANS_IUCV;
572 else
573 iucv->transport = AF_IUCV_TRANS_HIPER;
464 574
465 sk->sk_destruct = iucv_sock_destruct; 575 sk->sk_destruct = iucv_sock_destruct;
466 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 576 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
591 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 701 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
592 struct sock *sk = sock->sk; 702 struct sock *sk = sock->sk;
593 struct iucv_sock *iucv; 703 struct iucv_sock *iucv;
594 int err; 704 int err = 0;
705 struct net_device *dev;
706 char uid[9];
595 707
596 /* Verify the input sockaddr */ 708 /* Verify the input sockaddr */
597 if (!addr || addr->sa_family != AF_IUCV) 709 if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
610 err = -EADDRINUSE; 722 err = -EADDRINUSE;
611 goto done_unlock; 723 goto done_unlock;
612 } 724 }
613 if (iucv->path) { 725 if (iucv->path)
614 err = 0;
615 goto done_unlock; 726 goto done_unlock;
616 }
617 727
618 /* Bind the socket */ 728 /* Bind the socket */
619 memcpy(iucv->src_name, sa->siucv_name, 8);
620 729
621 /* Copy the user id */ 730 if (pr_iucv)
622 memcpy(iucv->src_user_id, iucv_userid, 8); 731 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
623 sk->sk_state = IUCV_BOUND; 732 goto vm_bind; /* VM IUCV transport */
624 err = 0;
625 733
734 /* try hiper transport */
735 memcpy(uid, sa->siucv_user_id, sizeof(uid));
736 ASCEBC(uid, 8);
737 rcu_read_lock();
738 for_each_netdev_rcu(&init_net, dev) {
739 if (!memcmp(dev->perm_addr, uid, 8)) {
740 memcpy(iucv->src_name, sa->siucv_name, 8);
741 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
742 sock->sk->sk_bound_dev_if = dev->ifindex;
743 sk->sk_state = IUCV_BOUND;
744 iucv->transport = AF_IUCV_TRANS_HIPER;
745 if (!iucv->msglimit)
746 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
747 rcu_read_unlock();
748 goto done_unlock;
749 }
750 }
751 rcu_read_unlock();
752vm_bind:
753 if (pr_iucv) {
754 /* use local userid for backward compat */
755 memcpy(iucv->src_name, sa->siucv_name, 8);
756 memcpy(iucv->src_user_id, iucv_userid, 8);
757 sk->sk_state = IUCV_BOUND;
758 iucv->transport = AF_IUCV_TRANS_IUCV;
759 if (!iucv->msglimit)
760 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
761 goto done_unlock;
762 }
763 /* found no dev to bind */
764 err = -ENODEV;
626done_unlock: 765done_unlock:
627 /* Release the socket list lock */ 766 /* Release the socket list lock */
628 write_unlock_bh(&iucv_sk_list.lock); 767 write_unlock_bh(&iucv_sk_list.lock);
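bind() now resolves the transport: if the caller's user ID matches the local VM user ID (and the classic IUCV interface is registered), the VM IUCV path is taken; otherwise the EBCDIC-converted ID is matched against the permanent addresses of the available net devices and the socket is pinned to that HiperSockets interface. A self-contained userspace sketch of the bind call; the AF_IUCV value and sockaddr layout are copied from kernel headers of this era and should be verified before use:

	#include <string.h>
	#include <sys/socket.h>

	#define AF_IUCV 32			/* per linux/socket.h */

	struct sockaddr_iucv {
		sa_family_t    siucv_family;
		unsigned short siucv_port;	/* reserved */
		unsigned int   siucv_addr;	/* reserved */
		char           siucv_nodeid[8];	/* reserved */
		char           siucv_user_id[8];/* user ID, blank padded */
		char           siucv_name[8];	/* application name, blank padded */
	};

	int main(void)
	{
		struct sockaddr_iucv sa;
		int fd = socket(AF_IUCV, SOCK_STREAM, 0);

		memset(&sa, 0, sizeof(sa));
		sa.siucv_family = AF_IUCV;
		memcpy(sa.siucv_user_id, "LNXGUEST", 8);	/* illustrative IDs */
		memcpy(sa.siucv_name,    "MYSRV   ", 8);
		return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	}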
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
658 797
659 memcpy(&iucv->src_name, name, 8); 798 memcpy(&iucv->src_name, name, 8);
660 799
800 if (!iucv->msglimit)
801 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
802
661 return err; 803 return err;
662} 804}
663 805
664/* Connect an unconnected socket */ 806static int afiucv_hs_connect(struct socket *sock)
665static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
666 int alen, int flags)
667{ 807{
668 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
669 struct sock *sk = sock->sk; 808 struct sock *sk = sock->sk;
670 struct iucv_sock *iucv; 809 struct sk_buff *skb;
671 unsigned char user_data[16]; 810 int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
672 int err; 811 int err = 0;
673
674 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
675 return -EINVAL;
676
677 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
678 return -EBADFD;
679
680 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
681 return -EINVAL;
682 812
683 if (sk->sk_state == IUCV_OPEN) { 813 /* send syn */
684 err = iucv_sock_autobind(sk); 814 skb = sock_alloc_send_skb(sk, blen, 1, &err);
685 if (unlikely(err)) 815 if (!skb) {
686 return err; 816 err = -ENOMEM;
817 goto done;
687 } 818 }
819 skb->dev = NULL;
820 skb_reserve(skb, blen);
821 err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
822done:
823 return err;
824}
688 825
689 lock_sock(sk); 826static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
690 827{
691 /* Set the destination information */ 828 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
692 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); 829 struct sock *sk = sock->sk;
693 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); 830 struct iucv_sock *iucv = iucv_sk(sk);
831 unsigned char user_data[16];
832 int err;
694 833
695 high_nmcpy(user_data, sa->siucv_name); 834 high_nmcpy(user_data, sa->siucv_name);
696 low_nmcpy(user_data, iucv_sk(sk)->src_name); 835 low_nmcpy(user_data, iucv->src_name);
697 ASCEBC(user_data, sizeof(user_data)); 836 ASCEBC(user_data, sizeof(user_data));
698 837
699 iucv = iucv_sk(sk);
700 /* Create path. */ 838 /* Create path. */
701 iucv->path = iucv_path_alloc(iucv->msglimit, 839 iucv->path = iucv_path_alloc(iucv->msglimit,
702 IUCV_IPRMDATA, GFP_KERNEL); 840 IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
704 err = -ENOMEM; 842 err = -ENOMEM;
705 goto done; 843 goto done;
706 } 844 }
707 err = iucv_path_connect(iucv->path, &af_iucv_handler, 845 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
708 sa->siucv_user_id, NULL, user_data, sk); 846 sa->siucv_user_id, NULL, user_data,
847 sk);
709 if (err) { 848 if (err) {
710 iucv_path_free(iucv->path); 849 iucv_path_free(iucv->path);
711 iucv->path = NULL; 850 iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
724 err = -ECONNREFUSED; 863 err = -ECONNREFUSED;
725 break; 864 break;
726 } 865 }
727 goto done;
728 } 866 }
867done:
868 return err;
869}
729 870
730 if (sk->sk_state != IUCV_CONNECTED) { 871/* Connect an unconnected socket */
872static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
873 int alen, int flags)
874{
875 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
876 struct sock *sk = sock->sk;
877 struct iucv_sock *iucv = iucv_sk(sk);
878 int err;
879
880 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
881 return -EINVAL;
882
883 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
884 return -EBADFD;
885
886 if (sk->sk_state == IUCV_OPEN &&
887 iucv->transport == AF_IUCV_TRANS_HIPER)
888 return -EBADFD; /* explicit bind required */
889
890 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
891 return -EINVAL;
892
893 if (sk->sk_state == IUCV_OPEN) {
894 err = iucv_sock_autobind(sk);
895 if (unlikely(err))
896 return err;
897 }
898
899 lock_sock(sk);
900
901 /* Set the destination information */
902 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
903 memcpy(iucv->dst_name, sa->siucv_name, 8);
904
905 if (iucv->transport == AF_IUCV_TRANS_HIPER)
906 err = afiucv_hs_connect(sock);
907 else
908 err = afiucv_path_connect(sock, addr);
909 if (err)
910 goto done;
911
912 if (sk->sk_state != IUCV_CONNECTED)
731 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, 913 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
732 IUCV_DISCONN), 914 IUCV_DISCONN),
733 sock_sndtimeo(sk, flags & O_NONBLOCK)); 915 sock_sndtimeo(sk, flags & O_NONBLOCK));
734 }
735 916
736 if (sk->sk_state == IUCV_DISCONN) { 917 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
737 err = -ECONNREFUSED; 918 err = -ECONNREFUSED;
738 }
739 919
740 if (err) { 920 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
741 iucv_path_sever(iucv->path, NULL); 921 pr_iucv->path_sever(iucv->path, NULL);
742 iucv_path_free(iucv->path); 922 iucv_path_free(iucv->path);
743 iucv->path = NULL; 923 iucv->path = NULL;
744 } 924 }
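connect() is refactored into a dispatcher: HiperSockets sockets (which must be explicitly bound first, hence the new -EBADFD check for IUCV_OPEN) emit a SYN frame and wait for the state change driven by the synack callback, while classic sockets still build an IUCV path. A comment-only sketch of the handshake as implied by this diff:

	/*
	 * HiperSockets connect handshake (sketch):
	 *
	 *   client                              server (IUCV_LISTEN)
	 *   afiucv_hs_connect(): send SYN  -->  afiucv_hs_callback_syn():
	 *                                         alloc child sk, queue accept,
	 *                                         reply SYN|ACK (SYN|FIN = refuse)
	 *   afiucv_hs_callback_synack():   <--
	 *     take peer window, set IUCV_CONNECTED, wake connect()
	 */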
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
833{ 1013{
834 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; 1014 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
835 struct sock *sk = sock->sk; 1015 struct sock *sk = sock->sk;
1016 struct iucv_sock *iucv = iucv_sk(sk);
836 1017
837 addr->sa_family = AF_IUCV; 1018 addr->sa_family = AF_IUCV;
838 *len = sizeof(struct sockaddr_iucv); 1019 *len = sizeof(struct sockaddr_iucv);
839 1020
840 if (peer) { 1021 if (peer) {
841 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); 1022 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
842 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); 1023 memcpy(siucv->siucv_name, iucv->dst_name, 8);
843 } else { 1024 } else {
844 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); 1025 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
845 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8); 1026 memcpy(siucv->siucv_name, iucv->src_name, 8);
846 } 1027 }
847 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); 1028 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
848 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); 1029 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
849 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); 1030 memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
850 1031
851 return 0; 1032 return 0;
852} 1033}
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
871 1052
872 memcpy(prmdata, (void *) skb->data, skb->len); 1053 memcpy(prmdata, (void *) skb->data, skb->len);
873 prmdata[7] = 0xff - (u8) skb->len; 1054 prmdata[7] = 0xff - (u8) skb->len;
874 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0, 1055 return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
875 (void *) prmdata, 8); 1056 (void *) prmdata, 8);
876} 1057}
877 1058
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
960 * this is fine for SOCK_SEQPACKET (unless we want to support 1141 * this is fine for SOCK_SEQPACKET (unless we want to support
961 * segmented records using the MSG_EOR flag), but 1142 * segmented records using the MSG_EOR flag), but
962 * for SOCK_STREAM we might want to improve it in future */ 1143 * for SOCK_STREAM we might want to improve it in future */
963 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1144 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1145 skb = sock_alloc_send_skb(sk,
1146 len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1147 noblock, &err);
1148 else
1149 skb = sock_alloc_send_skb(sk, len, noblock, &err);
964 if (!skb) 1150 if (!skb)
965 goto out; 1151 goto out;
1152 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1153 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
966 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1154 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
967 err = -EFAULT; 1155 err = -EFAULT;
968 goto fail; 1156 goto fail;
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
983 /* increment and save iucv message tag for msg_completion cbk */ 1171 /* increment and save iucv message tag for msg_completion cbk */
984 txmsg.tag = iucv->send_tag++; 1172 txmsg.tag = iucv->send_tag++;
985 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1173 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1174 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1175 atomic_inc(&iucv->msg_sent);
1176 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1177 if (err) {
1178 atomic_dec(&iucv->msg_sent);
1179 goto fail;
1180 }
1181 goto release;
1182 }
986 skb_queue_tail(&iucv->send_skb_q, skb); 1183 skb_queue_tail(&iucv->send_skb_q, skb);
987 1184
988 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) 1185 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
999 /* this error should never happen since the 1196 /* this error should never happen since the
1000 * IUCV_IPRMDATA path flag is set... sever path */ 1197 * IUCV_IPRMDATA path flag is set... sever path */
1001 if (err == 0x15) { 1198 if (err == 0x15) {
1002 iucv_path_sever(iucv->path, NULL); 1199 pr_iucv->path_sever(iucv->path, NULL);
1003 skb_unlink(skb, &iucv->send_skb_q); 1200 skb_unlink(skb, &iucv->send_skb_q);
1004 err = -EPIPE; 1201 err = -EPIPE;
1005 goto fail; 1202 goto fail;
1006 } 1203 }
1007 } else 1204 } else
1008 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 1205 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1009 (void *) skb->data, skb->len); 1206 (void *) skb->data, skb->len);
1010 if (err) { 1207 if (err) {
1011 if (err == 3) { 1208 if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1023 goto fail; 1220 goto fail;
1024 } 1221 }
1025 1222
1223release:
1026 release_sock(sk); 1224 release_sock(sk);
1027 return len; 1225 return len;
1028 1226
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1095 skb->len = 0; 1293 skb->len = 0;
1096 } 1294 }
1097 } else { 1295 } else {
1098 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA, 1296 rc = pr_iucv->message_receive(path, msg,
1099 skb->data, len, NULL); 1297 msg->flags & IUCV_IPRMDATA,
1298 skb->data, len, NULL);
1100 if (rc) { 1299 if (rc) {
1101 kfree_skb(skb); 1300 kfree_skb(skb);
1102 return; 1301 return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1110 kfree_skb(skb); 1309 kfree_skb(skb);
1111 skb = NULL; 1310 skb = NULL;
1112 if (rc) { 1311 if (rc) {
1113 iucv_path_sever(path, NULL); 1312 pr_iucv->path_sever(path, NULL);
1114 return; 1313 return;
1115 } 1314 }
1116 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); 1315 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1154 struct sock *sk = sock->sk; 1353 struct sock *sk = sock->sk;
1155 struct iucv_sock *iucv = iucv_sk(sk); 1354 struct iucv_sock *iucv = iucv_sk(sk);
1156 unsigned int copied, rlen; 1355 unsigned int copied, rlen;
1157 struct sk_buff *skb, *rskb, *cskb; 1356 struct sk_buff *skb, *rskb, *cskb, *sskb;
1357 int blen;
1158 int err = 0; 1358 int err = 0;
1159 1359
1160 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && 1360 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1179 copied = min_t(unsigned int, rlen, len); 1379 copied = min_t(unsigned int, rlen, len);
1180 1380
1181 cskb = skb; 1381 cskb = skb;
1182 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 1382 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1183 if (!(flags & MSG_PEEK)) 1383 if (!(flags & MSG_PEEK))
1184 skb_queue_head(&sk->sk_receive_queue, skb); 1384 skb_queue_head(&sk->sk_receive_queue, skb);
1185 return -EFAULT; 1385 return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1217 } 1417 }
1218 1418
1219 kfree_skb(skb); 1419 kfree_skb(skb);
1420 atomic_inc(&iucv->msg_recv);
1220 1421
1221 /* Queue backlog skbs */ 1422 /* Queue backlog skbs */
1222 spin_lock_bh(&iucv->message_q.lock); 1423 spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1233 if (skb_queue_empty(&iucv->backlog_skb_q)) { 1434 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1234 if (!list_empty(&iucv->message_q.list)) 1435 if (!list_empty(&iucv->message_q.list))
1235 iucv_process_message_q(sk); 1436 iucv_process_message_q(sk);
1437 if (atomic_read(&iucv->msg_recv) >=
1438 iucv->msglimit / 2) {
1439 /* send WIN to peer */
1440 blen = sizeof(struct af_iucv_trans_hdr) +
1441 ETH_HLEN;
1442 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1443 if (sskb) {
1444 skb_reserve(sskb,
1445 sizeof(struct af_iucv_trans_hdr)
1446 + ETH_HLEN);
1447 err = afiucv_hs_send(NULL, sk, sskb,
1448 AF_IUCV_FLAG_WIN);
1449 }
1450 if (err) {
1451 sk->sk_state = IUCV_DISCONN;
1452 sk->sk_state_change(sk);
1453 }
1454 }
1236 } 1455 }
1237 spin_unlock_bh(&iucv->message_q.lock); 1456 spin_unlock_bh(&iucv->message_q.lock);
1238 } 1457 }
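Together with the msg_sent/msg_recv counters added earlier, this implements a credit-based window for the HiperSockets transport: the sender may have at most msglimit_peer unconfirmed messages outstanding, and the receiver counts consumed messages in msg_recv, pushing an AF_IUCV_FLAG_WIN frame once half its msglimit is consumed so the peer can subtract that many from msg_sent (see afiucv_hs_callback_win further down). A plain-C sketch of the receiver-side arithmetic, not kernel code:

	struct conn { int msg_recv, msglimit; };	/* illustrative */

	static void on_message_consumed(struct conn *c)
	{
		if (++c->msg_recv >= c->msglimit / 2) {
			/* WIN frame carries msg_recv; peer does msg_sent -= window */
			send_win_frame(c->msg_recv);	/* hypothetical helper */
			c->msg_recv = 0;
		}
	}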
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1327 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1546 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1328 txmsg.class = 0; 1547 txmsg.class = 0;
1329 txmsg.tag = 0; 1548 txmsg.tag = 0;
1330 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1549 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
1331 (void *) iprm_shutdown, 8); 1550 0, (void *) iprm_shutdown, 8);
1332 if (err) { 1551 if (err) {
1333 switch (err) { 1552 switch (err) {
1334 case 1: 1553 case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1345 } 1564 }
1346 1565
1347 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1566 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1348 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); 1567 err = pr_iucv->path_quiesce(iucv->path, NULL);
1349 if (err) 1568 if (err)
1350 err = -ENOTCONN; 1569 err = -ENOTCONN;
1351 1570
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
1372 1591
1373 /* Unregister with IUCV base support */ 1592 /* Unregister with IUCV base support */
1374 if (iucv_sk(sk)->path) { 1593 if (iucv_sk(sk)->path) {
1375 iucv_path_sever(iucv_sk(sk)->path, NULL); 1594 pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
1376 iucv_path_free(iucv_sk(sk)->path); 1595 iucv_path_free(iucv_sk(sk)->path);
1377 iucv_sk(sk)->path = NULL; 1596 iucv_sk(sk)->path = NULL;
1378 } 1597 }
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
1514 high_nmcpy(user_data, iucv->dst_name); 1733 high_nmcpy(user_data, iucv->dst_name);
1515 ASCEBC(user_data, sizeof(user_data)); 1734 ASCEBC(user_data, sizeof(user_data));
1516 if (sk->sk_state != IUCV_LISTEN) { 1735 if (sk->sk_state != IUCV_LISTEN) {
1517 err = iucv_path_sever(path, user_data); 1736 err = pr_iucv->path_sever(path, user_data);
1518 iucv_path_free(path); 1737 iucv_path_free(path);
1519 goto fail; 1738 goto fail;
1520 } 1739 }
1521 1740
1522 /* Check for backlog size */ 1741 /* Check for backlog size */
1523 if (sk_acceptq_is_full(sk)) { 1742 if (sk_acceptq_is_full(sk)) {
1524 err = iucv_path_sever(path, user_data); 1743 err = pr_iucv->path_sever(path, user_data);
1525 iucv_path_free(path); 1744 iucv_path_free(path);
1526 goto fail; 1745 goto fail;
1527 } 1746 }
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1529 /* Create the new socket */ 1748 /* Create the new socket */
1530 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); 1749 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1531 if (!nsk) { 1750 if (!nsk) {
1532 err = iucv_path_sever(path, user_data); 1751 err = pr_iucv->path_sever(path, user_data);
1533 iucv_path_free(path); 1752 iucv_path_free(path);
1534 goto fail; 1753 goto fail;
1535 } 1754 }
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1553 /* set message limit for path based on msglimit of accepting socket */ 1772 /* set message limit for path based on msglimit of accepting socket */
1554 niucv->msglimit = iucv->msglimit; 1773 niucv->msglimit = iucv->msglimit;
1555 path->msglim = iucv->msglimit; 1774 path->msglim = iucv->msglimit;
1556 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1775 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1557 if (err) { 1776 if (err) {
1558 err = iucv_path_sever(path, user_data); 1777 err = pr_iucv->path_sever(path, user_data);
1559 iucv_path_free(path); 1778 iucv_path_free(path);
1560 iucv_sock_kill(nsk); 1779 iucv_sock_kill(nsk);
1561 goto fail; 1780 goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1589 int len; 1808 int len;
1590 1809
1591 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1810 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1592 iucv_message_reject(path, msg); 1811 pr_iucv->message_reject(path, msg);
1593 return; 1812 return;
1594 } 1813 }
1595 1814
@@ -1600,7 +1819,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1600 goto save_message; 1819 goto save_message;
1601 1820
1602 len = atomic_read(&sk->sk_rmem_alloc); 1821 len = atomic_read(&sk->sk_rmem_alloc);
1603 len += iucv_msg_length(msg) + sizeof(struct sk_buff); 1822 len += SKB_TRUESIZE(iucv_msg_length(msg));
1604 if (len > sk->sk_rcvbuf) 1823 if (len > sk->sk_rcvbuf)
1605 goto save_message; 1824 goto save_message;
1606 1825
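The receive-buffer check above now charges SKB_TRUESIZE(len) instead of len plus a bare sizeof(struct sk_buff); the old form under-counted by the aligned shared-info block, making rcvbuf accounting optimistic. The macro as defined in <linux/skbuff.h> around this release, quoted from memory and worth verifying against the tree:

	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))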
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1692 bh_unlock_sock(sk); 1911 bh_unlock_sock(sk);
1693} 1912}
1694 1913
1914/***************** HiperSockets transport callbacks ********************/
1915static void afiucv_swap_src_dest(struct sk_buff *skb)
1916{
1917 struct af_iucv_trans_hdr *trans_hdr =
1918 (struct af_iucv_trans_hdr *)skb->data;
1919 char tmpID[8];
1920 char tmpName[8];
1921
1922 ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1923 ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1924 ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1925 ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1926 memcpy(tmpID, trans_hdr->srcUserID, 8);
1927 memcpy(tmpName, trans_hdr->srcAppName, 8);
1928 memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1929 memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1930 memcpy(trans_hdr->destUserID, tmpID, 8);
1931 memcpy(trans_hdr->destAppName, tmpName, 8);
1932 skb_push(skb, ETH_HLEN);
1933 memset(skb->data, 0, ETH_HLEN);
1934}
1935
1936/**
1937 * afiucv_hs_callback_syn - react on received SYN
1938 **/
1939static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1940{
1941 struct sock *nsk;
1942 struct iucv_sock *iucv, *niucv;
1943 struct af_iucv_trans_hdr *trans_hdr;
1944 int err;
1945
1946 iucv = iucv_sk(sk);
1947 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1948 if (!iucv) {
1949 /* no sock - connection refused */
1950 afiucv_swap_src_dest(skb);
1951 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1952 err = dev_queue_xmit(skb);
1953 goto out;
1954 }
1955
1956 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1957 bh_lock_sock(sk);
1958 if ((sk->sk_state != IUCV_LISTEN) ||
1959 sk_acceptq_is_full(sk) ||
1960 !nsk) {
1961 /* error on server socket - connection refused */
1962 if (nsk)
1963 sk_free(nsk);
1964 afiucv_swap_src_dest(skb);
1965 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1966 err = dev_queue_xmit(skb);
1967 bh_unlock_sock(sk);
1968 goto out;
1969 }
1970
1971 niucv = iucv_sk(nsk);
1972 iucv_sock_init(nsk, sk);
1973 niucv->transport = AF_IUCV_TRANS_HIPER;
1974 niucv->msglimit = iucv->msglimit;
1975 if (!trans_hdr->window)
1976 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1977 else
1978 niucv->msglimit_peer = trans_hdr->window;
1979 memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1980 memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1981 memcpy(niucv->src_name, iucv->src_name, 8);
1982 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1983 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1984 afiucv_swap_src_dest(skb);
1985 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1986 trans_hdr->window = niucv->msglimit;
1987 /* if receiver acks the xmit connection is established */
1988 err = dev_queue_xmit(skb);
1989 if (!err) {
1990 iucv_accept_enqueue(sk, nsk);
1991 nsk->sk_state = IUCV_CONNECTED;
1992 sk->sk_data_ready(sk, 1);
1993 } else
1994 iucv_sock_kill(nsk);
1995 bh_unlock_sock(sk);
1996
1997out:
1998 return NET_RX_SUCCESS;
1999}
2000
2001/**
2002 * afiucv_hs_callback_synack() - react on received SYN-ACK
2003 **/
2004static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2005{
2006 struct iucv_sock *iucv = iucv_sk(sk);
2007 struct af_iucv_trans_hdr *trans_hdr =
2008 (struct af_iucv_trans_hdr *)skb->data;
2009
2010 if (!iucv)
2011 goto out;
2012 if (sk->sk_state != IUCV_BOUND)
2013 goto out;
2014 bh_lock_sock(sk);
2015 iucv->msglimit_peer = trans_hdr->window;
2016 sk->sk_state = IUCV_CONNECTED;
2017 sk->sk_state_change(sk);
2018 bh_unlock_sock(sk);
2019out:
2020 kfree_skb(skb);
2021 return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_synfin() - react on received SYN_FIN
2026 **/
2027static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2028{
2029 struct iucv_sock *iucv = iucv_sk(sk);
2030
2031 if (!iucv)
2032 goto out;
2033 if (sk->sk_state != IUCV_BOUND)
2034 goto out;
2035 bh_lock_sock(sk);
2036 sk->sk_state = IUCV_DISCONN;
2037 sk->sk_state_change(sk);
2038 bh_unlock_sock(sk);
2039out:
2040 kfree_skb(skb);
2041 return NET_RX_SUCCESS;
2042}
2043
2044/**
2045 * afiucv_hs_callback_fin() - react on received FIN
2046 **/
2047static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2048{
2049 struct iucv_sock *iucv = iucv_sk(sk);
2050
2051 /* other end of connection closed */
2052 if (iucv) {
2053 bh_lock_sock(sk);
2054 if (!list_empty(&iucv->accept_q))
2055 sk->sk_state = IUCV_SEVERED;
2056 else
2057 sk->sk_state = IUCV_DISCONN;
2058 sk->sk_state_change(sk);
2059 bh_unlock_sock(sk);
2060 }
2061 kfree_skb(skb);
2062 return NET_RX_SUCCESS;
2063}
2064
2065/**
2066 * afiucv_hs_callback_win() - react on received WIN
2067 **/
2068static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2069{
2070 struct iucv_sock *iucv = iucv_sk(sk);
2071 struct af_iucv_trans_hdr *trans_hdr =
2072 (struct af_iucv_trans_hdr *)skb->data;
2073
2074 if (!iucv)
2075 return NET_RX_SUCCESS;
2076
2077 if (sk->sk_state != IUCV_CONNECTED)
2078 return NET_RX_SUCCESS;
2079
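	/* the peer has confirmed 'window' messages; release that much
	 * send credit and wake up blocked senders */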
2080 atomic_sub(trans_hdr->window, &iucv->msg_sent);
2081 iucv_sock_wake_msglim(sk);
2082 return NET_RX_SUCCESS;
2083}
2084
2085/**
2086 * afiucv_hs_callback_rx() - react to received data
2087 **/
2088static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2089{
2090 struct iucv_sock *iucv = iucv_sk(sk);
2091
2092 if (!iucv) {
2093 kfree_skb(skb);
2094 return NET_RX_SUCCESS;
2095 }
2096
2097 if (sk->sk_state != IUCV_CONNECTED) {
2098 kfree_skb(skb);
2099 return NET_RX_SUCCESS;
2100 }
2101
2102	/* drop frames that carry no payload beyond the transport header */
2103 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2104 kfree_skb(skb);
2105 return NET_RX_SUCCESS;
2106 }
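	/* strip the transport header so only user payload reaches the socket */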
2107 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2108 skb_reset_transport_header(skb);
2109 skb_reset_network_header(skb);
2110 spin_lock(&iucv->message_q.lock);
2111 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2112 if (sock_queue_rcv_skb(sk, skb)) {
2113 /* handle rcv queue full */
2114 skb_queue_tail(&iucv->backlog_skb_q, skb);
2115 }
2116 } else
2117		skb_queue_tail(&iucv->backlog_skb_q, skb);
2118 spin_unlock(&iucv->message_q.lock);
2119 return NET_RX_SUCCESS;
2120}
2121
2122/**
2123 * afiucv_hs_rcv() - receive handler for data arriving through the
2124 *	HiperSockets transport;
2125 *	called from the netif RX softirq
2126 **/
2127static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2128 struct packet_type *pt, struct net_device *orig_dev)
2129{
2130 struct hlist_node *node;
2131 struct sock *sk;
2132 struct iucv_sock *iucv;
2133 struct af_iucv_trans_hdr *trans_hdr;
2134 char nullstring[8];
2135 int err = 0;
2136
2137 skb_pull(skb, ETH_HLEN);
2138 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
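	/* names and IDs in the transport header are EBCDIC; convert them
	 * to ASCII before comparing against local socket addresses */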
2139 EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2140 EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2141 EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2142 EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2143 memset(nullstring, 0, sizeof(nullstring));
2144 iucv = NULL;
2145 sk = NULL;
2146 read_lock(&iucv_sk_list.lock);
2147 sk_for_each(sk, node, &iucv_sk_list.head) {
2148 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2149 if ((!memcmp(&iucv_sk(sk)->src_name,
2150 trans_hdr->destAppName, 8)) &&
2151 (!memcmp(&iucv_sk(sk)->src_user_id,
2152 trans_hdr->destUserID, 8)) &&
2153 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2154 (!memcmp(&iucv_sk(sk)->dst_user_id,
2155 nullstring, 8))) {
2156 iucv = iucv_sk(sk);
2157 break;
2158 }
2159 } else {
2160 if ((!memcmp(&iucv_sk(sk)->src_name,
2161 trans_hdr->destAppName, 8)) &&
2162 (!memcmp(&iucv_sk(sk)->src_user_id,
2163 trans_hdr->destUserID, 8)) &&
2164 (!memcmp(&iucv_sk(sk)->dst_name,
2165 trans_hdr->srcAppName, 8)) &&
2166 (!memcmp(&iucv_sk(sk)->dst_user_id,
2167 trans_hdr->srcUserID, 8))) {
2168 iucv = iucv_sk(sk);
2169 break;
2170 }
2171 }
2172 }
2173 read_unlock(&iucv_sk_list.lock);
2174 if (!iucv)
2175 sk = NULL;
2176
	/* no sock
	   how should we send with no sock?
	   1) send without a sock, with no send rc checking?
	   2) introduce a default sock to handle these cases

	   SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
	   data -> send FIN
	   SYN|ACK, SYN|FIN, FIN -> no action? */
2185
2186 switch (trans_hdr->flags) {
2187 case AF_IUCV_FLAG_SYN:
2188 /* connect request */
2189 err = afiucv_hs_callback_syn(sk, skb);
2190 break;
2191 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2192 /* connect request confirmed */
2193 err = afiucv_hs_callback_synack(sk, skb);
2194 break;
2195 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2196 /* connect request refused */
2197 err = afiucv_hs_callback_synfin(sk, skb);
2198 break;
2199 case (AF_IUCV_FLAG_FIN):
2200 /* close request */
2201 err = afiucv_hs_callback_fin(sk, skb);
2202 break;
2203 case (AF_IUCV_FLAG_WIN):
2204 err = afiucv_hs_callback_win(sk, skb);
2205 if (skb->len > sizeof(struct af_iucv_trans_hdr))
2206 err = afiucv_hs_callback_rx(sk, skb);
2207 else
2208			kfree_skb(skb);
2209 break;
2210 case 0:
2211 /* plain data frame */
2212 err = afiucv_hs_callback_rx(sk, skb);
2213 break;
2214 default:
2215 ;
2216 }
2217
2218 return err;
2219}
2220
2221/**
2222 * afiucv_hs_callback_txnotify() - handle send notifications from the
2223 *	HiperSockets transport
2224 **/
2225static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2226 enum iucv_tx_notify n)
2227{
2228 struct sock *isk = skb->sk;
2229 struct sock *sk = NULL;
2230 struct iucv_sock *iucv = NULL;
2231 struct sk_buff_head *list;
2232 struct sk_buff *list_skb;
2233 struct sk_buff *this = NULL;
2234 unsigned long flags;
2235 struct hlist_node *node;
2236
2237 read_lock(&iucv_sk_list.lock);
2238 sk_for_each(sk, node, &iucv_sk_list.head)
2239 if (sk == isk) {
2240 iucv = iucv_sk(sk);
2241 break;
2242 }
2243 read_unlock(&iucv_sk_list.lock);
2244
2245 if (!iucv)
2246 return;
2247
2248 bh_lock_sock(sk);
2249 list = &iucv->send_skb_q;
2250 list_skb = list->next;
2251 if (skb_queue_empty(list))
2252 goto out_unlock;
2253
2254 spin_lock_irqsave(&list->lock, flags);
2255 while (list_skb != (struct sk_buff *)list) {
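		/* a clone shares skb_shinfo() with its original, so this
		 * identifies the queued clone of the transmitted skb */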
2256 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2257 this = list_skb;
2258 switch (n) {
2259 case TX_NOTIFY_OK:
2260 __skb_unlink(this, list);
2261 iucv_sock_wake_msglim(sk);
2262 kfree_skb(this);
2263 break;
2264 case TX_NOTIFY_PENDING:
2265 atomic_inc(&iucv->pendings);
2266 break;
2267 case TX_NOTIFY_DELAYED_OK:
2268 __skb_unlink(this, list);
2269 atomic_dec(&iucv->pendings);
2270 if (atomic_read(&iucv->pendings) <= 0)
2271 iucv_sock_wake_msglim(sk);
2272 kfree_skb(this);
2273 break;
2274 case TX_NOTIFY_UNREACHABLE:
2275 case TX_NOTIFY_DELAYED_UNREACHABLE:
2276 case TX_NOTIFY_TPQFULL: /* not yet used */
2277 case TX_NOTIFY_GENERALERROR:
2278 case TX_NOTIFY_DELAYED_GENERALERROR:
2279 __skb_unlink(this, list);
2280 kfree_skb(this);
2281 if (!list_empty(&iucv->accept_q))
2282 sk->sk_state = IUCV_SEVERED;
2283 else
2284 sk->sk_state = IUCV_DISCONN;
2285 sk->sk_state_change(sk);
2286 break;
2287 }
2288 break;
2289 }
2290 list_skb = list_skb->next;
2291 }
2292 spin_unlock_irqrestore(&list->lock, flags);
2293
2294out_unlock:
2295 bh_unlock_sock(sk);
2296}
1695static const struct proto_ops iucv_sock_ops = { 2297static const struct proto_ops iucv_sock_ops = {
1696 .family = PF_IUCV, 2298 .family = PF_IUCV,
1697 .owner = THIS_MODULE, 2299 .owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
1718 .create = iucv_sock_create, 2320 .create = iucv_sock_create,
1719}; 2321};
1720 2322
1721static int __init afiucv_init(void) 2323static struct packet_type iucv_packet_type = {
2324 .type = cpu_to_be16(ETH_P_AF_IUCV),
2325 .func = afiucv_hs_rcv,
2326};
2327
2328static int afiucv_iucv_init(void)
1722{ 2329{
1723 int err; 2330 int err;
1724 2331
1725 if (!MACHINE_IS_VM) { 2332 err = pr_iucv->iucv_register(&af_iucv_handler, 0);
1726 pr_err("The af_iucv module cannot be loaded"
1727 " without z/VM\n");
1728 err = -EPROTONOSUPPORT;
1729 goto out;
1730 }
1731 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1732 if (unlikely(err)) {
1733 WARN_ON(err);
1734 err = -EPROTONOSUPPORT;
1735 goto out;
1736 }
1737
1738 err = iucv_register(&af_iucv_handler, 0);
1739 if (err) 2333 if (err)
1740 goto out; 2334 goto out;
1741 err = proto_register(&iucv_proto, 0);
1742 if (err)
1743 goto out_iucv;
1744 err = sock_register(&iucv_sock_family_ops);
1745 if (err)
1746 goto out_proto;
1747 /* establish dummy device */ 2335 /* establish dummy device */
2336 af_iucv_driver.bus = pr_iucv->bus;
1748 err = driver_register(&af_iucv_driver); 2337 err = driver_register(&af_iucv_driver);
1749 if (err) 2338 if (err)
1750 goto out_sock; 2339 goto out_iucv;
1751 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 2340 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 if (!af_iucv_dev) { 2341 if (!af_iucv_dev) {
1753 err = -ENOMEM; 2342 err = -ENOMEM;
1754 goto out_driver; 2343 goto out_driver;
1755 } 2344 }
1756 dev_set_name(af_iucv_dev, "af_iucv"); 2345 dev_set_name(af_iucv_dev, "af_iucv");
1757 af_iucv_dev->bus = &iucv_bus; 2346 af_iucv_dev->bus = pr_iucv->bus;
1758 af_iucv_dev->parent = iucv_root; 2347 af_iucv_dev->parent = pr_iucv->root;
1759 af_iucv_dev->release = (void (*)(struct device *))kfree; 2348 af_iucv_dev->release = (void (*)(struct device *))kfree;
1760 af_iucv_dev->driver = &af_iucv_driver; 2349 af_iucv_dev->driver = &af_iucv_driver;
1761 err = device_register(af_iucv_dev); 2350 err = device_register(af_iucv_dev);
1762 if (err) 2351 if (err)
1763 goto out_driver; 2352 goto out_driver;
1764
1765 return 0; 2353 return 0;
1766 2354
1767out_driver: 2355out_driver:
1768 driver_unregister(&af_iucv_driver); 2356 driver_unregister(&af_iucv_driver);
2357out_iucv:
2358 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2359out:
2360 return err;
2361}
2362
2363static int __init afiucv_init(void)
2364{
2365 int err;
2366
2367 if (MACHINE_IS_VM) {
2368 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2369 if (unlikely(err)) {
2370 WARN_ON(err);
2371 err = -EPROTONOSUPPORT;
2372 goto out;
2373 }
2374
2375 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2376 if (!pr_iucv) {
2377 printk(KERN_WARNING "iucv_if lookup failed\n");
2378 memset(&iucv_userid, 0, sizeof(iucv_userid));
2379 }
2380 } else {
2381 memset(&iucv_userid, 0, sizeof(iucv_userid));
2382 pr_iucv = NULL;
2383 }
2384
2385 err = proto_register(&iucv_proto, 0);
2386 if (err)
2387 goto out;
2388 err = sock_register(&iucv_sock_family_ops);
2389 if (err)
2390 goto out_proto;
2391
2392 if (pr_iucv) {
2393 err = afiucv_iucv_init();
2394 if (err)
2395 goto out_sock;
2396 }
2397 dev_add_pack(&iucv_packet_type);
2398 return 0;
2399
1769out_sock: 2400out_sock:
1770 sock_unregister(PF_IUCV); 2401 sock_unregister(PF_IUCV);
1771out_proto: 2402out_proto:
1772 proto_unregister(&iucv_proto); 2403 proto_unregister(&iucv_proto);
1773out_iucv:
1774 iucv_unregister(&af_iucv_handler, 0);
1775out: 2404out:
2405 if (pr_iucv)
2406 symbol_put(iucv_if);
1776 return err; 2407 return err;
1777} 2408}
1778 2409
1779static void __exit afiucv_exit(void) 2410static void __exit afiucv_exit(void)
1780{ 2411{
1781 device_unregister(af_iucv_dev); 2412 if (pr_iucv) {
1782 driver_unregister(&af_iucv_driver); 2413 device_unregister(af_iucv_dev);
2414 driver_unregister(&af_iucv_driver);
2415 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2416 symbol_put(iucv_if);
2417 }
2418 dev_remove_pack(&iucv_packet_type);
1783 sock_unregister(PF_IUCV); 2419 sock_unregister(PF_IUCV);
1784 proto_unregister(&iucv_proto); 2420 proto_unregister(&iucv_proto);
1785 iucv_unregister(&af_iucv_handler, 0);
1786} 2421}
1787 2422
1788module_init(afiucv_init); 2423module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1793MODULE_VERSION(VERSION); 2428MODULE_VERSION(VERSION);
1794MODULE_LICENSE("GPL"); 2429MODULE_LICENSE("GPL");
1795MODULE_ALIAS_NETPROTO(PF_IUCV); 2430MODULE_ALIAS_NETPROTO(PF_IUCV);
2431
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a3808aa40..403be43b793d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
1974 return rc; 1974 return rc;
1975} 1975}
1976 1976
1977struct iucv_interface iucv_if = {
1978 .message_receive = iucv_message_receive,
1979 .__message_receive = __iucv_message_receive,
1980 .message_reply = iucv_message_reply,
1981 .message_reject = iucv_message_reject,
1982 .message_send = iucv_message_send,
1983 .__message_send = __iucv_message_send,
1984 .message_send2way = iucv_message_send2way,
1985 .message_purge = iucv_message_purge,
1986 .path_accept = iucv_path_accept,
1987 .path_connect = iucv_path_connect,
1988 .path_quiesce = iucv_path_quiesce,
1989 .path_resume = iucv_path_resume,
1990 .path_sever = iucv_path_sever,
1991 .iucv_register = iucv_register,
1992 .iucv_unregister = iucv_unregister,
1993 .bus = NULL,
1994 .root = NULL,
1995};
1996EXPORT_SYMBOL(iucv_if);
1997
1977/** 1998/**
1978 * iucv_init 1999 * iucv_init
1979 * 2000 *
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
2038 rc = bus_register(&iucv_bus); 2059 rc = bus_register(&iucv_bus);
2039 if (rc) 2060 if (rc)
2040 goto out_reboot; 2061 goto out_reboot;
2062 iucv_if.root = iucv_root;
2063 iucv_if.bus = &iucv_bus;
2041 return 0; 2064 return 0;
2042 2065
2043out_reboot: 2066out_reboot:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 34b2ddeacb67..bf8d50c67931 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -397,6 +397,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
397 * expect to send up next, dequeue it and any other 397 * expect to send up next, dequeue it and any other
398 * in-sequence packets behind it. 398 * in-sequence packets behind it.
399 */ 399 */
400start:
400 spin_lock_bh(&session->reorder_q.lock); 401 spin_lock_bh(&session->reorder_q.lock);
401 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 402 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
402 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 403 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
@@ -433,7 +434,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
433 */ 434 */
434 spin_unlock_bh(&session->reorder_q.lock); 435 spin_unlock_bh(&session->reorder_q.lock);
435 l2tp_recv_dequeue_skb(session, skb); 436 l2tp_recv_dequeue_skb(session, skb);
436 spin_lock_bh(&session->reorder_q.lock); 437 goto start;
437 } 438 }
438 439
439out: 440out:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f42cd0915966..8a90d756c904 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -395,6 +395,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
395 struct pppol2tp_session *ps; 395 struct pppol2tp_session *ps;
396 int old_headroom; 396 int old_headroom;
397 int new_headroom; 397 int new_headroom;
398 int uhlen, headroom;
398 399
399 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 400 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
400 goto abort; 401 goto abort;
@@ -413,7 +414,13 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
413 goto abort_put_sess; 414 goto abort_put_sess;
414 415
415 old_headroom = skb_headroom(skb); 416 old_headroom = skb_headroom(skb);
416 if (skb_cow_head(skb, sizeof(ppph))) 417 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
418 headroom = NET_SKB_PAD +
419 sizeof(struct iphdr) + /* IP header */
420 uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
421 session->hdr_len + /* L2TP header */
422 sizeof(ppph); /* PPP header */
423 if (skb_cow_head(skb, headroom))
417 goto abort_put_sess_tun; 424 goto abort_put_sess_tun;
418 425
419 new_headroom = skb_headroom(skb); 426 new_headroom = skb_headroom(skb);
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 956b7e47dc52..8d0324bac01c 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -139,7 +139,8 @@ out:
139 return lapb; 139 return lapb;
140} 140}
141 141
142int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks) 142int lapb_register(struct net_device *dev,
143 const struct lapb_register_struct *callbacks)
143{ 144{
144 struct lapb_cb *lapb; 145 struct lapb_cb *lapb;
145 int rc = LAPB_BADTOKEN; 146 int rc = LAPB_BADTOKEN;
@@ -158,7 +159,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
158 goto out; 159 goto out;
159 160
160 lapb->dev = dev; 161 lapb->dev = dev;
161 lapb->callbacks = *callbacks; 162 lapb->callbacks = callbacks;
162 163
163 __lapb_insert_cb(lapb); 164 __lapb_insert_cb(lapb);
164 165
@@ -380,32 +381,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
380 381
381void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) 382void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
382{ 383{
383 if (lapb->callbacks.connect_confirmation) 384 if (lapb->callbacks->connect_confirmation)
384 lapb->callbacks.connect_confirmation(lapb->dev, reason); 385 lapb->callbacks->connect_confirmation(lapb->dev, reason);
385} 386}
386 387
387void lapb_connect_indication(struct lapb_cb *lapb, int reason) 388void lapb_connect_indication(struct lapb_cb *lapb, int reason)
388{ 389{
389 if (lapb->callbacks.connect_indication) 390 if (lapb->callbacks->connect_indication)
390 lapb->callbacks.connect_indication(lapb->dev, reason); 391 lapb->callbacks->connect_indication(lapb->dev, reason);
391} 392}
392 393
393void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) 394void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
394{ 395{
395 if (lapb->callbacks.disconnect_confirmation) 396 if (lapb->callbacks->disconnect_confirmation)
396 lapb->callbacks.disconnect_confirmation(lapb->dev, reason); 397 lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
397} 398}
398 399
399void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) 400void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
400{ 401{
401 if (lapb->callbacks.disconnect_indication) 402 if (lapb->callbacks->disconnect_indication)
402 lapb->callbacks.disconnect_indication(lapb->dev, reason); 403 lapb->callbacks->disconnect_indication(lapb->dev, reason);
403} 404}
404 405
405int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) 406int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
406{ 407{
407 if (lapb->callbacks.data_indication) 408 if (lapb->callbacks->data_indication)
408 return lapb->callbacks.data_indication(lapb->dev, skb); 409 return lapb->callbacks->data_indication(lapb->dev, skb);
409 410
410 kfree_skb(skb); 411 kfree_skb(skb);
411 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ 412 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
@@ -415,8 +416,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
415{ 416{
416 int used = 0; 417 int used = 0;
417 418
418 if (lapb->callbacks.data_transmit) { 419 if (lapb->callbacks->data_transmit) {
419 lapb->callbacks.data_transmit(lapb->dev, skb); 420 lapb->callbacks->data_transmit(lapb->dev, skb);
420 used = 1; 421 used = 1;
421 } 422 }
422 423
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 903242111317..e32cab44ea95 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/export.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/llc.h> 18#include <net/llc.h>
18#include <net/llc_pdu.h> 19#include <net/llc_pdu.h>
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b38a1079a98e..b658cba89fdd 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -18,6 +18,7 @@
18#include <linux/netdevice.h> 18#include <linux/netdevice.h>
19#include <linux/trdevice.h> 19#include <linux/trdevice.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/export.h>
21#include <net/llc.h> 22#include <net/llc.h>
22#include <net/llc_pdu.h> 23#include <net/llc_pdu.h>
23 24
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 7af1ff2d1f19..a1839c004357 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -17,6 +17,7 @@
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/export.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include <net/sock.h> 22#include <net/sock.h>
22#include <net/llc.h> 23#include <net/llc.h>
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f5fdfcbf552a..7d3b438755f0 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -199,6 +199,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
199 199
200 Do not select this option. 200 Do not select this option.
201 201
202config MAC80211_VERBOSE_MPATH_DEBUG
203 bool "Verbose mesh path debugging"
204 depends on MAC80211_DEBUG_MENU
205 depends on MAC80211_MESH
206 ---help---
207 Selecting this option causes mac80211 to print out very
208 verbose mesh path selection debugging messages (when mac80211
209 is taking part in a mesh network).
210 It should not be selected on production systems as those
211 messages are remotely triggerable.
212
213 Do not select this option.
214
202config MAC80211_VERBOSE_MHWMP_DEBUG 215config MAC80211_VERBOSE_MHWMP_DEBUG
203 bool "Verbose mesh HWMP routing debugging" 216 bool "Verbose mesh HWMP routing debugging"
204 depends on MAC80211_DEBUG_MENU 217 depends on MAC80211_DEBUG_MENU
@@ -212,6 +225,18 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
212 225
213 Do not select this option. 226 Do not select this option.
214 227
228config MAC80211_VERBOSE_TDLS_DEBUG
229 bool "Verbose TDLS debugging"
230 depends on MAC80211_DEBUG_MENU
231 ---help---
232 Selecting this option causes mac80211 to print out very
233 verbose TDLS selection debugging messages (when mac80211
234 is a TDLS STA).
235 It should not be selected on production systems as those
236 messages are remotely triggerable.
237
238 Do not select this option.
239
215config MAC80211_DEBUG_COUNTERS 240config MAC80211_DEBUG_COUNTERS
216 bool "Extra statistics for TX/RX debugging" 241 bool "Extra statistics for TX/RX debugging"
217 depends on MAC80211_DEBUG_MENU 242 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index fd1aaf2a4a6c..93b243422659 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/ieee80211.h> 39#include <linux/ieee80211.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/export.h>
41#include <net/mac80211.h> 42#include <net/mac80211.h>
42#include "ieee80211_i.h" 43#include "ieee80211_i.h"
43#include "driver-ops.h" 44#include "driver-ops.h"
@@ -69,7 +70,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
69 if (!tid_rx) 70 if (!tid_rx)
70 return; 71 return;
71 72
72 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); 73 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
73 74
74#ifdef CONFIG_MAC80211_HT_DEBUG 75#ifdef CONFIG_MAC80211_HT_DEBUG
75 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 76 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -167,12 +168,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
167 u16 capab; 168 u16 capab;
168 169
169 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 170 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
170 171 if (!skb)
171 if (!skb) {
172 printk(KERN_DEBUG "%s: failed to allocate buffer "
173 "for addba resp frame\n", sdata->name);
174 return; 172 return;
175 }
176 173
177 skb_reserve(skb, local->hw.extra_tx_headroom); 174 skb_reserve(skb, local->hw.extra_tx_headroom);
178 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 175 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -227,7 +224,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
227 224
228 status = WLAN_STATUS_REQUEST_DECLINED; 225 status = WLAN_STATUS_REQUEST_DECLINED;
229 226
230 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { 227 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
231#ifdef CONFIG_MAC80211_HT_DEBUG 228#ifdef CONFIG_MAC80211_HT_DEBUG
232 printk(KERN_DEBUG "Suspend in progress. " 229 printk(KERN_DEBUG "Suspend in progress. "
233 "Denying ADDBA request\n"); 230 "Denying ADDBA request\n");
@@ -279,14 +276,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
279 276
280 /* prepare A-MPDU MLME for Rx aggregation */ 277 /* prepare A-MPDU MLME for Rx aggregation */
281 tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); 278 tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
282 if (!tid_agg_rx) { 279 if (!tid_agg_rx)
283#ifdef CONFIG_MAC80211_HT_DEBUG
284 if (net_ratelimit())
285 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
286 tid);
287#endif
288 goto end; 280 goto end;
289 }
290 281
291 spin_lock_init(&tid_agg_rx->reorder_lock); 282 spin_lock_init(&tid_agg_rx->reorder_lock);
292 283
@@ -306,11 +297,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
306 tid_agg_rx->reorder_time = 297 tid_agg_rx->reorder_time =
307 kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); 298 kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
308 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { 299 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
309#ifdef CONFIG_MAC80211_HT_DEBUG
310 if (net_ratelimit())
311 printk(KERN_ERR "can not allocate reordering buffer "
312 "to tid %d\n", tid);
313#endif
314 kfree(tid_agg_rx->reorder_buf); 300 kfree(tid_agg_rx->reorder_buf);
315 kfree(tid_agg_rx->reorder_time); 301 kfree(tid_agg_rx->reorder_time);
316 kfree(tid_agg_rx); 302 kfree(tid_agg_rx);
@@ -340,7 +326,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
340 status = WLAN_STATUS_SUCCESS; 326 status = WLAN_STATUS_SUCCESS;
341 327
342 /* activate it for RX */ 328 /* activate it for RX */
343 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); 329 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
344 330
345 if (timeout) 331 if (timeout)
346 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); 332 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8be8eff70da..b3f65520e7a7 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/export.h>
18#include <net/mac80211.h> 19#include <net/mac80211.h>
19#include "ieee80211_i.h" 20#include "ieee80211_i.h"
20#include "driver-ops.h" 21#include "driver-ops.h"
@@ -68,11 +69,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
68 69
69 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 70 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
70 71
71 if (!skb) { 72 if (!skb)
72 printk(KERN_ERR "%s: failed to allocate buffer "
73 "for addba request frame\n", sdata->name);
74 return; 73 return;
75 } 74
76 skb_reserve(skb, local->hw.extra_tx_headroom); 75 skb_reserve(skb, local->hw.extra_tx_headroom);
77 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 76 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
78 memset(mgmt, 0, 24); 77 memset(mgmt, 0, 24);
@@ -106,19 +105,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
106 ieee80211_tx_skb(sdata, skb); 105 ieee80211_tx_skb(sdata, skb);
107} 106}
108 107
109void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) 108void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
110{ 109{
110 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
111 struct ieee80211_local *local = sdata->local; 111 struct ieee80211_local *local = sdata->local;
112 struct sk_buff *skb; 112 struct sk_buff *skb;
113 struct ieee80211_bar *bar; 113 struct ieee80211_bar *bar;
114 u16 bar_control = 0; 114 u16 bar_control = 0;
115 115
116 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 116 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
117 if (!skb) { 117 if (!skb)
118 printk(KERN_ERR "%s: failed to allocate buffer for "
119 "bar frame\n", sdata->name);
120 return; 118 return;
121 } 119
122 skb_reserve(skb, local->hw.extra_tx_headroom); 120 skb_reserve(skb, local->hw.extra_tx_headroom);
123 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); 121 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
124 memset(bar, 0, sizeof(*bar)); 122 memset(bar, 0, sizeof(*bar));
@@ -128,13 +126,14 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
128 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN); 126 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
129 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 127 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
130 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 128 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
131 bar_control |= (u16)(tid << 12); 129 bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
132 bar->control = cpu_to_le16(bar_control); 130 bar->control = cpu_to_le16(bar_control);
133 bar->start_seq_num = cpu_to_le16(ssn); 131 bar->start_seq_num = cpu_to_le16(ssn);
134 132
135 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 133 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
136 ieee80211_tx_skb(sdata, skb); 134 ieee80211_tx_skb(sdata, skb);
137} 135}
136EXPORT_SYMBOL(ieee80211_send_bar);
138 137
139void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, 138void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
140 struct tid_ampdu_tx *tid_tx) 139 struct tid_ampdu_tx *tid_tx)
@@ -364,7 +363,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
364 return -EINVAL; 363 return -EINVAL;
365 364
366 if ((tid >= STA_TID_NUM) || 365 if ((tid >= STA_TID_NUM) ||
367 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) 366 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
367 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
368 return -EINVAL; 368 return -EINVAL;
369 369
370#ifdef CONFIG_MAC80211_HT_DEBUG 370#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -383,7 +383,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
383 sdata->vif.type != NL80211_IFTYPE_AP) 383 sdata->vif.type != NL80211_IFTYPE_AP)
384 return -EINVAL; 384 return -EINVAL;
385 385
386 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { 386 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
387#ifdef CONFIG_MAC80211_HT_DEBUG 387#ifdef CONFIG_MAC80211_HT_DEBUG
388 printk(KERN_DEBUG "BA sessions blocked. " 388 printk(KERN_DEBUG "BA sessions blocked. "
389 "Denying BA session request\n"); 389 "Denying BA session request\n");
@@ -413,11 +413,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
413 /* prepare A-MPDU MLME for Tx aggregation */ 413 /* prepare A-MPDU MLME for Tx aggregation */
414 tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); 414 tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
415 if (!tid_tx) { 415 if (!tid_tx) {
416#ifdef CONFIG_MAC80211_HT_DEBUG
417 if (net_ratelimit())
418 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
419 tid);
420#endif
421 ret = -ENOMEM; 416 ret = -ENOMEM;
422 goto err_unlock_sta; 417 goto err_unlock_sta;
423 } 418 }
@@ -574,14 +569,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
574 struct ieee80211_ra_tid *ra_tid; 569 struct ieee80211_ra_tid *ra_tid;
575 struct sk_buff *skb = dev_alloc_skb(0); 570 struct sk_buff *skb = dev_alloc_skb(0);
576 571
577 if (unlikely(!skb)) { 572 if (unlikely(!skb))
578#ifdef CONFIG_MAC80211_HT_DEBUG
579 if (net_ratelimit())
580 printk(KERN_WARNING "%s: Not enough memory, "
581 "dropping start BA session", sdata->name);
582#endif
583 return; 573 return;
584 } 574
585 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 575 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
586 memcpy(&ra_tid->ra, ra, ETH_ALEN); 576 memcpy(&ra_tid->ra, ra, ETH_ALEN);
587 ra_tid->tid = tid; 577 ra_tid->tid = tid;
@@ -727,14 +717,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
727 struct ieee80211_ra_tid *ra_tid; 717 struct ieee80211_ra_tid *ra_tid;
728 struct sk_buff *skb = dev_alloc_skb(0); 718 struct sk_buff *skb = dev_alloc_skb(0);
729 719
730 if (unlikely(!skb)) { 720 if (unlikely(!skb))
731#ifdef CONFIG_MAC80211_HT_DEBUG
732 if (net_ratelimit())
733 printk(KERN_WARNING "%s: Not enough memory, "
734 "dropping stop BA session", sdata->name);
735#endif
736 return; 721 return;
737 } 722
738 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 723 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
739 memcpy(&ra_tid->ra, ra, ETH_ALEN); 724 memcpy(&ra_tid->ra, ra, ETH_ALEN);
740 ra_tid->tid = tid; 725 ra_tid->tid = tid;
@@ -777,18 +762,14 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
777#ifdef CONFIG_MAC80211_HT_DEBUG 762#ifdef CONFIG_MAC80211_HT_DEBUG
778 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); 763 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
779#endif 764#endif
780 765 /*
766 * IEEE 802.11-2007 7.3.1.14:
767 * In an ADDBA Response frame, when the Status Code field
768 * is set to 0, the Buffer Size subfield is set to a value
769 * of at least 1.
770 */
781 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 771 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
782 == WLAN_STATUS_SUCCESS) { 772 == WLAN_STATUS_SUCCESS && buf_size) {
783 /*
784 * IEEE 802.11-2007 7.3.1.14:
785 * In an ADDBA Response frame, when the Status Code field
786 * is set to 0, the Buffer Size subfield is set to a value
787 * of at least 1.
788 */
789 if (!buf_size)
790 goto out;
791
792 if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, 773 if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
793 &tid_tx->state)) { 774 &tid_tx->state)) {
794 /* ignore duplicate response */ 775 /* ignore duplicate response */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 3d1b091d9b2e..d06c65fa5526 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -12,6 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <net/net_namespace.h> 13#include <net/net_namespace.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/if_ether.h>
15#include <net/cfg80211.h> 16#include <net/cfg80211.h>
16#include "ieee80211_i.h" 17#include "ieee80211_i.h"
17#include "driver-ops.h" 18#include "driver-ops.h"
@@ -62,7 +63,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
62 63
63 if (type == NL80211_IFTYPE_AP_VLAN && 64 if (type == NL80211_IFTYPE_AP_VLAN &&
64 params && params->use_4addr == 0) 65 params && params->use_4addr == 0)
65 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 66 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
66 else if (type == NL80211_IFTYPE_STATION && 67 else if (type == NL80211_IFTYPE_STATION &&
67 params && params->use_4addr >= 0) 68 params && params->use_4addr >= 0)
68 sdata->u.mgd.use_4addr = params->use_4addr; 69 sdata->u.mgd.use_4addr = params->use_4addr;
@@ -343,7 +344,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
343 STATION_INFO_RX_BITRATE | 344 STATION_INFO_RX_BITRATE |
344 STATION_INFO_RX_DROP_MISC | 345 STATION_INFO_RX_DROP_MISC |
345 STATION_INFO_BSS_PARAM | 346 STATION_INFO_BSS_PARAM |
346 STATION_INFO_CONNECTED_TIME; 347 STATION_INFO_CONNECTED_TIME |
348 STATION_INFO_STA_FLAGS;
347 349
348 do_posix_clock_monotonic_gettime(&uptime); 350 do_posix_clock_monotonic_gettime(&uptime);
349 sinfo->connected_time = uptime.tv_sec - sta->last_connected; 351 sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -403,6 +405,23 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
403 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 405 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
404 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; 406 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
405 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 407 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
408
409 sinfo->sta_flags.set = 0;
410 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
411 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
412 BIT(NL80211_STA_FLAG_WME) |
413 BIT(NL80211_STA_FLAG_MFP) |
414 BIT(NL80211_STA_FLAG_AUTHENTICATED);
415 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
416 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
417 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
418 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
419 if (test_sta_flag(sta, WLAN_STA_WME))
420 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
421 if (test_sta_flag(sta, WLAN_STA_MFP))
422 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
423 if (test_sta_flag(sta, WLAN_STA_AUTH))
424 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
406} 425}
407 426
408 427
@@ -455,6 +474,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
455 return ret; 474 return ret;
456} 475}
457 476
477static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
478 struct beacon_parameters *params)
479{
480 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
481
482 bss_conf->ssid_len = params->ssid_len;
483
484 if (params->ssid_len)
485 memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
486
487 bss_conf->hidden_ssid =
488 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
489}
490
458/* 491/*
459 * This handles both adding a beacon and setting new beacon info 492 * This handles both adding a beacon and setting new beacon info
460 */ 493 */
@@ -542,14 +575,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
542 575
543 sdata->vif.bss_conf.dtim_period = new->dtim_period; 576 sdata->vif.bss_conf.dtim_period = new->dtim_period;
544 577
545 rcu_assign_pointer(sdata->u.ap.beacon, new); 578 RCU_INIT_POINTER(sdata->u.ap.beacon, new);
546 579
547 synchronize_rcu(); 580 synchronize_rcu();
548 581
549 kfree(old); 582 kfree(old);
550 583
584 ieee80211_config_ap_ssid(sdata, params);
585
551 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 586 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
552 BSS_CHANGED_BEACON); 587 BSS_CHANGED_BEACON |
588 BSS_CHANGED_SSID);
553 return 0; 589 return 0;
554} 590}
555 591
@@ -594,7 +630,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
594 if (!old) 630 if (!old)
595 return -ENOENT; 631 return -ENOENT;
596 632
597 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 633 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
598 synchronize_rcu(); 634 synchronize_rcu();
599 kfree(old); 635 kfree(old);
600 636
@@ -650,7 +686,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
650 struct sta_info *sta, 686 struct sta_info *sta,
651 struct station_parameters *params) 687 struct station_parameters *params)
652{ 688{
653 unsigned long flags;
654 u32 rates; 689 u32 rates;
655 int i, j; 690 int i, j;
656 struct ieee80211_supported_band *sband; 691 struct ieee80211_supported_band *sband;
@@ -659,43 +694,58 @@ static void sta_apply_parameters(struct ieee80211_local *local,
659 694
660 sband = local->hw.wiphy->bands[local->oper_channel->band]; 695 sband = local->hw.wiphy->bands[local->oper_channel->band];
661 696
662 spin_lock_irqsave(&sta->flaglock, flags);
663 mask = params->sta_flags_mask; 697 mask = params->sta_flags_mask;
664 set = params->sta_flags_set; 698 set = params->sta_flags_set;
665 699
666 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { 700 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
667 sta->flags &= ~WLAN_STA_AUTHORIZED;
668 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 701 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
669 sta->flags |= WLAN_STA_AUTHORIZED; 702 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
703 else
704 clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
670 } 705 }
671 706
672 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { 707 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
673 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
674 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) 708 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
675 sta->flags |= WLAN_STA_SHORT_PREAMBLE; 709 set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
710 else
711 clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
676 } 712 }
677 713
678 if (mask & BIT(NL80211_STA_FLAG_WME)) { 714 if (mask & BIT(NL80211_STA_FLAG_WME)) {
679 sta->flags &= ~WLAN_STA_WME;
680 sta->sta.wme = false;
681 if (set & BIT(NL80211_STA_FLAG_WME)) { 715 if (set & BIT(NL80211_STA_FLAG_WME)) {
682 sta->flags |= WLAN_STA_WME; 716 set_sta_flag(sta, WLAN_STA_WME);
683 sta->sta.wme = true; 717 sta->sta.wme = true;
718 } else {
719 clear_sta_flag(sta, WLAN_STA_WME);
720 sta->sta.wme = false;
684 } 721 }
685 } 722 }
686 723
687 if (mask & BIT(NL80211_STA_FLAG_MFP)) { 724 if (mask & BIT(NL80211_STA_FLAG_MFP)) {
688 sta->flags &= ~WLAN_STA_MFP;
689 if (set & BIT(NL80211_STA_FLAG_MFP)) 725 if (set & BIT(NL80211_STA_FLAG_MFP))
690 sta->flags |= WLAN_STA_MFP; 726 set_sta_flag(sta, WLAN_STA_MFP);
727 else
728 clear_sta_flag(sta, WLAN_STA_MFP);
691 } 729 }
692 730
693 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { 731 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
694 sta->flags &= ~WLAN_STA_AUTH;
695 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) 732 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
696 sta->flags |= WLAN_STA_AUTH; 733 set_sta_flag(sta, WLAN_STA_AUTH);
734 else
735 clear_sta_flag(sta, WLAN_STA_AUTH);
736 }
737
738 if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
739 if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
740 set_sta_flag(sta, WLAN_STA_TDLS_PEER);
741 else
742 clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
743 }
744
745 if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
746 sta->sta.uapsd_queues = params->uapsd_queues;
747 sta->sta.max_sp = params->max_sp;
697 } 748 }
698 spin_unlock_irqrestore(&sta->flaglock, flags);
699 749
700 /* 750 /*
701 * cfg80211 validates this (1-2007) and allows setting the AID 751 * cfg80211 validates this (1-2007) and allows setting the AID
@@ -782,11 +832,18 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
782 if (is_multicast_ether_addr(mac)) 832 if (is_multicast_ether_addr(mac))
783 return -EINVAL; 833 return -EINVAL;
784 834
835 /* Only TDLS-supporting stations can add TDLS peers */
836 if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
837 !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
838 sdata->vif.type == NL80211_IFTYPE_STATION))
839 return -ENOTSUPP;
840
785 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 841 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
786 if (!sta) 842 if (!sta)
787 return -ENOMEM; 843 return -ENOMEM;
788 844
789 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; 845 set_sta_flag(sta, WLAN_STA_AUTH);
846 set_sta_flag(sta, WLAN_STA_ASSOC);
790 847
791 sta_apply_parameters(local, sta, params); 848 sta_apply_parameters(local, sta, params);
792 849
@@ -842,6 +899,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
842 return -ENOENT; 899 return -ENOENT;
843 } 900 }
844 901
902 /* The TDLS bit cannot be toggled after the STA was added */
903 if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
904 !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
905 !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
906 rcu_read_unlock();
907 return -EINVAL;
908 }
909
845 if (params->vlan && params->vlan != sta->sdata->dev) { 910 if (params->vlan && params->vlan != sta->sdata->dev) {
846 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 911 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
847 912
@@ -857,7 +922,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
857 return -EBUSY; 922 return -EBUSY;
858 } 923 }
859 924
860 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 925 RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
861 } 926 }
862 927
863 sta->sdata = vlansdata; 928 sta->sdata = vlansdata;
@@ -918,7 +983,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
918 if (dst) 983 if (dst)
919 return mesh_path_del(dst, sdata); 984 return mesh_path_del(dst, sdata);
920 985
921 mesh_path_flush(sdata); 986 mesh_path_flush_by_iface(sdata);
922 return 0; 987 return 0;
923} 988}
924 989
@@ -1137,6 +1202,22 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1137 conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; 1202 conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
1138 ieee80211_mesh_root_setup(ifmsh); 1203 ieee80211_mesh_root_setup(ifmsh);
1139 } 1204 }
1205 if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) {
1206 /* our current gate announcement implementation rides on root
1207 * announcements, so require this ifmsh to also be a root node
1208 * */
1209 if (nconf->dot11MeshGateAnnouncementProtocol &&
1210 !conf->dot11MeshHWMPRootMode) {
1211 conf->dot11MeshHWMPRootMode = 1;
1212 ieee80211_mesh_root_setup(ifmsh);
1213 }
1214 conf->dot11MeshGateAnnouncementProtocol =
1215 nconf->dot11MeshGateAnnouncementProtocol;
1216 }
1217 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
1218 conf->dot11MeshHWMPRannInterval =
1219 nconf->dot11MeshHWMPRannInterval;
1220 }
1140 return 0; 1221 return 0;
1141} 1222}
1142 1223
@@ -1235,9 +1316,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1235} 1316}
1236 1317
1237static int ieee80211_set_txq_params(struct wiphy *wiphy, 1318static int ieee80211_set_txq_params(struct wiphy *wiphy,
1319 struct net_device *dev,
1238 struct ieee80211_txq_params *params) 1320 struct ieee80211_txq_params *params)
1239{ 1321{
1240 struct ieee80211_local *local = wiphy_priv(wiphy); 1322 struct ieee80211_local *local = wiphy_priv(wiphy);
1323 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1241 struct ieee80211_tx_queue_params p; 1324 struct ieee80211_tx_queue_params p;
1242 1325
1243 if (!local->ops->conf_tx) 1326 if (!local->ops->conf_tx)
@@ -1258,8 +1341,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1258 if (params->queue >= local->hw.queues) 1341 if (params->queue >= local->hw.queues)
1259 return -EINVAL; 1342 return -EINVAL;
1260 1343
1261 local->tx_conf[params->queue] = p; 1344 sdata->tx_conf[params->queue] = p;
1262 if (drv_conf_tx(local, params->queue, &p)) { 1345 if (drv_conf_tx(local, sdata, params->queue, &p)) {
1263 wiphy_debug(local->hw.wiphy, 1346 wiphy_debug(local->hw.wiphy,
1264 "failed to set TX queue parameters for queue %d\n", 1347 "failed to set TX queue parameters for queue %d\n",
1265 params->queue); 1348 params->queue);
@@ -1821,7 +1904,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
1821 * so in that case userspace will have to deal with it. 1904 * so in that case userspace will have to deal with it.
1822 */ 1905 */
1823 1906
1824 if (wk->offchan_tx.wait && wk->offchan_tx.frame) 1907 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
1825 cfg80211_mgmt_tx_status(wk->sdata->dev, 1908 cfg80211_mgmt_tx_status(wk->sdata->dev,
1826 (unsigned long) wk->offchan_tx.frame, 1909 (unsigned long) wk->offchan_tx.frame,
1827 wk->ie, wk->ie_len, false, GFP_KERNEL); 1910 wk->ie, wk->ie_len, false, GFP_KERNEL);
@@ -1833,7 +1916,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1833 struct ieee80211_channel *chan, bool offchan, 1916 struct ieee80211_channel *chan, bool offchan,
1834 enum nl80211_channel_type channel_type, 1917 enum nl80211_channel_type channel_type,
1835 bool channel_type_valid, unsigned int wait, 1918 bool channel_type_valid, unsigned int wait,
1836 const u8 *buf, size_t len, u64 *cookie) 1919 const u8 *buf, size_t len, bool no_cck,
1920 u64 *cookie)
1837{ 1921{
1838 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1922 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1839 struct ieee80211_local *local = sdata->local; 1923 struct ieee80211_local *local = sdata->local;
@@ -1860,6 +1944,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1860 flags |= IEEE80211_TX_CTL_TX_OFFCHAN; 1944 flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
1861 } 1945 }
1862 1946
1947 if (no_cck)
1948 flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
1949
1863 if (is_offchan && !offchan) 1950 if (is_offchan && !offchan)
1864 return -EBUSY; 1951 return -EBUSY;
1865 1952
@@ -1898,33 +1985,6 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1898 1985
1899 *cookie = (unsigned long) skb; 1986 *cookie = (unsigned long) skb;
1900 1987
1901 if (is_offchan && local->ops->offchannel_tx) {
1902 int ret;
1903
1904 IEEE80211_SKB_CB(skb)->band = chan->band;
1905
1906 mutex_lock(&local->mtx);
1907
1908 if (local->hw_offchan_tx_cookie) {
1909 mutex_unlock(&local->mtx);
1910 return -EBUSY;
1911 }
1912
1913 /* TODO: bitrate control, TX processing? */
1914 ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
1915
1916 if (ret == 0)
1917 local->hw_offchan_tx_cookie = *cookie;
1918 mutex_unlock(&local->mtx);
1919
1920 /*
1921 * Allow driver to return 1 to indicate it wants to have the
1922 * frame transmitted with a remain_on_channel + regular TX.
1923 */
1924 if (ret != 1)
1925 return ret;
1926 }
1927
1928 if (is_offchan && local->ops->remain_on_channel) { 1988 if (is_offchan && local->ops->remain_on_channel) {
1929 unsigned int duration; 1989 unsigned int duration;
1930 int ret; 1990 int ret;
@@ -2011,18 +2071,6 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
2011 2071
2012 mutex_lock(&local->mtx); 2072 mutex_lock(&local->mtx);
2013 2073
2014 if (local->ops->offchannel_tx_cancel_wait &&
2015 local->hw_offchan_tx_cookie == cookie) {
2016 ret = drv_offchannel_tx_cancel_wait(local);
2017
2018 if (!ret)
2019 local->hw_offchan_tx_cookie = 0;
2020
2021 mutex_unlock(&local->mtx);
2022
2023 return ret;
2024 }
2025
2026 if (local->ops->cancel_remain_on_channel) { 2074 if (local->ops->cancel_remain_on_channel) {
2027 cookie ^= 2; 2075 cookie ^= 2;
2028 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); 2076 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -2123,6 +2171,323 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
2123 return 0; 2171 return 0;
2124} 2172}
2125 2173
2174static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
2175{
2176 u8 *pos = (void *)skb_put(skb, 7);
2177
2178 *pos++ = WLAN_EID_EXT_CAPABILITY;
2179 *pos++ = 5; /* len */
2180 *pos++ = 0x0;
2181 *pos++ = 0x0;
2182 *pos++ = 0x0;
2183 *pos++ = 0x0;
2184 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
2185}
2186
2187static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
2188{
2189 struct ieee80211_local *local = sdata->local;
2190 u16 capab;
2191
2192 capab = 0;
2193 if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
2194 return capab;
2195
2196 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
2197 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
2198 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
2199 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
2200
2201 return capab;
2202}
2203
2204static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
2205 u8 *peer, u8 *bssid)
2206{
2207 struct ieee80211_tdls_lnkie *lnkid;
2208
2209 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
2210
2211 lnkid->ie_type = WLAN_EID_LINK_ID;
2212 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
2213
2214 memcpy(lnkid->bssid, bssid, ETH_ALEN);
2215 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
2216 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
2217}
2218
2219static int
2220ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2221 u8 *peer, u8 action_code, u8 dialog_token,
2222 u16 status_code, struct sk_buff *skb)
2223{
2224 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2225 struct ieee80211_tdls_data *tf;
2226
2227 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
2228
2229 memcpy(tf->da, peer, ETH_ALEN);
2230 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
2231 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
2232 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
2233
2234 switch (action_code) {
2235 case WLAN_TDLS_SETUP_REQUEST:
2236 tf->category = WLAN_CATEGORY_TDLS;
2237 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
2238
2239 skb_put(skb, sizeof(tf->u.setup_req));
2240 tf->u.setup_req.dialog_token = dialog_token;
2241 tf->u.setup_req.capability =
2242 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2243
2244 ieee80211_add_srates_ie(&sdata->vif, skb);
2245 ieee80211_add_ext_srates_ie(&sdata->vif, skb);
2246 ieee80211_tdls_add_ext_capab(skb);
2247 break;
2248 case WLAN_TDLS_SETUP_RESPONSE:
2249 tf->category = WLAN_CATEGORY_TDLS;
2250 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
2251
2252 skb_put(skb, sizeof(tf->u.setup_resp));
2253 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
2254 tf->u.setup_resp.dialog_token = dialog_token;
2255 tf->u.setup_resp.capability =
2256 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2257
2258 ieee80211_add_srates_ie(&sdata->vif, skb);
2259 ieee80211_add_ext_srates_ie(&sdata->vif, skb);
2260 ieee80211_tdls_add_ext_capab(skb);
2261 break;
2262 case WLAN_TDLS_SETUP_CONFIRM:
2263 tf->category = WLAN_CATEGORY_TDLS;
2264 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
2265
2266 skb_put(skb, sizeof(tf->u.setup_cfm));
2267 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
2268 tf->u.setup_cfm.dialog_token = dialog_token;
2269 break;
2270 case WLAN_TDLS_TEARDOWN:
2271 tf->category = WLAN_CATEGORY_TDLS;
2272 tf->action_code = WLAN_TDLS_TEARDOWN;
2273
2274 skb_put(skb, sizeof(tf->u.teardown));
2275 tf->u.teardown.reason_code = cpu_to_le16(status_code);
2276 break;
2277 case WLAN_TDLS_DISCOVERY_REQUEST:
2278 tf->category = WLAN_CATEGORY_TDLS;
2279 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
2280
2281 skb_put(skb, sizeof(tf->u.discover_req));
2282 tf->u.discover_req.dialog_token = dialog_token;
2283 break;
2284 default:
2285 return -EINVAL;
2286 }
2287
2288 return 0;
2289}
2290
2291static int
2292ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2293 u8 *peer, u8 action_code, u8 dialog_token,
2294 u16 status_code, struct sk_buff *skb)
2295{
2296 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2297 struct ieee80211_mgmt *mgmt;
2298
2299 mgmt = (void *)skb_put(skb, 24);
2300 memset(mgmt, 0, 24);
2301 memcpy(mgmt->da, peer, ETH_ALEN);
2302 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2303 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2304
2305 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2306 IEEE80211_STYPE_ACTION);
2307
2308 switch (action_code) {
2309 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
2310 skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
2311 mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
2312 mgmt->u.action.u.tdls_discover_resp.action_code =
2313 WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
2314 mgmt->u.action.u.tdls_discover_resp.dialog_token =
2315 dialog_token;
2316 mgmt->u.action.u.tdls_discover_resp.capability =
2317 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2318
2319 ieee80211_add_srates_ie(&sdata->vif, skb);
2320 ieee80211_add_ext_srates_ie(&sdata->vif, skb);
2321 ieee80211_tdls_add_ext_capab(skb);
2322 break;
2323 default:
2324 return -EINVAL;
2325 }
2326
2327 return 0;
2328}
2329
2330static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2331 u8 *peer, u8 action_code, u8 dialog_token,
2332 u16 status_code, const u8 *extra_ies,
2333 size_t extra_ies_len)
2334{
2335 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2336 struct ieee80211_local *local = sdata->local;
2337 struct ieee80211_tx_info *info;
2338 struct sk_buff *skb = NULL;
2339 bool send_direct;
2340 int ret;
2341
2342 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
2343 return -ENOTSUPP;
2344
2345 /* make sure we are in managed mode, and associated */
2346 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
2347 !sdata->u.mgd.associated)
2348 return -EINVAL;
2349
2350#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
2351 printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
2352#endif
2353
2354 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
2355 max(sizeof(struct ieee80211_mgmt),
2356 sizeof(struct ieee80211_tdls_data)) +
2357 50 + /* supported rates */
2358 7 + /* ext capab */
2359 extra_ies_len +
2360 sizeof(struct ieee80211_tdls_lnkie));
2361 if (!skb)
2362 return -ENOMEM;
2363
2364 info = IEEE80211_SKB_CB(skb);
2365 skb_reserve(skb, local->hw.extra_tx_headroom);
2366
2367 switch (action_code) {
2368 case WLAN_TDLS_SETUP_REQUEST:
2369 case WLAN_TDLS_SETUP_RESPONSE:
2370 case WLAN_TDLS_SETUP_CONFIRM:
2371 case WLAN_TDLS_TEARDOWN:
2372 case WLAN_TDLS_DISCOVERY_REQUEST:
2373 ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
2374 action_code, dialog_token,
2375 status_code, skb);
2376 send_direct = false;
2377 break;
2378 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
2379 ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
2380 dialog_token, status_code,
2381 skb);
2382 send_direct = true;
2383 break;
2384 default:
2385 ret = -ENOTSUPP;
2386 break;
2387 }
2388
2389 if (ret < 0)
2390 goto fail;
2391
2392 if (extra_ies_len)
2393 memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
2394
2395 /* the TDLS link IE is always added last */
2396 switch (action_code) {
2397 case WLAN_TDLS_SETUP_REQUEST:
2398 case WLAN_TDLS_SETUP_CONFIRM:
2399 case WLAN_TDLS_TEARDOWN:
2400 case WLAN_TDLS_DISCOVERY_REQUEST:
2401 /* we are the initiator */
2402 ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
2403 sdata->u.mgd.bssid);
2404 break;
2405 case WLAN_TDLS_SETUP_RESPONSE:
2406 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
2407 /* we are the responder */
2408 ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
2409 sdata->u.mgd.bssid);
2410 break;
2411 default:
2412 ret = -ENOTSUPP;
2413 goto fail;
2414 }
2415
2416 if (send_direct) {
2417 ieee80211_tx_skb(sdata, skb);
2418 return 0;
2419 }
2420
2421 /*
2422 * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
2423 * we should default to AC_VI.
2424 */
2425 switch (action_code) {
2426 case WLAN_TDLS_SETUP_REQUEST:
2427 case WLAN_TDLS_SETUP_RESPONSE:
2428 skb_set_queue_mapping(skb, IEEE80211_AC_BK);
2429 skb->priority = 2;
2430 break;
2431 default:
2432 skb_set_queue_mapping(skb, IEEE80211_AC_VI);
2433 skb->priority = 5;
2434 break;
2435 }
2436
2437 /* disable bottom halves when entering the Tx path */
2438 local_bh_disable();
2439 ret = ieee80211_subif_start_xmit(skb, dev);
2440 local_bh_enable();
2441
2442 return ret;
2443
2444fail:
2445 dev_kfree_skb(skb);
2446 return ret;
2447}
2448
2449static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2450 u8 *peer, enum nl80211_tdls_operation oper)
2451{
2452 struct sta_info *sta;
2453 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2454
2455 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
2456 return -ENOTSUPP;
2457
2458 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2459 return -EINVAL;
2460
2461#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
2462 printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
2463#endif
2464
2465 switch (oper) {
2466 case NL80211_TDLS_ENABLE_LINK:
2467 rcu_read_lock();
2468 sta = sta_info_get(sdata, peer);
2469 if (!sta) {
2470 rcu_read_unlock();
2471 return -ENOLINK;
2472 }
2473
2474 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
2475 rcu_read_unlock();
2476 break;
2477 case NL80211_TDLS_DISABLE_LINK:
2478 return sta_info_destroy_addr(sdata, peer);
2479 case NL80211_TDLS_TEARDOWN:
2480 case NL80211_TDLS_SETUP:
2481 case NL80211_TDLS_DISCOVERY_REQ:
2482 /* We don't support in-driver setup/teardown/discovery */
2483 return -ENOTSUPP;
2484 default:
2485 return -ENOTSUPP;
2486 }
2487
2488 return 0;
2489}
2490
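
Both ieee80211_tdls_mgmt() and ieee80211_tdls_oper() above bail out unless the wiphy advertises TDLS support, so a low-level driver opts in at registration time. A minimal sketch, using only the flag from include/net/cfg80211.h (mydrv_register() is hypothetical):

#include <net/mac80211.h>

static int mydrv_register(struct ieee80211_hw *hw)
{
        /* let nl80211/mac80211 build and track TDLS frames for us */
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

        return ieee80211_register_hw(hw);
}

The registration hunk in main.c further below then forces WIPHY_FLAG_TDLS_EXTERNAL_SETUP, since mac80211-based drivers never do the setup internally.
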
2126struct cfg80211_ops mac80211_config_ops = { 2491struct cfg80211_ops mac80211_config_ops = {
2127 .add_virtual_intf = ieee80211_add_iface, 2492 .add_virtual_intf = ieee80211_add_iface,
2128 .del_virtual_intf = ieee80211_del_iface, 2493 .del_virtual_intf = ieee80211_del_iface,
@@ -2186,4 +2551,6 @@ struct cfg80211_ops mac80211_config_ops = {
2186 .set_ringparam = ieee80211_set_ringparam, 2551 .set_ringparam = ieee80211_set_ringparam,
2187 .get_ringparam = ieee80211_get_ringparam, 2552 .get_ringparam = ieee80211_get_ringparam,
2188 .set_rekey_data = ieee80211_set_rekey_data, 2553 .set_rekey_data = ieee80211_set_rekey_data,
2554 .tdls_oper = ieee80211_tdls_oper,
2555 .tdls_mgmt = ieee80211_tdls_mgmt,
2189}; 2556};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f7cc32..883996b2f99f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -78,57 +78,6 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
78DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", 78DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
79 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); 79 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
80 80
81static ssize_t tsf_read(struct file *file, char __user *user_buf,
82 size_t count, loff_t *ppos)
83{
84 struct ieee80211_local *local = file->private_data;
85 u64 tsf;
86
87 tsf = drv_get_tsf(local);
88
89 return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
90 (unsigned long long) tsf);
91}
92
93static ssize_t tsf_write(struct file *file,
94 const char __user *user_buf,
95 size_t count, loff_t *ppos)
96{
97 struct ieee80211_local *local = file->private_data;
98 unsigned long long tsf;
99 char buf[100];
100 size_t len;
101
102 len = min(count, sizeof(buf) - 1);
103 if (copy_from_user(buf, user_buf, len))
104 return -EFAULT;
105 buf[len] = '\0';
106
107 if (strncmp(buf, "reset", 5) == 0) {
108 if (local->ops->reset_tsf) {
109 drv_reset_tsf(local);
110 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
111 }
112 } else {
113 tsf = simple_strtoul(buf, NULL, 0);
114 if (local->ops->set_tsf) {
115 drv_set_tsf(local, tsf);
116 wiphy_info(local->hw.wiphy,
117 "debugfs set TSF to %#018llx\n", tsf);
118
119 }
120 }
121
122 return count;
123}
124
125static const struct file_operations tsf_ops = {
126 .read = tsf_read,
127 .write = tsf_write,
128 .open = mac80211_open_file_generic,
129 .llseek = default_llseek,
130};
131
132static ssize_t reset_write(struct file *file, const char __user *user_buf, 81static ssize_t reset_write(struct file *file, const char __user *user_buf,
133 size_t count, loff_t *ppos) 82 size_t count, loff_t *ppos)
134{ 83{
@@ -195,20 +144,12 @@ static ssize_t uapsd_queues_write(struct file *file,
195 size_t count, loff_t *ppos) 144 size_t count, loff_t *ppos)
196{ 145{
197 struct ieee80211_local *local = file->private_data; 146 struct ieee80211_local *local = file->private_data;
198 unsigned long val; 147 u8 val;
199 char buf[10];
200 size_t len;
201 int ret; 148 int ret;
202 149
203 len = min(count, sizeof(buf) - 1); 150 ret = kstrtou8_from_user(user_buf, count, 0, &val);
204 if (copy_from_user(buf, user_buf, len))
205 return -EFAULT;
206 buf[len] = '\0';
207
208 ret = strict_strtoul(buf, 0, &val);
209
210 if (ret) 151 if (ret)
211 return -EINVAL; 152 return ret;
212 153
213 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) 154 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
214 return -ERANGE; 155 return -ERANGE;
@@ -305,6 +246,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
305 char *buf = kzalloc(mxln, GFP_KERNEL); 246 char *buf = kzalloc(mxln, GFP_KERNEL);
306 int sf = 0; /* how many written so far */ 247 int sf = 0; /* how many written so far */
307 248
249 if (!buf)
250 return 0;
251
308 sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags); 252 sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
309 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 253 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
310 sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n"); 254 sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
@@ -355,6 +299,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
355 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n"); 299 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
356 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) 300 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
357 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n"); 301 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
302 if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
303 sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
358 304
359 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 305 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
360 kfree(buf); 306 kfree(buf);
@@ -450,7 +396,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
450 DEBUGFS_ADD(frequency); 396 DEBUGFS_ADD(frequency);
451 DEBUGFS_ADD(total_ps_buffered); 397 DEBUGFS_ADD(total_ps_buffered);
452 DEBUGFS_ADD(wep_iv); 398 DEBUGFS_ADD(wep_iv);
453 DEBUGFS_ADD(tsf);
454 DEBUGFS_ADD(queues); 399 DEBUGFS_ADD(queues);
455 DEBUGFS_ADD_MODE(reset, 0200); 400 DEBUGFS_ADD_MODE(reset, 0200);
456 DEBUGFS_ADD(noack); 401 DEBUGFS_ADD(noack);
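
The uapsd_queues conversion above is the standard kstrtou8_from_user() idiom: a single call bounds-checks the user buffer, copies it in and parses it, replacing the open-coded copy_from_user()/strict_strtoul() pair. A minimal sketch of the pattern (example_write() is hypothetical):

#include <linux/kernel.h>
#include <linux/fs.h>

static ssize_t example_write(struct file *file, const char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        u8 val;
        int ret;

        ret = kstrtou8_from_user(user_buf, count, 0, &val);
        if (ret)
                return ret;     /* -EINVAL or -ERANGE from the parser */

        /* ... apply val ... */
        return count;
}

Note it also propagates the parser's error code directly instead of flattening everything to -EINVAL.
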
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9ea7c0d0103f..9352819a986b 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -21,6 +21,7 @@
21#include "rate.h" 21#include "rate.h"
22#include "debugfs.h" 22#include "debugfs.h"
23#include "debugfs_netdev.h" 23#include "debugfs_netdev.h"
24#include "driver-ops.h"
24 25
25static ssize_t ieee80211_if_read( 26static ssize_t ieee80211_if_read(
26 struct ieee80211_sub_if_data *sdata, 27 struct ieee80211_sub_if_data *sdata,
@@ -331,6 +332,46 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
331} 332}
332__IEEE80211_IF_FILE(num_buffered_multicast, NULL); 333__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
333 334
335/* IBSS attributes */
336static ssize_t ieee80211_if_fmt_tsf(
337 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
338{
339 struct ieee80211_local *local = sdata->local;
340 u64 tsf;
341
342 tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
343
344 return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
345}
346
347static ssize_t ieee80211_if_parse_tsf(
348 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
349{
350 struct ieee80211_local *local = sdata->local;
351 unsigned long long tsf;
352 int ret;
353
354 if (strncmp(buf, "reset", 5) == 0) {
355 if (local->ops->reset_tsf) {
356 drv_reset_tsf(local, sdata);
357 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
358 }
359 } else {
360 ret = kstrtoull(buf, 10, &tsf);
361 if (ret < 0)
362 return -EINVAL;
363 if (local->ops->set_tsf) {
364 drv_set_tsf(local, sdata, tsf);
365 wiphy_info(local->hw.wiphy,
366 "debugfs set TSF to %#018llx\n", tsf);
367 }
368 }
369
370 return buflen;
371}
372__IEEE80211_IF_FILE_W(tsf);
373
374
334/* WDS attributes */ 375/* WDS attributes */
335IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 376IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
336 377
@@ -340,6 +381,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
340IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); 381IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
341IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); 382IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
342IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); 383IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
384IEEE80211_IF_FILE(dropped_frames_congestion,
385 u.mesh.mshstats.dropped_frames_congestion, DEC);
343IEEE80211_IF_FILE(dropped_frames_no_route, 386IEEE80211_IF_FILE(dropped_frames_no_route,
344 u.mesh.mshstats.dropped_frames_no_route, DEC); 387 u.mesh.mshstats.dropped_frames_no_route, DEC);
345IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); 388IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
@@ -372,6 +415,10 @@ IEEE80211_IF_FILE(min_discovery_timeout,
372 u.mesh.mshcfg.min_discovery_timeout, DEC); 415 u.mesh.mshcfg.min_discovery_timeout, DEC);
373IEEE80211_IF_FILE(dot11MeshHWMPRootMode, 416IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
374 u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); 417 u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
418IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
419 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
420IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
421 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
375#endif 422#endif
376 423
377 424
@@ -415,6 +462,11 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
415 DEBUGFS_ADD_MODE(tkip_mic_test, 0200); 462 DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
416} 463}
417 464
465static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
466{
467 DEBUGFS_ADD_MODE(tsf, 0600);
468}
469
418static void add_wds_files(struct ieee80211_sub_if_data *sdata) 470static void add_wds_files(struct ieee80211_sub_if_data *sdata)
419{ 471{
420 DEBUGFS_ADD(drop_unencrypted); 472 DEBUGFS_ADD(drop_unencrypted);
@@ -459,6 +511,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
459 MESHSTATS_ADD(fwded_frames); 511 MESHSTATS_ADD(fwded_frames);
460 MESHSTATS_ADD(dropped_frames_ttl); 512 MESHSTATS_ADD(dropped_frames_ttl);
461 MESHSTATS_ADD(dropped_frames_no_route); 513 MESHSTATS_ADD(dropped_frames_no_route);
514 MESHSTATS_ADD(dropped_frames_congestion);
462 MESHSTATS_ADD(estab_plinks); 515 MESHSTATS_ADD(estab_plinks);
463#undef MESHSTATS_ADD 516#undef MESHSTATS_ADD
464} 517}
@@ -485,7 +538,9 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
485 MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); 538 MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
486 MESHPARAMS_ADD(path_refresh_time); 539 MESHPARAMS_ADD(path_refresh_time);
487 MESHPARAMS_ADD(min_discovery_timeout); 540 MESHPARAMS_ADD(min_discovery_timeout);
488 541 MESHPARAMS_ADD(dot11MeshHWMPRootMode);
542 MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
543 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
489#undef MESHPARAMS_ADD 544#undef MESHPARAMS_ADD
490} 545}
491#endif 546#endif
@@ -506,7 +561,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
506 add_sta_files(sdata); 561 add_sta_files(sdata);
507 break; 562 break;
508 case NL80211_IFTYPE_ADHOC: 563 case NL80211_IFTYPE_ADHOC:
509 /* XXX */ 564 add_ibss_files(sdata);
510 break; 565 break;
511 case NL80211_IFTYPE_AP: 566 case NL80211_IFTYPE_AP:
512 add_ap_files(sdata); 567 add_ap_files(sdata);
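
With add_ibss_files() wired up, the TSF becomes reachable per interface. Assuming the usual debugfs layout, reading ieee80211/phy<n>/netdev:<ifname>/tsf prints the current value in hex, writing "reset" invokes drv_reset_tsf(), and writing a decimal value invokes drv_set_tsf().
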
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a01d2137fddc..c5f341798c16 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -56,19 +56,22 @@ STA_FILE(last_signal, last_signal, D);
56static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 56static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
57 size_t count, loff_t *ppos) 57 size_t count, loff_t *ppos)
58{ 58{
59 char buf[100]; 59 char buf[121];
60 struct sta_info *sta = file->private_data; 60 struct sta_info *sta = file->private_data;
61 u32 staflags = get_sta_flags(sta); 61
62 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", 62#define TEST(flg) \
63 staflags & WLAN_STA_AUTH ? "AUTH\n" : "", 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 64
65 staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "", 65 int res = scnprintf(buf, sizeof(buf),
66 staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 staflags & WLAN_STA_WME ? "WME\n" : "", 69 TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
70 staflags & WLAN_STA_WDS ? "WDS\n" : "", 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 staflags & WLAN_STA_MFP ? "MFP\n" : ""); 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH));
74#undef TEST
72 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 75 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
73} 76}
74STA_OPS(flags); 77STA_OPS(flags);
@@ -78,8 +81,14 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
78 size_t count, loff_t *ppos) 81 size_t count, loff_t *ppos)
79{ 82{
80 struct sta_info *sta = file->private_data; 83 struct sta_info *sta = file->private_data;
81 return mac80211_format_buffer(userbuf, count, ppos, "%u\n", 84 char buf[17*IEEE80211_NUM_ACS], *p = buf;
82 skb_queue_len(&sta->ps_tx_buf)); 85 int ac;
86
87 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
88 p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
89 skb_queue_len(&sta->ps_tx_buf[ac]) +
90 skb_queue_len(&sta->tx_filtered[ac]));
91 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
83} 92}
84STA_OPS(num_ps_buf_frames); 93STA_OPS(num_ps_buf_frames);
85 94
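
For reference, each TEST(X) above expands to test_sta_flag(sta, WLAN_STA_X) ? "X\n" : "", so every flag that is currently set contributes its own line to the scnprintf() output and cleared flags contribute nothing.
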
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 1425380983f7..5f165d7eb2db 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -413,50 +413,56 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
413 trace_drv_return_void(local); 413 trace_drv_return_void(local);
414} 414}
415 415
416static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, 416static inline int drv_conf_tx(struct ieee80211_local *local,
417 struct ieee80211_sub_if_data *sdata, u16 queue,
417 const struct ieee80211_tx_queue_params *params) 418 const struct ieee80211_tx_queue_params *params)
418{ 419{
419 int ret = -EOPNOTSUPP; 420 int ret = -EOPNOTSUPP;
420 421
421 might_sleep(); 422 might_sleep();
422 423
423 trace_drv_conf_tx(local, queue, params); 424 trace_drv_conf_tx(local, sdata, queue, params);
424 if (local->ops->conf_tx) 425 if (local->ops->conf_tx)
425 ret = local->ops->conf_tx(&local->hw, queue, params); 426 ret = local->ops->conf_tx(&local->hw, &sdata->vif,
427 queue, params);
426 trace_drv_return_int(local, ret); 428 trace_drv_return_int(local, ret);
427 return ret; 429 return ret;
428} 430}
429 431
430static inline u64 drv_get_tsf(struct ieee80211_local *local) 432static inline u64 drv_get_tsf(struct ieee80211_local *local,
433 struct ieee80211_sub_if_data *sdata)
431{ 434{
432 u64 ret = -1ULL; 435 u64 ret = -1ULL;
433 436
434 might_sleep(); 437 might_sleep();
435 438
436 trace_drv_get_tsf(local); 439 trace_drv_get_tsf(local, sdata);
437 if (local->ops->get_tsf) 440 if (local->ops->get_tsf)
438 ret = local->ops->get_tsf(&local->hw); 441 ret = local->ops->get_tsf(&local->hw, &sdata->vif);
439 trace_drv_return_u64(local, ret); 442 trace_drv_return_u64(local, ret);
440 return ret; 443 return ret;
441} 444}
442 445
443static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) 446static inline void drv_set_tsf(struct ieee80211_local *local,
447 struct ieee80211_sub_if_data *sdata,
448 u64 tsf)
444{ 449{
445 might_sleep(); 450 might_sleep();
446 451
447 trace_drv_set_tsf(local, tsf); 452 trace_drv_set_tsf(local, sdata, tsf);
448 if (local->ops->set_tsf) 453 if (local->ops->set_tsf)
449 local->ops->set_tsf(&local->hw, tsf); 454 local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
450 trace_drv_return_void(local); 455 trace_drv_return_void(local);
451} 456}
452 457
453static inline void drv_reset_tsf(struct ieee80211_local *local) 458static inline void drv_reset_tsf(struct ieee80211_local *local,
459 struct ieee80211_sub_if_data *sdata)
454{ 460{
455 might_sleep(); 461 might_sleep();
456 462
457 trace_drv_reset_tsf(local); 463 trace_drv_reset_tsf(local, sdata);
458 if (local->ops->reset_tsf) 464 if (local->ops->reset_tsf)
459 local->ops->reset_tsf(&local->hw); 465 local->ops->reset_tsf(&local->hw, &sdata->vif);
460 trace_drv_return_void(local); 466 trace_drv_return_void(local);
461} 467}
462 468
@@ -590,37 +596,6 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
590 return ret; 596 return ret;
591} 597}
592 598
593static inline int drv_offchannel_tx(struct ieee80211_local *local,
594 struct sk_buff *skb,
595 struct ieee80211_channel *chan,
596 enum nl80211_channel_type channel_type,
597 unsigned int wait)
598{
599 int ret;
600
601 might_sleep();
602
603 trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
604 ret = local->ops->offchannel_tx(&local->hw, skb, chan,
605 channel_type, wait);
606 trace_drv_return_int(local, ret);
607
608 return ret;
609}
610
611static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
612{
613 int ret;
614
615 might_sleep();
616
617 trace_drv_offchannel_tx_cancel_wait(local);
618 ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
619 trace_drv_return_int(local, ret);
620
621 return ret;
622}
623
624static inline int drv_set_ringparam(struct ieee80211_local *local, 599static inline int drv_set_ringparam(struct ieee80211_local *local,
625 u32 tx, u32 rx) 600 u32 tx, u32 rx)
626{ 601{
@@ -696,4 +671,34 @@ static inline void drv_rssi_callback(struct ieee80211_local *local,
696 local->ops->rssi_callback(&local->hw, event); 671 local->ops->rssi_callback(&local->hw, event);
697 trace_drv_return_void(local); 672 trace_drv_return_void(local);
698} 673}
674
675static inline void
676drv_release_buffered_frames(struct ieee80211_local *local,
677 struct sta_info *sta, u16 tids, int num_frames,
678 enum ieee80211_frame_release_type reason,
679 bool more_data)
680{
681 trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames,
682 reason, more_data);
683 if (local->ops->release_buffered_frames)
684 local->ops->release_buffered_frames(&local->hw, &sta->sta, tids,
685 num_frames, reason,
686 more_data);
687 trace_drv_return_void(local);
688}
689
690static inline void
691drv_allow_buffered_frames(struct ieee80211_local *local,
692 struct sta_info *sta, u16 tids, int num_frames,
693 enum ieee80211_frame_release_type reason,
694 bool more_data)
695{
696 trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames,
697 reason, more_data);
698 if (local->ops->allow_buffered_frames)
699 local->ops->allow_buffered_frames(&local->hw, &sta->sta,
700 tids, num_frames, reason,
701 more_data);
702 trace_drv_return_void(local);
703}
699#endif /* __MAC80211_DRIVER_OPS */ 704#endif /* __MAC80211_DRIVER_OPS */
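
A driver implementing the reworked per-interface TSF hooks now receives the vif alongside the hw pointer. A minimal sketch matching the new op signatures (the mydrv_hw_* accessors are hypothetical):

#include <net/mac80211.h>

u64 mydrv_hw_read_tsf(void *priv, struct ieee80211_vif *vif);   /* hypothetical */
void mydrv_hw_write_tsf(void *priv, struct ieee80211_vif *vif, u64 tsf);

static u64 mydrv_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        return mydrv_hw_read_tsf(hw->priv, vif);
}

static void mydrv_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                          u64 tsf)
{
        mydrv_hw_write_tsf(hw->priv, vif, tsf);
}

Drivers that keep a single global TSF can simply ignore the vif argument.
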
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index f47b00dc7afd..2af4fca55337 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -697,64 +697,76 @@ TRACE_EVENT(drv_sta_remove,
697); 697);
698 698
699TRACE_EVENT(drv_conf_tx, 699TRACE_EVENT(drv_conf_tx,
700 TP_PROTO(struct ieee80211_local *local, u16 queue, 700 TP_PROTO(struct ieee80211_local *local,
701 struct ieee80211_sub_if_data *sdata,
702 u16 queue,
701 const struct ieee80211_tx_queue_params *params), 703 const struct ieee80211_tx_queue_params *params),
702 704
703 TP_ARGS(local, queue, params), 705 TP_ARGS(local, sdata, queue, params),
704 706
705 TP_STRUCT__entry( 707 TP_STRUCT__entry(
706 LOCAL_ENTRY 708 LOCAL_ENTRY
709 VIF_ENTRY
707 __field(u16, queue) 710 __field(u16, queue)
708 __field(u16, txop) 711 __field(u16, txop)
709 __field(u16, cw_min) 712 __field(u16, cw_min)
710 __field(u16, cw_max) 713 __field(u16, cw_max)
711 __field(u8, aifs) 714 __field(u8, aifs)
715 __field(bool, uapsd)
712 ), 716 ),
713 717
714 TP_fast_assign( 718 TP_fast_assign(
715 LOCAL_ASSIGN; 719 LOCAL_ASSIGN;
720 VIF_ASSIGN;
716 __entry->queue = queue; 721 __entry->queue = queue;
717 __entry->txop = params->txop; 722 __entry->txop = params->txop;
718 __entry->cw_max = params->cw_max; 723 __entry->cw_max = params->cw_max;
719 __entry->cw_min = params->cw_min; 724 __entry->cw_min = params->cw_min;
720 __entry->aifs = params->aifs; 725 __entry->aifs = params->aifs;
726 __entry->uapsd = params->uapsd;
721 ), 727 ),
722 728
723 TP_printk( 729 TP_printk(
724 LOCAL_PR_FMT " queue:%d", 730 LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
725 LOCAL_PR_ARG, __entry->queue 731 LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
726 ) 732 )
727); 733);
728 734
729DEFINE_EVENT(local_only_evt, drv_get_tsf, 735DEFINE_EVENT(local_sdata_evt, drv_get_tsf,
730 TP_PROTO(struct ieee80211_local *local), 736 TP_PROTO(struct ieee80211_local *local,
731 TP_ARGS(local) 737 struct ieee80211_sub_if_data *sdata),
738 TP_ARGS(local, sdata)
732); 739);
733 740
734TRACE_EVENT(drv_set_tsf, 741TRACE_EVENT(drv_set_tsf,
735 TP_PROTO(struct ieee80211_local *local, u64 tsf), 742 TP_PROTO(struct ieee80211_local *local,
743 struct ieee80211_sub_if_data *sdata,
744 u64 tsf),
736 745
737 TP_ARGS(local, tsf), 746 TP_ARGS(local, sdata, tsf),
738 747
739 TP_STRUCT__entry( 748 TP_STRUCT__entry(
740 LOCAL_ENTRY 749 LOCAL_ENTRY
750 VIF_ENTRY
741 __field(u64, tsf) 751 __field(u64, tsf)
742 ), 752 ),
743 753
744 TP_fast_assign( 754 TP_fast_assign(
745 LOCAL_ASSIGN; 755 LOCAL_ASSIGN;
756 VIF_ASSIGN;
746 __entry->tsf = tsf; 757 __entry->tsf = tsf;
747 ), 758 ),
748 759
749 TP_printk( 760 TP_printk(
750 LOCAL_PR_FMT " tsf:%llu", 761 LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu",
751 LOCAL_PR_ARG, (unsigned long long)__entry->tsf 762 LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf
752 ) 763 )
753); 764);
754 765
755DEFINE_EVENT(local_only_evt, drv_reset_tsf, 766DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
756 TP_PROTO(struct ieee80211_local *local), 767 TP_PROTO(struct ieee80211_local *local,
757 TP_ARGS(local) 768 struct ieee80211_sub_if_data *sdata),
769 TP_ARGS(local, sdata)
758); 770);
759 771
760DEFINE_EVENT(local_only_evt, drv_tx_last_beacon, 772DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
@@ -1117,6 +1129,61 @@ TRACE_EVENT(drv_rssi_callback,
1117 ) 1129 )
1118); 1130);
1119 1131
1132DECLARE_EVENT_CLASS(release_evt,
1133 TP_PROTO(struct ieee80211_local *local,
1134 struct ieee80211_sta *sta,
1135 u16 tids, int num_frames,
1136 enum ieee80211_frame_release_type reason,
1137 bool more_data),
1138
1139 TP_ARGS(local, sta, tids, num_frames, reason, more_data),
1140
1141 TP_STRUCT__entry(
1142 LOCAL_ENTRY
1143 STA_ENTRY
1144 __field(u16, tids)
1145 __field(int, num_frames)
1146 __field(int, reason)
1147 __field(bool, more_data)
1148 ),
1149
1150 TP_fast_assign(
1151 LOCAL_ASSIGN;
1152 STA_ASSIGN;
1153 __entry->tids = tids;
1154 __entry->num_frames = num_frames;
1155 __entry->reason = reason;
1156 __entry->more_data = more_data;
1157 ),
1158
1159 TP_printk(
1160 LOCAL_PR_FMT STA_PR_FMT
1161 " TIDs:0x%.4x frames:%d reason:%d more:%d",
1162 LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames,
1163 __entry->reason, __entry->more_data
1164 )
1165);
1166
1167DEFINE_EVENT(release_evt, drv_release_buffered_frames,
1168 TP_PROTO(struct ieee80211_local *local,
1169 struct ieee80211_sta *sta,
1170 u16 tids, int num_frames,
1171 enum ieee80211_frame_release_type reason,
1172 bool more_data),
1173
1174 TP_ARGS(local, sta, tids, num_frames, reason, more_data)
1175);
1176
1177DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
1178 TP_PROTO(struct ieee80211_local *local,
1179 struct ieee80211_sta *sta,
1180 u16 tids, int num_frames,
1181 enum ieee80211_frame_release_type reason,
1182 bool more_data),
1183
1184 TP_ARGS(local, sta, tids, num_frames, reason, more_data)
1185);
1186
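
The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pairing used here defines the record layout and print format once in the class; each DEFINE_EVENT() then stamps out a named tracepoint sharing that code, which is why drv_release_buffered_frames and drv_allow_buffered_frames differ only in name.
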
1120/* 1187/*
1121 * Tracing for API calls that drivers call. 1188 * Tracing for API calls that drivers call.
1122 */ 1189 */
@@ -1431,6 +1498,28 @@ TRACE_EVENT(api_enable_rssi_reports,
1431 ) 1498 )
1432); 1499);
1433 1500
1501TRACE_EVENT(api_eosp,
1502 TP_PROTO(struct ieee80211_local *local,
1503 struct ieee80211_sta *sta),
1504
1505 TP_ARGS(local, sta),
1506
1507 TP_STRUCT__entry(
1508 LOCAL_ENTRY
1509 STA_ENTRY
1510 ),
1511
1512 TP_fast_assign(
1513 LOCAL_ASSIGN;
1514 STA_ASSIGN;
1515 ),
1516
1517 TP_printk(
1518 LOCAL_PR_FMT STA_PR_FMT,
1519 LOCAL_PR_ARG, STA_PR_FMT
1520 )
1521);
1522
1434/* 1523/*
1435 * Tracing for internal functions 1524 * Tracing for internal functions
1436 * (which may also be called in response to driver calls) 1525 * (which may also be called in response to driver calls)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7cfc286946c0..f0fb737efa86 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/export.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "rate.h" 20#include "rate.h"
@@ -130,7 +131,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
130 * down by the code that set the flag, so this 131 * down by the code that set the flag, so this
131 * need not run. 132 * need not run.
132 */ 133 */
133 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) 134 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA))
134 return; 135 return;
135 136
136 mutex_lock(&sta->ampdu_mlme.mtx); 137 mutex_lock(&sta->ampdu_mlme.mtx);
@@ -186,12 +187,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
186 u16 params; 187 u16 params;
187 188
188 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 189 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
189 190 if (!skb)
190 if (!skb) {
191 printk(KERN_ERR "%s: failed to allocate buffer "
192 "for delba frame\n", sdata->name);
193 return; 191 return;
194 }
195 192
196 skb_reserve(skb, local->hw.extra_tx_headroom); 193 skb_reserve(skb, local->hw.extra_tx_headroom);
197 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 194 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
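
Dropping the allocation-failure printk in ieee80211_send_delba() follows the usual kernel convention: the allocator already warns on failure, so a per-call-site OOM message adds noise without information; returning early is all that is needed.
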
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56c24cabf26d..ede9a8b341ac 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -81,10 +81,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
81 lockdep_assert_held(&ifibss->mtx); 81 lockdep_assert_held(&ifibss->mtx);
82 82
83 /* Reset own TSF to allow time synchronization work. */ 83 /* Reset own TSF to allow time synchronization work. */
84 drv_reset_tsf(local); 84 drv_reset_tsf(local, sdata);
85 85
86 skb = ifibss->skb; 86 skb = ifibss->skb;
87 rcu_assign_pointer(ifibss->presp, NULL); 87 RCU_INIT_POINTER(ifibss->presp, NULL);
88 synchronize_rcu(); 88 synchronize_rcu();
89 skb->data = skb->head; 89 skb->data = skb->head;
90 skb->len = 0; 90 skb->len = 0;
@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 184 *pos++ = 0; /* U-APSD not in use */ 184 *pos++ = 0; /* U-APSD not in use */
185 } 185 }
186 186
187 rcu_assign_pointer(ifibss->presp, skb); 187 RCU_INIT_POINTER(ifibss->presp, skb);
188 188
189 sdata->vif.bss_conf.beacon_int = beacon_int; 189 sdata->vif.bss_conf.beacon_int = beacon_int;
190 sdata->vif.bss_conf.basic_rates = basic_rates; 190 sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -314,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
314 } 314 }
315 315
316 if (sta && elems->wmm_info) 316 if (sta && elems->wmm_info)
317 set_sta_flags(sta, WLAN_STA_WME); 317 set_sta_flag(sta, WLAN_STA_WME);
318 318
319 rcu_read_unlock(); 319 rcu_read_unlock();
320 } 320 }
@@ -382,7 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
382 * second best option: get current TSF 382 * second best option: get current TSF
383 * (will return -1 if not supported) 383 * (will return -1 if not supported)
384 */ 384 */
385 rx_timestamp = drv_get_tsf(local); 385 rx_timestamp = drv_get_tsf(local, sdata);
386 } 386 }
387 387
388#ifdef CONFIG_MAC80211_IBSS_DEBUG 388#ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -417,7 +417,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
417 * must be callable in atomic context. 417 * must be callable in atomic context.
418 */ 418 */
419struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 419struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
420 u8 *bssid,u8 *addr, u32 supp_rates, 420 u8 *bssid, u8 *addr, u32 supp_rates,
421 gfp_t gfp) 421 gfp_t gfp)
422{ 422{
423 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 423 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -452,7 +452,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
452 return NULL; 452 return NULL;
453 453
454 sta->last_rx = jiffies; 454 sta->last_rx = jiffies;
455 set_sta_flags(sta, WLAN_STA_AUTHORIZED); 455 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
456 456
457 /* make sure mandatory rates are always added */ 457 /* make sure mandatory rates are always added */
458 sta->sta.supp_rates[band] = supp_rates | 458 sta->sta.supp_rates[band] = supp_rates |
@@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
995 kfree(sdata->u.ibss.ie); 995 kfree(sdata->u.ibss.ie);
996 skb = rcu_dereference_protected(sdata->u.ibss.presp, 996 skb = rcu_dereference_protected(sdata->u.ibss.presp,
997 lockdep_is_held(&sdata->u.ibss.mtx)); 997 lockdep_is_held(&sdata->u.ibss.mtx));
998 rcu_assign_pointer(sdata->u.ibss.presp, NULL); 998 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
999 sdata->vif.bss_conf.ibss_joined = false; 999 sdata->vif.bss_conf.ibss_joined = false;
1000 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1000 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1001 BSS_CHANGED_IBSS); 1001 BSS_CHANGED_IBSS);
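
The rcu_assign_pointer() to RCU_INIT_POINTER() conversions in this file follow a simple rule: the assign variant carries a write barrier so readers never see a half-initialized object, while the INIT variant skips it, which is intended for stores of NULL or for stores that cannot race with readers dereferencing the new value. A minimal sketch of the distinction (struct foo and gp are hypothetical):

#include <linux/rcupdate.h>

struct foo { int a; };
static struct foo __rcu *gp;

static void publish(struct foo *newp)
{
        /* orders newp's initialization before the pointer becomes visible */
        rcu_assign_pointer(gp, newp);
}

static void retract(void)
{
        /* no barrier needed: NULL carries nothing to order */
        RCU_INIT_POINTER(gp, NULL);
}
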
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 400c09bea639..ea10a51babda 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -136,7 +136,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
136#define TX_DROP ((__force ieee80211_tx_result) 1u) 136#define TX_DROP ((__force ieee80211_tx_result) 1u)
137#define TX_QUEUED ((__force ieee80211_tx_result) 2u) 137#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
138 138
139#define IEEE80211_TX_FRAGMENTED BIT(0)
140#define IEEE80211_TX_UNICAST BIT(1) 139#define IEEE80211_TX_UNICAST BIT(1)
141#define IEEE80211_TX_PS_BUFFERED BIT(2) 140#define IEEE80211_TX_PS_BUFFERED BIT(2)
142 141
@@ -149,7 +148,6 @@ struct ieee80211_tx_data {
149 148
150 struct ieee80211_channel *channel; 149 struct ieee80211_channel *channel;
151 150
152 u16 ethertype;
153 unsigned int flags; 151 unsigned int flags;
154}; 152};
155 153
@@ -261,6 +259,7 @@ struct mesh_stats {
261 __u32 fwded_frames; /* Mesh total forwarded frames */ 259 __u32 fwded_frames; /* Mesh total forwarded frames */
 262 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0 */ 260 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0 */
263 __u32 dropped_frames_no_route; /* Not transmitted, no route found */ 261 __u32 dropped_frames_no_route; /* Not transmitted, no route found */
262 __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
264 atomic_t estab_plinks; 263 atomic_t estab_plinks;
265}; 264};
266 265
@@ -345,6 +344,7 @@ struct ieee80211_work {
345 struct { 344 struct {
346 struct sk_buff *frame; 345 struct sk_buff *frame;
347 u32 wait; 346 u32 wait;
347 bool status;
348 } offchan_tx; 348 } offchan_tx;
349 }; 349 };
350 350
@@ -389,6 +389,7 @@ struct ieee80211_if_managed {
389 389
390 unsigned long timers_running; /* used for quiesce/restart */ 390 unsigned long timers_running; /* used for quiesce/restart */
391 bool powersave; /* powersave requested for this iface */ 391 bool powersave; /* powersave requested for this iface */
392 bool broken_ap; /* AP is broken -- turn off powersave */
392 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 393 enum ieee80211_smps_mode req_smps, /* requested smps mode */
393 ap_smps, /* smps mode AP thinks we're in */ 394 ap_smps, /* smps mode AP thinks we're in */
394 driver_smps_mode; /* smps mode request */ 395 driver_smps_mode; /* smps mode request */
@@ -514,6 +515,7 @@ struct ieee80211_if_mesh {
514 struct mesh_config mshcfg; 515 struct mesh_config mshcfg;
515 u32 mesh_seqnum; 516 u32 mesh_seqnum;
516 bool accepting_plinks; 517 bool accepting_plinks;
518 int num_gates;
517 const u8 *ie; 519 const u8 *ie;
518 u8 ie_len; 520 u8 ie_len;
519 enum { 521 enum {
@@ -607,6 +609,8 @@ struct ieee80211_sub_if_data {
607 __be16 control_port_protocol; 609 __be16 control_port_protocol;
608 bool control_port_no_encrypt; 610 bool control_port_no_encrypt;
609 611
612 struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
613
610 struct work_struct work; 614 struct work_struct work;
611 struct sk_buff_head skb_queue; 615 struct sk_buff_head skb_queue;
612 616
@@ -660,6 +664,11 @@ enum sdata_queue_type {
660enum { 664enum {
661 IEEE80211_RX_MSG = 1, 665 IEEE80211_RX_MSG = 1,
662 IEEE80211_TX_STATUS_MSG = 2, 666 IEEE80211_TX_STATUS_MSG = 2,
667 IEEE80211_EOSP_MSG = 3,
668};
669
670struct skb_eosp_msg_data {
671 u8 sta[ETH_ALEN], iface[ETH_ALEN];
663}; 672};
664 673
665enum queue_stop_reason { 674enum queue_stop_reason {
@@ -669,6 +678,7 @@ enum queue_stop_reason {
669 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 678 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
670 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 679 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
671 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 680 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
681 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
672}; 682};
673 683
674#ifdef CONFIG_MAC80211_LEDS 684#ifdef CONFIG_MAC80211_LEDS
@@ -748,7 +758,6 @@ struct ieee80211_local {
748 struct workqueue_struct *workqueue; 758 struct workqueue_struct *workqueue;
749 759
750 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; 760 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
751 struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
752 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ 761 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
753 spinlock_t queue_stop_reason_lock; 762 spinlock_t queue_stop_reason_lock;
754 763
@@ -1002,7 +1011,6 @@ struct ieee80211_local {
1002 unsigned int hw_roc_duration; 1011 unsigned int hw_roc_duration;
1003 u32 hw_roc_cookie; 1012 u32 hw_roc_cookie;
1004 bool hw_roc_for_tx; 1013 bool hw_roc_for_tx;
1005 unsigned long hw_offchan_tx_cookie;
1006 1014
1007 /* dummy netdev for use w/ NAPI */ 1015 /* dummy netdev for use w/ NAPI */
1008 struct net_device napi_dev; 1016 struct net_device napi_dev;
@@ -1022,69 +1030,6 @@ struct ieee80211_ra_tid {
1022 u16 tid; 1030 u16 tid;
1023}; 1031};
1024 1032
1025/* Parsed Information Elements */
1026struct ieee802_11_elems {
1027 u8 *ie_start;
1028 size_t total_len;
1029
1030 /* pointers to IEs */
1031 u8 *ssid;
1032 u8 *supp_rates;
1033 u8 *fh_params;
1034 u8 *ds_params;
1035 u8 *cf_params;
1036 struct ieee80211_tim_ie *tim;
1037 u8 *ibss_params;
1038 u8 *challenge;
1039 u8 *wpa;
1040 u8 *rsn;
1041 u8 *erp_info;
1042 u8 *ext_supp_rates;
1043 u8 *wmm_info;
1044 u8 *wmm_param;
1045 struct ieee80211_ht_cap *ht_cap_elem;
1046 struct ieee80211_ht_info *ht_info_elem;
1047 struct ieee80211_meshconf_ie *mesh_config;
1048 u8 *mesh_id;
1049 u8 *peer_link;
1050 u8 *preq;
1051 u8 *prep;
1052 u8 *perr;
1053 struct ieee80211_rann_ie *rann;
1054 u8 *ch_switch_elem;
1055 u8 *country_elem;
1056 u8 *pwr_constr_elem;
 1057 u8 *quiet_elem; /* first quiet element */
1058 u8 *timeout_int;
1059
1060 /* length of them, respectively */
1061 u8 ssid_len;
1062 u8 supp_rates_len;
1063 u8 fh_params_len;
1064 u8 ds_params_len;
1065 u8 cf_params_len;
1066 u8 tim_len;
1067 u8 ibss_params_len;
1068 u8 challenge_len;
1069 u8 wpa_len;
1070 u8 rsn_len;
1071 u8 erp_info_len;
1072 u8 ext_supp_rates_len;
1073 u8 wmm_info_len;
1074 u8 wmm_param_len;
1075 u8 mesh_id_len;
1076 u8 peer_link_len;
1077 u8 preq_len;
1078 u8 prep_len;
1079 u8 perr_len;
1080 u8 ch_switch_elem_len;
1081 u8 country_elem_len;
1082 u8 pwr_constr_elem_len;
1083 u8 quiet_elem_len;
 1084 u8 num_of_quiet_elem; /* can be more than one */
1085 u8 timeout_int_len;
1086};
1087
1088static inline struct ieee80211_local *hw_to_local( 1033static inline struct ieee80211_local *hw_to_local(
1089 struct ieee80211_hw *hw) 1034 struct ieee80211_hw *hw)
1090{ 1035{
@@ -1233,23 +1178,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1233netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, 1178netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1234 struct net_device *dev); 1179 struct net_device *dev);
1235 1180
1236/*
1237 * radiotap header for status frames
1238 */
1239struct ieee80211_tx_status_rtap_hdr {
1240 struct ieee80211_radiotap_header hdr;
1241 u8 rate;
1242 u8 padding_for_rate;
1243 __le16 tx_flags;
1244 u8 data_retries;
1245} __packed;
1246
1247
1248/* HT */ 1181/* HT */
1249void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 1182void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
1250 struct ieee80211_ht_cap *ht_cap_ie, 1183 struct ieee80211_ht_cap *ht_cap_ie,
1251 struct ieee80211_sta_ht_cap *ht_cap); 1184 struct ieee80211_sta_ht_cap *ht_cap);
1252void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
1253void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1185void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
1254 const u8 *da, u16 tid, 1186 const u8 *da, u16 tid,
1255 u16 initiator, u16 reason_code); 1187 u16 initiator, u16 reason_code);
@@ -1333,6 +1265,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
1333 struct ieee80211_hdr *hdr, const u8 *tsc, 1265 struct ieee80211_hdr *hdr, const u8 *tsc,
1334 gfp_t gfp); 1266 gfp_t gfp);
1335void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); 1267void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
1268void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1336void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1269void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1337void ieee802_11_parse_elems(u8 *start, size_t len, 1270void ieee802_11_parse_elems(u8 *start, size_t len,
1338 struct ieee802_11_elems *elems); 1271 struct ieee802_11_elems *elems);
@@ -1364,11 +1297,11 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
1364 enum queue_stop_reason reason); 1297 enum queue_stop_reason reason);
1365void ieee80211_add_pending_skb(struct ieee80211_local *local, 1298void ieee80211_add_pending_skb(struct ieee80211_local *local,
1366 struct sk_buff *skb); 1299 struct sk_buff *skb);
1367int ieee80211_add_pending_skbs(struct ieee80211_local *local, 1300void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1368 struct sk_buff_head *skbs); 1301 struct sk_buff_head *skbs);
1369int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 1302void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
1370 struct sk_buff_head *skbs, 1303 struct sk_buff_head *skbs,
1371 void (*fn)(void *data), void *data); 1304 void (*fn)(void *data), void *data);
1372 1305
1373void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1306void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1374 u16 transaction, u16 auth_alg, 1307 u16 transaction, u16 auth_alg,
@@ -1386,7 +1319,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1386void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1319void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1387 const u8 *ssid, size_t ssid_len, 1320 const u8 *ssid, size_t ssid_len,
1388 const u8 *ie, size_t ie_len, 1321 const u8 *ie, size_t ie_len,
1389 u32 ratemask, bool directed); 1322 u32 ratemask, bool directed, bool no_cck);
1390 1323
1391void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1324void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1392 const size_t supp_rates_len, 1325 const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 556e7e6ddf0a..30d73552e9ab 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -299,8 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
299 goto err_del_interface; 299 goto err_del_interface;
300 } 300 }
301 301
302 /* no locking required since STA is not live yet */ 302 /* no atomic bitop required since STA is not live yet */
303 sta->flags |= WLAN_STA_AUTHORIZED; 303 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
304 304
305 res = sta_info_insert(sta); 305 res = sta_info_insert(sta);
306 if (res) { 306 if (res) {
@@ -456,21 +456,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
456 BSS_CHANGED_BEACON_ENABLED); 456 BSS_CHANGED_BEACON_ENABLED);
457 457
458 /* remove beacon */ 458 /* remove beacon */
459 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 459 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
460 synchronize_rcu(); 460 synchronize_rcu();
461 kfree(old_beacon); 461 kfree(old_beacon);
462 462
463 /* free all potentially still buffered bcast frames */
464 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
465 local->total_ps_buffered--;
466 dev_kfree_skb(skb);
467 }
468
469 /* down all dependent devices, that is VLANs */ 463 /* down all dependent devices, that is VLANs */
470 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, 464 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
471 u.vlan.list) 465 u.vlan.list)
472 dev_close(vlan->dev); 466 dev_close(vlan->dev);
473 WARN_ON(!list_empty(&sdata->u.ap.vlans)); 467 WARN_ON(!list_empty(&sdata->u.ap.vlans));
468
469 /* free all potentially still buffered bcast frames */
470 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
471 skb_queue_purge(&sdata->u.ap.ps_bc_buf);
474 } 472 }
475 473
476 if (going_down) 474 if (going_down)
@@ -645,7 +643,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
645 .ndo_stop = ieee80211_stop, 643 .ndo_stop = ieee80211_stop,
646 .ndo_uninit = ieee80211_teardown_sdata, 644 .ndo_uninit = ieee80211_teardown_sdata,
647 .ndo_start_xmit = ieee80211_subif_start_xmit, 645 .ndo_start_xmit = ieee80211_subif_start_xmit,
648 .ndo_set_multicast_list = ieee80211_set_multicast_list, 646 .ndo_set_rx_mode = ieee80211_set_multicast_list,
649 .ndo_change_mtu = ieee80211_change_mtu, 647 .ndo_change_mtu = ieee80211_change_mtu,
650 .ndo_set_mac_address = ieee80211_change_mac, 648 .ndo_set_mac_address = ieee80211_change_mac,
651 .ndo_select_queue = ieee80211_netdev_select_queue, 649 .ndo_select_queue = ieee80211_netdev_select_queue,
@@ -689,7 +687,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
689 .ndo_stop = ieee80211_stop, 687 .ndo_stop = ieee80211_stop,
690 .ndo_uninit = ieee80211_teardown_sdata, 688 .ndo_uninit = ieee80211_teardown_sdata,
691 .ndo_start_xmit = ieee80211_monitor_start_xmit, 689 .ndo_start_xmit = ieee80211_monitor_start_xmit,
692 .ndo_set_multicast_list = ieee80211_set_multicast_list, 690 .ndo_set_rx_mode = ieee80211_set_multicast_list,
693 .ndo_change_mtu = ieee80211_change_mtu, 691 .ndo_change_mtu = ieee80211_change_mtu,
694 .ndo_set_mac_address = eth_mac_addr, 692 .ndo_set_mac_address = eth_mac_addr,
695 .ndo_select_queue = ieee80211_monitor_select_queue, 693 .ndo_select_queue = ieee80211_monitor_select_queue,
@@ -1214,6 +1212,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1214 list_del_rcu(&sdata->list); 1212 list_del_rcu(&sdata->list);
1215 mutex_unlock(&sdata->local->iflist_mtx); 1213 mutex_unlock(&sdata->local->iflist_mtx);
1216 1214
1215 if (ieee80211_vif_is_mesh(&sdata->vif))
1216 mesh_path_flush_by_iface(sdata);
1217
1217 synchronize_rcu(); 1218 synchronize_rcu();
1218 unregister_netdevice(sdata->dev); 1219 unregister_netdevice(sdata->dev);
1219} 1220}
@@ -1233,6 +1234,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1233 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1234 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1234 list_del(&sdata->list); 1235 list_del(&sdata->list);
1235 1236
1237 if (ieee80211_vif_is_mesh(&sdata->vif))
1238 mesh_path_flush_by_iface(sdata);
1239
1236 unregister_netdevice_queue(sdata->dev, &unreg_list); 1240 unregister_netdevice_queue(sdata->dev, &unreg_list);
1237 } 1241 }
1238 mutex_unlock(&local->iflist_mtx); 1242 mutex_unlock(&local->iflist_mtx);
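
Two housekeeping changes meet in this file: the .ndo_set_multicast_list hook becomes .ndo_set_rx_mode, tracking the kernel-wide consolidation of those netdev callbacks (the mac80211 handler itself is unchanged), and mesh interfaces now flush their path table via mesh_path_flush_by_iface() before unregister_netdevice(), so no path entry can outlive its interface.
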
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5150c6d11b57..fb02ea52d2c2 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -15,6 +15,7 @@
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/export.h>
18#include <net/mac80211.h> 19#include <net/mac80211.h>
19#include "ieee80211_i.h" 20#include "ieee80211_i.h"
20#include "driver-ops.h" 21#include "driver-ops.h"
@@ -464,7 +465,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
464 * some hardware cannot handle TKIP with QoS, so 465 * some hardware cannot handle TKIP with QoS, so
465 * we indicate whether QoS could be in use. 466 * we indicate whether QoS could be in use.
466 */ 467 */
467 if (test_sta_flags(sta, WLAN_STA_WME)) 468 if (test_sta_flag(sta, WLAN_STA_WME))
468 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; 469 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
469 } else { 470 } else {
470 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 471 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -478,7 +479,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
478 /* same here, the AP could be using QoS */ 479 /* same here, the AP could be using QoS */
479 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid); 480 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
480 if (ap) { 481 if (ap) {
481 if (test_sta_flags(ap, WLAN_STA_WME)) 482 if (test_sta_flag(ap, WLAN_STA_WME))
482 key->conf.flags |= 483 key->conf.flags |=
483 IEEE80211_KEY_FLAG_WMM_STA; 484 IEEE80211_KEY_FLAG_WMM_STA;
484 } 485 }
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 14590332c81c..1bf7903496f8 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -9,6 +9,7 @@
9/* just for IFNAMSIZ */ 9/* just for IFNAMSIZ */
10#include <linux/if.h> 10#include <linux/if.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/export.h>
12#include "led.h" 13#include "led.h"
13 14
14void ieee80211_led_rx(struct ieee80211_local *local) 15void ieee80211_led_rx(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index acb44230b251..d999bf3b84e1 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -19,7 +19,7 @@
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/bitmap.h> 21#include <linux/bitmap.h>
22#include <linux/pm_qos_params.h> 22#include <linux/pm_qos.h>
23#include <linux/inetdevice.h> 23#include <linux/inetdevice.h>
24#include <net/net_namespace.h> 24#include <net/net_namespace.h>
25#include <net/cfg80211.h> 25#include <net/cfg80211.h>
@@ -325,6 +325,8 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
325static void ieee80211_tasklet_handler(unsigned long data) 325static void ieee80211_tasklet_handler(unsigned long data)
326{ 326{
327 struct ieee80211_local *local = (struct ieee80211_local *) data; 327 struct ieee80211_local *local = (struct ieee80211_local *) data;
328 struct sta_info *sta, *tmp;
329 struct skb_eosp_msg_data *eosp_data;
328 struct sk_buff *skb; 330 struct sk_buff *skb;
329 331
330 while ((skb = skb_dequeue(&local->skb_queue)) || 332 while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -340,6 +342,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
340 skb->pkt_type = 0; 342 skb->pkt_type = 0;
341 ieee80211_tx_status(local_to_hw(local), skb); 343 ieee80211_tx_status(local_to_hw(local), skb);
342 break; 344 break;
345 case IEEE80211_EOSP_MSG:
346 eosp_data = (void *)skb->cb;
347 for_each_sta_info(local, eosp_data->sta, sta, tmp) {
348 /* skip wrong virtual interface */
349 if (memcmp(eosp_data->iface,
350 sta->sdata->vif.addr, ETH_ALEN))
351 continue;
352 clear_sta_flag(sta, WLAN_STA_SP);
353 break;
354 }
355 dev_kfree_skb(skb);
356 break;
343 default: 357 default:
344 WARN(1, "mac80211: Packet is of unknown type %d\n", 358 WARN(1, "mac80211: Packet is of unknown type %d\n",
345 skb->pkt_type); 359 skb->pkt_type);
@@ -608,6 +622,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
608 local->hw.max_rates = 1; 622 local->hw.max_rates = 1;
609 local->hw.max_report_rates = 0; 623 local->hw.max_report_rates = 0;
610 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; 624 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
625 local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
611 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 626 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
612 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 627 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
613 local->user_power_level = -1; 628 local->user_power_level = -1;
@@ -862,6 +877,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
862 if (local->ops->sched_scan_start) 877 if (local->ops->sched_scan_start)
863 local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 878 local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
864 879
880 /* mac80211 based drivers don't support internal TDLS setup */
881 if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
882 local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
883
865 result = wiphy_register(local->hw.wiphy); 884 result = wiphy_register(local->hw.wiphy);
866 if (result < 0) 885 if (result < 0)
867 goto fail_wiphy_register; 886 goto fail_wiphy_register;
@@ -885,12 +904,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
885 * and we need some headroom for passing the frame to monitor 904 * and we need some headroom for passing the frame to monitor
886 * interfaces, but never both at the same time. 905 * interfaces, but never both at the same time.
887 */ 906 */
888#ifndef __CHECKER__
889 BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
890 sizeof(struct ieee80211_tx_status_rtap_hdr));
891#endif
892 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, 907 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
893 sizeof(struct ieee80211_tx_status_rtap_hdr)); 908 IEEE80211_TX_STATUS_HEADROOM);
894 909
895 debugfs_hw_add(local); 910 debugfs_hw_add(local);
896 911
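
The new IEEE80211_EOSP_MSG case in the tasklet is the consumer side of out-of-band end-of-service-period reporting. A minimal sketch of the producer, a driver's IRQ-context TX-status path, assuming the ieee80211_sta_eosp_irqsafe() API from this series (mydrv_tx_irq() is hypothetical):

#include <net/mac80211.h>

static void mydrv_tx_irq(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                         struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        /* last frame of a U-APSD service period went out */
        if (info->flags & IEEE80211_TX_STATUS_EOSP)
                ieee80211_sta_eosp_irqsafe(sta);

        ieee80211_tx_status_irqsafe(hw, skb);
}

mac80211 then queues a message carrying the station and interface addresses, and the tasklet above clears WLAN_STA_SP for the matching station.
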
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980c8e60..a7078fdba8ca 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
17#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
18#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
19
20#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 16#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
21#define MESHCONF_CAPAB_FORWARDING 0x08 17#define MESHCONF_CAPAB_FORWARDING 0x08
22 18
@@ -27,6 +23,17 @@
27int mesh_allocated; 23int mesh_allocated;
28static struct kmem_cache *rm_cache; 24static struct kmem_cache *rm_cache;
29 25
26#ifdef CONFIG_MAC80211_MESH
27bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
28{
29 return (mgmt->u.action.u.mesh_action.action_code ==
30 WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
31}
32#else
33bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
34{ return false; }
35#endif
36
30void ieee80211s_init(void) 37void ieee80211s_init(void)
31{ 38{
32 mesh_pathtbl_init(); 39 mesh_pathtbl_init();
@@ -193,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
193 } 200 }
194 201
195 p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); 202 p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
196 if (!p) { 203 if (!p)
197 printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
198 return 0; 204 return 0;
199 } 205
200 p->seqnum = seqnum; 206 p->seqnum = seqnum;
201 p->exp_time = jiffies + RMC_TIMEOUT; 207 p->exp_time = jiffies + RMC_TIMEOUT;
202 memcpy(p->sa, sa, ETH_ALEN); 208 memcpy(p->sa, sa, ETH_ALEN);
@@ -204,89 +210,136 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
204 return 0; 210 return 0;
205} 211}
206 212
207void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) 213int
214mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
208{ 215{
209 struct ieee80211_local *local = sdata->local; 216 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
210 struct ieee80211_supported_band *sband; 217 u8 *pos, neighbors;
211 u8 *pos; 218 u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
212 int len, i, rate;
213 u8 neighbors;
214
215 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
216 len = sband->n_bitrates;
217 if (len > 8)
218 len = 8;
219 pos = skb_put(skb, len + 2);
220 *pos++ = WLAN_EID_SUPP_RATES;
221 *pos++ = len;
222 for (i = 0; i < len; i++) {
223 rate = sband->bitrates[i].bitrate;
224 *pos++ = (u8) (rate / 5);
225 }
226
227 if (sband->n_bitrates > len) {
228 pos = skb_put(skb, sband->n_bitrates - len + 2);
229 *pos++ = WLAN_EID_EXT_SUPP_RATES;
230 *pos++ = sband->n_bitrates - len;
231 for (i = len; i < sband->n_bitrates; i++) {
232 rate = sband->bitrates[i].bitrate;
233 *pos++ = (u8) (rate / 5);
234 }
235 }
236
237 if (sband->band == IEEE80211_BAND_2GHZ) {
238 pos = skb_put(skb, 2 + 1);
239 *pos++ = WLAN_EID_DS_PARAMS;
240 *pos++ = 1;
241 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
242 }
243 219
244 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); 220 if (skb_tailroom(skb) < 2 + meshconf_len)
245 *pos++ = WLAN_EID_MESH_ID; 221 return -ENOMEM;
246 *pos++ = sdata->u.mesh.mesh_id_len;
247 if (sdata->u.mesh.mesh_id_len)
248 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
249 222
250 pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie)); 223 pos = skb_put(skb, 2 + meshconf_len);
251 *pos++ = WLAN_EID_MESH_CONFIG; 224 *pos++ = WLAN_EID_MESH_CONFIG;
252 *pos++ = sizeof(struct ieee80211_meshconf_ie); 225 *pos++ = meshconf_len;
253 226
254 /* Active path selection protocol ID */ 227 /* Active path selection protocol ID */
255 *pos++ = sdata->u.mesh.mesh_pp_id; 228 *pos++ = ifmsh->mesh_pp_id;
256
257 /* Active path selection metric ID */ 229 /* Active path selection metric ID */
258 *pos++ = sdata->u.mesh.mesh_pm_id; 230 *pos++ = ifmsh->mesh_pm_id;
259
260 /* Congestion control mode identifier */ 231 /* Congestion control mode identifier */
261 *pos++ = sdata->u.mesh.mesh_cc_id; 232 *pos++ = ifmsh->mesh_cc_id;
262
263 /* Synchronization protocol identifier */ 233 /* Synchronization protocol identifier */
264 *pos++ = sdata->u.mesh.mesh_sp_id; 234 *pos++ = ifmsh->mesh_sp_id;
265
266 /* Authentication Protocol identifier */ 235 /* Authentication Protocol identifier */
267 *pos++ = sdata->u.mesh.mesh_auth_id; 236 *pos++ = ifmsh->mesh_auth_id;
268
269 /* Mesh Formation Info - number of neighbors */ 237 /* Mesh Formation Info - number of neighbors */
270 neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks); 238 neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
271 /* Number of neighbor mesh STAs or 15 whichever is smaller */ 239 /* Number of neighbor mesh STAs or 15 whichever is smaller */
272 neighbors = (neighbors > 15) ? 15 : neighbors; 240 neighbors = (neighbors > 15) ? 15 : neighbors;
273 *pos++ = neighbors << 1; 241 *pos++ = neighbors << 1;
274
275 /* Mesh capability */ 242 /* Mesh capability */
276 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); 243 ifmsh->accepting_plinks = mesh_plink_availables(sdata);
277 *pos = MESHCONF_CAPAB_FORWARDING; 244 *pos = MESHCONF_CAPAB_FORWARDING;
278 *pos++ |= sdata->u.mesh.accepting_plinks ? 245 *pos++ |= ifmsh->accepting_plinks ?
279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 246 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
280 *pos++ = 0x00; 247 *pos++ = 0x00;
281 248
282 if (sdata->u.mesh.ie) { 249 return 0;
283 int len = sdata->u.mesh.ie_len; 250}
284 const u8 *data = sdata->u.mesh.ie; 251
285 if (skb_tailroom(skb) > len) 252int
286 memcpy(skb_put(skb, len), data, len); 253mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
254{
255 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
256 u8 *pos;
257
258 if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
259 return -ENOMEM;
260
261 pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
262 *pos++ = WLAN_EID_MESH_ID;
263 *pos++ = ifmsh->mesh_id_len;
264 if (ifmsh->mesh_id_len)
265 memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
266
267 return 0;
268}
269
270int
271mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
272{
273 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
274 u8 offset, len;
275 const u8 *data;
276
277 if (!ifmsh->ie || !ifmsh->ie_len)
278 return 0;
279
280 /* fast-forward to vendor IEs */
281 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
282
283 if (offset) {
284 len = ifmsh->ie_len - offset;
285 data = ifmsh->ie + offset;
286 if (skb_tailroom(skb) < len)
287 return -ENOMEM;
288 memcpy(skb_put(skb, len), data, len);
287 } 289 }
290
291 return 0;
288} 292}
289 293
294int
295mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
296{
297 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
298 u8 len = 0;
299 const u8 *data;
300
301 if (!ifmsh->ie || !ifmsh->ie_len)
302 return 0;
303
304 /* find RSN IE */
305 data = ifmsh->ie;
306 while (data < ifmsh->ie + ifmsh->ie_len) {
307 if (*data == WLAN_EID_RSN) {
308 len = data[1] + 2;
309 break;
310 }
311 data++;
312 }
313
314 if (len) {
315 if (skb_tailroom(skb) < len)
316 return -ENOMEM;
317 memcpy(skb_put(skb, len), data, len);
318 }
319
320 return 0;
321}
322
323int mesh_add_ds_params_ie(struct sk_buff *skb,
324 struct ieee80211_sub_if_data *sdata)
325{
326 struct ieee80211_local *local = sdata->local;
327 struct ieee80211_supported_band *sband;
328 u8 *pos;
329
330 if (skb_tailroom(skb) < 3)
331 return -ENOMEM;
332
333 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
334 if (sband->band == IEEE80211_BAND_2GHZ) {
335 pos = skb_put(skb, 2 + 1);
336 *pos++ = WLAN_EID_DS_PARAMS;
337 *pos++ = 1;
338 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
339 }
340
341 return 0;
342}
290 343
291static void ieee80211_mesh_path_timer(unsigned long data) 344static void ieee80211_mesh_path_timer(unsigned long data)
292{ 345{
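
The mesh_add_*_ie() helpers introduced above all share one pattern: verify skb tailroom for the whole element (two header bytes plus the body), claim the space, then emit Element ID, length, and payload in order, returning -ENOMEM when the frame is full. A minimal userspace sketch of that TLV-append pattern follows; the buffer type and names are illustrative, not mac80211 API, and only the Mesh ID element number (114) comes from the 802.11 element table.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct buf {
	uint8_t data[256];
	size_t len;
};

/* Append one information element: 1-byte ID, 1-byte length, payload.
 * Fails up front when the buffer lacks room, mirroring the -ENOMEM
 * checks in the mesh_add_*_ie() helpers. */
static int buf_add_ie(struct buf *b, uint8_t eid, const uint8_t *body,
		      uint8_t body_len)
{
	if (sizeof(b->data) - b->len < (size_t)(2 + body_len))
		return -1;	/* stands in for -ENOMEM */
	b->data[b->len++] = eid;
	b->data[b->len++] = body_len;
	memcpy(b->data + b->len, body, body_len);
	b->len += body_len;
	return 0;
}

int main(void)
{
	struct buf b = { .len = 0 };
	const uint8_t mesh_id[] = "example-mesh";	/* illustrative body */

	if (buf_add_ie(&b, 114 /* Mesh ID element */, mesh_id,
		       sizeof(mesh_id) - 1))
		return 1;
	printf("frame is now %zu bytes\n", b.len);
	return 0;
}
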
@@ -352,8 +405,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
352 memcpy(hdr->addr3, meshsa, ETH_ALEN); 405 memcpy(hdr->addr3, meshsa, ETH_ALEN);
353 return 24; 406 return 24;
354 } else { 407 } else {
355 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | 408 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
356 IEEE80211_FCTL_TODS);
357 /* RA TA DA SA */ 409 /* RA TA DA SA */
358 memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ 410 memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
359 memcpy(hdr->addr2, meshsa, ETH_ALEN); 411 memcpy(hdr->addr2, meshsa, ETH_ALEN);
@@ -425,7 +477,8 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
425 477
426 mesh_path_tx_root_frame(sdata); 478 mesh_path_tx_root_frame(sdata);
427 mod_timer(&ifmsh->mesh_path_root_timer, 479 mod_timer(&ifmsh->mesh_path_root_timer,
428 round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL)); 480 round_jiffies(TU_TO_EXP_TIME(
481 ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
429} 482}
430 483
431#ifdef CONFIG_PM 484#ifdef CONFIG_PM
@@ -433,7 +486,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
433{ 486{
434 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 487 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
435 488
436 /* use atomic bitops in case both timers fire at the same time */ 489 /* use atomic bitops in case all timers fire at the same time */
437 490
438 if (del_timer_sync(&ifmsh->housekeeping_timer)) 491 if (del_timer_sync(&ifmsh->housekeeping_timer))
439 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); 492 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -557,11 +610,18 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
557 struct ieee80211_rx_status *rx_status) 610 struct ieee80211_rx_status *rx_status)
558{ 611{
559 switch (mgmt->u.action.category) { 612 switch (mgmt->u.action.category) {
560 case WLAN_CATEGORY_MESH_ACTION: 613 case WLAN_CATEGORY_SELF_PROTECTED:
561 mesh_rx_plink_frame(sdata, mgmt, len, rx_status); 614 switch (mgmt->u.action.u.self_prot.action_code) {
615 case WLAN_SP_MESH_PEERING_OPEN:
616 case WLAN_SP_MESH_PEERING_CLOSE:
617 case WLAN_SP_MESH_PEERING_CONFIRM:
618 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
619 break;
620 }
562 break; 621 break;
563 case WLAN_CATEGORY_MESH_PATH_SEL: 622 case WLAN_CATEGORY_MESH_ACTION:
564 mesh_rx_path_sel_frame(sdata, mgmt, len); 623 if (mesh_action_is_path_sel(mgmt))
624 mesh_rx_path_sel_frame(sdata, mgmt, len);
565 break; 625 break;
566 } 626 }
567} 627}
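
The dispatch rework above is a two-level switch: the action category selects the subsystem (self-protected for peering, mesh action for HWMP path selection), and the action code selects the handler. A hedged sketch of the same shape, with invented enum values standing in for the real ieee80211.h constants:

#include <stdio.h>

/* Illustrative stand-ins for the ieee80211.h constants. */
enum category { CAT_SELF_PROTECTED, CAT_MESH_ACTION };
enum sp_action { SP_PEERING_OPEN, SP_PEERING_CONFIRM, SP_PEERING_CLOSE };
enum mesh_action { MESH_HWMP_PATH_SELECTION, MESH_OTHER };

static void dispatch(enum category cat, int action)
{
	switch (cat) {
	case CAT_SELF_PROTECTED:
		/* peering frames: open/confirm/close only */
		switch (action) {
		case SP_PEERING_OPEN:
		case SP_PEERING_CONFIRM:
		case SP_PEERING_CLOSE:
			puts("peering frame -> plink handler");
			break;
		}
		break;
	case CAT_MESH_ACTION:
		/* only path selection is handled here */
		if (action == MESH_HWMP_PATH_SELECTION)
			puts("path selection frame -> HWMP handler");
		break;
	}
}

int main(void)
{
	dispatch(CAT_SELF_PROTECTED, SP_PEERING_OPEN);
	dispatch(CAT_MESH_ACTION, MESH_HWMP_PATH_SELECTION);
	return 0;
}
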
@@ -633,6 +693,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
633 ifmsh->accepting_plinks = true; 693 ifmsh->accepting_plinks = true;
634 ifmsh->preq_id = 0; 694 ifmsh->preq_id = 0;
635 ifmsh->sn = 0; 695 ifmsh->sn = 0;
696 ifmsh->num_gates = 0;
636 atomic_set(&ifmsh->mpaths, 0); 697 atomic_set(&ifmsh->mpaths, 0);
637 mesh_rmc_init(sdata); 698 mesh_rmc_init(sdata);
638 ifmsh->last_preq = jiffies; 699 ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 249e733362e7..8c00e2d1d636 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,10 @@ enum mesh_deferred_task_flags {
80 * retry 80 * retry
81 * @discovery_retries: number of discovery retries 81 * @discovery_retries: number of discovery retries
82 * @flags: mesh path flags, as specified on &enum mesh_path_flags 82 * @flags: mesh path flags, as specified on &enum mesh_path_flags
83 * @state_lock: mesh path state lock 83 * @state_lock: mesh path state lock used to protect changes to the
84 * mpath itself. No need to take this lock when adding or removing
85 * an mpath to a hash bucket on a path table.
86 * @is_gate: the destination station of this path is a mesh gate
84 * 87 *
85 * 88 *
86 * The combination of dst and sdata is unique in the mesh path table. Since the 89 * The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +107,7 @@ struct mesh_path {
104 u8 discovery_retries; 107 u8 discovery_retries;
105 enum mesh_path_flags flags; 108 enum mesh_path_flags flags;
106 spinlock_t state_lock; 109 spinlock_t state_lock;
110 bool is_gate;
107}; 111};
108 112
109/** 113/**
@@ -120,6 +124,9 @@ struct mesh_path {
120 * buckets 124 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 125 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
122 * reached, the table will grow 126 * reached, the table will grow
127 * @known_gates: list of known mesh gates and their mpaths, as tracked by this station. The
128 * gate's mpath may or may not be resolved and active.
129 *
123 * rcu_head: RCU head to free the table 130 * rcu_head: RCU head to free the table
124 */ 131 */
125struct mesh_table { 132struct mesh_table {
@@ -133,6 +140,8 @@ struct mesh_table {
133 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 140 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
134 int size_order; 141 int size_order;
135 int mean_chain_len; 142 int mean_chain_len;
143 struct hlist_head *known_gates;
144 spinlock_t gates_lock;
136 145
137 struct rcu_head rcu_head; 146 struct rcu_head rcu_head;
138}; 147};
@@ -166,6 +175,8 @@ struct mesh_rmc {
166 u32 idx_mask; 175 u32 idx_mask;
167}; 176};
168 177
178#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
179#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
169 180
170#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */ 181#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
171 182
@@ -177,14 +188,6 @@ struct mesh_rmc {
177/* Maximum number of paths per interface */ 188/* Maximum number of paths per interface */
178#define MESH_MAX_MPATHS 1024 189#define MESH_MAX_MPATHS 1024
179 190
180/* Pending ANA approval */
181#define MESH_PATH_SEL_ACTION 0
182
183/* PERR reason codes */
184#define PEER_RCODE_UNSPECIFIED 11
185#define PERR_RCODE_NO_ROUTE 12
186#define PERR_RCODE_DEST_UNREACH 13
187
188/* Public interfaces */ 191/* Public interfaces */
189/* Various */ 192/* Various */
190int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 193int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -199,6 +202,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie,
199void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); 202void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
200void mesh_mgmt_ies_add(struct sk_buff *skb, 203void mesh_mgmt_ies_add(struct sk_buff *skb,
201 struct ieee80211_sub_if_data *sdata); 204 struct ieee80211_sub_if_data *sdata);
205int mesh_add_meshconf_ie(struct sk_buff *skb,
206 struct ieee80211_sub_if_data *sdata);
207int mesh_add_meshid_ie(struct sk_buff *skb,
208 struct ieee80211_sub_if_data *sdata);
209int mesh_add_rsn_ie(struct sk_buff *skb,
210 struct ieee80211_sub_if_data *sdata);
211int mesh_add_vendor_ies(struct sk_buff *skb,
212 struct ieee80211_sub_if_data *sdata);
213int mesh_add_ds_params_ie(struct sk_buff *skb,
214 struct ieee80211_sub_if_data *sdata);
202void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 215void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
203int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 216int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
204void ieee80211s_init(void); 217void ieee80211s_init(void);
@@ -223,10 +236,13 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx,
223 struct ieee80211_sub_if_data *sdata); 236 struct ieee80211_sub_if_data *sdata);
224void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 237void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
225void mesh_path_expire(struct ieee80211_sub_if_data *sdata); 238void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
226void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
227void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 239void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
228 struct ieee80211_mgmt *mgmt, size_t len); 240 struct ieee80211_mgmt *mgmt, size_t len);
229int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 241int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
242
243int mesh_path_add_gate(struct mesh_path *mpath);
244int mesh_path_send_to_gates(struct mesh_path *mpath);
245int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
230/* Mesh plinks */ 246/* Mesh plinks */
231void mesh_neighbour_update(u8 *hw_addr, u32 rates, 247void mesh_neighbour_update(u8 *hw_addr, u32 rates,
232 struct ieee80211_sub_if_data *sdata, 248 struct ieee80211_sub_if_data *sdata,
@@ -256,12 +272,14 @@ void mesh_pathtbl_unregister(void);
256int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); 272int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
257void mesh_path_timer(unsigned long data); 273void mesh_path_timer(unsigned long data);
258void mesh_path_flush_by_nexthop(struct sta_info *sta); 274void mesh_path_flush_by_nexthop(struct sta_info *sta);
275void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
259void mesh_path_discard_frame(struct sk_buff *skb, 276void mesh_path_discard_frame(struct sk_buff *skb,
260 struct ieee80211_sub_if_data *sdata); 277 struct ieee80211_sub_if_data *sdata);
261void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 278void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
262void mesh_path_restart(struct ieee80211_sub_if_data *sdata); 279void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
263void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); 280void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
264 281
282bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
265extern int mesh_paths_generation; 283extern int mesh_paths_generation;
266 284
267#ifdef CONFIG_MAC80211_MESH 285#ifdef CONFIG_MAC80211_MESH
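
The header changes split gate state across two locks: each mesh_path's is_gate flag lives under its own state_lock, while membership on the table-wide known_gates list is guarded by gates_lock. A compact pthread sketch of that lock-granularity split (all names illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct path {
	pthread_mutex_t state_lock;	/* guards this path's own state */
	bool is_gate;
	struct path *next_gate;		/* linkage on the table's gate list */
};

struct table {
	pthread_mutex_t gates_lock;	/* guards gate-list membership only */
	struct path *known_gates;
	int num_gates;
};

/* Mark a path as a gate and publish it on the gate list: per-entry
 * state and list membership are protected by different locks, the
 * split the new state_lock comment describes. */
static void add_gate(struct table *t, struct path *p)
{
	pthread_mutex_lock(&p->state_lock);
	p->is_gate = true;
	pthread_mutex_unlock(&p->state_lock);

	pthread_mutex_lock(&t->gates_lock);
	p->next_gate = t->known_gates;
	t->known_gates = p;
	t->num_gates++;
	pthread_mutex_unlock(&t->gates_lock);
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	struct path p = { PTHREAD_MUTEX_INITIALIZER, false, NULL };

	add_gate(&t, &p);
	return 0;
}
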
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3460108810d5..174040a42887 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,10 +8,12 @@
8 */ 8 */
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include "wme.h"
11#include "mesh.h" 12#include "mesh.h"
12 13
13#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG 14#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
14#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args) 15#define mhwmp_dbg(fmt, args...) \
16 printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
15#else 17#else
16#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0) 18#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
17#endif 19#endif
@@ -68,12 +70,12 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
68#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) 70#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
69#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) 71#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
70#define PREP_IE_TTL(x) PREQ_IE_TTL(x) 72#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
71#define PREP_IE_ORIG_ADDR(x) (x + 3) 73#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
72#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0) 74#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
73#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)) 75#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
74#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)) 76#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
75#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) 77#define PREP_IE_TARGET_ADDR(x) (x + 3)
76#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x)) 78#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
77 79
78#define PERR_IE_TTL(x) (*(x)) 80#define PERR_IE_TTL(x) (*(x))
79#define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) 81#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
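
The PREP macro swap above matches the final 802.11s element layout: the target address and sequence number sit at fixed offsets near the front, while the originator fields sit behind the optional 6-byte external address, so only they shift when the Address Extension (AE) flag is set. A standalone sketch of flag-dependent offsets; the byte positions mirror the macros in this hunk and the 0x40 bit mirrors AE_F in mesh_hwmp.c, but the helper functions themselves are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AE_FLAG 0x40	/* mirrors AE_F; the flags byte leads the body */

static int ae_set(const uint8_t *elem) { return elem[0] & AE_FLAG; }

/* Target fields: fixed offsets near the front of the PREP body. */
static const uint8_t *prep_target_addr(const uint8_t *e) { return e + 3; }

/* Originator fields: behind the optional 6-byte external address,
 * so both move by 6 when AE is set. */
static const uint8_t *prep_orig_addr(const uint8_t *e)
{
	return e + (ae_set(e) ? 27 : 21);
}

static uint32_t prep_orig_sn(const uint8_t *e)
{
	uint32_t v;
	memcpy(&v, e + (ae_set(e) ? 33 : 27), 4);	/* unaligned-safe */
	return v;
}

int main(void)
{
	uint8_t elem[40] = { AE_FLAG };	/* pretend AE is set */

	printf("orig addr at offset %ld\n",
	       (long)(prep_orig_addr(elem) - elem));	/* prints 27 */
	printf("orig SN = %u\n", (unsigned)prep_orig_sn(elem));
	(void)prep_target_addr(elem);
	return 0;
}
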
@@ -132,24 +134,25 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
132 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 134 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
133 /* BSSID == SA */ 135 /* BSSID == SA */
134 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 136 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
135 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; 137 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
136 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 138 mgmt->u.action.u.mesh_action.action_code =
139 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
137 140
138 switch (action) { 141 switch (action) {
139 case MPATH_PREQ: 142 case MPATH_PREQ:
140 mhwmp_dbg("sending PREQ to %pM\n", target); 143 mhwmp_dbg("sending PREQ to %pM", target);
141 ie_len = 37; 144 ie_len = 37;
142 pos = skb_put(skb, 2 + ie_len); 145 pos = skb_put(skb, 2 + ie_len);
143 *pos++ = WLAN_EID_PREQ; 146 *pos++ = WLAN_EID_PREQ;
144 break; 147 break;
145 case MPATH_PREP: 148 case MPATH_PREP:
146 mhwmp_dbg("sending PREP to %pM\n", target); 149 mhwmp_dbg("sending PREP to %pM", target);
147 ie_len = 31; 150 ie_len = 31;
148 pos = skb_put(skb, 2 + ie_len); 151 pos = skb_put(skb, 2 + ie_len);
149 *pos++ = WLAN_EID_PREP; 152 *pos++ = WLAN_EID_PREP;
150 break; 153 break;
151 case MPATH_RANN: 154 case MPATH_RANN:
152 mhwmp_dbg("sending RANN from %pM\n", orig_addr); 155 mhwmp_dbg("sending RANN from %pM", orig_addr);
153 ie_len = sizeof(struct ieee80211_rann_ie); 156 ie_len = sizeof(struct ieee80211_rann_ie);
154 pos = skb_put(skb, 2 + ie_len); 157 pos = skb_put(skb, 2 + ie_len);
155 *pos++ = WLAN_EID_RANN; 158 *pos++ = WLAN_EID_RANN;
@@ -163,35 +166,63 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
163 *pos++ = flags; 166 *pos++ = flags;
164 *pos++ = hop_count; 167 *pos++ = hop_count;
165 *pos++ = ttl; 168 *pos++ = ttl;
166 if (action == MPATH_PREQ) { 169 if (action == MPATH_PREP) {
167 memcpy(pos, &preq_id, 4); 170 memcpy(pos, target, ETH_ALEN);
171 pos += ETH_ALEN;
172 memcpy(pos, &target_sn, 4);
168 pos += 4; 173 pos += 4;
169 } 174 } else {
170 memcpy(pos, orig_addr, ETH_ALEN); 175 if (action == MPATH_PREQ) {
171 pos += ETH_ALEN; 176 memcpy(pos, &preq_id, 4);
172 memcpy(pos, &orig_sn, 4); 177 pos += 4;
173 pos += 4; 178 }
174 if (action != MPATH_RANN) { 179 memcpy(pos, orig_addr, ETH_ALEN);
175 memcpy(pos, &lifetime, 4); 180 pos += ETH_ALEN;
181 memcpy(pos, &orig_sn, 4);
176 pos += 4; 182 pos += 4;
177 } 183 }
184 memcpy(pos, &lifetime, 4); /* interval for RANN */
185 pos += 4;
178 memcpy(pos, &metric, 4); 186 memcpy(pos, &metric, 4);
179 pos += 4; 187 pos += 4;
180 if (action == MPATH_PREQ) { 188 if (action == MPATH_PREQ) {
181 /* destination count */ 189 *pos++ = 1; /* destination count */
182 *pos++ = 1;
183 *pos++ = target_flags; 190 *pos++ = target_flags;
184 }
185 if (action != MPATH_RANN) {
186 memcpy(pos, target, ETH_ALEN); 191 memcpy(pos, target, ETH_ALEN);
187 pos += ETH_ALEN; 192 pos += ETH_ALEN;
188 memcpy(pos, &target_sn, 4); 193 memcpy(pos, &target_sn, 4);
194 pos += 4;
195 } else if (action == MPATH_PREP) {
196 memcpy(pos, orig_addr, ETH_ALEN);
197 pos += ETH_ALEN;
198 memcpy(pos, &orig_sn, 4);
199 pos += 4;
189 } 200 }
190 201
191 ieee80211_tx_skb(sdata, skb); 202 ieee80211_tx_skb(sdata, skb);
192 return 0; 203 return 0;
193} 204}
194 205
206
207/* Headroom is not adjusted. Caller should ensure that skb has sufficient
208 * headroom in case the frame is encrypted. */
209static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
210 struct sk_buff *skb)
211{
212 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
213
214 skb_set_mac_header(skb, 0);
215 skb_set_network_header(skb, 0);
216 skb_set_transport_header(skb, 0);
217
218 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
219 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
220 skb->priority = 7;
221
222 info->control.vif = &sdata->vif;
223 ieee80211_set_qos_hdr(sdata, skb);
224}
225
195/** 226/**
196 * mesh_path_error_tx - Sends a PERR mesh management frame 227
197 * 228 *
@@ -199,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
199 * @target_sn: SN of the broken destination 230 * @target_sn: SN of the broken destination
200 * @target_rcode: reason code for this PERR 231 * @target_rcode: reason code for this PERR
201 * @ra: node this frame is addressed to 232 * @ra: node this frame is addressed to
233 *
234 * Note: This function may be called with driver locks taken that the driver
235 * also acquires in the TX path. To avoid a deadlock we don't transmit the
236 * frame directly but add it to the pending queue instead.
202 */ 237 */
203int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, 238int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
204 __le16 target_rcode, const u8 *ra, 239 __le16 target_rcode, const u8 *ra,
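
The note added above carries the design decision: because mesh_path_error_tx() may run under driver locks that the TX path also takes, the PERR frame is parked on the pending queue and transmitted later from a context that holds no such locks. A minimal userspace analogy of defer-instead-of-transmit (the queue and all names are invented for illustration):

#include <stdio.h>

#define PENDING_MAX 8

static const char *pending[PENDING_MAX];
static int npending;

/* Instead of calling a transmit routine here (which would re-take
 * locks the caller may already hold), just park the frame. */
static int tx_deferred(const char *frame)
{
	if (npending == PENDING_MAX)
		return -1;
	pending[npending++] = frame;
	return 0;
}

/* Runs later, from a context that holds no driver locks. */
static void flush_pending(void)
{
	for (int i = 0; i < npending; i++)
		printf("tx: %s\n", pending[i]);
	npending = 0;
}

int main(void)
{
	tx_deferred("PERR to broken destination");
	flush_pending();
	return 0;
}
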
@@ -212,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
212 247
213 if (!skb) 248 if (!skb)
214 return -1; 249 return -1;
215 skb_reserve(skb, local->hw.extra_tx_headroom); 250 skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
216 /* 25 is the size of the common mgmt part (24) plus the size of the 251 /* 25 is the size of the common mgmt part (24) plus the size of the
217 * common action part (1) 252 * common action part (1)
218 */ 253 */
@@ -224,9 +259,11 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
224 259
225 memcpy(mgmt->da, ra, ETH_ALEN); 260 memcpy(mgmt->da, ra, ETH_ALEN);
226 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 261 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
227 /* BSSID is left zeroed, wildcard value */ 262 /* BSSID == SA */
228 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; 263 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
229 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 264 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
265 mgmt->u.action.u.mesh_action.action_code =
266 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
230 ie_len = 15; 267 ie_len = 15;
231 pos = skb_put(skb, 2 + ie_len); 268 pos = skb_put(skb, 2 + ie_len);
232 *pos++ = WLAN_EID_PERR; 269 *pos++ = WLAN_EID_PERR;
@@ -251,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
251 pos += 4; 288 pos += 4;
252 memcpy(pos, &target_rcode, 2); 289 memcpy(pos, &target_rcode, 2);
253 290
254 ieee80211_tx_skb(sdata, skb); 291 /* see note in function header */
292 prepare_frame_for_deferred_tx(sdata, skb);
293 ieee80211_add_pending_skb(local, skb);
255 return 0; 294 return 0;
256} 295}
257 296
@@ -449,7 +488,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
449 488
450 if (fresh_info) { 489 if (fresh_info) {
451 mesh_path_assign_nexthop(mpath, sta); 490 mesh_path_assign_nexthop(mpath, sta);
452 mpath->flags &= ~MESH_PATH_SN_VALID;
453 mpath->metric = last_hop_metric; 491 mpath->metric = last_hop_metric;
454 mpath->exp_time = time_after(mpath->exp_time, exp_time) 492 mpath->exp_time = time_after(mpath->exp_time, exp_time)
455 ? mpath->exp_time : exp_time; 493 ? mpath->exp_time : exp_time;
@@ -484,10 +522,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
484 orig_sn = PREQ_IE_ORIG_SN(preq_elem); 522 orig_sn = PREQ_IE_ORIG_SN(preq_elem);
485 target_flags = PREQ_IE_TARGET_F(preq_elem); 523 target_flags = PREQ_IE_TARGET_F(preq_elem);
486 524
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 525 mhwmp_dbg("received PREQ from %pM", orig_addr);
488 526
489 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) { 527 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 528 mhwmp_dbg("PREQ is for us");
491 forward = false; 529 forward = false;
492 reply = true; 530 reply = true;
493 metric = 0; 531 metric = 0;
@@ -523,7 +561,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
523 lifetime = PREQ_IE_LIFETIME(preq_elem); 561 lifetime = PREQ_IE_LIFETIME(preq_elem);
524 ttl = ifmsh->mshcfg.element_ttl; 562 ttl = ifmsh->mshcfg.element_ttl;
525 if (ttl != 0) { 563 if (ttl != 0) {
526 mhwmp_dbg("replying to the PREQ\n"); 564 mhwmp_dbg("replying to the PREQ");
527 mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr, 565 mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
528 cpu_to_le32(target_sn), 0, orig_addr, 566 cpu_to_le32(target_sn), 0, orig_addr,
529 cpu_to_le32(orig_sn), mgmt->sa, 0, ttl, 567 cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
@@ -543,7 +581,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
543 ifmsh->mshstats.dropped_frames_ttl++; 581 ifmsh->mshstats.dropped_frames_ttl++;
544 return; 582 return;
545 } 583 }
546 mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr); 584 mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
547 --ttl; 585 --ttl;
548 flags = PREQ_IE_FLAGS(preq_elem); 586 flags = PREQ_IE_FLAGS(preq_elem);
549 preq_id = PREQ_IE_PREQ_ID(preq_elem); 587 preq_id = PREQ_IE_PREQ_ID(preq_elem);
@@ -578,7 +616,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
578 u8 next_hop[ETH_ALEN]; 616 u8 next_hop[ETH_ALEN];
579 u32 target_sn, orig_sn, lifetime; 617 u32 target_sn, orig_sn, lifetime;
580 618
581 mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem)); 619 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
582 620
583 /* Note that we divert from the draft nomenclature and denominate 621 /* Note that we divert from the draft nomenclature and denominate
584 * destination to what the draft refers to as originator. So in this 622
@@ -684,6 +722,8 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
684 u8 ttl, flags, hopcount; 722 u8 ttl, flags, hopcount;
685 u8 *orig_addr; 723 u8 *orig_addr;
686 u32 orig_sn, metric; 724 u32 orig_sn, metric;
725 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
726 bool root_is_gate;
687 727
688 ttl = rann->rann_ttl; 728 ttl = rann->rann_ttl;
689 if (ttl <= 1) { 729 if (ttl <= 1) {
@@ -692,12 +732,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
692 } 732 }
693 ttl--; 733 ttl--;
694 flags = rann->rann_flags; 734 flags = rann->rann_flags;
735 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
695 orig_addr = rann->rann_addr; 736 orig_addr = rann->rann_addr;
696 orig_sn = rann->rann_seq; 737 orig_sn = rann->rann_seq;
697 hopcount = rann->rann_hopcount; 738 hopcount = rann->rann_hopcount;
698 hopcount++; 739 hopcount++;
699 metric = rann->rann_metric; 740 metric = rann->rann_metric;
700 mhwmp_dbg("received RANN from %pM\n", orig_addr); 741
742 /* Ignore our own RANNs */
743 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
744 return;
745
746 mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
747 root_is_gate);
701 748
702 rcu_read_lock(); 749 rcu_read_lock();
703 mpath = mesh_path_lookup(orig_addr, sdata); 750 mpath = mesh_path_lookup(orig_addr, sdata);
@@ -709,18 +756,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
709 sdata->u.mesh.mshstats.dropped_frames_no_route++; 756 sdata->u.mesh.mshstats.dropped_frames_no_route++;
710 return; 757 return;
711 } 758 }
712 mesh_queue_preq(mpath,
713 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
714 } 759 }
760
761 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
762 time_after(jiffies, mpath->exp_time - 1*HZ)) &&
763 !(mpath->flags & MESH_PATH_FIXED)) {
764 mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
765 orig_addr);
766 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
767 }
768
715 if (mpath->sn < orig_sn) { 769 if (mpath->sn < orig_sn) {
716 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 770 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
717 cpu_to_le32(orig_sn), 771 cpu_to_le32(orig_sn),
718 0, NULL, 0, broadcast_addr, 772 0, NULL, 0, broadcast_addr,
719 hopcount, ttl, 0, 773 hopcount, ttl, cpu_to_le32(interval),
720 cpu_to_le32(metric + mpath->metric), 774 cpu_to_le32(metric + mpath->metric),
721 0, sdata); 775 0, sdata);
722 mpath->sn = orig_sn; 776 mpath->sn = orig_sn;
723 } 777 }
778 if (root_is_gate)
779 mesh_path_add_gate(mpath);
780
724 rcu_read_unlock(); 781 rcu_read_unlock();
725} 782}
726 783
@@ -732,11 +789,20 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
732 struct ieee802_11_elems elems; 789 struct ieee802_11_elems elems;
733 size_t baselen; 790 size_t baselen;
734 u32 last_hop_metric; 791 u32 last_hop_metric;
792 struct sta_info *sta;
735 793
736 /* need action_code */ 794 /* need action_code */
737 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 795 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
738 return; 796 return;
739 797
798 rcu_read_lock();
799 sta = sta_info_get(sdata, mgmt->sa);
800 if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
801 rcu_read_unlock();
802 return;
803 }
804 rcu_read_unlock();
805
740 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; 806 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
741 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, 807 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
742 len - baselen, &elems); 808 len - baselen, &elems);
@@ -788,16 +854,16 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
788 854
789 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); 855 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
790 if (!preq_node) { 856 if (!preq_node) {
791 mhwmp_dbg("could not allocate PREQ node\n"); 857 mhwmp_dbg("could not allocate PREQ node");
792 return; 858 return;
793 } 859 }
794 860
795 spin_lock(&ifmsh->mesh_preq_queue_lock); 861 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
796 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { 862 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
797 spin_unlock(&ifmsh->mesh_preq_queue_lock); 863 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
798 kfree(preq_node); 864 kfree(preq_node);
799 if (printk_ratelimit()) 865 if (printk_ratelimit())
800 mhwmp_dbg("PREQ node queue full\n"); 866 mhwmp_dbg("PREQ node queue full");
801 return; 867 return;
802 } 868 }
803 869
@@ -806,7 +872,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
806 872
807 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); 873 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
808 ++ifmsh->preq_queue_len; 874 ++ifmsh->preq_queue_len;
809 spin_unlock(&ifmsh->mesh_preq_queue_lock); 875 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
810 876
811 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) 877 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
812 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 878 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
@@ -982,35 +1048,46 @@ void mesh_path_timer(unsigned long data)
982{ 1048{
983 struct mesh_path *mpath = (void *) data; 1049 struct mesh_path *mpath = (void *) data;
984 struct ieee80211_sub_if_data *sdata = mpath->sdata; 1050 struct ieee80211_sub_if_data *sdata = mpath->sdata;
1051 int ret;
985 1052
986 if (sdata->local->quiescing) 1053 if (sdata->local->quiescing)
987 return; 1054 return;
988 1055
989 spin_lock_bh(&mpath->state_lock); 1056 spin_lock_bh(&mpath->state_lock);
990 if (mpath->flags & MESH_PATH_RESOLVED || 1057 if (mpath->flags & MESH_PATH_RESOLVED ||
991 (!(mpath->flags & MESH_PATH_RESOLVING))) 1058 (!(mpath->flags & MESH_PATH_RESOLVING))) {
992 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 1059 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
993 else if (mpath->discovery_retries < max_preq_retries(sdata)) { 1060 spin_unlock_bh(&mpath->state_lock);
1061 } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
994 ++mpath->discovery_retries; 1062 ++mpath->discovery_retries;
995 mpath->discovery_timeout *= 2; 1063 mpath->discovery_timeout *= 2;
1064 spin_unlock_bh(&mpath->state_lock);
996 mesh_queue_preq(mpath, 0); 1065 mesh_queue_preq(mpath, 0);
997 } else { 1066 } else {
998 mpath->flags = 0; 1067 mpath->flags = 0;
999 mpath->exp_time = jiffies; 1068 mpath->exp_time = jiffies;
1000 mesh_path_flush_pending(mpath); 1069 spin_unlock_bh(&mpath->state_lock);
1070 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1071 ret = mesh_path_send_to_gates(mpath);
1072 if (ret)
1073 mhwmp_dbg("no gate was reachable");
1074 } else
1075 mesh_path_flush_pending(mpath);
1001 } 1076 }
1002
1003 spin_unlock_bh(&mpath->state_lock);
1004} 1077}
1005 1078
1006void 1079void
1007mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) 1080mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1008{ 1081{
1009 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1082 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1083 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1084 u8 flags;
1010 1085
1011 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr, 1086 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1087 ? RANN_FLAG_IS_GATE : 0;
1088 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1012 cpu_to_le32(++ifmsh->sn), 1089 cpu_to_le32(++ifmsh->sn),
1013 0, NULL, 0, broadcast_addr, 1090 0, NULL, 0, broadcast_addr,
1014 0, sdata->u.mesh.mshcfg.element_ttl, 1091 0, sdata->u.mesh.mshcfg.element_ttl,
1015 0, 0, 0, sdata); 1092 cpu_to_le32(interval), 0, 0, sdata);
1016} 1093}
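
The mesh_path_timer() restructuring above applies a standard rule: decide under mpath->state_lock, but release it before calling mesh_queue_preq() or the gate forwarding path, both of which take further locks. A small pthread sketch of drop-the-lock-before-calling-out (types and names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int retries;

static void queue_preq(void)
{
	pthread_mutex_lock(&queue_lock);	/* a second, unrelated lock */
	puts("PREQ queued");
	pthread_mutex_unlock(&queue_lock);
}

static void path_timer(void)
{
	int r;

	pthread_mutex_lock(&state_lock);
	r = ++retries;
	/* Decide under the lock, then release it before calling out:
	 * queue_preq() takes queue_lock, and holding state_lock across
	 * that call would nest the two locks for no reason. */
	pthread_mutex_unlock(&state_lock);

	printf("retry %d -> queueing PREQ\n", r);
	queue_preq();
}

int main(void)
{
	path_timer();
	return 0;
}
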
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee6518254..7f54c5042235 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <net/mac80211.h> 16#include <net/mac80211.h>
17#include "wme.h"
17#include "ieee80211_i.h" 18#include "ieee80211_i.h"
18#include "mesh.h" 19#include "mesh.h"
19 20
21#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
22#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
23#else
24#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
25#endif
26
20/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ 27/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
21#define INIT_PATHS_SIZE_ORDER 2 28#define INIT_PATHS_SIZE_ORDER 2
22 29
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
42int mesh_paths_generation; 49int mesh_paths_generation;
43 50
44/* This lock will have the grow table function as writer and add / delete nodes 51/* This lock will have the grow table function as writer and add / delete nodes
45 * as readers. When reading the table (i.e. doing lookups) we are well protected 52 * as readers. RCU provides sufficient protection only when reading the table
46 * by RCU 53 * (i.e. doing lookups). Adding or removing nodes requires that we take
54 * the read lock or we risk operating on an old table. The write lock is only
55 * needed when modifying the number of buckets in a table.
47 */ 56 */
48static DEFINE_RWLOCK(pathtbl_resize_lock); 57static DEFINE_RWLOCK(pathtbl_resize_lock);
49 58
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
60 lockdep_is_held(&pathtbl_resize_lock)); 69 lockdep_is_held(&pathtbl_resize_lock));
61} 70}
62 71
72static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
73
63/* 74/*
64 * CAREFUL -- "tbl" must not be an expression, 75 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since 76 * in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
103 sizeof(newtbl->hash_rnd)); 114 sizeof(newtbl->hash_rnd));
104 for (i = 0; i <= newtbl->hash_mask; i++) 115 for (i = 0; i <= newtbl->hash_mask; i++)
105 spin_lock_init(&newtbl->hashwlock[i]); 116 spin_lock_init(&newtbl->hashwlock[i]);
117 spin_lock_init(&newtbl->gates_lock);
106 118
107 return newtbl; 119 return newtbl;
108} 120}
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
118{ 130{
119 struct hlist_head *mesh_hash; 131 struct hlist_head *mesh_hash;
120 struct hlist_node *p, *q; 132 struct hlist_node *p, *q;
133 struct mpath_node *gate;
121 int i; 134 int i;
122 135
123 mesh_hash = tbl->hash_buckets; 136 mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
129 } 142 }
130 spin_unlock_bh(&tbl->hashwlock[i]); 143 spin_unlock_bh(&tbl->hashwlock[i]);
131 } 144 }
145 if (free_leafs) {
146 spin_lock_bh(&tbl->gates_lock);
147 hlist_for_each_entry_safe(gate, p, q,
148 tbl->known_gates, list) {
149 hlist_del(&gate->list);
150 kfree(gate);
151 }
152 kfree(tbl->known_gates);
153 spin_unlock_bh(&tbl->gates_lock);
154 }
155
132 __mesh_table_free(tbl); 156 __mesh_table_free(tbl);
133} 157}
134 158
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
146 newtbl->free_node = oldtbl->free_node; 170 newtbl->free_node = oldtbl->free_node;
147 newtbl->mean_chain_len = oldtbl->mean_chain_len; 171 newtbl->mean_chain_len = oldtbl->mean_chain_len;
148 newtbl->copy_node = oldtbl->copy_node; 172 newtbl->copy_node = oldtbl->copy_node;
173 newtbl->known_gates = oldtbl->known_gates;
149 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); 174 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
150 175
151 oldhash = oldtbl->hash_buckets; 176 oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
188 struct ieee80211_hdr *hdr; 213 struct ieee80211_hdr *hdr;
189 struct sk_buff_head tmpq; 214 struct sk_buff_head tmpq;
190 unsigned long flags; 215 unsigned long flags;
216 struct ieee80211_sub_if_data *sdata = mpath->sdata;
191 217
192 rcu_assign_pointer(mpath->next_hop, sta); 218 rcu_assign_pointer(mpath->next_hop, sta);
193 219
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
198 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { 224 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
199 hdr = (struct ieee80211_hdr *) skb->data; 225 hdr = (struct ieee80211_hdr *) skb->data;
200 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 226 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
227 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
228 ieee80211_set_qos_hdr(sdata, skb);
201 __skb_queue_tail(&tmpq, skb); 229 __skb_queue_tail(&tmpq, skb);
202 } 230 }
203 231
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
205 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 233 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
206} 234}
207 235
236static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
237 struct mesh_path *gate_mpath)
238{
239 struct ieee80211_hdr *hdr;
240 struct ieee80211s_hdr *mshdr;
241 int mesh_hdrlen, hdrlen;
242 char *next_hop;
243
244 hdr = (struct ieee80211_hdr *) skb->data;
245 hdrlen = ieee80211_hdrlen(hdr->frame_control);
246 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
247
248 if (!(mshdr->flags & MESH_FLAGS_AE)) {
249 /* size of the fixed part of the mesh header */
250 mesh_hdrlen = 6;
251
252 /* make room for the two extended addresses */
253 skb_push(skb, 2 * ETH_ALEN);
254 memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
255
256 hdr = (struct ieee80211_hdr *) skb->data;
257
258 /* we preserve the previous mesh header and only add
259 * the new addresses */
260 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
261 mshdr->flags = MESH_FLAGS_AE_A5_A6;
262 memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
263 memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
264 }
265
266 /* update next hop */
267 hdr = (struct ieee80211_hdr *) skb->data;
268 rcu_read_lock();
269 next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
270 memcpy(hdr->addr1, next_hop, ETH_ALEN);
271 rcu_read_unlock();
272 memcpy(hdr->addr3, dst_addr, ETH_ALEN);
273}
208 274
209/** 275/**
210 * mesh_path_lookup - look up a path in the mesh path table
211 * @dst: hardware address (ETH_ALEN length) of destination
212 * @sdata: local subif
213 * 276 *
214 * Returns: pointer to the mesh path structure, or NULL if not found 277 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
215 * 278 *
216 * Locking: must be called within a read rcu section. 279 * This function is used to transfer or copy frames from an unresolved mpath to
280 * a gate mpath. The function also adds the Address Extension field and
281 * updates the next hop.
282 *
283 * If a frame already has an Address Extension field, only the next hop and
284 * destination addresses are updated.
285 *
286 * The gate mpath must be an active mpath with a valid mpath->next_hop.
287 *
288 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
289 * @from_mpath: The failed mpath
290 * @copy: When true, copy all the frames to the new mpath queue. When false,
291 * move them.
217 */ 292 */
218struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 293static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
294 struct mesh_path *from_mpath,
295 bool copy)
219{ 296{
220 struct mesh_path *mpath; 297 struct sk_buff *skb, *cp_skb = NULL;
221 struct hlist_node *n; 298 struct sk_buff_head gateq, failq;
222 struct hlist_head *bucket; 299 unsigned long flags;
223 struct mesh_table *tbl; 300 int num_skbs;
224 struct mpath_node *node;
225 301
226 tbl = rcu_dereference(mesh_paths); 302 BUG_ON(gate_mpath == from_mpath);
303 BUG_ON(!gate_mpath->next_hop);
227 304
228 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; 305 __skb_queue_head_init(&gateq);
229 hlist_for_each_entry_rcu(node, n, bucket, list) { 306 __skb_queue_head_init(&failq);
230 mpath = node->mpath; 307
231 if (mpath->sdata == sdata && 308 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
232 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 309 skb_queue_splice_init(&from_mpath->frame_queue, &failq);
233 if (MPATH_EXPIRED(mpath)) { 310 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
234 spin_lock_bh(&mpath->state_lock); 311
235 if (MPATH_EXPIRED(mpath)) 312 num_skbs = skb_queue_len(&failq);
236 mpath->flags &= ~MESH_PATH_ACTIVE; 313
237 spin_unlock_bh(&mpath->state_lock); 314 while (num_skbs--) {
238 } 315 skb = __skb_dequeue(&failq);
239 return mpath; 316 if (copy) {
317 cp_skb = skb_copy(skb, GFP_ATOMIC);
318 if (cp_skb)
319 __skb_queue_tail(&failq, cp_skb);
240 } 320 }
321
322 prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
323 __skb_queue_tail(&gateq, skb);
241 } 324 }
242 return NULL; 325
326 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
327 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
328 mpath_dbg("Mpath queue for gate %pM has %d frames\n",
329 gate_mpath->dst,
330 skb_queue_len(&gate_mpath->frame_queue));
331 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
332
333 if (!copy)
334 return;
335
336 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
337 skb_queue_splice(&failq, &from_mpath->frame_queue);
338 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
243} 339}
244 340
245struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 341
342static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
343 struct ieee80211_sub_if_data *sdata)
246{ 344{
247 struct mesh_path *mpath; 345 struct mesh_path *mpath;
248 struct hlist_node *n; 346 struct hlist_node *n;
249 struct hlist_head *bucket; 347 struct hlist_head *bucket;
250 struct mesh_table *tbl;
251 struct mpath_node *node; 348 struct mpath_node *node;
252 349
253 tbl = rcu_dereference(mpp_paths);
254
255 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; 350 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
256 hlist_for_each_entry_rcu(node, n, bucket, list) { 351 hlist_for_each_entry_rcu(node, n, bucket, list) {
257 mpath = node->mpath; 352 mpath = node->mpath;
258 if (mpath->sdata == sdata && 353 if (mpath->sdata == sdata &&
259 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 354 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
260 if (MPATH_EXPIRED(mpath)) { 355 if (MPATH_EXPIRED(mpath)) {
261 spin_lock_bh(&mpath->state_lock); 356 spin_lock_bh(&mpath->state_lock);
262 if (MPATH_EXPIRED(mpath)) 357 mpath->flags &= ~MESH_PATH_ACTIVE;
263 mpath->flags &= ~MESH_PATH_ACTIVE;
264 spin_unlock_bh(&mpath->state_lock); 358 spin_unlock_bh(&mpath->state_lock);
265 } 359 }
266 return mpath; 360 return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
269 return NULL; 363 return NULL;
270} 364}
271 365
366/**
367 * mesh_path_lookup - look up a path in the mesh path table
368 * @dst: hardware address (ETH_ALEN length) of destination
369 * @sdata: local subif
370 *
371 * Returns: pointer to the mesh path structure, or NULL if not found
372 *
373 * Locking: must be called within a read rcu section.
374 */
375struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
376{
377 return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
378}
379
380struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
381{
382 return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
383}
384
272 385
273/** 386/**
274 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 387 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
293 if (j++ == idx) { 406 if (j++ == idx) {
294 if (MPATH_EXPIRED(node->mpath)) { 407 if (MPATH_EXPIRED(node->mpath)) {
295 spin_lock_bh(&node->mpath->state_lock); 408 spin_lock_bh(&node->mpath->state_lock);
296 if (MPATH_EXPIRED(node->mpath)) 409 node->mpath->flags &= ~MESH_PATH_ACTIVE;
297 node->mpath->flags &= ~MESH_PATH_ACTIVE;
298 spin_unlock_bh(&node->mpath->state_lock); 410 spin_unlock_bh(&node->mpath->state_lock);
299 } 411 }
300 return node->mpath; 412 return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
304 return NULL; 416 return NULL;
305} 417}
306 418
419static void mesh_gate_node_reclaim(struct rcu_head *rp)
420{
421 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
422 kfree(node);
423}
424
425/**
426 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
427 * @mesh_tbl: table which contains known_gates list
428 * @mpath: mpath to known mesh gate
429 *
430 * Returns: 0 on success
431 *
432 */
433static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
434{
435 struct mpath_node *gate, *new_gate;
436 struct hlist_node *n;
437 int err;
438
439 rcu_read_lock();
440 tbl = rcu_dereference(tbl);
441
442 hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
443 if (gate->mpath == mpath) {
444 err = -EEXIST;
445 goto err_rcu;
446 }
447
448 new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
449 if (!new_gate) {
450 err = -ENOMEM;
451 goto err_rcu;
452 }
453
454 mpath->is_gate = true;
455 mpath->sdata->u.mesh.num_gates++;
456 new_gate->mpath = mpath;
457 spin_lock_bh(&tbl->gates_lock);
458 hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
459 spin_unlock_bh(&tbl->gates_lock);
460 rcu_read_unlock();
461 mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
462 mpath->sdata->name, mpath->dst,
463 mpath->sdata->u.mesh.num_gates);
464 return 0;
465err_rcu:
466 rcu_read_unlock();
467 return err;
468}
469
470/**
471 * mesh_gate_del - remove a mesh gate from the list of known gates
472 * @tbl: table which holds our list of known gates
473 * @mpath: gate mpath
474 *
475 * Returns: 0 on success
476 *
477 * Locking: must be called inside an rcu_read_lock() section
478 */
479static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
480{
481 struct mpath_node *gate;
482 struct hlist_node *p, *q;
483
484 tbl = rcu_dereference(tbl);
485
486 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
487 if (gate->mpath == mpath) {
488 spin_lock_bh(&tbl->gates_lock);
489 hlist_del_rcu(&gate->list);
490 call_rcu(&gate->rcu, mesh_gate_node_reclaim);
491 spin_unlock_bh(&tbl->gates_lock);
492 mpath->sdata->u.mesh.num_gates--;
493 mpath->is_gate = false;
494 mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
495 "%d known gates\n", mpath->sdata->name,
496 mpath->dst, mpath->sdata->u.mesh.num_gates);
497 break;
498 }
499
500 return 0;
501}
502
503/**
504 *
505 * mesh_path_add_gate - add the given mpath, which leads to a mesh gate, to our path table
506 * @mpath: gate path to add to table
507 */
508int mesh_path_add_gate(struct mesh_path *mpath)
509{
510 return mesh_gate_add(mesh_paths, mpath);
511}
512
513/**
514 * mesh_gate_num - number of gates known to this interface
515 * @sdata: subif data
516 */
517int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
518{
519 return sdata->u.mesh.num_gates;
520}
521
307/** 522/**
308 * mesh_path_add - allocate and add a new path to the mesh path table 523 * mesh_path_add - allocate and add a new path to the mesh path table
309 * @addr: destination address of the path (ETH_ALEN length) 524 * @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
481 new_mpath->flags = 0; 696 new_mpath->flags = 0;
482 skb_queue_head_init(&new_mpath->frame_queue); 697 skb_queue_head_init(&new_mpath->frame_queue);
483 new_node->mpath = new_mpath; 698 new_node->mpath = new_mpath;
699 init_timer(&new_mpath->timer);
484 new_mpath->exp_time = jiffies; 700 new_mpath->exp_time = jiffies;
485 spin_lock_init(&new_mpath->state_lock); 701 spin_lock_init(&new_mpath->state_lock);
486 702
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
539 struct hlist_node *p; 755 struct hlist_node *p;
540 struct ieee80211_sub_if_data *sdata = sta->sdata; 756 struct ieee80211_sub_if_data *sdata = sta->sdata;
541 int i; 757 int i;
758 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
542 759
543 rcu_read_lock(); 760 rcu_read_lock();
544 tbl = rcu_dereference(mesh_paths); 761 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) { 762 for_each_mesh_entry(tbl, p, node, i) {
546 mpath = node->mpath; 763 mpath = node->mpath;
547 spin_lock_bh(&mpath->state_lock);
548 if (rcu_dereference(mpath->next_hop) == sta && 764 if (rcu_dereference(mpath->next_hop) == sta &&
549 mpath->flags & MESH_PATH_ACTIVE && 765 mpath->flags & MESH_PATH_ACTIVE &&
550 !(mpath->flags & MESH_PATH_FIXED)) { 766 !(mpath->flags & MESH_PATH_FIXED)) {
767 spin_lock_bh(&mpath->state_lock);
551 mpath->flags &= ~MESH_PATH_ACTIVE; 768 mpath->flags &= ~MESH_PATH_ACTIVE;
552 ++mpath->sn; 769 ++mpath->sn;
553 spin_unlock_bh(&mpath->state_lock); 770 spin_unlock_bh(&mpath->state_lock);
554 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, 771 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
555 mpath->dst, cpu_to_le32(mpath->sn), 772 mpath->dst, cpu_to_le32(mpath->sn),
556 cpu_to_le16(PERR_RCODE_DEST_UNREACH), 773 reason, bcast, sdata);
557 bcast, sdata); 774 }
558 } else
559 spin_unlock_bh(&mpath->state_lock);
560 } 775 }
561 rcu_read_unlock(); 776 rcu_read_unlock();
562} 777}
563 778
779static void mesh_path_node_reclaim(struct rcu_head *rp)
780{
781 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
782 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
783
784 del_timer_sync(&node->mpath->timer);
785 atomic_dec(&sdata->u.mesh.mpaths);
786 kfree(node->mpath);
787 kfree(node);
788}
789
790/* needs to be called with the corresponding hashwlock taken */
791static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
792{
793 struct mesh_path *mpath;
794 mpath = node->mpath;
795 spin_lock(&mpath->state_lock);
796 mpath->flags |= MESH_PATH_RESOLVING;
797 if (mpath->is_gate)
798 mesh_gate_del(tbl, mpath);
799 hlist_del_rcu(&node->list);
800 call_rcu(&node->rcu, mesh_path_node_reclaim);
801 spin_unlock(&mpath->state_lock);
802 atomic_dec(&tbl->entries);
803}
804
564/** 805/**
565 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 806 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
566 * 807 *
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
581 int i; 822 int i;
582 823
583 rcu_read_lock(); 824 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths); 825 read_lock_bh(&pathtbl_resize_lock);
826 tbl = resize_dereference_mesh_paths();
585 for_each_mesh_entry(tbl, p, node, i) { 827 for_each_mesh_entry(tbl, p, node, i) {
586 mpath = node->mpath; 828 mpath = node->mpath;
587 if (rcu_dereference(mpath->next_hop) == sta) 829 if (rcu_dereference(mpath->next_hop) == sta) {
588 mesh_path_del(mpath->dst, mpath->sdata); 830 spin_lock_bh(&tbl->hashwlock[i]);
831 __mesh_path_del(tbl, node);
832 spin_unlock_bh(&tbl->hashwlock[i]);
833 }
589 } 834 }
835 read_unlock_bh(&pathtbl_resize_lock);
590 rcu_read_unlock(); 836 rcu_read_unlock();
591} 837}
592 838
593void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 839static void table_flush_by_iface(struct mesh_table *tbl,
840 struct ieee80211_sub_if_data *sdata)
594{ 841{
595 struct mesh_table *tbl;
596 struct mesh_path *mpath; 842 struct mesh_path *mpath;
597 struct mpath_node *node; 843 struct mpath_node *node;
598 struct hlist_node *p; 844 struct hlist_node *p;
599 int i; 845 int i;
600 846
601 rcu_read_lock(); 847 WARN_ON(!rcu_read_lock_held());
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) { 848 for_each_mesh_entry(tbl, p, node, i) {
604 mpath = node->mpath; 849 mpath = node->mpath;
605 if (mpath->sdata == sdata) 850 if (mpath->sdata != sdata)
606 mesh_path_del(mpath->dst, mpath->sdata); 851 continue;
852 spin_lock_bh(&tbl->hashwlock[i]);
853 __mesh_path_del(tbl, node);
854 spin_unlock_bh(&tbl->hashwlock[i]);
607 } 855 }
608 rcu_read_unlock();
609} 856}
610 857
611static void mesh_path_node_reclaim(struct rcu_head *rp) 858/**
859 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
860 *
861 * This function deletes both mesh paths and mesh portal paths.
862 *
863 * @sdata: interface data to match
864 *
865 */
866void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
612{ 867{
613 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 868 struct mesh_table *tbl;
614 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
615 869
616 del_timer_sync(&node->mpath->timer); 870 rcu_read_lock();
617 atomic_dec(&sdata->u.mesh.mpaths); 871 read_lock_bh(&pathtbl_resize_lock);
618 kfree(node->mpath); 872 tbl = resize_dereference_mesh_paths();
619 kfree(node); 873 table_flush_by_iface(tbl, sdata);
874 tbl = resize_dereference_mpp_paths();
875 table_flush_by_iface(tbl, sdata);
876 read_unlock_bh(&pathtbl_resize_lock);
877 rcu_read_unlock();
620} 878}
621 879
622/** 880/**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
647 mpath = node->mpath; 905 mpath = node->mpath;
648 if (mpath->sdata == sdata && 906 if (mpath->sdata == sdata &&
649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 907 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
650 spin_lock(&mpath->state_lock); 908 __mesh_path_del(tbl, node);
651 mpath->flags |= MESH_PATH_RESOLVING;
652 hlist_del_rcu(&node->list);
653 call_rcu(&node->rcu, mesh_path_node_reclaim);
654 atomic_dec(&tbl->entries);
655 spin_unlock(&mpath->state_lock);
656 goto enddel; 909 goto enddel;
657 } 910 }
658 } 911 }
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
681} 934}
682 935
683/** 936/**
937 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
938 *
939 * @mpath: mesh path whose queue will be emptied
940 *
941 * If there is only one gate, the frames are transferred from the failed mpath
942 * queue to that gate's queue. If there is more than one gate, the frames
943 * are copied from each gate to the next. After frames are copied, the
944 * mpath queues are emptied onto the transmission queue.
945 */
946int mesh_path_send_to_gates(struct mesh_path *mpath)
947{
948 struct ieee80211_sub_if_data *sdata = mpath->sdata;
949 struct hlist_node *n;
950 struct mesh_table *tbl;
951 struct mesh_path *from_mpath = mpath;
952 struct mpath_node *gate = NULL;
953 bool copy = false;
954 struct hlist_head *known_gates;
955
956 rcu_read_lock();
957 tbl = rcu_dereference(mesh_paths);
958 known_gates = tbl->known_gates;
959 rcu_read_unlock();
960
961 if (!known_gates)
962 return -EHOSTUNREACH;
963
964 hlist_for_each_entry_rcu(gate, n, known_gates, list) {
965 if (gate->mpath->sdata != sdata)
966 continue;
967
968 if (gate->mpath->flags & MESH_PATH_ACTIVE) {
969 mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
970 mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
971 from_mpath = gate->mpath;
972 copy = true;
973 } else {
974 mpath_dbg("Not forwarding %p\n", gate->mpath);
975 mpath_dbg("flags %x\n", gate->mpath->flags);
976 }
977 }
978
979 hlist_for_each_entry_rcu(gate, n, known_gates, list)
980 if (gate->mpath->sdata == sdata) {
981 mpath_dbg("Sending to %pM\n", gate->mpath->dst);
982 mesh_path_tx_pending(gate->mpath);
983 }
984
985 return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
986}
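
The copy/move scheme documented above is easiest to see with a concrete gate list; for three active gates g1, g2 and g3 (hypothetical names), the calls reduce to:

	mesh_path_move_to_queue(g1, mpath, false);  /* move: failed queue -> g1 */
	mesh_path_move_to_queue(g2, g1, true);      /* copy: g1 -> g2 */
	mesh_path_move_to_queue(g3, g2, true);      /* copy: g2 -> g3 */
	mesh_path_tx_pending(g1);
	mesh_path_tx_pending(g2);
	mesh_path_tx_pending(g3);

so each active gate ends up transmitting one instance of every frame that was stranded on the failed path.
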
987
988/**
684 * mesh_path_discard_frame - discard a frame whose path could not be resolved 989 * mesh_path_discard_frame - discard a frame whose path could not be resolved
685 * 990 *
686 * @skb: frame to discard 991 * @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
699 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1004 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
700 struct mesh_path *mpath; 1005 struct mesh_path *mpath;
701 u32 sn = 0; 1006 u32 sn = 0;
1007 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
702 1008
703 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { 1009 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
704 u8 *ra, *da; 1010 u8 *ra, *da;
705 1011
706 da = hdr->addr3; 1012 da = hdr->addr3;
707 ra = hdr->addr1; 1013 ra = hdr->addr1;
1014 rcu_read_lock();
708 mpath = mesh_path_lookup(da, sdata); 1015 mpath = mesh_path_lookup(da, sdata);
709 if (mpath) 1016 if (mpath) {
1017 spin_lock_bh(&mpath->state_lock);
710 sn = ++mpath->sn; 1018 sn = ++mpath->sn;
1019 spin_unlock_bh(&mpath->state_lock);
1020 }
1021 rcu_read_unlock();
711 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, 1022 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
712 cpu_to_le32(sn), 1023 cpu_to_le32(sn), reason, ra, sdata);
713 cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
714 } 1024 }
715 1025
716 kfree_skb(skb); 1026 kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
728{ 1038{
729 struct sk_buff *skb; 1039 struct sk_buff *skb;
730 1040
731 while ((skb = skb_dequeue(&mpath->frame_queue)) && 1041 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
732 (mpath->flags & MESH_PATH_ACTIVE))
733 mesh_path_discard_frame(skb, mpath->sdata); 1042 mesh_path_discard_frame(skb, mpath->sdata);
734} 1043}
735 1044
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
790int mesh_pathtbl_init(void) 1099int mesh_pathtbl_init(void)
791{ 1100{
792 struct mesh_table *tbl_path, *tbl_mpp; 1101 struct mesh_table *tbl_path, *tbl_mpp;
1102 int ret;
793 1103
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1104 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path) 1105 if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
797 tbl_path->free_node = &mesh_path_node_free; 1107 tbl_path->free_node = &mesh_path_node_free;
798 tbl_path->copy_node = &mesh_path_node_copy; 1108 tbl_path->copy_node = &mesh_path_node_copy;
799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN; 1109 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
1110 tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
1111 if (!tbl_path->known_gates) {
1112 ret = -ENOMEM;
1113 goto free_path;
1114 }
1115 INIT_HLIST_HEAD(tbl_path->known_gates);
1116
800 1117
801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1118 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
802 if (!tbl_mpp) { 1119 if (!tbl_mpp) {
803 mesh_table_free(tbl_path, true); 1120 ret = -ENOMEM;
804 return -ENOMEM; 1121 goto free_path;
805 } 1122 }
806 tbl_mpp->free_node = &mesh_path_node_free; 1123 tbl_mpp->free_node = &mesh_path_node_free;
807 tbl_mpp->copy_node = &mesh_path_node_copy; 1124 tbl_mpp->copy_node = &mesh_path_node_copy;
808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; 1125 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
1126 tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
1127 if (!tbl_mpp->known_gates) {
1128 ret = -ENOMEM;
1129 goto free_mpp;
1130 }
1131 INIT_HLIST_HEAD(tbl_mpp->known_gates);
809 1132
810 /* Need no locking since this is during init */ 1133 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path); 1134 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp); 1135 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
813 1136
814 return 0; 1137 return 0;
1138
1139free_mpp:
1140 mesh_table_free(tbl_mpp, true);
1141free_path:
1142 mesh_table_free(tbl_path, true);
1143 return ret;
815} 1144}
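
The new error unwinding follows the standard goto-cleanup idiom: each later failure jumps to a label that releases everything set up so far, in reverse order (note the fall-through from free_mpp into free_path). A minimal self-contained user-space analogue, with hypothetical names throughout:

	#include <stdlib.h>

	struct tbl { void *gates; };

	static struct tbl *tbl_alloc(void) { return calloc(1, sizeof(struct tbl)); }

	static void tbl_free(struct tbl *t)
	{
		if (t) {
			free(t->gates);
			free(t);
		}
	}

	static int pathtbl_init_sketch(struct tbl **path, struct tbl **mpp)
	{
		int ret = -1;

		*path = tbl_alloc();
		if (!*path)
			return -1;
		(*path)->gates = calloc(1, 16);
		if (!(*path)->gates)
			goto free_path;

		*mpp = tbl_alloc();
		if (!*mpp)
			goto free_path;
		(*mpp)->gates = calloc(1, 16);
		if (!(*mpp)->gates)
			goto free_mpp;

		return 0;

	free_mpp:
		tbl_free(*mpp);     /* falls through to release *path too */
	free_path:
		tbl_free(*path);
		return ret;
	}
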
816 1145
817void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 1146void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
828 if (node->mpath->sdata != sdata) 1157 if (node->mpath->sdata != sdata)
829 continue; 1158 continue;
830 mpath = node->mpath; 1159 mpath = node->mpath;
831 spin_lock_bh(&mpath->state_lock);
832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 1160 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
833 (!(mpath->flags & MESH_PATH_FIXED)) && 1161 (!(mpath->flags & MESH_PATH_FIXED)) &&
834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { 1162 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
835 spin_unlock_bh(&mpath->state_lock);
836 mesh_path_del(mpath->dst, mpath->sdata); 1163 mesh_path_del(mpath->dst, mpath->sdata);
837 } else
838 spin_unlock_bh(&mpath->state_lock);
839 } 1164 }
840 rcu_read_unlock(); 1165 rcu_read_unlock();
841} 1166}
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
843void mesh_pathtbl_unregister(void) 1168void mesh_pathtbl_unregister(void)
844{ 1169{
845 /* no need for locking during exit path */ 1170 /* no need for locking during exit path */
846 mesh_table_free(rcu_dereference_raw(mesh_paths), true); 1171 mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true); 1172 mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
848} 1173}
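
rcu_dereference_protected(p, c) fetches an RCU-managed pointer outside a read-side critical section and hands lockdep the condition c that makes this safe; passing 1 documents "no concurrent updater can exist here" (module unload), which the old rcu_dereference_raw() calls left implicit. The usage pattern, as in the hunk above:

	struct mesh_table *tbl = rcu_dereference_protected(mesh_paths, 1);
	mesh_table_free(tbl, true);
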
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f4adc0917888..7e57f5d07f66 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,35 +19,18 @@
19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0) 19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif 20#endif
21 21
22#define PLINK_GET_LLID(p) (p + 4) 22#define PLINK_GET_LLID(p) (p + 2)
23#define PLINK_GET_PLID(p) (p + 6) 23#define PLINK_GET_PLID(p) (p + 4)
24 24
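
The offsets shrink by two because the peering protocol identifier that opens the IE body changes from a 4-byte OUI-based field to a plain 2-byte protocol ID (see the u16 peering_proto written by mesh_plink_frame_tx() further down). A hypothetical stand-alone parser under that layout, for illustration only; has_plid must be derived from the frame type and IE length exactly as mesh_rx_plink_frame() does:

	#include <stdint.h>
	#include <string.h>

	static void parse_peering_ie(const uint8_t *body, int has_plid,
				     uint16_t *llid, uint16_t *plid)
	{
		memcpy(llid, body + 2, 2);          /* PLINK_GET_LLID(body) */
		if (has_plid)
			memcpy(plid, body + 4, 2);  /* PLINK_GET_PLID(body) */
	}
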
25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ 25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
26 jiffies + HZ * t / 1000)) 26 jiffies + HZ * t / 1000))
27 27
28/* Peer link cancel reasons, all subject to ANA approval */
29#define MESH_LINK_CANCELLED 2
30#define MESH_MAX_NEIGHBORS 3
31#define MESH_CAPABILITY_POLICY_VIOLATION 4
32#define MESH_CLOSE_RCVD 5
33#define MESH_MAX_RETRIES 6
34#define MESH_CONFIRM_TIMEOUT 7
35#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
37#define MESH_SECURITY_FAILED_VERIFICATION 10
38
39#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) 28#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
40#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) 29#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
41#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) 30#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
42#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) 31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 33
45enum plink_frame_type {
46 PLINK_OPEN = 1,
47 PLINK_CONFIRM,
48 PLINK_CLOSE
49};
50
51enum plink_event { 34enum plink_event {
52 PLINK_UNDEFINED, 35 PLINK_UNDEFINED,
53 OPN_ACPT, 36 OPN_ACPT,
@@ -60,6 +43,10 @@ enum plink_event {
60 CLS_IGNR 43 CLS_IGNR
61}; 44};
62 45
46static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
47 enum ieee80211_self_protected_actioncode action,
48 u8 *da, __le16 llid, __le16 plid, __le16 reason);
49
63static inline 50static inline
64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 51void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 52{
@@ -105,7 +92,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 92 if (!sta)
106 return NULL; 93 return NULL;
107 94
108 sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH; 95 set_sta_flag(sta, WLAN_STA_AUTH);
96 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
97 set_sta_flag(sta, WLAN_STA_WME);
109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 98 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 rate_control_rate_init(sta); 99 rate_control_rate_init(sta);
111 100
@@ -150,6 +139,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
150 139
151 spin_lock_bh(&sta->lock); 140 spin_lock_bh(&sta->lock);
152 deactivated = __mesh_plink_deactivate(sta); 141 deactivated = __mesh_plink_deactivate(sta);
142 sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
143 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
144 sta->sta.addr, sta->llid, sta->plid,
145 sta->reason);
153 spin_unlock_bh(&sta->lock); 146 spin_unlock_bh(&sta->lock);
154 147
155 if (deactivated) 148 if (deactivated)
@@ -157,16 +150,16 @@ void mesh_plink_deactivate(struct sta_info *sta)
157} 150}
158 151
159static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 152static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
160 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, 153 enum ieee80211_self_protected_actioncode action,
161 __le16 reason) { 154 u8 *da, __le16 llid, __le16 plid, __le16 reason) {
162 struct ieee80211_local *local = sdata->local; 155 struct ieee80211_local *local = sdata->local;
163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + 156 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
164 sdata->u.mesh.ie_len); 157 sdata->u.mesh.ie_len);
165 struct ieee80211_mgmt *mgmt; 158 struct ieee80211_mgmt *mgmt;
166 bool include_plid = false; 159 bool include_plid = false;
167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; 160 int ie_len = 4;
161 u16 peering_proto = 0;
168 u8 *pos; 162 u8 *pos;
169 int ie_len;
170 163
171 if (!skb) 164 if (!skb)
172 return -1; 165 return -1;
@@ -175,63 +168,75 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
175 * common action part (1) 168 * common action part (1)
176 */ 169 */
177 mgmt = (struct ieee80211_mgmt *) 170 mgmt = (struct ieee80211_mgmt *)
178 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); 171 skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
179 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); 172 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
180 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 173 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
181 IEEE80211_STYPE_ACTION); 174 IEEE80211_STYPE_ACTION);
182 memcpy(mgmt->da, da, ETH_ALEN); 175 memcpy(mgmt->da, da, ETH_ALEN);
183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 176 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
184 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 177 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
185 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; 178 mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
186 mgmt->u.action.u.plink_action.action_code = action; 179 mgmt->u.action.u.self_prot.action_code = action;
187 180
188 if (action == PLINK_CLOSE) 181 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
189 mgmt->u.action.u.plink_action.aux = reason; 182 /* capability info */
190 else { 183 pos = skb_put(skb, 2);
191 mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0); 184 memset(pos, 0, 2);
192 if (action == PLINK_CONFIRM) { 185 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
193 pos = skb_put(skb, 4); 186 /* AID */
194 /* two-byte status code followed by two-byte AID */ 187 pos = skb_put(skb, 2);
195 memset(pos, 0, 2);
196 memcpy(pos + 2, &plid, 2); 188 memcpy(pos + 2, &plid, 2);
197 } 189 }
198 mesh_mgmt_ies_add(skb, sdata); 190 if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
191 ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
192 mesh_add_rsn_ie(skb, sdata) ||
193 mesh_add_meshid_ie(skb, sdata) ||
194 mesh_add_meshconf_ie(skb, sdata))
195 return -1;
196 } else { /* WLAN_SP_MESH_PEERING_CLOSE */
197 if (mesh_add_meshid_ie(skb, sdata))
198 return -1;
199 } 199 }
200 200
201 /* Add Peer Link Management element */ 201 /* Add Mesh Peering Management element */
202 switch (action) { 202 switch (action) {
203 case PLINK_OPEN: 203 case WLAN_SP_MESH_PEERING_OPEN:
204 ie_len = 6;
205 break; 204 break;
206 case PLINK_CONFIRM: 205 case WLAN_SP_MESH_PEERING_CONFIRM:
207 ie_len = 8; 206 ie_len += 2;
208 include_plid = true; 207 include_plid = true;
209 break; 208 break;
210 case PLINK_CLOSE: 209 case WLAN_SP_MESH_PEERING_CLOSE:
211 default: 210 if (plid) {
212 if (!plid) 211 ie_len += 2;
213 ie_len = 8;
214 else {
215 ie_len = 10;
216 include_plid = true; 212 include_plid = true;
217 } 213 }
214 ie_len += 2; /* reason code */
218 break; 215 break;
216 default:
217 return -EINVAL;
219 } 218 }
220 219
220 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
221 return -ENOMEM;
222
221 pos = skb_put(skb, 2 + ie_len); 223 pos = skb_put(skb, 2 + ie_len);
222 *pos++ = WLAN_EID_PEER_LINK; 224 *pos++ = WLAN_EID_PEER_MGMT;
223 *pos++ = ie_len; 225 *pos++ = ie_len;
224 memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto)); 226 memcpy(pos, &peering_proto, 2);
225 pos += 4; 227 pos += 2;
226 memcpy(pos, &llid, 2); 228 memcpy(pos, &llid, 2);
229 pos += 2;
227 if (include_plid) { 230 if (include_plid) {
228 pos += 2;
229 memcpy(pos, &plid, 2); 231 memcpy(pos, &plid, 2);
230 }
231 if (action == PLINK_CLOSE) {
232 pos += 2; 232 pos += 2;
233 }
234 if (action == WLAN_SP_MESH_PEERING_CLOSE) {
233 memcpy(pos, &reason, 2); 235 memcpy(pos, &reason, 2);
236 pos += 2;
234 } 237 }
238 if (mesh_add_vendor_ies(skb, sdata))
239 return -1;
235 240
236 ieee80211_tx_skb(sdata, skb); 241 ieee80211_tx_skb(sdata, skb);
237 return 0; 242 return 0;
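
The ie_len bookkeeping above, in isolation: the base of 4 covers the 2-byte protocol ID plus the 2-byte local link ID, and the totals line up with the receive-side length checks further down (OPEN 4, CONFIRM 6, CLOSE 6 or 8). A sketch assuming the action codes from linux/ieee80211.h:

	static int peering_ie_len(enum ieee80211_self_protected_actioncode action,
				  bool have_plid)
	{
		int ie_len = 4;                 /* protocol ID + llid */

		switch (action) {
		case WLAN_SP_MESH_PEERING_OPEN:
			break;
		case WLAN_SP_MESH_PEERING_CONFIRM:
			ie_len += 2;            /* plid */
			break;
		case WLAN_SP_MESH_PEERING_CLOSE:
			if (have_plid)
				ie_len += 2;    /* plid */
			ie_len += 2;            /* reason code */
			break;
		default:
			return -EINVAL;
		}
		return ie_len;
	}
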
@@ -322,21 +327,21 @@ static void mesh_plink_timer(unsigned long data)
322 ++sta->plink_retries; 327 ++sta->plink_retries;
323 mod_plink_timer(sta, sta->plink_timeout); 328 mod_plink_timer(sta, sta->plink_timeout);
324 spin_unlock_bh(&sta->lock); 329 spin_unlock_bh(&sta->lock);
325 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 330 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
326 0, 0); 331 sta->sta.addr, llid, 0, 0);
327 break; 332 break;
328 } 333 }
329 reason = cpu_to_le16(MESH_MAX_RETRIES); 334 reason = cpu_to_le16(WLAN_REASON_MESH_MAX_RETRIES);
330 /* fall through on else */ 335 /* fall through on else */
331 case NL80211_PLINK_CNF_RCVD: 336 case NL80211_PLINK_CNF_RCVD:
332 /* confirm timer */ 337 /* confirm timer */
333 if (!reason) 338 if (!reason)
334 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 339 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
335 sta->plink_state = NL80211_PLINK_HOLDING; 340 sta->plink_state = NL80211_PLINK_HOLDING;
336 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 341 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
337 spin_unlock_bh(&sta->lock); 342 spin_unlock_bh(&sta->lock);
338 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, 343 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
339 reason); 344 sta->sta.addr, llid, plid, reason);
340 break; 345 break;
341 case NL80211_PLINK_HOLDING: 346 case NL80211_PLINK_HOLDING:
342 /* holding timer */ 347 /* holding timer */
@@ -380,7 +385,7 @@ int mesh_plink_open(struct sta_info *sta)
380 __le16 llid; 385 __le16 llid;
381 struct ieee80211_sub_if_data *sdata = sta->sdata; 386 struct ieee80211_sub_if_data *sdata = sta->sdata;
382 387
383 if (!test_sta_flags(sta, WLAN_STA_AUTH)) 388 if (!test_sta_flag(sta, WLAN_STA_AUTH))
384 return -EPERM; 389 return -EPERM;
385 390
386 spin_lock_bh(&sta->lock); 391 spin_lock_bh(&sta->lock);
@@ -396,7 +401,7 @@ int mesh_plink_open(struct sta_info *sta)
396 mpl_dbg("Mesh plink: starting establishment with %pM\n", 401 mpl_dbg("Mesh plink: starting establishment with %pM\n",
397 sta->sta.addr); 402 sta->sta.addr);
398 403
399 return mesh_plink_frame_tx(sdata, PLINK_OPEN, 404 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
400 sta->sta.addr, llid, 0, 0); 405 sta->sta.addr, llid, 0, 0);
401} 406}
402 407
@@ -422,7 +427,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
422 struct ieee802_11_elems elems; 427 struct ieee802_11_elems elems;
423 struct sta_info *sta; 428 struct sta_info *sta;
424 enum plink_event event; 429 enum plink_event event;
425 enum plink_frame_type ftype; 430 enum ieee80211_self_protected_actioncode ftype;
426 size_t baselen; 431 size_t baselen;
427 bool deactivated, matches_local = true; 432 bool deactivated, matches_local = true;
428 u8 ie_len; 433 u8 ie_len;
@@ -449,14 +454,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
449 return; 454 return;
450 } 455 }
451 456
452 baseaddr = mgmt->u.action.u.plink_action.variable; 457 baseaddr = mgmt->u.action.u.self_prot.variable;
453 baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; 458 baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
454 if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { 459 if (mgmt->u.action.u.self_prot.action_code ==
460 WLAN_SP_MESH_PEERING_CONFIRM) {
455 baseaddr += 4; 461 baseaddr += 4;
456 baselen += 4; 462 baselen += 4;
457 } 463 }
458 ieee802_11_parse_elems(baseaddr, len - baselen, &elems); 464 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
459 if (!elems.peer_link) { 465 if (!elems.peering) {
460 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 466 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
461 return; 467 return;
462 } 468 }
@@ -466,37 +472,40 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
466 return; 472 return;
467 } 473 }
468 474
469 ftype = mgmt->u.action.u.plink_action.action_code; 475 ftype = mgmt->u.action.u.self_prot.action_code;
470 ie_len = elems.peer_link_len; 476 ie_len = elems.peering_len;
471 if ((ftype == PLINK_OPEN && ie_len != 6) || 477 if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
472 (ftype == PLINK_CONFIRM && ie_len != 8) || 478 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
473 (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) { 479 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
480 && ie_len != 8)) {
474 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", 481 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
475 ftype, ie_len); 482 ftype, ie_len);
476 return; 483 return;
477 } 484 }
478 485
479 if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) { 486 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
487 (!elems.mesh_id || !elems.mesh_config)) {
480 mpl_dbg("Mesh plink: missing necessary ie\n"); 488 mpl_dbg("Mesh plink: missing necessary ie\n");
481 return; 489 return;
482 } 490 }
483 /* Note the lines below are correct, the llid in the frame is the plid 491 /* Note the lines below are correct, the llid in the frame is the plid
484 * from the point of view of this host. 492 * from the point of view of this host.
485 */ 493 */
486 memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); 494 memcpy(&plid, PLINK_GET_LLID(elems.peering), 2);
487 if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10)) 495 if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
488 memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); 496 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
497 memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
489 498
490 rcu_read_lock(); 499 rcu_read_lock();
491 500
492 sta = sta_info_get(sdata, mgmt->sa); 501 sta = sta_info_get(sdata, mgmt->sa);
493 if (!sta && ftype != PLINK_OPEN) { 502 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
494 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 503 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
495 rcu_read_unlock(); 504 rcu_read_unlock();
496 return; 505 return;
497 } 506 }
498 507
499 if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) { 508 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
500 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 509 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
501 rcu_read_unlock(); 510 rcu_read_unlock();
502 return; 511 return;
@@ -509,30 +518,30 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
509 518
510 /* Now we will figure out the appropriate event... */ 519 /* Now we will figure out the appropriate event... */
511 event = PLINK_UNDEFINED; 520 event = PLINK_UNDEFINED;
512 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { 521 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
522 (!mesh_matches_local(&elems, sdata))) {
513 matches_local = false; 523 matches_local = false;
514 switch (ftype) { 524 switch (ftype) {
515 case PLINK_OPEN: 525 case WLAN_SP_MESH_PEERING_OPEN:
516 event = OPN_RJCT; 526 event = OPN_RJCT;
517 break; 527 break;
518 case PLINK_CONFIRM: 528 case WLAN_SP_MESH_PEERING_CONFIRM:
519 event = CNF_RJCT; 529 event = CNF_RJCT;
520 break; 530 break;
521 case PLINK_CLOSE: 531 default:
522 /* avoid warning */
523 break; 532 break;
524 } 533 }
525 } 534 }
526 535
527 if (!sta && !matches_local) { 536 if (!sta && !matches_local) {
528 rcu_read_unlock(); 537 rcu_read_unlock();
529 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 538 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
530 llid = 0; 539 llid = 0;
531 mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid, 540 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
532 plid, reason); 541 mgmt->sa, llid, plid, reason);
533 return; 542 return;
534 } else if (!sta) { 543 } else if (!sta) {
535 /* ftype == PLINK_OPEN */ 544 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
536 u32 rates; 545 u32 rates;
537 546
538 rcu_read_unlock(); 547 rcu_read_unlock();
@@ -557,21 +566,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
557 } else if (matches_local) { 566 } else if (matches_local) {
558 spin_lock_bh(&sta->lock); 567 spin_lock_bh(&sta->lock);
559 switch (ftype) { 568 switch (ftype) {
560 case PLINK_OPEN: 569 case WLAN_SP_MESH_PEERING_OPEN:
561 if (!mesh_plink_free_count(sdata) || 570 if (!mesh_plink_free_count(sdata) ||
562 (sta->plid && sta->plid != plid)) 571 (sta->plid && sta->plid != plid))
563 event = OPN_IGNR; 572 event = OPN_IGNR;
564 else 573 else
565 event = OPN_ACPT; 574 event = OPN_ACPT;
566 break; 575 break;
567 case PLINK_CONFIRM: 576 case WLAN_SP_MESH_PEERING_CONFIRM:
568 if (!mesh_plink_free_count(sdata) || 577 if (!mesh_plink_free_count(sdata) ||
569 (sta->llid != llid || sta->plid != plid)) 578 (sta->llid != llid || sta->plid != plid))
570 event = CNF_IGNR; 579 event = CNF_IGNR;
571 else 580 else
572 event = CNF_ACPT; 581 event = CNF_ACPT;
573 break; 582 break;
574 case PLINK_CLOSE: 583 case WLAN_SP_MESH_PEERING_CLOSE:
575 if (sta->plink_state == NL80211_PLINK_ESTAB) 584 if (sta->plink_state == NL80211_PLINK_ESTAB)
576 /* Do not check for llid or plid. This does not 585 /* Do not check for llid or plid. This does not
577 * follow the standard but since multiple plinks 586 * follow the standard but since multiple plinks
@@ -620,10 +629,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
620 sta->llid = llid; 629 sta->llid = llid;
621 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 630 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
622 spin_unlock_bh(&sta->lock); 631 spin_unlock_bh(&sta->lock);
623 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, 632 mesh_plink_frame_tx(sdata,
624 0, 0); 633 WLAN_SP_MESH_PEERING_OPEN,
625 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, 634 sta->sta.addr, llid, 0, 0);
626 llid, plid, 0); 635 mesh_plink_frame_tx(sdata,
636 WLAN_SP_MESH_PEERING_CONFIRM,
637 sta->sta.addr, llid, plid, 0);
627 break; 638 break;
628 default: 639 default:
629 spin_unlock_bh(&sta->lock); 640 spin_unlock_bh(&sta->lock);
@@ -635,10 +646,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
635 switch (event) { 646 switch (event) {
636 case OPN_RJCT: 647 case OPN_RJCT:
637 case CNF_RJCT: 648 case CNF_RJCT:
638 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 649 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
639 case CLS_ACPT: 650 case CLS_ACPT:
640 if (!reason) 651 if (!reason)
641 reason = cpu_to_le16(MESH_CLOSE_RCVD); 652 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
642 sta->reason = reason; 653 sta->reason = reason;
643 sta->plink_state = NL80211_PLINK_HOLDING; 654 sta->plink_state = NL80211_PLINK_HOLDING;
644 if (!mod_plink_timer(sta, 655 if (!mod_plink_timer(sta,
@@ -647,8 +658,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
647 658
648 llid = sta->llid; 659 llid = sta->llid;
649 spin_unlock_bh(&sta->lock); 660 spin_unlock_bh(&sta->lock);
650 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 661 mesh_plink_frame_tx(sdata,
651 plid, reason); 662 WLAN_SP_MESH_PEERING_CLOSE,
663 sta->sta.addr, llid, plid, reason);
652 break; 664 break;
653 case OPN_ACPT: 665 case OPN_ACPT:
654 /* retry timer is left untouched */ 666 /* retry timer is left untouched */
@@ -656,8 +668,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
656 sta->plid = plid; 668 sta->plid = plid;
657 llid = sta->llid; 669 llid = sta->llid;
658 spin_unlock_bh(&sta->lock); 670 spin_unlock_bh(&sta->lock);
659 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 671 mesh_plink_frame_tx(sdata,
660 plid, 0); 672 WLAN_SP_MESH_PEERING_CONFIRM,
673 sta->sta.addr, llid, plid, 0);
661 break; 674 break;
662 case CNF_ACPT: 675 case CNF_ACPT:
663 sta->plink_state = NL80211_PLINK_CNF_RCVD; 676 sta->plink_state = NL80211_PLINK_CNF_RCVD;
@@ -677,10 +690,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
677 switch (event) { 690 switch (event) {
678 case OPN_RJCT: 691 case OPN_RJCT:
679 case CNF_RJCT: 692 case CNF_RJCT:
680 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 693 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
681 case CLS_ACPT: 694 case CLS_ACPT:
682 if (!reason) 695 if (!reason)
683 reason = cpu_to_le16(MESH_CLOSE_RCVD); 696 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
684 sta->reason = reason; 697 sta->reason = reason;
685 sta->plink_state = NL80211_PLINK_HOLDING; 698 sta->plink_state = NL80211_PLINK_HOLDING;
686 if (!mod_plink_timer(sta, 699 if (!mod_plink_timer(sta,
@@ -689,14 +702,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
689 702
690 llid = sta->llid; 703 llid = sta->llid;
691 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
692 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 705 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
693 plid, reason); 706 sta->sta.addr, llid, plid, reason);
694 break; 707 break;
695 case OPN_ACPT: 708 case OPN_ACPT:
696 llid = sta->llid; 709 llid = sta->llid;
697 spin_unlock_bh(&sta->lock); 710 spin_unlock_bh(&sta->lock);
698 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 711 mesh_plink_frame_tx(sdata,
699 plid, 0); 712 WLAN_SP_MESH_PEERING_CONFIRM,
713 sta->sta.addr, llid, plid, 0);
700 break; 714 break;
701 case CNF_ACPT: 715 case CNF_ACPT:
702 del_timer(&sta->plink_timer); 716 del_timer(&sta->plink_timer);
@@ -717,10 +731,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
717 switch (event) { 731 switch (event) {
718 case OPN_RJCT: 732 case OPN_RJCT:
719 case CNF_RJCT: 733 case CNF_RJCT:
720 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); 734 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
721 case CLS_ACPT: 735 case CLS_ACPT:
722 if (!reason) 736 if (!reason)
723 reason = cpu_to_le16(MESH_CLOSE_RCVD); 737 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
724 sta->reason = reason; 738 sta->reason = reason;
725 sta->plink_state = NL80211_PLINK_HOLDING; 739 sta->plink_state = NL80211_PLINK_HOLDING;
726 if (!mod_plink_timer(sta, 740 if (!mod_plink_timer(sta,
@@ -729,8 +743,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
729 743
730 llid = sta->llid; 744 llid = sta->llid;
731 spin_unlock_bh(&sta->lock); 745 spin_unlock_bh(&sta->lock);
732 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 746 mesh_plink_frame_tx(sdata,
733 plid, reason); 747 WLAN_SP_MESH_PEERING_CLOSE,
748 sta->sta.addr, llid, plid, reason);
734 break; 749 break;
735 case OPN_ACPT: 750 case OPN_ACPT:
736 del_timer(&sta->plink_timer); 751 del_timer(&sta->plink_timer);
@@ -740,8 +755,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
740 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 755 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
741 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 756 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
742 sta->sta.addr); 757 sta->sta.addr);
743 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 758 mesh_plink_frame_tx(sdata,
744 plid, 0); 759 WLAN_SP_MESH_PEERING_CONFIRM,
760 sta->sta.addr, llid, plid, 0);
745 break; 761 break;
746 default: 762 default:
747 spin_unlock_bh(&sta->lock); 763 spin_unlock_bh(&sta->lock);
@@ -752,7 +768,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
752 case NL80211_PLINK_ESTAB: 768 case NL80211_PLINK_ESTAB:
753 switch (event) { 769 switch (event) {
754 case CLS_ACPT: 770 case CLS_ACPT:
755 reason = cpu_to_le16(MESH_CLOSE_RCVD); 771 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
756 sta->reason = reason; 772 sta->reason = reason;
757 deactivated = __mesh_plink_deactivate(sta); 773 deactivated = __mesh_plink_deactivate(sta);
758 sta->plink_state = NL80211_PLINK_HOLDING; 774 sta->plink_state = NL80211_PLINK_HOLDING;
@@ -761,14 +777,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
761 spin_unlock_bh(&sta->lock); 777 spin_unlock_bh(&sta->lock);
762 if (deactivated) 778 if (deactivated)
763 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 779 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
764 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 780 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
765 plid, reason); 781 sta->sta.addr, llid, plid, reason);
766 break; 782 break;
767 case OPN_ACPT: 783 case OPN_ACPT:
768 llid = sta->llid; 784 llid = sta->llid;
769 spin_unlock_bh(&sta->lock); 785 spin_unlock_bh(&sta->lock);
770 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 786 mesh_plink_frame_tx(sdata,
771 plid, 0); 787 WLAN_SP_MESH_PEERING_CONFIRM,
788 sta->sta.addr, llid, plid, 0);
772 break; 789 break;
773 default: 790 default:
774 spin_unlock_bh(&sta->lock); 791 spin_unlock_bh(&sta->lock);
@@ -790,8 +807,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
790 llid = sta->llid; 807 llid = sta->llid;
791 reason = sta->reason; 808 reason = sta->reason;
792 spin_unlock_bh(&sta->lock); 809 spin_unlock_bh(&sta->lock);
793 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, 810 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
794 llid, plid, reason); 811 sta->sta.addr, llid, plid, reason);
795 break; 812 break;
796 default: 813 default:
797 spin_unlock_bh(&sta->lock); 814 spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d6470c7fd6ce..72c8bea81a6c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -16,10 +16,12 @@
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/if_arp.h> 17#include <linux/if_arp.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/moduleparam.h>
19#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h> 21#include <linux/pm_qos.h>
21#include <linux/crc32.h> 22#include <linux/crc32.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/export.h>
23#include <net/mac80211.h> 25#include <net/mac80211.h>
24#include <asm/unaligned.h> 26#include <asm/unaligned.h>
25 27
@@ -160,7 +162,8 @@ static int ecw2cw(int ecw)
160 */ 162 */
161static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, 163static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
162 struct ieee80211_ht_info *hti, 164 struct ieee80211_ht_info *hti,
163 const u8 *bssid, u16 ap_ht_cap_flags) 165 const u8 *bssid, u16 ap_ht_cap_flags,
166 bool beacon_htcap_ie)
164{ 167{
165 struct ieee80211_local *local = sdata->local; 168 struct ieee80211_local *local = sdata->local;
166 struct ieee80211_supported_band *sband; 169 struct ieee80211_supported_band *sband;
@@ -232,6 +235,21 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 235 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
233 } 236 }
234 237
238 if (beacon_htcap_ie && (prev_chantype != channel_type)) {
239 /*
240 * Whenever the AP announces an HT mode change (for example
241 * becoming 40 MHz intolerant), it is safer to stop the tx
242 * queues before doing the hw config, to avoid buffer overflow.
243 */
244 ieee80211_stop_queues_by_reason(&sdata->local->hw,
245 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
246
247 /* flush out all packets */
248 synchronize_net();
249
250 drv_flush(local, false);
251 }
252
235 /* channel_type change automatically detected */ 253 /* channel_type change automatically detected */
236 ieee80211_hw_config(local, 0); 254 ieee80211_hw_config(local, 0);
237 255
@@ -243,6 +261,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
243 IEEE80211_RC_HT_CHANGED, 261 IEEE80211_RC_HT_CHANGED,
244 channel_type); 262 channel_type);
245 rcu_read_unlock(); 263 rcu_read_unlock();
264
265 if (beacon_htcap_ie)
266 ieee80211_wake_queues_by_reason(&sdata->local->hw,
267 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
246 } 268 }
247 269
248 ht_opmode = le16_to_cpu(hti->operation_mode); 270 ht_opmode = le16_to_cpu(hti->operation_mode);
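
The queue handling added above is order-sensitive; condensed, the beacon-driven channel-type change performs (kernel helpers exactly as named in the hunk):

	ieee80211_stop_queues_by_reason(&local->hw,
			IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);  /* no new tx */
	synchronize_net();              /* let in-flight tx paths finish */
	drv_flush(local, false);        /* drain the hardware queues */
	ieee80211_hw_config(local, 0);  /* now switch the channel type */
	/* ... rate control is notified of IEEE80211_RC_HT_CHANGED ... */
	ieee80211_wake_queues_by_reason(&local->hw,
			IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
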
@@ -271,11 +293,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
271 struct ieee80211_mgmt *mgmt; 293 struct ieee80211_mgmt *mgmt;
272 294
273 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 295 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
274 if (!skb) { 296 if (!skb)
275 printk(KERN_DEBUG "%s: failed to allocate buffer for "
276 "deauth/disassoc frame\n", sdata->name);
277 return; 297 return;
278 } 298
279 skb_reserve(skb, local->hw.extra_tx_headroom); 299 skb_reserve(skb, local->hw.extra_tx_headroom);
280 300
281 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 301 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -330,6 +350,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
330{ 350{
331 struct sk_buff *skb; 351 struct sk_buff *skb;
332 struct ieee80211_hdr_3addr *nullfunc; 352 struct ieee80211_hdr_3addr *nullfunc;
353 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
333 354
334 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); 355 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
335 if (!skb) 356 if (!skb)
@@ -340,6 +361,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
340 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 361 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
341 362
342 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 363 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
364 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
365 IEEE80211_STA_CONNECTION_POLL))
366 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
367
343 ieee80211_tx_skb(sdata, skb); 368 ieee80211_tx_skb(sdata, skb);
344} 369}
345 370
@@ -354,11 +379,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
354 return; 379 return;
355 380
356 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); 381 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
357 if (!skb) { 382 if (!skb)
358 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
359 "nullfunc frame\n", sdata->name);
360 return; 383 return;
361 } 384
362 skb_reserve(skb, local->hw.extra_tx_headroom); 385 skb_reserve(skb, local->hw.extra_tx_headroom);
363 386
364 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); 387 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
@@ -394,6 +417,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
394 /* call "hw_config" only if doing sw channel switch */ 417 /* call "hw_config" only if doing sw channel switch */
395 ieee80211_hw_config(sdata->local, 418 ieee80211_hw_config(sdata->local,
396 IEEE80211_CONF_CHANGE_CHANNEL); 419 IEEE80211_CONF_CHANGE_CHANNEL);
420 } else {
421 /* update the device channel directly */
422 sdata->local->hw.conf.channel = sdata->local->oper_channel;
397 } 423 }
398 424
399 /* XXX: shouldn't really modify cfg80211-owned data! */ 425 /* XXX: shouldn't really modify cfg80211-owned data! */
@@ -608,11 +634,14 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
608{ 634{
609 struct ieee80211_if_managed *mgd = &sdata->u.mgd; 635 struct ieee80211_if_managed *mgd = &sdata->u.mgd;
610 struct sta_info *sta = NULL; 636 struct sta_info *sta = NULL;
611 u32 sta_flags = 0; 637 bool authorized = false;
612 638
613 if (!mgd->powersave) 639 if (!mgd->powersave)
614 return false; 640 return false;
615 641
642 if (mgd->broken_ap)
643 return false;
644
616 if (!mgd->associated) 645 if (!mgd->associated)
617 return false; 646 return false;
618 647
@@ -626,13 +655,10 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
626 rcu_read_lock(); 655 rcu_read_lock();
627 sta = sta_info_get(sdata, mgd->bssid); 656 sta = sta_info_get(sdata, mgd->bssid);
628 if (sta) 657 if (sta)
629 sta_flags = get_sta_flags(sta); 658 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
630 rcu_read_unlock(); 659 rcu_read_unlock();
631 660
632 if (!(sta_flags & WLAN_STA_AUTHORIZED)) 661 return authorized;
633 return false;
634
635 return true;
636} 662}
637 663
638/* need to hold RTNL or interface lock */ 664/* need to hold RTNL or interface lock */
@@ -917,8 +943,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
917 params.aifs, params.cw_min, params.cw_max, 943 params.aifs, params.cw_min, params.cw_max,
918 params.txop, params.uapsd); 944 params.txop, params.uapsd);
919#endif 945#endif
920 local->tx_conf[queue] = params; 946 sdata->tx_conf[queue] = params;
921 if (drv_conf_tx(local, queue, &params)) 947 if (drv_conf_tx(local, sdata, queue, &params))
922 wiphy_debug(local->hw.wiphy, 948 wiphy_debug(local->hw.wiphy,
923 "failed to set TX queue parameters for queue %d\n", 949 "failed to set TX queue parameters for queue %d\n",
924 queue); 950 queue);
@@ -1076,7 +1102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1076 mutex_lock(&local->sta_mtx); 1102 mutex_lock(&local->sta_mtx);
1077 sta = sta_info_get(sdata, bssid); 1103 sta = sta_info_get(sdata, bssid);
1078 if (sta) { 1104 if (sta) {
1079 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 1105 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1080 ieee80211_sta_tear_down_BA_sessions(sta, tx); 1106 ieee80211_sta_tear_down_BA_sessions(sta, tx);
1081 } 1107 }
1082 mutex_unlock(&local->sta_mtx); 1108 mutex_unlock(&local->sta_mtx);
@@ -1118,8 +1144,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1118 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 1144 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
1119 ieee80211_bss_info_change_notify(sdata, changed); 1145 ieee80211_bss_info_change_notify(sdata, changed);
1120 1146
1147 /* remove AP and TDLS peers */
1121 if (remove_sta) 1148 if (remove_sta)
1122 sta_info_destroy_addr(sdata, bssid); 1149 sta_info_flush(local, sdata);
1123 1150
1124 del_timer_sync(&sdata->u.mgd.conn_mon_timer); 1151 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1125 del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 1152 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1220,7 +1247,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1220 } else { 1247 } else {
1221 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1248 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1222 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, 1249 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
1223 (u32) -1, true); 1250 (u32) -1, true, false);
1224 } 1251 }
1225 1252
1226 ifmgd->probe_send_count++; 1253 ifmgd->probe_send_count++;
@@ -1467,10 +1494,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1467 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1494 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1468 1495
1469 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1496 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1470 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1497 printk(KERN_DEBUG
1471 "set\n", sdata->name, aid); 1498 "%s: invalid AID value 0x%x; bits 15:14 not set\n",
1499 sdata->name, aid);
1472 aid &= ~(BIT(15) | BIT(14)); 1500 aid &= ~(BIT(15) | BIT(14));
1473 1501
1502 ifmgd->broken_ap = false;
1503
1504 if (aid == 0 || aid > IEEE80211_MAX_AID) {
1505 printk(KERN_DEBUG
1506 "%s: invalid AID value %d (out of range), turn off PS\n",
1507 sdata->name, aid);
1508 aid = 0;
1509 ifmgd->broken_ap = true;
1510 }
1511
1474 pos = mgmt->u.assoc_resp.variable; 1512 pos = mgmt->u.assoc_resp.variable;
1475 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1513 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1476 1514
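
802.11 transmits the AID in the association response with bits 15:14 set; after masking them off, a valid AID lies in 1..2007 (IEEE80211_MAX_AID in the kernel). A self-contained version of the new sanity check, with aid_is_broken as a hypothetical name:

	#include <stdbool.h>
	#include <stdint.h>

	#define IEEE80211_MAX_AID 2007  /* largest association ID 802.11 allows */

	static bool aid_is_broken(uint16_t aid_field, uint16_t *aid_out)
	{
		/* bits 15:14 are always set on the air */
		uint16_t aid = aid_field & (uint16_t)~((1u << 15) | (1u << 14));

		*aid_out = aid;
		return aid == 0 || aid > IEEE80211_MAX_AID;  /* flips broken_ap */
	}
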
@@ -1482,17 +1520,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1482 1520
1483 ifmgd->aid = aid; 1521 ifmgd->aid = aid;
1484 1522
1485 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL); 1523 mutex_lock(&sdata->local->sta_mtx);
1486 if (!sta) { 1524 /*
1487 printk(KERN_DEBUG "%s: failed to alloc STA entry for" 1525 * station info was already allocated and inserted before
1488 " the AP\n", sdata->name); 1526 * the association and should be available to us
1527 */
1528 sta = sta_info_get_rx(sdata, cbss->bssid);
1529 if (WARN_ON(!sta)) {
1530 mutex_unlock(&sdata->local->sta_mtx);
1489 return false; 1531 return false;
1490 } 1532 }
1491 1533
1492 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | 1534 set_sta_flag(sta, WLAN_STA_AUTH);
1493 WLAN_STA_ASSOC_AP); 1535 set_sta_flag(sta, WLAN_STA_ASSOC);
1536 set_sta_flag(sta, WLAN_STA_ASSOC_AP);
1494 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 1537 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1495 set_sta_flags(sta, WLAN_STA_AUTHORIZED); 1538 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
1496 1539
1497 rates = 0; 1540 rates = 0;
1498 basic_rates = 0; 1541 basic_rates = 0;
@@ -1551,12 +1594,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1551 rate_control_rate_init(sta); 1594 rate_control_rate_init(sta);
1552 1595
1553 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) 1596 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
1554 set_sta_flags(sta, WLAN_STA_MFP); 1597 set_sta_flag(sta, WLAN_STA_MFP);
1555 1598
1556 if (elems.wmm_param) 1599 if (elems.wmm_param)
1557 set_sta_flags(sta, WLAN_STA_WME); 1600 set_sta_flag(sta, WLAN_STA_WME);
1558 1601
1559 err = sta_info_insert(sta); 1602 /* sta_info_reinsert will also unlock the mutex lock */
1603 err = sta_info_reinsert(sta);
1560 sta = NULL; 1604 sta = NULL;
1561 if (err) { 1605 if (err) {
1562 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1606 printk(KERN_DEBUG "%s: failed to insert STA entry for"
@@ -1584,7 +1628,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1584 (sdata->local->hw.queues >= 4) && 1628 (sdata->local->hw.queues >= 4) &&
1585 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1629 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1586 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1630 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1587 cbss->bssid, ap_ht_cap_flags); 1631 cbss->bssid, ap_ht_cap_flags,
1632 false);
1588 1633
1589 /* set AID and assoc capability, 1634 /* set AID and assoc capability,
1590 * ieee80211_set_associated() will tell the driver */ 1635 * ieee80211_set_associated() will tell the driver */
@@ -1918,7 +1963,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1918 rcu_read_unlock(); 1963 rcu_read_unlock();
1919 1964
1920 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1965 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1921 bssid, ap_ht_cap_flags); 1966 bssid, ap_ht_cap_flags, true);
1922 } 1967 }
1923 1968
1924 /* Note: country IE parsing is done for us by cfg80211 */ 1969 /* Note: country IE parsing is done for us by cfg80211 */
@@ -2429,6 +2474,29 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2429 return 0; 2474 return 0;
2430} 2475}
2431 2476
2477/* create and insert a dummy station entry */
2478static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
2479 u8 *bssid) {
2480 struct sta_info *sta;
2481 int err;
2482
2483 sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
2484 if (!sta)
2485 return -ENOMEM;
2486
2487 sta->dummy = true;
2488
2489 err = sta_info_insert(sta);
2490 sta = NULL;
2491 if (err) {
2492 printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
2493 " the AP (error %d)\n", sdata->name, err);
2494 return err;
2495 }
2496
2497 return 0;
2498}
2499
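
Taken together with the ieee80211_mgd_assoc() and ieee80211_assoc_done() hunks around it, the dummy entry has a small, well-defined lifecycle; a sketch assembled from this patch, not a literal excerpt:

	/*
	 * ieee80211_mgd_assoc()
	 *   ieee80211_pre_assoc():  sta_info_alloc(); sta->dummy = true;
	 *                           sta_info_insert();   // rx can accept EAPOL
	 * ieee80211_assoc_done()
	 *   success:  sta_info_get_rx() finds the dummy entry,
	 *             set_sta_flag(AUTH/ASSOC/...), sta_info_reinsert()
	 *   failure or timeout:
	 *             sta_info_destroy_addr() tears the dummy entry down
	 */
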
2432static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, 2500static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2433 struct sk_buff *skb) 2501 struct sk_buff *skb)
2434{ 2502{
@@ -2436,9 +2504,11 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2436 struct ieee80211_mgmt *mgmt; 2504 struct ieee80211_mgmt *mgmt;
2437 struct ieee80211_rx_status *rx_status; 2505 struct ieee80211_rx_status *rx_status;
2438 struct ieee802_11_elems elems; 2506 struct ieee802_11_elems elems;
2507 struct cfg80211_bss *cbss = wk->assoc.bss;
2439 u16 status; 2508 u16 status;
2440 2509
2441 if (!skb) { 2510 if (!skb) {
2511 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2442 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); 2512 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
2443 goto destroy; 2513 goto destroy;
2444 } 2514 }
@@ -2468,12 +2538,16 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2468 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { 2538 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
2469 mutex_unlock(&wk->sdata->u.mgd.mtx); 2539 mutex_unlock(&wk->sdata->u.mgd.mtx);
2470 /* oops -- internal error -- send timeout for now */ 2540 /* oops -- internal error -- send timeout for now */
2541 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2471 cfg80211_send_assoc_timeout(wk->sdata->dev, 2542 cfg80211_send_assoc_timeout(wk->sdata->dev,
2472 wk->filter_ta); 2543 wk->filter_ta);
2473 return WORK_DONE_DESTROY; 2544 return WORK_DONE_DESTROY;
2474 } 2545 }
2475 2546
2476 mutex_unlock(&wk->sdata->u.mgd.mtx); 2547 mutex_unlock(&wk->sdata->u.mgd.mtx);
2548 } else {
2549 /* assoc failed - destroy the dummy station entry */
2550 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2477 } 2551 }
2478 2552
2479 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); 2553 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@ -2492,7 +2566,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2492 struct ieee80211_bss *bss = (void *)req->bss->priv; 2566 struct ieee80211_bss *bss = (void *)req->bss->priv;
2493 struct ieee80211_work *wk; 2567 struct ieee80211_work *wk;
2494 const u8 *ssid; 2568 const u8 *ssid;
2495 int i; 2569 int i, err;
2496 2570
2497 mutex_lock(&ifmgd->mtx); 2571 mutex_lock(&ifmgd->mtx);
2498 if (ifmgd->associated) { 2572 if (ifmgd->associated) {
@@ -2517,6 +2591,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2517 if (!wk) 2591 if (!wk)
2518 return -ENOMEM; 2592 return -ENOMEM;
2519 2593
2594 /*
2595 * create a dummy station info entry in order
2596 * to start accepting incoming EAPOL packets from the station
2597 */
2598 err = ieee80211_pre_assoc(sdata, req->bss->bssid);
2599 if (err) {
2600 kfree(wk);
2601 return err;
2602 }
2603
2520 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 2604 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
2521 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 2605 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2522 2606
@@ -2674,7 +2758,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2674 req->reason_code, cookie, 2758 req->reason_code, cookie,
2675 !req->local_state_change); 2759 !req->local_state_change);
2676 if (assoc_bss) 2760 if (assoc_bss)
2677 sta_info_destroy_addr(sdata, bssid); 2761 sta_info_flush(sdata->local, sdata);
2678 2762
2679 mutex_lock(&sdata->local->mtx); 2763 mutex_lock(&sdata->local->mtx);
2680 ieee80211_recalc_idle(sdata->local); 2764 ieee80211_recalc_idle(sdata->local);
@@ -2714,7 +2798,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2714 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 2798 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
2715 IEEE80211_STYPE_DISASSOC, req->reason_code, 2799 IEEE80211_STYPE_DISASSOC, req->reason_code,
2716 cookie, !req->local_state_change); 2800 cookie, !req->local_state_change);
2717 sta_info_destroy_addr(sdata, bssid); 2801 sta_info_flush(sdata->local, sdata);
2718 2802
2719 mutex_lock(&sdata->local->mtx); 2803 mutex_lock(&sdata->local->mtx);
2720 ieee80211_recalc_idle(sdata->local); 2804 ieee80211_recalc_idle(sdata->local);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 13427b194ced..3d414411a96e 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -12,6 +12,7 @@
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15#include <linux/export.h>
15#include <net/mac80211.h> 16#include <net/mac80211.h>
16#include "ieee80211_i.h" 17#include "ieee80211_i.h"
17#include "driver-trace.h" 18#include "driver-trace.h"
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 6326d3439861..9ee7164b207c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -42,7 +42,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
42 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 42 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
43 mutex_lock(&local->sta_mtx); 43 mutex_lock(&local->sta_mtx);
44 list_for_each_entry(sta, &local->sta_list, list) { 44 list_for_each_entry(sta, &local->sta_list, list) {
45 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 45 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
46 ieee80211_sta_tear_down_BA_sessions(sta, true); 46 ieee80211_sta_tear_down_BA_sessions(sta, true);
47 } 47 }
48 mutex_unlock(&local->sta_mtx); 48 mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3d5a2cb835c4..5a5a7767d541 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h>
14#include "rate.h" 15#include "rate.h"
15#include "ieee80211_i.h" 16#include "ieee80211_i.h"
16#include "debugfs.h" 17#include "debugfs.h"
@@ -199,7 +200,7 @@ static void rate_control_release(struct kref *kref)
199 kfree(ctrl_ref); 200 kfree(ctrl_ref);
200} 201}
201 202
202static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc) 203static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
203{ 204{
204 struct sk_buff *skb = txrc->skb; 205 struct sk_buff *skb = txrc->skb;
205 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 206 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -208,7 +209,9 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
208 209
209 fc = hdr->frame_control; 210 fc = hdr->frame_control;
210 211
211 return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc); 212 return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
213 IEEE80211_TX_CTL_USE_MINRATE)) ||
214 !ieee80211_is_data(fc);
212} 215}
213 216
214static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, 217static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
@@ -233,6 +236,27 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
233 /* could not find a basic rate; use original selection */ 236 /* could not find a basic rate; use original selection */
234} 237}
235 238
239static inline s8
240rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
241 struct ieee80211_sta *sta)
242{
243 int i;
244
245 for (i = 0; i < sband->n_bitrates; i++) {
246 struct ieee80211_rate *srate = &sband->bitrates[i];
247 if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
248 (srate->bitrate == 55) || (srate->bitrate == 110))
249 continue;
250
251 if (rate_supported(sta, sband->band, i))
252 return i;
253 }
254
255 /* No matching rate found */
256 return 0;
257}
258
259
236bool rate_control_send_low(struct ieee80211_sta *sta, 260bool rate_control_send_low(struct ieee80211_sta *sta,
237 void *priv_sta, 261 void *priv_sta,
238 struct ieee80211_tx_rate_control *txrc) 262 struct ieee80211_tx_rate_control *txrc)
@@ -241,8 +265,14 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
241 struct ieee80211_supported_band *sband = txrc->sband; 265 struct ieee80211_supported_band *sband = txrc->sband;
242 int mcast_rate; 266 int mcast_rate;
243 267
244 if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) { 268 if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
245 info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta); 269 if ((sband->band != IEEE80211_BAND_2GHZ) ||
270 !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
271 info->control.rates[0].idx =
272 rate_lowest_index(txrc->sband, sta);
273 else
274 info->control.rates[0].idx =
275 rate_lowest_non_cck_index(txrc->sband, sta);
246 info->control.rates[0].count = 276 info->control.rates[0].count =
247 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 277 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
248 1 : txrc->hw->max_rate_tries; 278 1 : txrc->hw->max_rate_tries;
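
Both hunks above hinge on the same detail: struct ieee80211_rate.bitrate is in units of 100 kbps, so the skipped values 10, 20, 55 and 110 are the 802.11b CCK/DSSS rates 1, 2, 5.5 and 11 Mbps that IEEE80211_TX_CTL_NO_CCK_RATE is meant to avoid. The predicate in stand-alone form (is_80211b_rate is a hypothetical name):

	#include <stdbool.h>

	static bool is_80211b_rate(int bitrate_100kbps)
	{
		return bitrate_100kbps == 10 || bitrate_100kbps == 20 ||
		       bitrate_100kbps == 55 || bitrate_100kbps == 110;
	}
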
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index a290ad231d77..d5a56226e675 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -50,6 +50,7 @@
50#include <linux/debugfs.h> 50#include <linux/debugfs.h>
51#include <linux/ieee80211.h> 51#include <linux/ieee80211.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/export.h>
53#include <net/mac80211.h> 54#include <net/mac80211.h>
54#include "rc80211_minstrel.h" 55#include "rc80211_minstrel.h"
55 56
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 66a1eeb279c6..cdb28535716b 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -281,6 +281,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
281 281
282 mr = minstrel_get_ratestats(mi, mg->max_tp_rate); 282 mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
283 if (cur_tp < mr->cur_tp) { 283 if (cur_tp < mr->cur_tp) {
284 mi->max_tp_rate2 = mi->max_tp_rate;
285 cur_tp2 = cur_tp;
284 mi->max_tp_rate = mg->max_tp_rate; 286 mi->max_tp_rate = mg->max_tp_rate;
285 cur_tp = mr->cur_tp; 287 cur_tp = mr->cur_tp;
286 } 288 }
@@ -452,7 +454,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
452 454
453 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 455 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
454 minstrel_ht_update_stats(mp, mi); 456 minstrel_ht_update_stats(mp, mi);
455 minstrel_aggr_check(mp, sta, skb); 457 if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
458 minstrel_aggr_check(mp, sta, skb);
456 } 459 }
457} 460}
458 461
@@ -608,7 +611,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
608 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); 611 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
609 612
610 info->flags |= mi->tx_flags; 613 info->flags |= mi->tx_flags;
611 sample_idx = minstrel_get_sample_rate(mp, mi); 614
615 /* Don't use EAPOL frames for sampling on non-mrr hw */
616 if (mp->hw->max_rates == 1 &&
617 txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
618 sample_idx = -1;
619 else
620 sample_idx = minstrel_get_sample_rate(mp, mi);
612 621
613#ifdef CONFIG_MAC80211_DEBUGFS 622#ifdef CONFIG_MAC80211_DEBUGFS
614 /* use fixed index if set */ 623 /* use fixed index if set */
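The minstrel_ht change above avoids picking a sampling rate for EAPOL frames on hardware with a single rate slot, where a failed sample attempt has no multi-rate-retry fallback and can stall the 4-way handshake. A standalone model of the guard (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE 0x888e   /* EAPOL ethertype, as in <linux/if_ether.h> */

/* Model of the guard added to minstrel_ht_get_rate(): on hardware
 * with a single rate slot (no multi-rate retry), never pick a
 * sampling rate for EAPOL frames, since a lost handshake frame is
 * far more costly than a lost data frame. */
static bool may_sample(int hw_max_rates, uint16_t protocol)
{
    return !(hw_max_rates == 1 && protocol == ETH_P_PAE);
}

int main(void)
{
    printf("%d\n", may_sample(1, ETH_P_PAE));  /* 0: don't sample */
    printf("%d\n", may_sample(4, ETH_P_PAE));  /* 1: mrr hw is fine */
    return 0;
}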
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index cefcb5d2dae6..e788f76a1dfe 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -10,6 +10,7 @@
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/ieee80211.h> 12#include <linux/ieee80211.h>
13#include <linux/export.h>
13#include <net/mac80211.h> 14#include <net/mac80211.h>
14#include "rc80211_minstrel.h" 15#include "rc80211_minstrel.h"
15#include "rc80211_minstrel_ht.h" 16#include "rc80211_minstrel_ht.h"
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 4851e9e2daed..c97a0657c043 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/export.h>
16 17
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "rate.h" 19#include "rate.h"
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fe2c2a717793..bb53726cb04a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -16,6 +16,7 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/export.h>
19#include <net/mac80211.h> 20#include <net/mac80211.h>
20#include <net/ieee80211_radiotap.h> 21#include <net/ieee80211_radiotap.h>
21 22
@@ -476,7 +477,6 @@ static ieee80211_rx_result
476ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 477ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
477{ 478{
478 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 479 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
479 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
480 char *dev_addr = rx->sdata->vif.addr; 480 char *dev_addr = rx->sdata->vif.addr;
481 481
482 if (ieee80211_is_data(hdr->frame_control)) { 482 if (ieee80211_is_data(hdr->frame_control)) {
@@ -524,14 +524,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
524 524
525 } 525 }
526 526
527#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
528
529 if (ieee80211_is_data(hdr->frame_control) &&
530 is_multicast_ether_addr(hdr->addr1) &&
531 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
532 return RX_DROP_MONITOR;
533#undef msh_h_get
534
535 return RX_CONTINUE; 527 return RX_CONTINUE;
536} 528}
537 529
@@ -850,8 +842,21 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
850 ieee80211_is_pspoll(hdr->frame_control)) && 842 ieee80211_is_pspoll(hdr->frame_control)) &&
851 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 843 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
852 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 844 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
853 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) 845 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
846 if (rx->sta && rx->sta->dummy &&
847 ieee80211_is_data_present(hdr->frame_control)) {
848 u16 ethertype;
849 u8 *payload;
850
851 payload = rx->skb->data +
852 ieee80211_hdrlen(hdr->frame_control);
853 ethertype = (payload[6] << 8) | payload[7];
854 if (cpu_to_be16(ethertype) ==
855 rx->sdata->control_port_protocol)
856 return RX_CONTINUE;
857 }
854 return RX_DROP_MONITOR; 858 return RX_DROP_MONITOR;
859 }
855 860
856 return RX_CONTINUE; 861 return RX_CONTINUE;
857} 862}
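The dummy-station path above peeks at the ethertype so that control-port frames (EAPOL by default) can pass before the station is fully associated: for a data frame the payload begins with an 8-byte LLC/SNAP header, whose last two bytes carry the ethertype in network byte order. A self-contained sketch of that peek:

#include <stdint.h>
#include <stdio.h>

/* Model of the ethertype peek in ieee80211_rx_h_check(): the payload
 * after the 802.11 header starts with an LLC/SNAP header
 * (AA AA 03 00 00 00 <ethertype>), so bytes 6 and 7 hold the
 * ethertype, big-endian. */
static uint16_t snap_ethertype(const uint8_t *payload)
{
    return (uint16_t)((payload[6] << 8) | payload[7]);
}

int main(void)
{
    /* EAPOL-encapsulated frame: ethertype 0x888e */
    uint8_t llc[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e };
    printf("0x%04x\n", snap_ethertype(llc));
    return 0;
}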
@@ -1106,7 +1111,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
1106 struct ieee80211_local *local = sdata->local; 1111 struct ieee80211_local *local = sdata->local;
1107 1112
1108 atomic_inc(&sdata->bss->num_sta_ps); 1113 atomic_inc(&sdata->bss->num_sta_ps);
1109 set_sta_flags(sta, WLAN_STA_PS_STA); 1114 set_sta_flag(sta, WLAN_STA_PS_STA);
1110 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1115 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1111 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1116 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1112#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1117#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -1126,7 +1131,7 @@ static void ap_sta_ps_end(struct sta_info *sta)
1126 sdata->name, sta->sta.addr, sta->sta.aid); 1131 sdata->name, sta->sta.addr, sta->sta.aid);
1127#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1132#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1128 1133
1129 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1134 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1130#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1135#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1131 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1136 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1132 sdata->name, sta->sta.addr, sta->sta.aid); 1137 sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1145,7 +1150,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1145 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS)); 1150 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1146 1151
1147 /* Don't let the same PS state be set twice */ 1152 /* Don't let the same PS state be set twice */
1148 in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA); 1153 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1149 if ((start && in_ps) || (!start && !in_ps)) 1154 if ((start && in_ps) || (!start && !in_ps))
1150 return -EINVAL; 1155 return -EINVAL;
1151 1156
@@ -1159,6 +1164,81 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1159EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1164EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1160 1165
1161static ieee80211_rx_result debug_noinline 1166static ieee80211_rx_result debug_noinline
1167ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1168{
1169 struct ieee80211_sub_if_data *sdata = rx->sdata;
1170 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1172 int tid, ac;
1173
1174 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1175 return RX_CONTINUE;
1176
1177 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1178 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1179 return RX_CONTINUE;
1180
1181 /*
1182 * The device handles station powersave, so don't do anything about
 1183 * uAPSD and PS-Poll frames (the latter shouldn't even be passed
 1184 * up to mac80211, since the device handles them.)
1185 */
1186 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1187 return RX_CONTINUE;
1188
1189 /*
1190 * Don't do anything if the station isn't already asleep. In
1191 * the uAPSD case, the station will probably be marked asleep,
1192 * in the PS-Poll case the station must be confused ...
1193 */
1194 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1195 return RX_CONTINUE;
1196
1197 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1198 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1199 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1200 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1201 else
1202 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1203 }
1204
1205 /* Free PS Poll skb here instead of returning RX_DROP that would
 1206 * count as a dropped frame. */
1207 dev_kfree_skb(rx->skb);
1208
1209 return RX_QUEUED;
1210 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1211 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1212 ieee80211_has_pm(hdr->frame_control) &&
1213 (ieee80211_is_data_qos(hdr->frame_control) ||
1214 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1215 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1216 ac = ieee802_1d_to_ac[tid & 7];
1217
1218 /*
1219 * If this AC is not trigger-enabled do nothing.
1220 *
1221 * NB: This could/should check a separate bitmap of trigger-
1222 * enabled queues, but for now we only implement uAPSD w/o
1223 * TSPEC changes to the ACs, so they're always the same.
1224 */
1225 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1226 return RX_CONTINUE;
1227
1228 /* if we are in a service period, do nothing */
1229 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1230 return RX_CONTINUE;
1231
1232 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1233 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1234 else
1235 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1236 }
1237
1238 return RX_CONTINUE;
1239}
1240
1241static ieee80211_rx_result debug_noinline
1162ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 1242ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1163{ 1243{
1164 struct sta_info *sta = rx->sta; 1244 struct sta_info *sta = rx->sta;
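The new uAPSD/PS-Poll handler classifies a potential trigger frame by pulling the TID out of the QoS control field and mapping it to an access category, which is then tested against the station's delivery-enabled queues. A standalone model of that classification, using the same priority-to-AC table mac80211 uses (kernel AC numbering assumed: VO=0, VI=1, BE=2, BK=3):

#include <stdint.h>
#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };

/* 802.1d priority (= TID 0..7) to access category, mirroring
 * mac80211's ieee802_1d_to_ac[] table. */
static const int ieee802_1d_to_ac[8] = {
    AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

#define QOS_CTL_TID_MASK 0x000f

/* Model of the classification step in the new handler: pull the TID
 * out of the QoS control field, then map it to an AC that can be
 * checked against sta->sta.uapsd_queues. */
static int qos_ctl_to_ac(uint16_t qos_ctl)
{
    int tid = qos_ctl & QOS_CTL_TID_MASK;
    return ieee802_1d_to_ac[tid & 7];
}

int main(void)
{
    printf("%d\n", qos_ctl_to_ac(6)); /* TID 6 -> AC_VO (0) */
    return 0;
}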
@@ -1216,7 +1296,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1216 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1296 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1217 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1297 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1218 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 1298 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1219 if (test_sta_flags(sta, WLAN_STA_PS_STA)) { 1299 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1220 /* 1300 /*
1221 * Ignore doze->wake transitions that are 1301 * Ignore doze->wake transitions that are
1222 * indicated by non-data frames, the standard 1302 * indicated by non-data frames, the standard
@@ -1469,33 +1549,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1469} 1549}
1470 1550
1471static ieee80211_rx_result debug_noinline 1551static ieee80211_rx_result debug_noinline
1472ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1473{
1474 struct ieee80211_sub_if_data *sdata = rx->sdata;
1475 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1476 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1477
1478 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1479 !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
1480 return RX_CONTINUE;
1481
1482 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1483 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1484 return RX_DROP_UNUSABLE;
1485
1486 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1487 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1488 else
1489 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1490
1491 /* Free PS Poll skb here instead of returning RX_DROP that would
1492 * count as an dropped frame. */
1493 dev_kfree_skb(rx->skb);
1494
1495 return RX_QUEUED;
1496}
1497
1498static ieee80211_rx_result debug_noinline
1499ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) 1552ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1500{ 1553{
1501 u8 *data = rx->skb->data; 1554 u8 *data = rx->skb->data;
@@ -1518,7 +1571,7 @@ static int
1518ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1571ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1519{ 1572{
1520 if (unlikely(!rx->sta || 1573 if (unlikely(!rx->sta ||
1521 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED))) 1574 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1522 return -EACCES; 1575 return -EACCES;
1523 1576
1524 return 0; 1577 return 0;
@@ -1561,7 +1614,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1561 if (status->flag & RX_FLAG_DECRYPTED) 1614 if (status->flag & RX_FLAG_DECRYPTED)
1562 return 0; 1615 return 0;
1563 1616
1564 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { 1617 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1565 if (unlikely(!ieee80211_has_protected(fc) && 1618 if (unlikely(!ieee80211_has_protected(fc) &&
1566 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1619 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1567 rx->key)) { 1620 rx->key)) {
@@ -1827,6 +1880,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1827 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1880 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1828 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1881 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1829 1882
1883 /* frame is in RMC, don't forward */
1884 if (ieee80211_is_data(hdr->frame_control) &&
1885 is_multicast_ether_addr(hdr->addr1) &&
1886 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1887 return RX_DROP_MONITOR;
1888
1830 if (!ieee80211_is_data(hdr->frame_control)) 1889 if (!ieee80211_is_data(hdr->frame_control))
1831 return RX_CONTINUE; 1890 return RX_CONTINUE;
1832 1891
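mesh_rmc_check() above consults the recent multicast cache: a frame whose (source address, mesh sequence number) pair was already seen is dropped instead of being forwarded again. A toy model of such a cache; the kernel version is a hashed, size-bounded cache with entry expiry, so this ring buffer is purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RMC_SLOTS 16

struct rmc_entry { uint8_t sa[6]; uint32_t seqnum; };

static struct rmc_entry rmc[RMC_SLOTS];
static int rmc_next;

/* Remember (source, seqnum) pairs and report duplicates. */
static bool rmc_check(const uint8_t *sa, uint32_t seqnum)
{
    for (int i = 0; i < RMC_SLOTS; i++)
        if (rmc[i].seqnum == seqnum && !memcmp(rmc[i].sa, sa, 6))
            return true;               /* seen before: drop it */
    memcpy(rmc[rmc_next].sa, sa, 6);
    rmc[rmc_next].seqnum = seqnum;
    rmc_next = (rmc_next + 1) % RMC_SLOTS;
    return false;
}

int main(void)
{
    uint8_t sa[6] = { 2, 0, 0, 0, 0, 1 };
    printf("%d %d\n", rmc_check(sa, 7), rmc_check(sa, 7)); /* 0 1 */
    return 0;
}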
@@ -1834,6 +1893,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1834 /* illegal frame */ 1893 /* illegal frame */
1835 return RX_DROP_MONITOR; 1894 return RX_DROP_MONITOR;
1836 1895
1896 if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
1897 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1898 dropped_frames_congestion);
1899 return RX_DROP_MONITOR;
1900 }
1901
1837 if (mesh_hdr->flags & MESH_FLAGS_AE) { 1902 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1838 struct mesh_path *mppath; 1903 struct mesh_path *mppath;
1839 char *proxied_addr; 1904 char *proxied_addr;
@@ -1889,13 +1954,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1889 memset(info, 0, sizeof(*info)); 1954 memset(info, 0, sizeof(*info));
1890 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1955 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1891 info->control.vif = &rx->sdata->vif; 1956 info->control.vif = &rx->sdata->vif;
1892 skb_set_queue_mapping(skb, 1957 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1893 ieee80211_select_queue(rx->sdata, fwd_skb));
1894 ieee80211_set_qos_hdr(local, skb);
1895 if (is_multicast_ether_addr(fwd_hdr->addr1))
1896 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1958 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1897 fwded_mcast); 1959 fwded_mcast);
1898 else { 1960 skb_set_queue_mapping(fwd_skb,
1961 ieee80211_select_queue(sdata, fwd_skb));
1962 ieee80211_set_qos_hdr(sdata, fwd_skb);
1963 } else {
1899 int err; 1964 int err;
1900 /* 1965 /*
1901 * Save TA to addr1 to send TA a path error if a 1966 * Save TA to addr1 to send TA a path error if a
@@ -2220,12 +2285,29 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2220 goto handled; 2285 goto handled;
2221 } 2286 }
2222 break; 2287 break;
2288 case WLAN_CATEGORY_SELF_PROTECTED:
2289 switch (mgmt->u.action.u.self_prot.action_code) {
2290 case WLAN_SP_MESH_PEERING_OPEN:
2291 case WLAN_SP_MESH_PEERING_CLOSE:
2292 case WLAN_SP_MESH_PEERING_CONFIRM:
2293 if (!ieee80211_vif_is_mesh(&sdata->vif))
2294 goto invalid;
2295 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2296 /* userspace handles this frame */
2297 break;
2298 goto queue;
2299 case WLAN_SP_MGK_INFORM:
2300 case WLAN_SP_MGK_ACK:
2301 if (!ieee80211_vif_is_mesh(&sdata->vif))
2302 goto invalid;
2303 break;
2304 }
2305 break;
2223 case WLAN_CATEGORY_MESH_ACTION: 2306 case WLAN_CATEGORY_MESH_ACTION:
2224 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2307 if (!ieee80211_vif_is_mesh(&sdata->vif))
2225 break; 2308 break;
2226 goto queue; 2309 if (mesh_action_is_path_sel(mgmt) &&
2227 case WLAN_CATEGORY_MESH_PATH_SEL: 2310 (!mesh_path_sel_is_hwmp(sdata)))
2228 if (!mesh_path_sel_is_hwmp(sdata))
2229 break; 2311 break;
2230 goto queue; 2312 goto queue;
2231 } 2313 }
@@ -2534,17 +2616,17 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2534 2616
2535 CALL_RXH(ieee80211_rx_h_decrypt) 2617 CALL_RXH(ieee80211_rx_h_decrypt)
2536 CALL_RXH(ieee80211_rx_h_check_more_data) 2618 CALL_RXH(ieee80211_rx_h_check_more_data)
2619 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2537 CALL_RXH(ieee80211_rx_h_sta_process) 2620 CALL_RXH(ieee80211_rx_h_sta_process)
2538 CALL_RXH(ieee80211_rx_h_defragment) 2621 CALL_RXH(ieee80211_rx_h_defragment)
2539 CALL_RXH(ieee80211_rx_h_ps_poll)
2540 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2622 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2541 /* must be after MMIC verify so header is counted in MPDU mic */ 2623 /* must be after MMIC verify so header is counted in MPDU mic */
2542 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2543 CALL_RXH(ieee80211_rx_h_amsdu)
2544#ifdef CONFIG_MAC80211_MESH 2624#ifdef CONFIG_MAC80211_MESH
2545 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 2625 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2546 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2626 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2547#endif 2627#endif
2628 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2629 CALL_RXH(ieee80211_rx_h_amsdu)
2548 CALL_RXH(ieee80211_rx_h_data) 2630 CALL_RXH(ieee80211_rx_h_data)
2549 CALL_RXH(ieee80211_rx_h_ctrl); 2631 CALL_RXH(ieee80211_rx_h_ctrl);
2550 CALL_RXH(ieee80211_rx_h_mgmt_check) 2632 CALL_RXH(ieee80211_rx_h_mgmt_check)
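The reordering above matters because CALL_RXH short-circuits: each handler gets the frame in turn, and any result other than RX_CONTINUE ends processing, so mesh forwarding now sees the frame before the QoS control field is stripped. A rough, runnable model of the pattern (the kernel macro differs in detail):

#include <stdio.h>

typedef enum { RX_CONTINUE, RX_QUEUED, RX_DROP_MONITOR } rx_result;

struct rx_data { int frame_id; };

static rx_result h_decrypt(struct rx_data *rx)  { return RX_CONTINUE; }
static rx_result h_mesh_fwd(struct rx_data *rx) { return RX_QUEUED; }
static rx_result h_data(struct rx_data *rx)     { return RX_CONTINUE; }

/* Any non-RX_CONTINUE result ends the pipeline for this frame. */
#define CALL_RXH(rxh)              \
    do {                           \
        res = rxh(&rx);            \
        if (res != RX_CONTINUE)    \
            goto done;             \
    } while (0)

int main(void)
{
    struct rx_data rx = { 1 };
    rx_result res;

    CALL_RXH(h_decrypt);
    CALL_RXH(h_mesh_fwd);   /* consumes the frame: h_data never runs */
    CALL_RXH(h_data);
done:
    printf("result %d\n", res);
    return 0;
}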
@@ -2686,7 +2768,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2686 } else if (!ieee80211_bssid_match(bssid, 2768 } else if (!ieee80211_bssid_match(bssid,
2687 sdata->vif.addr)) { 2769 sdata->vif.addr)) {
2688 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) && 2770 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2689 !ieee80211_is_beacon(hdr->frame_control)) 2771 !ieee80211_is_beacon(hdr->frame_control) &&
2772 !(ieee80211_is_action(hdr->frame_control) &&
2773 sdata->vif.p2p))
2690 return 0; 2774 return 0;
2691 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2775 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2692 } 2776 }
@@ -2791,7 +2875,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2791 if (ieee80211_is_data(fc)) { 2875 if (ieee80211_is_data(fc)) {
2792 prev_sta = NULL; 2876 prev_sta = NULL;
2793 2877
2794 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2878 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2795 if (!prev_sta) { 2879 if (!prev_sta) {
2796 prev_sta = sta; 2880 prev_sta = sta;
2797 continue; 2881 continue;
@@ -2835,7 +2919,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2835 continue; 2919 continue;
2836 } 2920 }
2837 2921
2838 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2922 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2839 rx.sdata = prev; 2923 rx.sdata = prev;
2840 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2924 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2841 2925
@@ -2843,7 +2927,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2843 } 2927 }
2844 2928
2845 if (prev) { 2929 if (prev) {
2846 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2930 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2847 rx.sdata = prev; 2931 rx.sdata = prev;
2848 2932
2849 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2933 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 6f09eca01112..105436dbb90d 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,9 +14,10 @@
14 14
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/pm_qos_params.h> 17#include <linux/pm_qos.h>
18#include <net/sch_generic.h> 18#include <net/sch_generic.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/export.h>
20#include <net/mac80211.h> 21#include <net/mac80211.h>
21 22
22#include "ieee80211_i.h" 23#include "ieee80211_i.h"
@@ -254,6 +255,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
254 req->ie, req->ie_len, band, 255 req->ie, req->ie_len, band,
255 req->rates[band], 0); 256 req->rates[band], 0);
256 local->hw_scan_req->ie_len = ielen; 257 local->hw_scan_req->ie_len = ielen;
258 local->hw_scan_req->no_cck = req->no_cck;
257 259
258 return true; 260 return true;
259} 261}
@@ -660,7 +662,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
660 local->scan_req->ssids[i].ssid, 662 local->scan_req->ssids[i].ssid,
661 local->scan_req->ssids[i].ssid_len, 663 local->scan_req->ssids[i].ssid_len,
662 local->scan_req->ie, local->scan_req->ie_len, 664 local->scan_req->ie, local->scan_req->ie_len,
663 local->scan_req->rates[band], false); 665 local->scan_req->rates[band], false,
666 local->scan_req->no_cck);
664 667
665 /* 668 /*
666 * After sending probe requests, wait for probe responses 669 * After sending probe requests, wait for probe responses
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 7733f66ee2c4..578eea3fc04d 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
32 32
33 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + 33 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
34 sizeof(struct ieee80211_msrment_ie)); 34 sizeof(struct ieee80211_msrment_ie));
35 35 if (!skb)
36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->name);
39 return; 36 return;
40 }
41 37
42 skb_reserve(skb, local->hw.extra_tx_headroom); 38 skb_reserve(skb, local->hw.extra_tx_headroom);
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 39 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 21070e9bc8d0..ce962d2c8782 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -24,6 +24,7 @@
24#include "sta_info.h" 24#include "sta_info.h"
25#include "debugfs_sta.h" 25#include "debugfs_sta.h"
26#include "mesh.h" 26#include "mesh.h"
27#include "wme.h"
27 28
28/** 29/**
29 * DOC: STA information lifetime rules 30 * DOC: STA information lifetime rules
@@ -72,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
72 if (!s) 73 if (!s)
73 return -ENOENT; 74 return -ENOENT;
74 if (s == sta) { 75 if (s == sta) {
75 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], 76 RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
76 s->hnext); 77 s->hnext);
77 return 0; 78 return 0;
78 } 79 }
@@ -82,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
82 s = rcu_dereference_protected(s->hnext, 83 s = rcu_dereference_protected(s->hnext,
83 lockdep_is_held(&local->sta_lock)); 84 lockdep_is_held(&local->sta_lock));
84 if (rcu_access_pointer(s->hnext)) { 85 if (rcu_access_pointer(s->hnext)) {
85 rcu_assign_pointer(s->hnext, sta->hnext); 86 RCU_INIT_POINTER(s->hnext, sta->hnext);
86 return 0; 87 return 0;
87 } 88 }
88 89
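The switch from rcu_assign_pointer() to RCU_INIT_POINTER() above is safe because these stores either unlink an entry or write NULL: rcu_assign_pointer() adds a release barrier so readers that see the new pointer also see the pointee's initialization, which is unnecessary when nothing new is being published. A userspace model of the distinction using C11 atomics:

#include <stdatomic.h>
#include <stddef.h>

struct sta { int aid; };

static _Atomic(struct sta *) slot;

/* ~ rcu_assign_pointer(): release store, so a reader that observes
 * the pointer also observes the pointee's initialized fields. */
static void publish(struct sta *s)
{
    atomic_store_explicit(&slot, s, memory_order_release);
}

/* ~ RCU_INIT_POINTER(..., NULL): plain store; no barrier is needed
 * when unlinking an entry or storing NULL, as in the sta_hash
 * updates in this hunk. */
static void unlink_entry(void)
{
    atomic_store_explicit(&slot, NULL, memory_order_relaxed);
}

int main(void)
{
    struct sta s = { .aid = 1 };
    publish(&s);
    unlink_entry();
    return 0;
}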
@@ -100,6 +101,27 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
100 lockdep_is_held(&local->sta_lock) || 101 lockdep_is_held(&local->sta_lock) ||
101 lockdep_is_held(&local->sta_mtx)); 102 lockdep_is_held(&local->sta_mtx));
102 while (sta) { 103 while (sta) {
104 if (sta->sdata == sdata && !sta->dummy &&
105 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
106 break;
107 sta = rcu_dereference_check(sta->hnext,
108 lockdep_is_held(&local->sta_lock) ||
109 lockdep_is_held(&local->sta_mtx));
110 }
111 return sta;
112}
113
114/* get a station info entry even if it is a dummy station*/
115struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
116 const u8 *addr)
117{
118 struct ieee80211_local *local = sdata->local;
119 struct sta_info *sta;
120
121 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
122 lockdep_is_held(&local->sta_lock) ||
123 lockdep_is_held(&local->sta_mtx));
124 while (sta) {
103 if (sta->sdata == sdata && 125 if (sta->sdata == sdata &&
104 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 126 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
105 break; 127 break;
@@ -126,6 +148,32 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
126 while (sta) { 148 while (sta) {
127 if ((sta->sdata == sdata || 149 if ((sta->sdata == sdata ||
128 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) && 150 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
151 !sta->dummy &&
152 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
153 break;
154 sta = rcu_dereference_check(sta->hnext,
155 lockdep_is_held(&local->sta_lock) ||
156 lockdep_is_held(&local->sta_mtx));
157 }
158 return sta;
159}
160
161/*
162 * Get sta info either from the specified interface
163 * or from one of its vlans (including dummy stations)
164 */
165struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
166 const u8 *addr)
167{
168 struct ieee80211_local *local = sdata->local;
169 struct sta_info *sta;
170
171 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
172 lockdep_is_held(&local->sta_lock) ||
173 lockdep_is_held(&local->sta_mtx));
174 while (sta) {
175 if ((sta->sdata == sdata ||
176 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 177 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
130 break; 178 break;
131 sta = rcu_dereference_check(sta->hnext, 179 sta = rcu_dereference_check(sta->hnext,
@@ -184,7 +232,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
184 struct sta_info *sta) 232 struct sta_info *sta)
185{ 233{
186 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; 234 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
187 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); 235 RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
188} 236}
189 237
190static void sta_unblock(struct work_struct *wk) 238static void sta_unblock(struct work_struct *wk)
@@ -196,13 +244,22 @@ static void sta_unblock(struct work_struct *wk)
196 if (sta->dead) 244 if (sta->dead)
197 return; 245 return;
198 246
199 if (!test_sta_flags(sta, WLAN_STA_PS_STA)) 247 if (!test_sta_flag(sta, WLAN_STA_PS_STA))
200 ieee80211_sta_ps_deliver_wakeup(sta); 248 ieee80211_sta_ps_deliver_wakeup(sta);
201 else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) { 249 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
202 clear_sta_flags(sta, WLAN_STA_PS_DRIVER); 250 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
251
252 local_bh_disable();
203 ieee80211_sta_ps_deliver_poll_response(sta); 253 ieee80211_sta_ps_deliver_poll_response(sta);
254 local_bh_enable();
255 } else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
256 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
257
258 local_bh_disable();
259 ieee80211_sta_ps_deliver_uapsd(sta);
260 local_bh_enable();
204 } else 261 } else
205 clear_sta_flags(sta, WLAN_STA_PS_DRIVER); 262 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
206} 263}
207 264
208static int sta_prepare_rate_control(struct ieee80211_local *local, 265static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -235,7 +292,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
235 return NULL; 292 return NULL;
236 293
237 spin_lock_init(&sta->lock); 294 spin_lock_init(&sta->lock);
238 spin_lock_init(&sta->flaglock);
239 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 295 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
240 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 296 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
241 mutex_init(&sta->ampdu_mlme.mtx); 297 mutex_init(&sta->ampdu_mlme.mtx);
@@ -262,8 +318,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
262 */ 318 */
263 sta->timer_to_tid[i] = i; 319 sta->timer_to_tid[i] = i;
264 } 320 }
265 skb_queue_head_init(&sta->ps_tx_buf); 321 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
266 skb_queue_head_init(&sta->tx_filtered); 322 skb_queue_head_init(&sta->ps_tx_buf[i]);
323 skb_queue_head_init(&sta->tx_filtered[i]);
324 }
267 325
268 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 326 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
269 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 327 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
@@ -280,7 +338,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
280 return sta; 338 return sta;
281} 339}
282 340
283static int sta_info_finish_insert(struct sta_info *sta, bool async) 341static int sta_info_finish_insert(struct sta_info *sta,
342 bool async, bool dummy_reinsert)
284{ 343{
285 struct ieee80211_local *local = sta->local; 344 struct ieee80211_local *local = sta->local;
286 struct ieee80211_sub_if_data *sdata = sta->sdata; 345 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,50 +349,58 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
290 349
291 lockdep_assert_held(&local->sta_mtx); 350 lockdep_assert_held(&local->sta_mtx);
292 351
293 /* notify driver */ 352 if (!sta->dummy || dummy_reinsert) {
294 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 353 /* notify driver */
295 sdata = container_of(sdata->bss, 354 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
296 struct ieee80211_sub_if_data, 355 sdata = container_of(sdata->bss,
297 u.ap); 356 struct ieee80211_sub_if_data,
298 err = drv_sta_add(local, sdata, &sta->sta); 357 u.ap);
299 if (err) { 358 err = drv_sta_add(local, sdata, &sta->sta);
300 if (!async) 359 if (err) {
301 return err; 360 if (!async)
302 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)" 361 return err;
303 " - keeping it anyway.\n", 362 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
304 sdata->name, sta->sta.addr, err); 363 "driver (%d) - keeping it anyway.\n",
305 } else { 364 sdata->name, sta->sta.addr, err);
306 sta->uploaded = true; 365 } else {
366 sta->uploaded = true;
307#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 367#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
308 if (async) 368 if (async)
309 wiphy_debug(local->hw.wiphy, 369 wiphy_debug(local->hw.wiphy,
310 "Finished adding IBSS STA %pM\n", 370 "Finished adding IBSS STA %pM\n",
311 sta->sta.addr); 371 sta->sta.addr);
312#endif 372#endif
373 }
374
375 sdata = sta->sdata;
313 } 376 }
314 377
315 sdata = sta->sdata; 378 if (!dummy_reinsert) {
379 if (!async) {
380 local->num_sta++;
381 local->sta_generation++;
382 smp_mb();
316 383
317 if (!async) { 384 /* make the station visible */
318 local->num_sta++; 385 spin_lock_irqsave(&local->sta_lock, flags);
319 local->sta_generation++; 386 sta_info_hash_add(local, sta);
320 smp_mb(); 387 spin_unlock_irqrestore(&local->sta_lock, flags);
388 }
321 389
322 /* make the station visible */ 390 list_add(&sta->list, &local->sta_list);
323 spin_lock_irqsave(&local->sta_lock, flags); 391 } else {
324 sta_info_hash_add(local, sta); 392 sta->dummy = false;
325 spin_unlock_irqrestore(&local->sta_lock, flags);
326 } 393 }
327 394
328 list_add(&sta->list, &local->sta_list); 395 if (!sta->dummy) {
329 396 ieee80211_sta_debugfs_add(sta);
330 ieee80211_sta_debugfs_add(sta); 397 rate_control_add_sta_debugfs(sta);
331 rate_control_add_sta_debugfs(sta);
332
333 sinfo.filled = 0;
334 sinfo.generation = local->sta_generation;
335 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
336 398
399 memset(&sinfo, 0, sizeof(sinfo));
400 sinfo.filled = 0;
401 sinfo.generation = local->sta_generation;
402 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
403 }
337 404
338 return 0; 405 return 0;
339} 406}
@@ -350,7 +417,7 @@ static void sta_info_finish_pending(struct ieee80211_local *local)
350 list_del(&sta->list); 417 list_del(&sta->list);
351 spin_unlock_irqrestore(&local->sta_lock, flags); 418 spin_unlock_irqrestore(&local->sta_lock, flags);
352 419
353 sta_info_finish_insert(sta, true); 420 sta_info_finish_insert(sta, true, false);
354 421
355 spin_lock_irqsave(&local->sta_lock, flags); 422 spin_lock_irqsave(&local->sta_lock, flags);
356 } 423 }
@@ -367,106 +434,117 @@ static void sta_info_finish_work(struct work_struct *work)
367 mutex_unlock(&local->sta_mtx); 434 mutex_unlock(&local->sta_mtx);
368} 435}
369 436
370int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 437static int sta_info_insert_check(struct sta_info *sta)
371{ 438{
372 struct ieee80211_local *local = sta->local;
373 struct ieee80211_sub_if_data *sdata = sta->sdata; 439 struct ieee80211_sub_if_data *sdata = sta->sdata;
374 unsigned long flags;
375 int err = 0;
376 440
377 /* 441 /*
378 * Can't be a WARN_ON because it can be triggered through a race: 442 * Can't be a WARN_ON because it can be triggered through a race:
379 * something inserts a STA (on one CPU) without holding the RTNL 443 * something inserts a STA (on one CPU) without holding the RTNL
380 * and another CPU turns off the net device. 444 * and another CPU turns off the net device.
381 */ 445 */
382 if (unlikely(!ieee80211_sdata_running(sdata))) { 446 if (unlikely(!ieee80211_sdata_running(sdata)))
383 err = -ENETDOWN; 447 return -ENETDOWN;
384 rcu_read_lock();
385 goto out_free;
386 }
387 448
388 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 || 449 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
389 is_multicast_ether_addr(sta->sta.addr))) { 450 is_multicast_ether_addr(sta->sta.addr)))
390 err = -EINVAL; 451 return -EINVAL;
452
453 return 0;
454}
455
456static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
457{
458 struct ieee80211_local *local = sta->local;
459 struct ieee80211_sub_if_data *sdata = sta->sdata;
460 unsigned long flags;
461
462 spin_lock_irqsave(&local->sta_lock, flags);
463 /* check if STA exists already */
464 if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
465 spin_unlock_irqrestore(&local->sta_lock, flags);
391 rcu_read_lock(); 466 rcu_read_lock();
392 goto out_free; 467 return -EEXIST;
393 } 468 }
394 469
395 /* 470 local->num_sta++;
396 * In ad-hoc mode, we sometimes need to insert stations 471 local->sta_generation++;
397 * from tasklet context from the RX path. To avoid races, 472 smp_mb();
398 * always do so in that case -- see the comment below. 473 sta_info_hash_add(local, sta);
399 */
400 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
401 spin_lock_irqsave(&local->sta_lock, flags);
402 /* check if STA exists already */
403 if (sta_info_get_bss(sdata, sta->sta.addr)) {
404 spin_unlock_irqrestore(&local->sta_lock, flags);
405 rcu_read_lock();
406 err = -EEXIST;
407 goto out_free;
408 }
409
410 local->num_sta++;
411 local->sta_generation++;
412 smp_mb();
413 sta_info_hash_add(local, sta);
414 474
415 list_add_tail(&sta->list, &local->sta_pending_list); 475 list_add_tail(&sta->list, &local->sta_pending_list);
416 476
417 rcu_read_lock(); 477 rcu_read_lock();
418 spin_unlock_irqrestore(&local->sta_lock, flags); 478 spin_unlock_irqrestore(&local->sta_lock, flags);
419 479
420#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 480#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
421 wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n", 481 wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
422 sta->sta.addr); 482 sta->sta.addr);
423#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 483#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
424 484
425 ieee80211_queue_work(&local->hw, &local->sta_finish_work); 485 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
426 486
427 return 0; 487 return 0;
428 } 488}
489
490/*
491 * should be called with sta_mtx locked
492 * this function replaces the mutex lock
493 * with a RCU lock
494 */
495static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
496{
497 struct ieee80211_local *local = sta->local;
498 struct ieee80211_sub_if_data *sdata = sta->sdata;
499 unsigned long flags;
500 struct sta_info *exist_sta;
501 bool dummy_reinsert = false;
502 int err = 0;
503
504 lockdep_assert_held(&local->sta_mtx);
429 505
430 /* 506 /*
431 * On first glance, this will look racy, because the code 507 * On first glance, this will look racy, because the code
 432 * below this point, which inserts a station and may sleep, 508 * in this function, which inserts a station and may sleep,
433 * unlocks the sta_lock between checking existence in the 509 * unlocks the sta_lock between checking existence in the
434 * hash table and inserting into it. 510 * hash table and inserting into it.
435 * 511 *
436 * However, it is not racy against itself because it keeps 512 * However, it is not racy against itself because it keeps
437 * the mutex locked. It still seems to race against the 513 * the mutex locked.
438 * above code that atomically inserts the station... That,
439 * however, is not true because the above code can only
440 * be invoked for IBSS interfaces, and the below code will
441 * not be -- and the two do not race against each other as
442 * the hash table also keys off the interface.
443 */ 514 */
444 515
445 might_sleep();
446
447 mutex_lock(&local->sta_mtx);
448
449 spin_lock_irqsave(&local->sta_lock, flags); 516 spin_lock_irqsave(&local->sta_lock, flags);
450 /* check if STA exists already */ 517 /*
451 if (sta_info_get_bss(sdata, sta->sta.addr)) { 518 * check if STA exists already.
 452 spin_unlock_irqrestore(&local->sta_lock, flags); 519 * only accept a second call to sta_info_insert_non_ibss
 453 mutex_unlock(&local->sta_mtx); 520 * for a dummy station entry that was inserted earlier;
 454 rcu_read_lock(); 521 * in that case, assume that the dummy station flag should
 455 err = -EEXIST; 522 * be removed.
456 goto out_free; 523 */
524 exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
525 if (exist_sta) {
526 if (exist_sta == sta && sta->dummy) {
527 dummy_reinsert = true;
528 } else {
529 spin_unlock_irqrestore(&local->sta_lock, flags);
530 mutex_unlock(&local->sta_mtx);
531 rcu_read_lock();
532 return -EEXIST;
533 }
457 } 534 }
458 535
459 spin_unlock_irqrestore(&local->sta_lock, flags); 536 spin_unlock_irqrestore(&local->sta_lock, flags);
460 537
461 err = sta_info_finish_insert(sta, false); 538 err = sta_info_finish_insert(sta, false, dummy_reinsert);
462 if (err) { 539 if (err) {
463 mutex_unlock(&local->sta_mtx); 540 mutex_unlock(&local->sta_mtx);
464 rcu_read_lock(); 541 rcu_read_lock();
465 goto out_free; 542 return err;
466 } 543 }
467 544
468#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 545#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
469 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr); 546 wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
547 sta->dummy ? "dummy " : "", sta->sta.addr);
470#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 548#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
471 549
472 /* move reference to rcu-protected */ 550 /* move reference to rcu-protected */
@@ -477,6 +555,51 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
477 mesh_accept_plinks_update(sdata); 555 mesh_accept_plinks_update(sdata);
478 556
479 return 0; 557 return 0;
558}
559
560int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
561{
562 struct ieee80211_local *local = sta->local;
563 struct ieee80211_sub_if_data *sdata = sta->sdata;
564 int err = 0;
565
566 err = sta_info_insert_check(sta);
567 if (err) {
568 rcu_read_lock();
569 goto out_free;
570 }
571
572 /*
573 * In ad-hoc mode, we sometimes need to insert stations
574 * from tasklet context from the RX path. To avoid races,
575 * always do so in that case -- see the comment below.
576 */
577 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
578 err = sta_info_insert_ibss(sta);
579 if (err)
580 goto out_free;
581
582 return 0;
583 }
584
585 /*
 586 * It might seem that the function called below races against
587 * the function call above that atomically inserts the station... That,
588 * however, is not true because the above code can only
589 * be invoked for IBSS interfaces, and the below code will
590 * not be -- and the two do not race against each other as
591 * the hash table also keys off the interface.
592 */
593
594 might_sleep();
595
596 mutex_lock(&local->sta_mtx);
597
598 err = sta_info_insert_non_ibss(sta);
599 if (err)
600 goto out_free;
601
602 return 0;
480 out_free: 603 out_free:
481 BUG_ON(!err); 604 BUG_ON(!err);
482 __sta_info_free(local, sta); 605 __sta_info_free(local, sta);
@@ -492,6 +615,25 @@ int sta_info_insert(struct sta_info *sta)
492 return err; 615 return err;
493} 616}
494 617
618/* Caller must hold sta->local->sta_mtx */
619int sta_info_reinsert(struct sta_info *sta)
620{
621 struct ieee80211_local *local = sta->local;
622 int err = 0;
623
624 err = sta_info_insert_check(sta);
625 if (err) {
626 mutex_unlock(&local->sta_mtx);
627 return err;
628 }
629
630 might_sleep();
631
632 err = sta_info_insert_non_ibss(sta);
633 rcu_read_unlock();
634 return err;
635}
636
495static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 637static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
496{ 638{
497 /* 639 /*
@@ -510,64 +652,93 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
510 bss->tim[aid / 8] &= ~(1 << (aid % 8)); 652 bss->tim[aid / 8] &= ~(1 << (aid % 8));
511} 653}
512 654
513static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, 655static unsigned long ieee80211_tids_for_ac(int ac)
514 struct sta_info *sta)
515{ 656{
516 BUG_ON(!bss); 657 /* If we ever support TIDs > 7, this obviously needs to be adjusted */
517 658 switch (ac) {
518 __bss_tim_set(bss, sta->sta.aid); 659 case IEEE80211_AC_VO:
519 660 return BIT(6) | BIT(7);
520 if (sta->local->ops->set_tim) { 661 case IEEE80211_AC_VI:
521 sta->local->tim_in_locked_section = true; 662 return BIT(4) | BIT(5);
522 drv_set_tim(sta->local, &sta->sta, true); 663 case IEEE80211_AC_BE:
523 sta->local->tim_in_locked_section = false; 664 return BIT(0) | BIT(3);
665 case IEEE80211_AC_BK:
666 return BIT(1) | BIT(2);
667 default:
668 WARN_ON(1);
669 return 0;
524 } 670 }
525} 671}
526 672
527void sta_info_set_tim_bit(struct sta_info *sta) 673void sta_info_recalc_tim(struct sta_info *sta)
528{ 674{
675 struct ieee80211_local *local = sta->local;
676 struct ieee80211_if_ap *bss = sta->sdata->bss;
529 unsigned long flags; 677 unsigned long flags;
678 bool indicate_tim = false;
679 u8 ignore_for_tim = sta->sta.uapsd_queues;
680 int ac;
530 681
531 BUG_ON(!sta->sdata->bss); 682 if (WARN_ON_ONCE(!sta->sdata->bss))
683 return;
532 684
533 spin_lock_irqsave(&sta->local->sta_lock, flags); 685 /* No need to do anything if the driver does all */
534 __sta_info_set_tim_bit(sta->sdata->bss, sta); 686 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
535 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 687 return;
536}
537 688
538static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, 689 if (sta->dead)
539 struct sta_info *sta) 690 goto done;
540{ 691
541 BUG_ON(!bss); 692 /*
693 * If all ACs are delivery-enabled then we should build
694 * the TIM bit for all ACs anyway; if only some are then
695 * we ignore those and build the TIM bit using only the
696 * non-enabled ones.
697 */
698 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
699 ignore_for_tim = 0;
700
701 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
702 unsigned long tids;
542 703
543 __bss_tim_clear(bss, sta->sta.aid); 704 if (ignore_for_tim & BIT(ac))
705 continue;
706
707 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
708 !skb_queue_empty(&sta->ps_tx_buf[ac]);
709 if (indicate_tim)
710 break;
544 711
545 if (sta->local->ops->set_tim) { 712 tids = ieee80211_tids_for_ac(ac);
546 sta->local->tim_in_locked_section = true; 713
547 drv_set_tim(sta->local, &sta->sta, false); 714 indicate_tim |=
548 sta->local->tim_in_locked_section = false; 715 sta->driver_buffered_tids & tids;
549 } 716 }
550}
551 717
552void sta_info_clear_tim_bit(struct sta_info *sta) 718 done:
553{ 719 spin_lock_irqsave(&local->sta_lock, flags);
554 unsigned long flags;
555 720
556 BUG_ON(!sta->sdata->bss); 721 if (indicate_tim)
722 __bss_tim_set(bss, sta->sta.aid);
723 else
724 __bss_tim_clear(bss, sta->sta.aid);
557 725
558 spin_lock_irqsave(&sta->local->sta_lock, flags); 726 if (local->ops->set_tim) {
559 __sta_info_clear_tim_bit(sta->sdata->bss, sta); 727 local->tim_in_locked_section = true;
560 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 728 drv_set_tim(local, &sta->sta, indicate_tim);
729 local->tim_in_locked_section = false;
730 }
731
732 spin_unlock_irqrestore(&local->sta_lock, flags);
561} 733}
562 734
563static int sta_info_buffer_expired(struct sta_info *sta, 735static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
564 struct sk_buff *skb)
565{ 736{
566 struct ieee80211_tx_info *info; 737 struct ieee80211_tx_info *info;
567 int timeout; 738 int timeout;
568 739
569 if (!skb) 740 if (!skb)
570 return 0; 741 return false;
571 742
572 info = IEEE80211_SKB_CB(skb); 743 info = IEEE80211_SKB_CB(skb);
573 744
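sta_info_recalc_tim() above replaces the old set/clear pair with a single recomputation: the station's TIM bit should be set when any access category that is not U-APSD delivery-enabled (unless all of them are) has frames buffered either in mac80211 or in the driver. A standalone model of that decision, reusing the TID grouping from the new ieee80211_tids_for_ac() helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

/* Same grouping as the new ieee80211_tids_for_ac() helper. */
static unsigned long tids_for_ac(int ac)
{
    switch (ac) {
    case AC_VO: return 1UL << 6 | 1UL << 7;
    case AC_VI: return 1UL << 4 | 1UL << 5;
    case AC_BE: return 1UL << 0 | 1UL << 3;
    case AC_BK: return 1UL << 1 | 1UL << 2;
    default:    return 0;
    }
}

/* Model of the decision in sta_info_recalc_tim(): ignore ACs that
 * are delivery-enabled for U-APSD (unless all of them are), and
 * report whether anything is buffered for the remaining ACs. */
static bool needs_tim(const int buffered[NUM_ACS],
                      unsigned long driver_buffered_tids,
                      uint8_t uapsd_queues)
{
    uint8_t ignore = uapsd_queues;

    if (ignore == (1 << NUM_ACS) - 1)
        ignore = 0;

    for (int ac = 0; ac < NUM_ACS; ac++) {
        if (ignore & (1 << ac))
            continue;
        if (buffered[ac])
            return true;
        if (driver_buffered_tids & tids_for_ac(ac))
            return true;
    }
    return false;
}

int main(void)
{
    int buffered[NUM_ACS] = { 0, 0, 1, 0 };             /* one BE frame */
    printf("%d\n", needs_tim(buffered, 0, 1 << AC_BE)); /* 0: ignored */
    printf("%d\n", needs_tim(buffered, 0, 0));          /* 1 */
    return 0;
}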
@@ -581,24 +752,59 @@ static int sta_info_buffer_expired(struct sta_info *sta,
581} 752}
582 753
583 754
584static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 755static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
585 struct sta_info *sta) 756 struct sta_info *sta, int ac)
586{ 757{
587 unsigned long flags; 758 unsigned long flags;
588 struct sk_buff *skb; 759 struct sk_buff *skb;
589 760
590 if (skb_queue_empty(&sta->ps_tx_buf)) 761 /*
591 return false; 762 * First check for frames that should expire on the filtered
763 * queue. Frames here were rejected by the driver and are on
764 * a separate queue to avoid reordering with normal PS-buffered
765 * frames. They also aren't accounted for right now in the
766 * total_ps_buffered counter.
767 */
768 for (;;) {
769 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
770 skb = skb_peek(&sta->tx_filtered[ac]);
771 if (sta_info_buffer_expired(sta, skb))
772 skb = __skb_dequeue(&sta->tx_filtered[ac]);
773 else
774 skb = NULL;
775 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
592 776
777 /*
778 * Frames are queued in order, so if this one
779 * hasn't expired yet we can stop testing. If
780 * we actually reached the end of the queue we
781 * also need to stop, of course.
782 */
783 if (!skb)
784 break;
785 dev_kfree_skb(skb);
786 }
787
788 /*
789 * Now also check the normal PS-buffered queue, this will
790 * only find something if the filtered queue was emptied
791 * since the filtered frames are all before the normal PS
792 * buffered frames.
793 */
593 for (;;) { 794 for (;;) {
594 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 795 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
595 skb = skb_peek(&sta->ps_tx_buf); 796 skb = skb_peek(&sta->ps_tx_buf[ac]);
596 if (sta_info_buffer_expired(sta, skb)) 797 if (sta_info_buffer_expired(sta, skb))
597 skb = __skb_dequeue(&sta->ps_tx_buf); 798 skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
598 else 799 else
599 skb = NULL; 800 skb = NULL;
600 spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); 801 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
601 802
803 /*
804 * frames are queued in order, so if this one
805 * hasn't expired yet (or we reached the end of
806 * the queue) we can stop testing
807 */
602 if (!skb) 808 if (!skb)
603 break; 809 break;
604 810
@@ -608,22 +814,47 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
608 sta->sta.addr); 814 sta->sta.addr);
609#endif 815#endif
610 dev_kfree_skb(skb); 816 dev_kfree_skb(skb);
611
612 if (skb_queue_empty(&sta->ps_tx_buf) &&
613 !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
614 sta_info_clear_tim_bit(sta);
615 } 817 }
616 818
617 return true; 819 /*
820 * Finally, recalculate the TIM bit for this station -- it might
821 * now be clear because the station was too slow to retrieve its
822 * frames.
823 */
824 sta_info_recalc_tim(sta);
825
826 /*
 827 * Return whether there are any frames still buffered; this is
 828 * used to check whether the cleanup timer still needs to run:
 829 * if there are no frames we don't need to rearm the timer.
830 */
831 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
832 skb_queue_empty(&sta->tx_filtered[ac]));
833}
834
835static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
836 struct sta_info *sta)
837{
838 bool have_buffered = false;
839 int ac;
840
841 /* This is only necessary for stations on BSS interfaces */
842 if (!sta->sdata->bss)
843 return false;
844
845 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
846 have_buffered |=
847 sta_info_cleanup_expire_buffered_ac(local, sta, ac);
848
849 return have_buffered;
618} 850}
619 851
620static int __must_check __sta_info_destroy(struct sta_info *sta) 852static int __must_check __sta_info_destroy(struct sta_info *sta)
621{ 853{
622 struct ieee80211_local *local; 854 struct ieee80211_local *local;
623 struct ieee80211_sub_if_data *sdata; 855 struct ieee80211_sub_if_data *sdata;
624 struct sk_buff *skb;
625 unsigned long flags; 856 unsigned long flags;
626 int ret, i; 857 int ret, i, ac;
627 858
628 might_sleep(); 859 might_sleep();
629 860
@@ -639,7 +870,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
639 * sessions -- block that to make sure the tear-down 870 * sessions -- block that to make sure the tear-down
640 * will be sufficient. 871 * will be sufficient.
641 */ 872 */
642 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 873 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
643 ieee80211_sta_tear_down_BA_sessions(sta, true); 874 ieee80211_sta_tear_down_BA_sessions(sta, true);
644 875
645 spin_lock_irqsave(&local->sta_lock, flags); 876 spin_lock_irqsave(&local->sta_lock, flags);
@@ -660,19 +891,22 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
660 891
661 sta->dead = true; 892 sta->dead = true;
662 893
663 if (test_and_clear_sta_flags(sta, 894 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
664 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) { 895 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
665 BUG_ON(!sdata->bss); 896 BUG_ON(!sdata->bss);
666 897
898 clear_sta_flag(sta, WLAN_STA_PS_STA);
899 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
900
667 atomic_dec(&sdata->bss->num_sta_ps); 901 atomic_dec(&sdata->bss->num_sta_ps);
668 sta_info_clear_tim_bit(sta); 902 sta_info_recalc_tim(sta);
669 } 903 }
670 904
671 local->num_sta--; 905 local->num_sta--;
672 local->sta_generation++; 906 local->sta_generation++;
673 907
674 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 908 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
675 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 909 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
676 910
677 if (sta->uploaded) { 911 if (sta->uploaded) {
678 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 912 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -691,6 +925,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
691 */ 925 */
692 synchronize_rcu(); 926 synchronize_rcu();
693 927
928 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
929 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
930 __skb_queue_purge(&sta->ps_tx_buf[ac]);
931 __skb_queue_purge(&sta->tx_filtered[ac]);
932 }
933
694#ifdef CONFIG_MAC80211_MESH 934#ifdef CONFIG_MAC80211_MESH
695 if (ieee80211_vif_is_mesh(&sdata->vif)) 935 if (ieee80211_vif_is_mesh(&sdata->vif))
696 mesh_accept_plinks_update(sdata); 936 mesh_accept_plinks_update(sdata);
@@ -713,14 +953,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
713 } 953 }
714#endif 954#endif
715 955
716 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
717 local->total_ps_buffered--;
718 dev_kfree_skb_any(skb);
719 }
720
721 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
722 dev_kfree_skb_any(skb);
723
724 __sta_info_free(local, sta); 956 __sta_info_free(local, sta);
725 957
726 return 0; 958 return 0;
@@ -732,7 +964,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
732 int ret; 964 int ret;
733 965
734 mutex_lock(&sdata->local->sta_mtx); 966 mutex_lock(&sdata->local->sta_mtx);
735 sta = sta_info_get(sdata, addr); 967 sta = sta_info_get_rx(sdata, addr);
736 ret = __sta_info_destroy(sta); 968 ret = __sta_info_destroy(sta);
737 mutex_unlock(&sdata->local->sta_mtx); 969 mutex_unlock(&sdata->local->sta_mtx);
738 970
@@ -746,7 +978,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
746 int ret; 978 int ret;
747 979
748 mutex_lock(&sdata->local->sta_mtx); 980 mutex_lock(&sdata->local->sta_mtx);
749 sta = sta_info_get_bss(sdata, addr); 981 sta = sta_info_get_bss_rx(sdata, addr);
750 ret = __sta_info_destroy(sta); 982 ret = __sta_info_destroy(sta);
751 mutex_unlock(&sdata->local->sta_mtx); 983 mutex_unlock(&sdata->local->sta_mtx);
752 984
@@ -886,7 +1118,8 @@ static void clear_sta_ps_flags(void *_sta)
886{ 1118{
887 struct sta_info *sta = _sta; 1119 struct sta_info *sta = _sta;
888 1120
889 clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA); 1121 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1122 clear_sta_flag(sta, WLAN_STA_PS_STA);
890} 1123}
891 1124
892/* powersave support code */ 1125/* powersave support code */
@@ -894,88 +1127,341 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
894{ 1127{
895 struct ieee80211_sub_if_data *sdata = sta->sdata; 1128 struct ieee80211_sub_if_data *sdata = sta->sdata;
896 struct ieee80211_local *local = sdata->local; 1129 struct ieee80211_local *local = sdata->local;
897 int sent, buffered; 1130 struct sk_buff_head pending;
1131 int filtered = 0, buffered = 0, ac;
1132
1133 clear_sta_flag(sta, WLAN_STA_SP);
1134
1135 BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
1136 sta->driver_buffered_tids = 0;
898 1137
899 clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
900 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1138 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
901 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1139 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
902 1140
903 if (!skb_queue_empty(&sta->ps_tx_buf)) 1141 skb_queue_head_init(&pending);
904 sta_info_clear_tim_bit(sta);
905 1142
906 /* Send all buffered frames to the station */ 1143 /* Send all buffered frames to the station */
907 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); 1144 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
908 buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf, 1145 int count = skb_queue_len(&pending), tmp;
909 clear_sta_ps_flags, sta); 1146
910 sent += buffered; 1147 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
1148 tmp = skb_queue_len(&pending);
1149 filtered += tmp - count;
1150 count = tmp;
1151
1152 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
1153 tmp = skb_queue_len(&pending);
1154 buffered += tmp - count;
1155 }
1156
1157 ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
1158
911 local->total_ps_buffered -= buffered; 1159 local->total_ps_buffered -= buffered;
912 1160
1161 sta_info_recalc_tim(sta);
1162
913#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1163#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
914 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 1164 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
915 "since STA not sleeping anymore\n", sdata->name, 1165 "since STA not sleeping anymore\n", sdata->name,
916 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 1166 sta->sta.addr, sta->sta.aid, filtered, buffered);
917#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1167#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
918} 1168}
919 1169
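The rewritten wakeup path splices every per-AC filtered and PS-buffered queue into one local list before requeueing, and derives the filtered/buffered counts from how much the list grows at each splice. A minimal model of that counting trick, with plain counters standing in for sk_buff queues:

#include <stdio.h>

struct queue { int len; };

/* ~ skb_queue_splice_tail_init(): move everything, report new len. */
static int splice(struct queue *dst, struct queue *src)
{
    dst->len += src->len;
    src->len = 0;
    return dst->len;
}

int main(void)
{
    struct queue pending = { 0 };
    struct queue tx_filtered[4] = { {1}, {0}, {2}, {0} };
    struct queue ps_tx_buf[4]   = { {0}, {3}, {0}, {1} };
    int filtered = 0, buffered = 0;

    for (int ac = 0; ac < 4; ac++) {
        int count = pending.len, tmp;

        tmp = splice(&pending, &tx_filtered[ac]);
        filtered += tmp - count;     /* growth = frames from this queue */
        count = tmp;

        tmp = splice(&pending, &ps_tx_buf[ac]);
        buffered += tmp - count;
    }
    printf("filtered=%d buffered=%d\n", filtered, buffered); /* 3 4 */
    return 0;
}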
920void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 1170static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1171 struct sta_info *sta, int tid,
1172 enum ieee80211_frame_release_type reason)
921{ 1173{
922 struct ieee80211_sub_if_data *sdata = sta->sdata;
923 struct ieee80211_local *local = sdata->local; 1174 struct ieee80211_local *local = sdata->local;
1175 struct ieee80211_qos_hdr *nullfunc;
924 struct sk_buff *skb; 1176 struct sk_buff *skb;
925 int no_pending_pkts; 1177 int size = sizeof(*nullfunc);
1178 __le16 fc;
1179 bool qos = test_sta_flag(sta, WLAN_STA_WME);
1180 struct ieee80211_tx_info *info;
926 1181
927 skb = skb_dequeue(&sta->tx_filtered); 1182 if (qos) {
928 if (!skb) { 1183 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
929 skb = skb_dequeue(&sta->ps_tx_buf); 1184 IEEE80211_STYPE_QOS_NULLFUNC |
930 if (skb) 1185 IEEE80211_FCTL_FROMDS);
931 local->total_ps_buffered--; 1186 } else {
1187 size -= 2;
1188 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1189 IEEE80211_STYPE_NULLFUNC |
1190 IEEE80211_FCTL_FROMDS);
1191 }
1192
1193 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
1194 if (!skb)
1195 return;
1196
1197 skb_reserve(skb, local->hw.extra_tx_headroom);
1198
1199 nullfunc = (void *) skb_put(skb, size);
1200 nullfunc->frame_control = fc;
1201 nullfunc->duration_id = 0;
1202 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1203 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1204 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1205
1206 skb->priority = tid;
1207 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
1208 if (qos) {
1209 nullfunc->qos_ctrl = cpu_to_le16(tid);
1210
1211 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
1212 nullfunc->qos_ctrl |=
1213 cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
932 } 1214 }
933 no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
934 skb_queue_empty(&sta->ps_tx_buf);
935 1215
936 if (skb) { 1216 info = IEEE80211_SKB_CB(skb);
937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1217
938 struct ieee80211_hdr *hdr = 1218 /*
939 (struct ieee80211_hdr *) skb->data; 1219 * Tell TX path to send this frame even though the
 1220 * STA may still remain in PS mode after this frame
1221 * exchange. Also set EOSP to indicate this packet
1222 * ends the poll/service period.
1223 */
1224 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE |
1225 IEEE80211_TX_STATUS_EOSP |
1226 IEEE80211_TX_CTL_REQ_TX_STATUS;
1227
1228 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
1229
1230 ieee80211_xmit(sdata, skb);
1231}
1232
1233static void
1234ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1235 int n_frames, u8 ignored_acs,
1236 enum ieee80211_frame_release_type reason)
1237{
1238 struct ieee80211_sub_if_data *sdata = sta->sdata;
1239 struct ieee80211_local *local = sdata->local;
1240 bool found = false;
1241 bool more_data = false;
1242 int ac;
1243 unsigned long driver_release_tids = 0;
1244 struct sk_buff_head frames;
1245
1246 /* Service or PS-Poll period starts */
1247 set_sta_flag(sta, WLAN_STA_SP);
1248
1249 __skb_queue_head_init(&frames);
1250
1251 /*
1252 * Get response frame(s) and more data bit for it.
1253 */
1254 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1255 unsigned long tids;
1256
1257 if (ignored_acs & BIT(ac))
1258 continue;
1259
1260 tids = ieee80211_tids_for_ac(ac);
1261
1262 if (!found) {
1263 driver_release_tids = sta->driver_buffered_tids & tids;
1264 if (driver_release_tids) {
1265 found = true;
1266 } else {
1267 struct sk_buff *skb;
1268
1269 while (n_frames > 0) {
1270 skb = skb_dequeue(&sta->tx_filtered[ac]);
1271 if (!skb) {
1272 skb = skb_dequeue(
1273 &sta->ps_tx_buf[ac]);
1274 if (skb)
1275 local->total_ps_buffered--;
1276 }
1277 if (!skb)
1278 break;
1279 n_frames--;
1280 found = true;
1281 __skb_queue_tail(&frames, skb);
1282 }
1283 }
1284
1285 /*
1286 * If the driver has data on more than one TID then
1287 * certainly there's more data if we release just a
1288 * single frame now (from a single TID).
1289 */
1290 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
1291 hweight16(driver_release_tids) > 1) {
1292 more_data = true;
1293 driver_release_tids =
1294 BIT(ffs(driver_release_tids) - 1);
1295 break;
1296 }
1297 }
1298
1299 if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1300 !skb_queue_empty(&sta->ps_tx_buf[ac])) {
1301 more_data = true;
1302 break;
1303 }
1304 }
1305
1306 if (!found) {
1307 int tid;
940 1308
941 /* 1309 /*
942 * Tell TX path to send this frame even though the STA may 1310 * For PS-Poll, this can only happen due to a race condition
943 * still remain in PS mode after this frame exchange. 1311 * when we set the TIM bit and the station notices it, but
1312 * before it can poll for the frame we expire it.
1313 *
1314 * For uAPSD, this is said in the standard (11.2.1.5 h):
1315 * At each unscheduled SP for a non-AP STA, the AP shall
1316 * attempt to transmit at least one MSDU or MMPDU, but no
1317 * more than the value specified in the Max SP Length field
1318 * in the QoS Capability element from delivery-enabled ACs,
1319 * that are destined for the non-AP STA.
1320 *
1321 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
944 */ 1322 */
945 info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
946 1323
947#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1324 /* This will evaluate to 1, 3, 5 or 7. */
948 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", 1325 tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
949 sta->sta.addr, sta->sta.aid,
950 skb_queue_len(&sta->ps_tx_buf));
951#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
952 1326
953 /* Use MoreData flag to indicate whether there are more 1327 ieee80211_send_null_response(sdata, sta, tid, reason);
954 * buffered frames for this STA */ 1328 return;
955 if (no_pending_pkts) 1329 }
956 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 1330
957 else 1331 if (!driver_release_tids) {
958 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1332 struct sk_buff_head pending;
1333 struct sk_buff *skb;
1334 int num = 0;
1335 u16 tids = 0;
1336
1337 skb_queue_head_init(&pending);
1338
1339 while ((skb = __skb_dequeue(&frames))) {
1340 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1341 struct ieee80211_hdr *hdr = (void *) skb->data;
1342 u8 *qoshdr = NULL;
1343
1344 num++;
1345
1346 /*
1347 * Tell TX path to send this frame even though the
 1348 * STA may still remain in PS mode after this frame
1349 * exchange.
1350 */
1351 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE;
1352
1353 /*
1354 * Use MoreData flag to indicate whether there are
1355 * more buffered frames for this STA
1356 */
1357 if (!more_data)
1358 hdr->frame_control &=
1359 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1363
1364 if (ieee80211_is_data_qos(hdr->frame_control) ||
1365 ieee80211_is_qos_nullfunc(hdr->frame_control))
1366 qoshdr = ieee80211_get_qos_ctl(hdr);
1367
1368 /* set EOSP for the frame */
1369 if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
1370 qoshdr && skb_queue_empty(&frames))
1371 *qoshdr |= IEEE80211_QOS_CTL_EOSP;
1372
1373 info->flags |= IEEE80211_TX_STATUS_EOSP |
1374 IEEE80211_TX_CTL_REQ_TX_STATUS;
1375
1376 if (qoshdr)
1377 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
1378 else
1379 tids |= BIT(0);
1380
1381 __skb_queue_tail(&pending, skb);
1382 }
959 1383
960 ieee80211_add_pending_skb(local, skb); 1384 drv_allow_buffered_frames(local, sta, tids, num,
1385 reason, more_data);
961 1386
962 if (no_pending_pkts) 1387 ieee80211_add_pending_skbs(local, &pending);
963 sta_info_clear_tim_bit(sta); 1388
964#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1389 sta_info_recalc_tim(sta);
965 } else { 1390 } else {
966 /* 1391 /*
967 * FIXME: This can be the result of a race condition between 1392 * We need to release a frame that is buffered somewhere in the
968 * us expiring a frame and the station polling for it. 1393 * driver ... it'll have to handle that.
969 * Should we send it a null-func frame indicating we 1394 * Note that, as per the comment above, it'll also have to see
970 * have nothing buffered for it? 1395 * if there is more than just one frame on the specific TID that
1396 * we're releasing from, and it needs to set the more-data bit
1397 * accordingly if we tell it that there's no more data. If we do
1398 * tell it there's more data, then of course the more-data bit
1399 * needs to be set anyway.
1400 */
1401 drv_release_buffered_frames(local, sta, driver_release_tids,
1402 n_frames, reason, more_data);
1403
1404 /*
1405 * Note that we don't recalculate the TIM bit here as it would
1406 * most likely have no effect at all unless the driver told us
1407 * that the TID became empty before returning here from the
1408 * release function.
1409 * Either way, however, when the driver tells us that the TID
1410 * became empty we'll do the TIM recalculation.
971 */ 1411 */
972 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
973 "though there are no buffered frames for it\n",
974 sdata->name, sta->sta.addr);
975#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
976 } 1412 }
977} 1413}
978 1414
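The QoS-null fallback above picks its TID with tid = 7 - ((ffs(~ignored_acs) - 1) << 1), i.e. the highest TID of the first AC that is not ignored. A standalone sketch that enumerates the possible masks and confirms the comment's claim that the result is always 1, 3, 5 or 7 (assuming mac80211's AC numbering, VO=0 through BK=3):

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
    /* every mask that leaves at least one of the four ACs usable */
    for (unsigned ignored_acs = 0; ignored_acs < 0xf; ignored_acs++) {
        int ac  = ffs(~ignored_acs) - 1;   /* first non-ignored AC */
        int tid = 7 - (ac << 1);           /* the kernel expression above */
        printf("ignored_acs=0x%x -> ac=%d tid=%d\n", ignored_acs, ac, tid);
    }
    return 0;
}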
1415void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
1416{
1417 u8 ignore_for_response = sta->sta.uapsd_queues;
1418
1419 /*
1420 * If all ACs are delivery-enabled then we should reply
1421 * from any of them, if only some are enabled we reply
1422 * only from the non-enabled ones.
1423 */
1424 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
1425 ignore_for_response = 0;
1426
1427 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
1428 IEEE80211_FRAME_RELEASE_PSPOLL);
1429}
1430
1431void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
1432{
1433 int n_frames = sta->sta.max_sp;
1434 u8 delivery_enabled = sta->sta.uapsd_queues;
1435
1436 /*
1437 * If we ever grow support for TSPEC this might happen if
1438 * the TSPEC update from hostapd comes in between a trigger
1439 * frame setting WLAN_STA_UAPSD in the RX path and this
1440 * actually getting called.
1441 */
1442 if (!delivery_enabled)
1443 return;
1444
1445 switch (sta->sta.max_sp) {
1446 case 1:
1447 n_frames = 2;
1448 break;
1449 case 2:
1450 n_frames = 4;
1451 break;
1452 case 3:
1453 n_frames = 6;
1454 break;
1455 case 0:
1456 /* XXX: what is a good value? */
1457 n_frames = 8;
1458 break;
1459 }
1460
1461 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
1462 IEEE80211_FRAME_RELEASE_UAPSD);
1463}
1464
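The switch above is just the uAPSD Max SP Length encoding: a field value of n (1-3) allows 2*n frames per service period, and 0 means "all frames", capped here at 8. A tiny sketch of the same mapping:

#include <stdio.h>

static int max_sp_to_frames(int max_sp)
{
    /* Max SP Length: 0 = unlimited (capped at 8 above), else 2 * n */
    return max_sp ? 2 * max_sp : 8;
}

int main(void)
{
    for (int sp = 0; sp <= 3; sp++)
        printf("max_sp=%d -> n_frames=%d\n", sp, max_sp_to_frames(sp));
    return 0;
}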
979void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 1465void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
980 struct ieee80211_sta *pubsta, bool block) 1466 struct ieee80211_sta *pubsta, bool block)
981{ 1467{
@@ -984,17 +1470,50 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
984 trace_api_sta_block_awake(sta->local, pubsta, block); 1470 trace_api_sta_block_awake(sta->local, pubsta, block);
985 1471
986 if (block) 1472 if (block)
987 set_sta_flags(sta, WLAN_STA_PS_DRIVER); 1473 set_sta_flag(sta, WLAN_STA_PS_DRIVER);
988 else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) 1474 else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
989 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 1475 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
990} 1476}
991EXPORT_SYMBOL(ieee80211_sta_block_awake); 1477EXPORT_SYMBOL(ieee80211_sta_block_awake);
992 1478
993void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta) 1479void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
994{ 1480{
995 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1481 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1482 struct ieee80211_local *local = sta->local;
1483 struct sk_buff *skb;
1484 struct skb_eosp_msg_data *data;
1485
1486 trace_api_eosp(local, pubsta);
1487
1488 skb = alloc_skb(0, GFP_ATOMIC);
1489 if (!skb) {
1490 /* too bad ... but race is better than loss */
1491 clear_sta_flag(sta, WLAN_STA_SP);
1492 return;
1493 }
1494
1495 data = (void *)skb->cb;
1496 memcpy(data->sta, pubsta->addr, ETH_ALEN);
1497 memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
1498 skb->pkt_type = IEEE80211_EOSP_MSG;
1499 skb_queue_tail(&local->skb_queue, skb);
1500 tasklet_schedule(&local->tasklet);
1501}
1502EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe);
1503
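ieee80211_sta_eosp_irqsafe() above leans on the 48-byte skb->cb scratch area to carry a small message from IRQ context to the tasklet without a separate allocation. A userspace analogue of the pattern; the struct layout and the pkt_type value here are stand-ins, not the real mac80211 definitions:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct fake_skb {
    unsigned char cb[48];      /* mirrors skb->cb */
    int pkt_type;
};

struct eosp_msg {              /* hypothetical, mirrors skb_eosp_msg_data */
    unsigned char sta[ETH_ALEN];
    unsigned char iface[ETH_ALEN];
};

int main(void)
{
    struct fake_skb skb = { .pkt_type = 1 };  /* stand-in for IEEE80211_EOSP_MSG */
    unsigned char sta[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

    /* producer (IRQ context): stash the message in the control buffer */
    struct eosp_msg *msg = (struct eosp_msg *)skb.cb;
    memcpy(msg->sta, sta, ETH_ALEN);

    /* consumer (tasklet): read it back from the same skb */
    msg = (struct eosp_msg *)skb.cb;
    printf("EOSP message for STA ending in %02x\n", msg->sta[ETH_ALEN - 1]);
    return 0;
}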
1504void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1505 u8 tid, bool buffered)
1506{
1507 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1508
1509 if (WARN_ON(tid >= STA_TID_NUM))
1510 return;
1511
1512 if (buffered)
1513 set_bit(tid, &sta->driver_buffered_tids);
1514 else
1515 clear_bit(tid, &sta->driver_buffered_tids);
996 1516
997 set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF); 1517 sta_info_recalc_tim(sta);
998 sta_info_set_tim_bit(sta);
999} 1518}
1000EXPORT_SYMBOL(ieee80211_sta_set_tim); 1519EXPORT_SYMBOL(ieee80211_sta_set_buffered);
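A sketch of the bookkeeping behind the new API: each call just flips one bit of the per-station TID bitmap and rederives the TIM state from the aggregate, so drivers report per-TID buffering without ever touching the TIM bit directly. This is a simplified userspace model, not driver code:

#include <stdio.h>

#define STA_TID_NUM 16

static unsigned long driver_buffered_tids;

static void sta_set_buffered(int tid, int buffered)
{
    if (tid >= STA_TID_NUM)
        return;                /* mirrors the WARN_ON above */
    if (buffered)
        driver_buffered_tids |= 1UL << tid;
    else
        driver_buffered_tids &= ~(1UL << tid);
    /* the TIM bit is a pure function of the remaining state */
    printf("TIM=%d (tids=0x%04lx)\n",
           driver_buffered_tids != 0, driver_buffered_tids);
}

int main(void)
{
    sta_set_buffered(4, 1);    /* driver queued frames on TID 4 */
    sta_set_buffered(6, 1);
    sta_set_buffered(4, 0);    /* TID 4 drained, TIM stays set */
    sta_set_buffered(6, 0);    /* all drained, TIM clears */
    return 0;
}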
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 28beb78e601e..8c8ce05ad26f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -19,7 +19,8 @@
19/** 19/**
 20 * enum ieee80211_sta_info_flags - Station flags 20 * enum ieee80211_sta_info_flags - Station flags
21 * 21 *
22 * These flags are used with &struct sta_info's @flags member. 22 * These flags are used with &struct sta_info's @flags member, but
23 * only indirectly with set_sta_flag() and friends.
23 * 24 *
24 * @WLAN_STA_AUTH: Station is authenticated. 25 * @WLAN_STA_AUTH: Station is authenticated.
25 * @WLAN_STA_ASSOC: Station is associated. 26 * @WLAN_STA_ASSOC: Station is associated.
@@ -43,24 +44,33 @@
43 * be in the queues 44 * be in the queues
44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 45 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
45 * station in power-save mode, reply when the driver unblocks. 46 * station in power-save mode, reply when the driver unblocks.
46 * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal 47 * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
47 * buffers. Automatically cleared on station wake-up. 48 * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
49 * packets. This means the link is enabled.
50 * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
51 * keeping station in power-save mode, reply when the driver
52 * unblocks the station.
53 * @WLAN_STA_SP: Station is in a service period, so don't try to
54 * reply to other uAPSD trigger frames or PS-Poll.
48 */ 55 */
49enum ieee80211_sta_info_flags { 56enum ieee80211_sta_info_flags {
50 WLAN_STA_AUTH = 1<<0, 57 WLAN_STA_AUTH,
51 WLAN_STA_ASSOC = 1<<1, 58 WLAN_STA_ASSOC,
52 WLAN_STA_PS_STA = 1<<2, 59 WLAN_STA_PS_STA,
53 WLAN_STA_AUTHORIZED = 1<<3, 60 WLAN_STA_AUTHORIZED,
54 WLAN_STA_SHORT_PREAMBLE = 1<<4, 61 WLAN_STA_SHORT_PREAMBLE,
55 WLAN_STA_ASSOC_AP = 1<<5, 62 WLAN_STA_ASSOC_AP,
56 WLAN_STA_WME = 1<<6, 63 WLAN_STA_WME,
57 WLAN_STA_WDS = 1<<7, 64 WLAN_STA_WDS,
58 WLAN_STA_CLEAR_PS_FILT = 1<<9, 65 WLAN_STA_CLEAR_PS_FILT,
59 WLAN_STA_MFP = 1<<10, 66 WLAN_STA_MFP,
60 WLAN_STA_BLOCK_BA = 1<<11, 67 WLAN_STA_BLOCK_BA,
61 WLAN_STA_PS_DRIVER = 1<<12, 68 WLAN_STA_PS_DRIVER,
62 WLAN_STA_PSPOLL = 1<<13, 69 WLAN_STA_PSPOLL,
63 WLAN_STA_PS_DRIVER_BUF = 1<<14, 70 WLAN_STA_TDLS_PEER,
71 WLAN_STA_TDLS_PEER_AUTH,
72 WLAN_STA_UAPSD,
73 WLAN_STA_SP,
64}; 74};
65 75
66#define STA_TID_NUM 16 76#define STA_TID_NUM 16
@@ -86,6 +96,8 @@ enum ieee80211_sta_info_flags {
86 * @stop_initiator: initiator of a session stop 96 * @stop_initiator: initiator of a session stop
87 * @tx_stop: TX DelBA frame when stopping 97 * @tx_stop: TX DelBA frame when stopping
88 * @buf_size: reorder buffer size at receiver 98 * @buf_size: reorder buffer size at receiver
99 * @failed_bar_ssn: ssn of the last failed BAR tx attempt
100 * @bar_pending: BAR needs to be re-sent
89 * 101 *
90 * This structure's lifetime is managed by RCU, assignments to 102 * This structure's lifetime is managed by RCU, assignments to
91 * the array holding it must hold the aggregation mutex. 103 * the array holding it must hold the aggregation mutex.
@@ -106,6 +118,9 @@ struct tid_ampdu_tx {
106 u8 stop_initiator; 118 u8 stop_initiator;
107 bool tx_stop; 119 bool tx_stop;
108 u8 buf_size; 120 u8 buf_size;
121
122 u16 failed_bar_ssn;
123 bool bar_pending;
109}; 124};
110 125
111/** 126/**
@@ -198,15 +213,16 @@ struct sta_ampdu_mlme {
198 * @last_rx_rate_flag: rx status flag of the last data packet 213 * @last_rx_rate_flag: rx status flag of the last data packet
199 * @lock: used for locking all fields that require locking, see comments 214 * @lock: used for locking all fields that require locking, see comments
200 * in the header file. 215 * in the header file.
201 * @flaglock: spinlock for flags accesses
202 * @drv_unblock_wk: used for driver PS unblocking 216 * @drv_unblock_wk: used for driver PS unblocking
203 * @listen_interval: listen interval of this station, when we're acting as AP 217 * @listen_interval: listen interval of this station, when we're acting as AP
204 * @flags: STA flags, see &enum ieee80211_sta_info_flags 218 * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
205 * @ps_tx_buf: buffer of frames to transmit to this station 219 * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
206 * when it leaves power saving state 220 * when it leaves power saving state or polls
207 * @tx_filtered: buffer of frames we already tried to transmit 221 * @tx_filtered: buffers (per AC) of frames we already tried to
208 * but were filtered by hardware due to STA having entered 222 * transmit but were filtered by hardware due to STA having
209 * power saving state 223 * entered power saving state, these are also delivered to
224 * the station when it leaves powersave or polls for frames
225 * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
210 * @rx_packets: Number of MSDUs received from this STA 226 * @rx_packets: Number of MSDUs received from this STA
211 * @rx_bytes: Number of bytes received from this STA 227 * @rx_bytes: Number of bytes received from this STA
212 * @wep_weak_iv_count: number of weak WEP IVs received from this station 228 * @wep_weak_iv_count: number of weak WEP IVs received from this station
@@ -238,10 +254,12 @@ struct sta_ampdu_mlme {
238 * @plink_timer: peer link watch timer 254 * @plink_timer: peer link watch timer
239 * @plink_timer_was_running: used by suspend/resume to restore timers 255 * @plink_timer_was_running: used by suspend/resume to restore timers
240 * @debugfs: debug filesystem info 256 * @debugfs: debug filesystem info
241 * @sta: station information we share with the driver
242 * @dead: set to true when sta is unlinked 257 * @dead: set to true when sta is unlinked
243 * @uploaded: set to true when sta is uploaded to the driver 258 * @uploaded: set to true when sta is uploaded to the driver
244 * @lost_packets: number of consecutive lost packets 259 * @lost_packets: number of consecutive lost packets
260 * @dummy: indicate a dummy station created for receiving
261 * EAP frames before association
262 * @sta: station information we share with the driver
245 */ 263 */
246struct sta_info { 264struct sta_info {
247 /* General information, mostly static */ 265 /* General information, mostly static */
@@ -254,7 +272,6 @@ struct sta_info {
254 struct rate_control_ref *rate_ctrl; 272 struct rate_control_ref *rate_ctrl;
255 void *rate_ctrl_priv; 273 void *rate_ctrl_priv;
256 spinlock_t lock; 274 spinlock_t lock;
257 spinlock_t flaglock;
258 275
259 struct work_struct drv_unblock_wk; 276 struct work_struct drv_unblock_wk;
260 277
@@ -264,18 +281,16 @@ struct sta_info {
264 281
265 bool uploaded; 282 bool uploaded;
266 283
267 /* 284 /* use the accessors defined below */
268 * frequently updated, locked with own spinlock (flaglock), 285 unsigned long _flags;
269 * use the accessors defined below
270 */
271 u32 flags;
272 286
273 /* 287 /*
274 * STA powersave frame queues, no more than the internal 288 * STA powersave frame queues, no more than the internal
275 * locking required. 289 * locking required.
276 */ 290 */
277 struct sk_buff_head ps_tx_buf; 291 struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
278 struct sk_buff_head tx_filtered; 292 struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
293 unsigned long driver_buffered_tids;
279 294
280 /* Updated from RX path only, no locking requirements */ 295 /* Updated from RX path only, no locking requirements */
281 unsigned long rx_packets, rx_bytes; 296 unsigned long rx_packets, rx_bytes;
@@ -336,6 +351,9 @@ struct sta_info {
336 351
337 unsigned int lost_packets; 352 unsigned int lost_packets;
338 353
354 /* should be right in front of sta to be in the same cache line */
355 bool dummy;
356
339 /* keep last! */ 357 /* keep last! */
340 struct ieee80211_sta sta; 358 struct ieee80211_sta sta;
341}; 359};
@@ -348,60 +366,28 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
348 return NL80211_PLINK_LISTEN; 366 return NL80211_PLINK_LISTEN;
349} 367}
350 368
351static inline void set_sta_flags(struct sta_info *sta, const u32 flags) 369static inline void set_sta_flag(struct sta_info *sta,
370 enum ieee80211_sta_info_flags flag)
352{ 371{
353 unsigned long irqfl; 372 set_bit(flag, &sta->_flags);
354
355 spin_lock_irqsave(&sta->flaglock, irqfl);
356 sta->flags |= flags;
357 spin_unlock_irqrestore(&sta->flaglock, irqfl);
358} 373}
359 374
360static inline void clear_sta_flags(struct sta_info *sta, const u32 flags) 375static inline void clear_sta_flag(struct sta_info *sta,
376 enum ieee80211_sta_info_flags flag)
361{ 377{
362 unsigned long irqfl; 378 clear_bit(flag, &sta->_flags);
363
364 spin_lock_irqsave(&sta->flaglock, irqfl);
365 sta->flags &= ~flags;
366 spin_unlock_irqrestore(&sta->flaglock, irqfl);
367} 379}
368 380
369static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) 381static inline int test_sta_flag(struct sta_info *sta,
382 enum ieee80211_sta_info_flags flag)
370{ 383{
371 u32 ret; 384 return test_bit(flag, &sta->_flags);
372 unsigned long irqfl;
373
374 spin_lock_irqsave(&sta->flaglock, irqfl);
375 ret = sta->flags & flags;
376 spin_unlock_irqrestore(&sta->flaglock, irqfl);
377
378 return ret;
379}
380
381static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
382 const u32 flags)
383{
384 u32 ret;
385 unsigned long irqfl;
386
387 spin_lock_irqsave(&sta->flaglock, irqfl);
388 ret = sta->flags & flags;
389 sta->flags &= ~flags;
390 spin_unlock_irqrestore(&sta->flaglock, irqfl);
391
392 return ret;
393} 385}
394 386
395static inline u32 get_sta_flags(struct sta_info *sta) 387static inline int test_and_clear_sta_flag(struct sta_info *sta,
388 enum ieee80211_sta_info_flags flag)
396{ 389{
397 u32 ret; 390 return test_and_clear_bit(flag, &sta->_flags);
398 unsigned long irqfl;
399
400 spin_lock_irqsave(&sta->flaglock, irqfl);
401 ret = sta->flags;
402 spin_unlock_irqrestore(&sta->flaglock, irqfl);
403
404 return ret;
405} 391}
406 392
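The rewrite above turns the flags word from a spinlock-protected bitmask into per-bit atomic operations, which is why the enum values became bit numbers instead of masks and why flaglock could be deleted. A minimal userspace analogue using C11 atomics; the kernel's set_bit()/clear_bit()/test_bit() play the same role:

#include <stdatomic.h>
#include <stdio.h>

enum sta_flag { STA_AUTH, STA_ASSOC, STA_PS };  /* bit numbers, not masks */

static _Atomic unsigned long sta_flags;

static void set_sta_flag(enum sta_flag f)
{
    atomic_fetch_or(&sta_flags, 1UL << f);      /* lock-free per-bit RMW */
}

static void clear_sta_flag(enum sta_flag f)
{
    atomic_fetch_and(&sta_flags, ~(1UL << f));
}

static int test_sta_flag(enum sta_flag f)
{
    return (atomic_load(&sta_flags) >> f) & 1;
}

int main(void)
{
    set_sta_flag(STA_ASSOC);
    printf("assoc=%d ps=%d\n", test_sta_flag(STA_ASSOC), test_sta_flag(STA_PS));
    clear_sta_flag(STA_ASSOC);
    printf("assoc=%d\n", test_sta_flag(STA_ASSOC));
    return 0;
}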
407void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, 393void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
@@ -419,8 +405,8 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
419#define STA_HASH(sta) (sta[5]) 405#define STA_HASH(sta) (sta[5])
420 406
421 407
422/* Maximum number of frames to buffer per power saving station */ 408/* Maximum number of frames to buffer per power saving station per AC */
423#define STA_MAX_TX_BUFFER 128 409#define STA_MAX_TX_BUFFER 64
424 410
425/* Minimum buffered frame expiry time. If STA uses listen interval that is 411/* Minimum buffered frame expiry time. If STA uses listen interval that is
426 * smaller than this value, the minimum value here is used instead. */ 412 * smaller than this value, the minimum value here is used instead. */
@@ -436,9 +422,15 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
436struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 422struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
437 const u8 *addr); 423 const u8 *addr);
438 424
425struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
426 const u8 *addr);
427
439struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 428struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
440 const u8 *addr); 429 const u8 *addr);
441 430
431struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
432 const u8 *addr);
433
442static inline 434static inline
443void for_each_sta_info_type_check(struct ieee80211_local *local, 435void for_each_sta_info_type_check(struct ieee80211_local *local,
444 const u8 *addr, 436 const u8 *addr,
@@ -459,6 +451,22 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
459 _sta = nxt, \ 451 _sta = nxt, \
460 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ 452 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
461 ) \ 453 ) \
454 /* run code only if address matches and it's not a dummy sta */ \
455 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
456 !_sta->dummy)
457
458#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
459 for ( /* initialise loop */ \
460 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
461 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
462 /* typecheck */ \
463 for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
464 /* continue condition */ \
465 _sta; \
466 /* advance loop */ \
467 _sta = nxt, \
468 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
469 ) \
462 /* compare address and run code only if it matches */ \ 470 /* compare address and run code only if it matches */ \
463 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0) 471 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
464 472
@@ -484,14 +492,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
484int sta_info_insert(struct sta_info *sta); 492int sta_info_insert(struct sta_info *sta);
485int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); 493int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
486int sta_info_insert_atomic(struct sta_info *sta); 494int sta_info_insert_atomic(struct sta_info *sta);
495int sta_info_reinsert(struct sta_info *sta);
487 496
488int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, 497int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
489 const u8 *addr); 498 const u8 *addr);
490int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 499int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
491 const u8 *addr); 500 const u8 *addr);
492 501
493void sta_info_set_tim_bit(struct sta_info *sta); 502void sta_info_recalc_tim(struct sta_info *sta);
494void sta_info_clear_tim_bit(struct sta_info *sta);
495 503
496void sta_info_init(struct ieee80211_local *local); 504void sta_info_init(struct ieee80211_local *local);
497void sta_info_stop(struct ieee80211_local *local); 505void sta_info_stop(struct ieee80211_local *local);
@@ -502,5 +510,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
502 510
503void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); 511void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
504void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); 512void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
513void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
505 514
506#endif /* STA_INFO_H */ 515#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1658efaa2e8e..80de436eae20 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -9,11 +9,13 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/export.h>
12#include <net/mac80211.h> 13#include <net/mac80211.h>
13#include "ieee80211_i.h" 14#include "ieee80211_i.h"
14#include "rate.h" 15#include "rate.h"
15#include "mesh.h" 16#include "mesh.h"
16#include "led.h" 17#include "led.h"
18#include "wme.h"
17 19
18 20
19void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 21void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -43,6 +45,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
43 struct sk_buff *skb) 45 struct sk_buff *skb)
44{ 46{
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
48 struct ieee80211_hdr *hdr = (void *)skb->data;
49 int ac;
46 50
47 /* 51 /*
48 * This skb 'survived' a round-trip through the driver, and 52 * This skb 'survived' a round-trip through the driver, and
@@ -63,11 +67,37 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
63 sta->tx_filtered_count++; 67 sta->tx_filtered_count++;
64 68
65 /* 69 /*
70 * Clear more-data bit on filtered frames, it might be set
71 * but later frames might time out so it might have to be
 72 * cleared again ... It's all rather unlikely (this frame
73 * should time out first, right?) but let's not confuse
74 * peers unnecessarily.
75 */
76 if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
77 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
78
79 if (ieee80211_is_data_qos(hdr->frame_control)) {
80 u8 *p = ieee80211_get_qos_ctl(hdr);
81 int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
82
83 /*
84 * Clear EOSP if set, this could happen e.g.
85 * if an absence period (us being a P2P GO)
86 * shortens the SP.
87 */
88 if (*p & IEEE80211_QOS_CTL_EOSP)
89 *p &= ~IEEE80211_QOS_CTL_EOSP;
90 ac = ieee802_1d_to_ac[tid & 7];
91 } else {
92 ac = IEEE80211_AC_BE;
93 }
94
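The AC chosen above comes from the standard 802.1d priority table; re-queuing a filtered frame on the wrong AC would reorder it relative to later traffic on that queue. A standalone sketch of the mapping, assuming mac80211's AC numbering (VO=0 .. BK=3):

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };

static const int ieee802_1d_to_ac[8] = {
    AC_BE,  /* TID 0: best effort */
    AC_BK,  /* TID 1: background  */
    AC_BK,  /* TID 2: background  */
    AC_BE,  /* TID 3: best effort */
    AC_VI,  /* TID 4: video       */
    AC_VI,  /* TID 5: video       */
    AC_VO,  /* TID 6: voice       */
    AC_VO,  /* TID 7: voice       */
};

int main(void)
{
    for (int tid = 0; tid < 8; tid++)
        printf("TID %d -> AC %d\n", tid, ieee802_1d_to_ac[tid & 7]);
    return 0;
}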
95 /*
66 * Clear the TX filter mask for this STA when sending the next 96 * Clear the TX filter mask for this STA when sending the next
67 * packet. If the STA went to power save mode, this will happen 97 * packet. If the STA went to power save mode, this will happen
68 * when it wakes up for the next time. 98 * when it wakes up for the next time.
69 */ 99 */
70 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT); 100 set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
71 101
72 /* 102 /*
73 * This code races in the following way: 103 * This code races in the following way:
@@ -103,13 +133,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
103 * changes before calling TX status events if ordering can be 133 * changes before calling TX status events if ordering can be
104 * unknown. 134 * unknown.
105 */ 135 */
106 if (test_sta_flags(sta, WLAN_STA_PS_STA) && 136 if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
107 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 137 skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
108 skb_queue_tail(&sta->tx_filtered, skb); 138 skb_queue_tail(&sta->tx_filtered[ac], skb);
139 sta_info_recalc_tim(sta);
140
141 if (!timer_pending(&local->sta_cleanup))
142 mod_timer(&local->sta_cleanup,
143 round_jiffies(jiffies +
144 STA_INFO_CLEANUP_INTERVAL));
109 return; 145 return;
110 } 146 }
111 147
112 if (!test_sta_flags(sta, WLAN_STA_PS_STA) && 148 if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
113 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { 149 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
114 /* Software retry the packet once */ 150 /* Software retry the packet once */
115 info->flags |= IEEE80211_TX_INTFL_RETRIED; 151 info->flags |= IEEE80211_TX_INTFL_RETRIED;
@@ -121,18 +157,38 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
121 if (net_ratelimit()) 157 if (net_ratelimit())
122 wiphy_debug(local->hw.wiphy, 158 wiphy_debug(local->hw.wiphy,
123 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", 159 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
124 skb_queue_len(&sta->tx_filtered), 160 skb_queue_len(&sta->tx_filtered[ac]),
125 !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies); 161 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
126#endif 162#endif
127 dev_kfree_skb(skb); 163 dev_kfree_skb(skb);
128} 164}
129 165
166static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
167{
168 struct tid_ampdu_tx *tid_tx;
169
170 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
171 if (!tid_tx || !tid_tx->bar_pending)
172 return;
173
174 tid_tx->bar_pending = false;
175 ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
176}
177
130static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) 178static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
131{ 179{
132 struct ieee80211_mgmt *mgmt = (void *) skb->data; 180 struct ieee80211_mgmt *mgmt = (void *) skb->data;
133 struct ieee80211_local *local = sta->local; 181 struct ieee80211_local *local = sta->local;
134 struct ieee80211_sub_if_data *sdata = sta->sdata; 182 struct ieee80211_sub_if_data *sdata = sta->sdata;
135 183
184 if (ieee80211_is_data_qos(mgmt->frame_control)) {
185 struct ieee80211_hdr *hdr = (void *) skb->data;
186 u8 *qc = ieee80211_get_qos_ctl(hdr);
187 u16 tid = qc[0] & 0xf;
188
189 ieee80211_check_pending_bar(sta, hdr->addr1, tid);
190 }
191
136 if (ieee80211_is_action(mgmt->frame_control) && 192 if (ieee80211_is_action(mgmt->frame_control) &&
137 sdata->vif.type == NL80211_IFTYPE_STATION && 193 sdata->vif.type == NL80211_IFTYPE_STATION &&
138 mgmt->u.action.category == WLAN_CATEGORY_HT && 194 mgmt->u.action.category == WLAN_CATEGORY_HT &&
@@ -161,6 +217,114 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
161 } 217 }
162} 218}
163 219
220static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
221{
222 struct tid_ampdu_tx *tid_tx;
223
224 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
225 if (!tid_tx)
226 return;
227
228 tid_tx->failed_bar_ssn = ssn;
229 tid_tx->bar_pending = true;
230}
231
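Together, ieee80211_set_bar_pending() and ieee80211_check_pending_bar() form a tiny retry state machine: a failed BAR parks its SSN on the TID, and the next ACKed unicast frame on that TID resends it. A standalone sketch of the same state machine with a hypothetical SSN value:

#include <stdio.h>
#include <stdbool.h>

struct tid_tx_state {
    unsigned short failed_bar_ssn;
    bool bar_pending;
};

static void bar_tx_status(struct tid_tx_state *t, unsigned short ssn, bool acked)
{
    if (!acked) {              /* mirrors ieee80211_set_bar_pending() */
        t->failed_bar_ssn = ssn;
        t->bar_pending = true;
    }
}

static void frame_acked(struct tid_tx_state *t)
{
    if (!t->bar_pending)       /* mirrors ieee80211_check_pending_bar() */
        return;
    t->bar_pending = false;
    printf("resending BAR with ssn=%u\n", t->failed_bar_ssn);
}

int main(void)
{
    struct tid_tx_state t = { 0 };
    bar_tx_status(&t, 0x120, false);   /* BAR not ACKed */
    frame_acked(&t);                   /* next unicast ACK triggers retry */
    return 0;
}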
232static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
233{
234 int len = sizeof(struct ieee80211_radiotap_header);
235
236 /* IEEE80211_RADIOTAP_RATE rate */
237 if (info->status.rates[0].idx >= 0 &&
238 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
239 len += 2;
240
241 /* IEEE80211_RADIOTAP_TX_FLAGS */
242 len += 2;
243
244 /* IEEE80211_RADIOTAP_DATA_RETRIES */
245 len += 1;
246
247 /* IEEE80211_TX_RC_MCS */
248 if (info->status.rates[0].idx >= 0 &&
249 info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
250 len += 3;
251
252 return len;
253}
254
255static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
256 *sband, struct sk_buff *skb,
257 int retry_count, int rtap_len)
258{
259 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
260 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
261 struct ieee80211_radiotap_header *rthdr;
262 unsigned char *pos;
263 __le16 txflags;
264
265 rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
266
267 memset(rthdr, 0, rtap_len);
268 rthdr->it_len = cpu_to_le16(rtap_len);
269 rthdr->it_present =
270 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
271 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
272 pos = (unsigned char *)(rthdr + 1);
273
274 /*
275 * XXX: Once radiotap gets the bitmap reset thing the vendor
276 * extensions proposal contains, we can actually report
277 * the whole set of tries we did.
278 */
279
280 /* IEEE80211_RADIOTAP_RATE */
281 if (info->status.rates[0].idx >= 0 &&
282 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
283 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
284 *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
285 /* padding for tx flags */
286 pos += 2;
287 }
288
289 /* IEEE80211_RADIOTAP_TX_FLAGS */
290 txflags = 0;
291 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
292 !is_multicast_ether_addr(hdr->addr1))
293 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
294
 295 if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
 296 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
 297 else if (info->status.rates[0].flags &
 298 IEEE80211_TX_RC_USE_CTS_PROTECT)
 299 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
300
301 put_unaligned_le16(txflags, pos);
302 pos += 2;
303
304 /* IEEE80211_RADIOTAP_DATA_RETRIES */
305 /* for now report the total retry_count */
306 *pos = retry_count;
307 pos++;
308
309 /* IEEE80211_TX_RC_MCS */
310 if (info->status.rates[0].idx >= 0 &&
311 info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
312 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
313 pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
314 IEEE80211_RADIOTAP_MCS_HAVE_GI |
315 IEEE80211_RADIOTAP_MCS_HAVE_BW;
316 if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
317 pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
318 if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
319 pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
320 if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
321 pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
322 pos[2] = info->status.rates[0].idx;
323 pos += 3;
324 }
325
326}
327
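Because the radiotap header is now built field by field, its length varies with the rate type, and ieee80211_tx_radiotap_len() above mirrors exactly the fields ieee80211_add_tx_radiotap_header() emits. A standalone sketch of the arithmetic; the byte sizes mirror the logic above, not real struct layouts:

#include <stdio.h>
#include <stdbool.h>

static int tx_radiotap_len(bool have_rate_idx, bool is_mcs)
{
    int len = 8;                      /* struct ieee80211_radiotap_header */
    if (have_rate_idx && !is_mcs)
        len += 2;                     /* RATE byte + pad to align TX_FLAGS */
    len += 2;                         /* TX_FLAGS */
    len += 1;                         /* DATA_RETRIES */
    if (have_rate_idx && is_mcs)
        len += 3;                     /* MCS: known, flags, index */
    return len;
}

int main(void)
{
    printf("legacy rate: %d bytes\n", tx_radiotap_len(true, false));  /* 13 */
    printf("MCS rate:    %d bytes\n", tx_radiotap_len(true, true));   /* 14 */
    return 0;
}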
164/* 328/*
165 * Use a static threshold for now, best value to be determined 329 * Use a static threshold for now, best value to be determined
166 * by testing ... 330 * by testing ...
@@ -179,7 +343,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
179 u16 frag, type; 343 u16 frag, type;
180 __le16 fc; 344 __le16 fc;
181 struct ieee80211_supported_band *sband; 345 struct ieee80211_supported_band *sband;
182 struct ieee80211_tx_status_rtap_hdr *rthdr;
183 struct ieee80211_sub_if_data *sdata; 346 struct ieee80211_sub_if_data *sdata;
184 struct net_device *prev_dev = NULL; 347 struct net_device *prev_dev = NULL;
185 struct sta_info *sta, *tmp; 348 struct sta_info *sta, *tmp;
@@ -187,6 +350,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
187 int rates_idx = -1; 350 int rates_idx = -1;
188 bool send_to_cooked; 351 bool send_to_cooked;
189 bool acked; 352 bool acked;
353 struct ieee80211_bar *bar;
354 u16 tid;
355 int rtap_len;
190 356
191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 357 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
192 if (info->status.rates[i].idx < 0) { 358 if (info->status.rates[i].idx < 0) {
@@ -215,8 +381,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
215 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN)) 381 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
216 continue; 382 continue;
217 383
384 if (info->flags & IEEE80211_TX_STATUS_EOSP)
385 clear_sta_flag(sta, WLAN_STA_SP);
386
218 acked = !!(info->flags & IEEE80211_TX_STAT_ACK); 387 acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
219 if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) { 388 if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
220 /* 389 /*
221 * The STA is in power save mode, so assume 390 * The STA is in power save mode, so assume
222 * that this TX packet failed because of that. 391 * that this TX packet failed because of that.
@@ -239,10 +408,31 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
239 tid = qc[0] & 0xf; 408 tid = qc[0] & 0xf;
240 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) 409 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
241 & IEEE80211_SCTL_SEQ); 410 & IEEE80211_SCTL_SEQ);
242 ieee80211_send_bar(sta->sdata, hdr->addr1, 411 ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
243 tid, ssn); 412 tid, ssn);
244 } 413 }
245 414
415 if (!acked && ieee80211_is_back_req(fc)) {
416 u16 control;
417
418 /*
419 * BAR failed, store the last SSN and retry sending
420 * the BAR when the next unicast transmission on the
421 * same TID succeeds.
422 */
423 bar = (struct ieee80211_bar *) skb->data;
424 control = le16_to_cpu(bar->control);
425 if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
426 u16 ssn = le16_to_cpu(bar->start_seq_num);
427
428 tid = (control &
429 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
430 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
431
432 ieee80211_set_bar_pending(sta, tid, ssn);
433 }
434 }
435
246 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { 436 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
247 ieee80211_handle_filtered_frame(local, sta, skb); 437 ieee80211_handle_filtered_frame(local, sta, skb);
248 rcu_read_unlock(); 438 rcu_read_unlock();
@@ -336,7 +526,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
336 continue; 526 continue;
337 if (wk->offchan_tx.frame != skb) 527 if (wk->offchan_tx.frame != skb)
338 continue; 528 continue;
339 wk->offchan_tx.frame = NULL; 529 wk->offchan_tx.status = true;
340 break; 530 break;
341 } 531 }
342 rcu_read_unlock(); 532 rcu_read_unlock();
@@ -345,9 +535,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
345 local->hw_roc_skb_for_status = NULL; 535 local->hw_roc_skb_for_status = NULL;
346 } 536 }
347 537
348 if (cookie == local->hw_offchan_tx_cookie)
349 local->hw_offchan_tx_cookie = 0;
350
351 cfg80211_mgmt_tx_status( 538 cfg80211_mgmt_tx_status(
352 skb->dev, cookie, skb->data, skb->len, 539 skb->dev, cookie, skb->data, skb->len,
353 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 540 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
@@ -370,44 +557,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
370 } 557 }
371 558
372 /* send frame to monitor interfaces now */ 559 /* send frame to monitor interfaces now */
373 560 rtap_len = ieee80211_tx_radiotap_len(info);
374 if (skb_headroom(skb) < sizeof(*rthdr)) { 561 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
375 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n"); 562 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
376 dev_kfree_skb(skb); 563 dev_kfree_skb(skb);
377 return; 564 return;
378 } 565 }
379 566 ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
380 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
381 skb_push(skb, sizeof(*rthdr));
382
383 memset(rthdr, 0, sizeof(*rthdr));
384 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
385 rthdr->hdr.it_present =
386 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
387 (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
388 (1 << IEEE80211_RADIOTAP_RATE));
389
390 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
391 !is_multicast_ether_addr(hdr->addr1))
392 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
393
394 /*
395 * XXX: Once radiotap gets the bitmap reset thing the vendor
396 * extensions proposal contains, we can actually report
397 * the whole set of tries we did.
398 */
399 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
400 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
401 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
402 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
403 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
404 if (info->status.rates[0].idx >= 0 &&
405 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
406 rthdr->rate = sband->bitrates[
407 info->status.rates[0].idx].bitrate / 5;
408
409 /* for now report the total retry_count */
410 rthdr->data_retries = retry_count;
411 567
412 /* XXX: is this sufficient for BPF? */ 568 /* XXX: is this sufficient for BPF? */
413 skb_set_mac_header(skb, 0); 569 skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index f49d00a4c7fd..51077a956a83 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -10,6 +10,7 @@
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/export.h>
13#include <asm/unaligned.h> 14#include <asm/unaligned.h>
14 15
15#include <net/mac80211.h> 16#include <net/mac80211.h>
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8cb0d2d0ac69..1f8b120146d1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -18,6 +18,7 @@
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/bitmap.h> 19#include <linux/bitmap.h>
20#include <linux/rcupdate.h> 20#include <linux/rcupdate.h>
21#include <linux/export.h>
21#include <net/net_namespace.h> 22#include <net/net_namespace.h>
22#include <net/ieee80211_radiotap.h> 23#include <net/ieee80211_radiotap.h>
23#include <net/cfg80211.h> 24#include <net/cfg80211.h>
@@ -253,7 +254,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
253 254
254 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 255 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
255 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 256 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
256 u32 sta_flags; 257 bool assoc = false;
257 258
258 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) 259 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
259 return TX_CONTINUE; 260 return TX_CONTINUE;
@@ -284,10 +285,11 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
284 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 285 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
285 return TX_CONTINUE; 286 return TX_CONTINUE;
286 287
287 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; 288 if (tx->sta)
289 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
288 290
289 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 291 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
290 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 292 if (unlikely(!assoc &&
291 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 293 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
292 ieee80211_is_data(hdr->frame_control))) { 294 ieee80211_is_data(hdr->frame_control))) {
293#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 295#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -343,13 +345,22 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
343 total += skb_queue_len(&ap->ps_bc_buf); 345 total += skb_queue_len(&ap->ps_bc_buf);
344 } 346 }
345 347
348 /*
349 * Drop one frame from each station from the lowest-priority
350 * AC that has frames at all.
351 */
346 list_for_each_entry_rcu(sta, &local->sta_list, list) { 352 list_for_each_entry_rcu(sta, &local->sta_list, list) {
347 skb = skb_dequeue(&sta->ps_tx_buf); 353 int ac;
348 if (skb) { 354
349 purged++; 355 for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
350 dev_kfree_skb(skb); 356 skb = skb_dequeue(&sta->ps_tx_buf[ac]);
357 total += skb_queue_len(&sta->ps_tx_buf[ac]);
358 if (skb) {
359 purged++;
360 dev_kfree_skb(skb);
361 break;
362 }
351 } 363 }
352 total += skb_queue_len(&sta->ps_tx_buf);
353 } 364 }
354 365
355 rcu_read_unlock(); 366 rcu_read_unlock();
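The purge loop above deliberately scans ACs from background up to voice so that, when the global buffer limit is hit, the lowest-priority traffic is sacrificed first. A standalone sketch of the scan order (queue lengths are made up):

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

int main(void)
{
    int qlen[NUM_ACS] = { 2, 0, 0, 0 };   /* hypothetical: only VO has frames */

    /* walk from lowest priority (BK=3) to highest (VO=0), drop one frame
     * from the first non-empty queue */
    for (int ac = AC_BK; ac >= AC_VO; ac--) {
        if (qlen[ac]) {
            qlen[ac]--;
            printf("purged one frame from AC %d\n", ac);
            break;
        }
    }
    return 0;
}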
@@ -418,7 +429,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
418 if (!ieee80211_is_mgmt(fc)) 429 if (!ieee80211_is_mgmt(fc))
419 return 0; 430 return 0;
420 431
421 if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP)) 432 if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
422 return 0; 433 return 0;
423 434
424 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) 435 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
@@ -435,7 +446,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
435 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 446 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
436 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 447 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
437 struct ieee80211_local *local = tx->local; 448 struct ieee80211_local *local = tx->local;
438 u32 staflags;
439 449
440 if (unlikely(!sta || 450 if (unlikely(!sta ||
441 ieee80211_is_probe_resp(hdr->frame_control) || 451 ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -444,57 +454,52 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
444 ieee80211_is_reassoc_resp(hdr->frame_control))) 454 ieee80211_is_reassoc_resp(hdr->frame_control)))
445 return TX_CONTINUE; 455 return TX_CONTINUE;
446 456
447 staflags = get_sta_flags(sta); 457 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
458 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
459 !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) {
460 int ac = skb_get_queue_mapping(tx->skb);
448 461
449 if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
450 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
451#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 462#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
452 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " 463 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
453 "before %d)\n", 464 sta->sta.addr, sta->sta.aid, ac);
454 sta->sta.addr, sta->sta.aid,
455 skb_queue_len(&sta->ps_tx_buf));
456#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 465#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
457 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 466 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
458 purge_old_ps_buffers(tx->local); 467 purge_old_ps_buffers(tx->local);
459 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 468 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
460 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); 469 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
461#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 470#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
462 if (net_ratelimit()) { 471 if (net_ratelimit())
463 printk(KERN_DEBUG "%s: STA %pM TX " 472 printk(KERN_DEBUG "%s: STA %pM TX buffer for "
464 "buffer full - dropping oldest frame\n", 473 "AC %d full - dropping oldest frame\n",
465 tx->sdata->name, sta->sta.addr); 474 tx->sdata->name, sta->sta.addr, ac);
466 }
467#endif 475#endif
468 dev_kfree_skb(old); 476 dev_kfree_skb(old);
469 } else 477 } else
470 tx->local->total_ps_buffered++; 478 tx->local->total_ps_buffered++;
471 479
472 /*
473 * Queue frame to be sent after STA wakes up/polls,
474 * but don't set the TIM bit if the driver is blocking
475 * wakeup or poll response transmissions anyway.
476 */
477 if (skb_queue_empty(&sta->ps_tx_buf) &&
478 !(staflags & WLAN_STA_PS_DRIVER))
479 sta_info_set_tim_bit(sta);
480
481 info->control.jiffies = jiffies; 480 info->control.jiffies = jiffies;
482 info->control.vif = &tx->sdata->vif; 481 info->control.vif = &tx->sdata->vif;
483 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 482 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
484 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 483 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
485 484
486 if (!timer_pending(&local->sta_cleanup)) 485 if (!timer_pending(&local->sta_cleanup))
487 mod_timer(&local->sta_cleanup, 486 mod_timer(&local->sta_cleanup,
488 round_jiffies(jiffies + 487 round_jiffies(jiffies +
489 STA_INFO_CLEANUP_INTERVAL)); 488 STA_INFO_CLEANUP_INTERVAL));
490 489
490 /*
491 * We queued up some frames, so the TIM bit might
492 * need to be set, recalculate it.
493 */
494 sta_info_recalc_tim(sta);
495
491 return TX_QUEUED; 496 return TX_QUEUED;
492 } 497 }
493#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 498#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
494 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 499 else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
495 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 500 printk(KERN_DEBUG
496 "set -> send frame\n", tx->sdata->name, 501 "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
497 sta->sta.addr); 502 tx->sdata->name, sta->sta.addr);
498 } 503 }
499#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 504#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
500 505
@@ -552,7 +557,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
552 !(info->flags & IEEE80211_TX_CTL_INJECTED) && 557 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
553 (!ieee80211_is_robust_mgmt_frame(hdr) || 558 (!ieee80211_is_robust_mgmt_frame(hdr) ||
554 (ieee80211_is_action(hdr->frame_control) && 559 (ieee80211_is_action(hdr->frame_control) &&
555 tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) { 560 tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
556 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 561 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
557 return TX_DROP; 562 return TX_DROP;
558 } else 563 } else
@@ -611,7 +616,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
611 u32 len; 616 u32 len;
612 bool inval = false, rts = false, short_preamble = false; 617 bool inval = false, rts = false, short_preamble = false;
613 struct ieee80211_tx_rate_control txrc; 618 struct ieee80211_tx_rate_control txrc;
614 u32 sta_flags; 619 bool assoc = false;
615 620
616 memset(&txrc, 0, sizeof(txrc)); 621 memset(&txrc, 0, sizeof(txrc));
617 622
@@ -647,17 +652,17 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
647 */ 652 */
648 if (tx->sdata->vif.bss_conf.use_short_preamble && 653 if (tx->sdata->vif.bss_conf.use_short_preamble &&
649 (ieee80211_is_data(hdr->frame_control) || 654 (ieee80211_is_data(hdr->frame_control) ||
650 (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) 655 (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
651 txrc.short_preamble = short_preamble = true; 656 txrc.short_preamble = short_preamble = true;
652 657
653 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; 658 if (tx->sta)
659 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
654 660
655 /* 661 /*
656 * Let's not bother with rate control if we're associated and cannot 662 * Let's not bother with rate control if we're associated and cannot
657 * talk to the sta. This should not happen. 663 * talk to the sta. This should not happen.
658 */ 664 */
659 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && 665 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
660 (sta_flags & WLAN_STA_ASSOC) &&
661 !rate_usable_index_exists(sband, &tx->sta->sta), 666 !rate_usable_index_exists(sband, &tx->sta->sta),
662 "%s: Dropped data frame as no usable bitrate found while " 667 "%s: Dropped data frame as no usable bitrate found while "
663 "scanning and associated. Target station: " 668 "scanning and associated. Target station: "
@@ -800,6 +805,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
800 if (ieee80211_hdrlen(hdr->frame_control) < 24) 805 if (ieee80211_hdrlen(hdr->frame_control) < 24)
801 return TX_CONTINUE; 806 return TX_CONTINUE;
802 807
808 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
809 return TX_CONTINUE;
810
803 /* 811 /*
804 * Anything but QoS data that has a sequence number field 812 * Anything but QoS data that has a sequence number field
805 * (is long enough) gets a sequence number from the global 813 * (is long enough) gets a sequence number from the global
@@ -891,7 +899,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
891 int hdrlen; 899 int hdrlen;
892 int fragnum; 900 int fragnum;
893 901
894 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 902 if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
903 return TX_CONTINUE;
904
905 if (tx->local->ops->set_frag_threshold)
895 return TX_CONTINUE; 906 return TX_CONTINUE;
896 907
897 /* 908 /*
@@ -904,7 +915,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
904 915
905 hdrlen = ieee80211_hdrlen(hdr->frame_control); 916 hdrlen = ieee80211_hdrlen(hdr->frame_control);
906 917
907 /* internal error, why is TX_FRAGMENTED set? */ 918 /* internal error, why isn't DONTFRAG set? */
908 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) 919 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
909 return TX_DROP; 920 return TX_DROP;
910 921
@@ -1025,100 +1036,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
1025 1036
1026/* actual transmit path */ 1037/* actual transmit path */
1027 1038
1028/*
1029 * deal with packet injection down monitor interface
1030 * with Radiotap Header -- only called for monitor mode interface
1031 */
1032static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1033 struct sk_buff *skb)
1034{
1035 /*
1036 * this is the moment to interpret and discard the radiotap header that
1037 * must be at the start of the packet injected in Monitor mode
1038 *
1039 * Need to take some care with endian-ness since radiotap
1040 * args are little-endian
1041 */
1042
1043 struct ieee80211_radiotap_iterator iterator;
1044 struct ieee80211_radiotap_header *rthdr =
1045 (struct ieee80211_radiotap_header *) skb->data;
1046 bool hw_frag;
1047 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1048 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1049 NULL);
1050
1051 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1052 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1053
1054 /* packet is fragmented in HW if we have a non-NULL driver callback */
1055 hw_frag = (tx->local->ops->set_frag_threshold != NULL);
1056
1057 /*
1058 * for every radiotap entry that is present
1059 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1060 * entries present, or -EINVAL on error)
1061 */
1062
1063 while (!ret) {
1064 ret = ieee80211_radiotap_iterator_next(&iterator);
1065
1066 if (ret)
1067 continue;
1068
1069 /* see if this argument is something we can use */
1070 switch (iterator.this_arg_index) {
1071 /*
1072 * You must take care when dereferencing iterator.this_arg
1073 * for multibyte types... the pointer is not aligned. Use
1074 * get_unaligned((type *)iterator.this_arg) to dereference
1075 * iterator.this_arg for type "type" safely on all arches.
1076 */
1077 case IEEE80211_RADIOTAP_FLAGS:
1078 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1079 /*
1080 * this indicates that the skb we have been
1081 * handed has the 32-bit FCS CRC at the end...
1082 * we should react to that by snipping it off
1083 * because it will be recomputed and added
1084 * on transmission
1085 */
1086 if (skb->len < (iterator._max_length + FCS_LEN))
1087 return false;
1088
1089 skb_trim(skb, skb->len - FCS_LEN);
1090 }
1091 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1092 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1093 if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
1094 !hw_frag)
1095 tx->flags |= IEEE80211_TX_FRAGMENTED;
1096 break;
1097
1098 /*
1099 * Please update the file
1100 * Documentation/networking/mac80211-injection.txt
1101 * when parsing new fields here.
1102 */
1103
1104 default:
1105 break;
1106 }
1107 }
1108
1109 if (ret != -ENOENT) /* i.e., if we didn't simply run out of fields */
1110 return false;
1111
1112 /*
1113 * remove the radiotap header
1114 * iterator->_max_length was sanity-checked against
1115 * skb->len by iterator init
1116 */
1117 skb_pull(skb, iterator._max_length);
1118
1119 return true;
1120}
1121
1122static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, 1039static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1123 struct sk_buff *skb, 1040 struct sk_buff *skb,
1124 struct ieee80211_tx_info *info, 1041 struct ieee80211_tx_info *info,
@@ -1183,7 +1100,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1183 struct ieee80211_local *local = sdata->local; 1100 struct ieee80211_local *local = sdata->local;
1184 struct ieee80211_hdr *hdr; 1101 struct ieee80211_hdr *hdr;
1185 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1102 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1186 int hdrlen, tid; 1103 int tid;
1187 u8 *qc; 1104 u8 *qc;
1188 1105
1189 memset(tx, 0, sizeof(*tx)); 1106 memset(tx, 0, sizeof(*tx));
@@ -1191,26 +1108,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1191 tx->local = local; 1108 tx->local = local;
1192 tx->sdata = sdata; 1109 tx->sdata = sdata;
1193 tx->channel = local->hw.conf.channel; 1110 tx->channel = local->hw.conf.channel;
1194 /*
1195 * Set this flag (used below to indicate "automatic fragmentation"),
1196 * it will be cleared/left by radiotap as desired.
1197 * Only valid when fragmentation is done by the stack.
1198 */
1199 if (!local->ops->set_frag_threshold)
1200 tx->flags |= IEEE80211_TX_FRAGMENTED;
1201
1202 /* process and remove the injection radiotap header */
1203 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1204 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1205 return TX_DROP;
1206
1207 /*
1208 * __ieee80211_parse_tx_radiotap has now removed
1209 * the radiotap header that was present and pre-filled
1210 * 'tx' with tx control information.
1211 */
1212 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1213 }
1214 1111
1215 /* 1112 /*
1216 * If this flag is set to true anywhere, and we get here, 1113 * If this flag is set to true anywhere, and we get here,
@@ -1232,7 +1129,9 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1232 tx->sta = sta_info_get(sdata, hdr->addr1); 1129 tx->sta = sta_info_get(sdata, hdr->addr1);
1233 1130
1234 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1131 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1235 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1132 !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
1133 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
1134 !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
1236 struct tid_ampdu_tx *tid_tx; 1135 struct tid_ampdu_tx *tid_tx;
1237 1136
1238 qc = ieee80211_get_qos_ctl(hdr); 1137 qc = ieee80211_get_qos_ctl(hdr);
@@ -1257,29 +1156,25 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1257 tx->flags |= IEEE80211_TX_UNICAST; 1156 tx->flags |= IEEE80211_TX_UNICAST;
1258 if (unlikely(local->wifi_wme_noack_test)) 1157 if (unlikely(local->wifi_wme_noack_test))
1259 info->flags |= IEEE80211_TX_CTL_NO_ACK; 1158 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1260 else 1159 /*
1261 info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 1160 * Flags start out as 0, so there is no need to clear
1161 * IEEE80211_TX_CTL_NO_ACK here; clearing it would also
1162 * drop the flag deliberately set for injected frames.
1163 */
1262 } 1164 }
1263 1165
1264 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 1166 if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
1265 if ((tx->flags & IEEE80211_TX_UNICAST) && 1167 if (!(tx->flags & IEEE80211_TX_UNICAST) ||
1266 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold && 1168 skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
1267 !(info->flags & IEEE80211_TX_CTL_AMPDU)) 1169 info->flags & IEEE80211_TX_CTL_AMPDU)
1268 tx->flags |= IEEE80211_TX_FRAGMENTED; 1170 info->flags |= IEEE80211_TX_CTL_DONTFRAG;
1269 else
1270 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1271 } 1171 }
1272 1172
1273 if (!tx->sta) 1173 if (!tx->sta)
1274 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1174 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1275 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 1175 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1276 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1176 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1277 1177
1278 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1279 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1280 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1281 tx->ethertype = (pos[0] << 8) | pos[1];
1282 }
1283 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; 1178 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1284 1179
1285 return TX_CONTINUE; 1180 return TX_CONTINUE;
@@ -1490,11 +1385,6 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1490 tail_need = max_t(int, tail_need, 0); 1385 tail_need = max_t(int, tail_need, 0);
1491 } 1386 }
1492 1387
1493 if (head_need || tail_need) {
1494 /* Sorry. Can't account for this any more */
1495 skb_orphan(skb);
1496 }
1497
1498 if (skb_cloned(skb)) 1388 if (skb_cloned(skb))
1499 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1389 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1500 else if (head_need || tail_need) 1390 else if (head_need || tail_need)
@@ -1508,67 +1398,19 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1508 return -ENOMEM; 1398 return -ENOMEM;
1509 } 1399 }
1510 1400
1511 /* update truesize too */
1512 skb->truesize += head_need + tail_need;
1513
1514 return 0; 1401 return 0;
1515} 1402}
1516 1403
1517static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1404void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1518 struct sk_buff *skb)
1519{ 1405{
1520 struct ieee80211_local *local = sdata->local; 1406 struct ieee80211_local *local = sdata->local;
1521 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1407 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1522 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1408 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1523 struct ieee80211_sub_if_data *tmp_sdata;
1524 int headroom; 1409 int headroom;
1525 bool may_encrypt; 1410 bool may_encrypt;
1526 1411
1527 rcu_read_lock(); 1412 rcu_read_lock();
1528 1413
1529 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1530 int hdrlen;
1531 u16 len_rthdr;
1532
1533 info->flags |= IEEE80211_TX_CTL_INJECTED |
1534 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1535
1536 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1537 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1538 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1539
1540 /* check the header is complete in the frame */
1541 if (likely(skb->len >= len_rthdr + hdrlen)) {
1542 /*
1543 * We process outgoing injected frames that have a
1544 * local address we handle as though they are our
1545 * own frames.
1546 * This code here isn't entirely correct, the local
1547 * MAC address is not necessarily enough to find
1548 * the interface to use; for that proper VLAN/WDS
1549 * support we will need a different mechanism.
1550 */
1551
1552 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1553 list) {
1554 if (!ieee80211_sdata_running(tmp_sdata))
1555 continue;
1556 if (tmp_sdata->vif.type ==
1557 NL80211_IFTYPE_MONITOR ||
1558 tmp_sdata->vif.type ==
1559 NL80211_IFTYPE_AP_VLAN ||
1560 tmp_sdata->vif.type ==
1561 NL80211_IFTYPE_WDS)
1562 continue;
1563 if (compare_ether_addr(tmp_sdata->vif.addr,
1564 hdr->addr2) == 0) {
1565 sdata = tmp_sdata;
1566 break;
1567 }
1568 }
1569 }
1570 }
1571
1572 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); 1414 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1573 1415
1574 headroom = local->tx_headroom; 1416 headroom = local->tx_headroom;
@@ -1595,11 +1437,94 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1595 return; 1437 return;
1596 } 1438 }
1597 1439
1598 ieee80211_set_qos_hdr(local, skb); 1440 ieee80211_set_qos_hdr(sdata, skb);
1599 ieee80211_tx(sdata, skb, false); 1441 ieee80211_tx(sdata, skb, false);
1600 rcu_read_unlock(); 1442 rcu_read_unlock();
1601} 1443}
1602 1444
1445static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
1446{
1447 struct ieee80211_radiotap_iterator iterator;
1448 struct ieee80211_radiotap_header *rthdr =
1449 (struct ieee80211_radiotap_header *) skb->data;
1450 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1451 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1452 NULL);
1453 u16 txflags;
1454
1455 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
1456 IEEE80211_TX_CTL_DONTFRAG;
1457
1458 /*
1459 * for every radiotap entry that is present
1460 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1461 * entries present, or -EINVAL on error)
1462 */
1463
1464 while (!ret) {
1465 ret = ieee80211_radiotap_iterator_next(&iterator);
1466
1467 if (ret)
1468 continue;
1469
1470 /* see if this argument is something we can use */
1471 switch (iterator.this_arg_index) {
1472 /*
1473 * You must take care when dereferencing iterator.this_arg
1474 * for multibyte types... the pointer is not aligned. Use
1475 * get_unaligned((type *)iterator.this_arg) to dereference
1476 * iterator.this_arg for type "type" safely on all arches.
1477 */
1478 case IEEE80211_RADIOTAP_FLAGS:
1479 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1480 /*
1481 * this indicates that the skb we have been
1482 * handed has the 32-bit FCS CRC at the end...
1483 * we should react to that by snipping it off
1484 * because it will be recomputed and added
1485 * on transmission
1486 */
1487 if (skb->len < (iterator._max_length + FCS_LEN))
1488 return false;
1489
1490 skb_trim(skb, skb->len - FCS_LEN);
1491 }
1492 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1493 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1494 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
1495 info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
1496 break;
1497
1498 case IEEE80211_RADIOTAP_TX_FLAGS:
1499 txflags = get_unaligned_le16(iterator.this_arg);
1500 if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
1501 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1502 break;
1503
1504 /*
1505 * Please update the file
1506 * Documentation/networking/mac80211-injection.txt
1507 * when parsing new fields here.
1508 */
1509
1510 default:
1511 break;
1512 }
1513 }
1514
1515 if (ret != -ENOENT) /* i.e., if we didn't simply run out of fields */
1516 return false;
1517
1518 /*
1519 * remove the radiotap header
1520 * iterator->_max_length was sanity-checked against
1521 * skb->len by iterator init
1522 */
1523 skb_pull(skb, iterator._max_length);
1524
1525 return true;
1526}
1527
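The comment inside the parser warns that iterator.this_arg may be unaligned, which is why the TX_FLAGS case uses get_unaligned_le16(). A userspace sketch of the same idiom, with le16toh() from <endian.h> standing in for the kernel's le16_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* read a little-endian u16 from a possibly unaligned pointer */
    static uint16_t get_unaligned_le16_example(const uint8_t *p)
    {
            uint16_t v;

            memcpy(&v, p, sizeof(v));  /* safe regardless of alignment    */
            return le16toh(v);         /* radiotap args are little-endian */
    }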
1603netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, 1528netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1604 struct net_device *dev) 1529 struct net_device *dev)
1605{ 1530{
@@ -1608,7 +1533,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1608 struct ieee80211_radiotap_header *prthdr = 1533 struct ieee80211_radiotap_header *prthdr =
1609 (struct ieee80211_radiotap_header *)skb->data; 1534 (struct ieee80211_radiotap_header *)skb->data;
1610 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1535 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1536 struct ieee80211_hdr *hdr;
1537 struct ieee80211_sub_if_data *tmp_sdata, *sdata;
1611 u16 len_rthdr; 1538 u16 len_rthdr;
1539 int hdrlen;
1612 1540
1613 /* 1541 /*
1614 * Frame injection is not allowed if beaconing is not allowed 1542 * Frame injection is not allowed if beaconing is not allowed
@@ -1659,12 +1587,65 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1659 skb_set_network_header(skb, len_rthdr); 1587 skb_set_network_header(skb, len_rthdr);
1660 skb_set_transport_header(skb, len_rthdr); 1588 skb_set_transport_header(skb, len_rthdr);
1661 1589
1590 if (skb->len < len_rthdr + 2)
1591 goto fail;
1592
1593 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1594 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1595
1596 if (skb->len < len_rthdr + hdrlen)
1597 goto fail;
1598
1599 /*
1600 * Initialize skb->protocol if the injected frame is a data frame
1601 * carrying a rfc1042 header
1602 */
1603 if (ieee80211_is_data(hdr->frame_control) &&
1604 skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
1605 u8 *payload = (u8 *)hdr + hdrlen;
1606
1607 if (compare_ether_addr(payload, rfc1042_header) == 0)
1608 skb->protocol = cpu_to_be16((payload[6] << 8) |
1609 payload[7]);
1610 }
1611
1662 memset(info, 0, sizeof(*info)); 1612 memset(info, 0, sizeof(*info));
1663 1613
1664 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 1614 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
1615 IEEE80211_TX_CTL_INJECTED;
1616
1617 /* process and remove the injection radiotap header */
1618 if (!ieee80211_parse_tx_radiotap(skb))
1619 goto fail;
1620
1621 rcu_read_lock();
1622
1623 /*
1624 * We process outgoing injected frames that have a local address
1625 * we handle as though they are non-injected frames.
1626 * This code here isn't entirely correct, the local MAC address
1627 * isn't always enough to find the interface to use; for proper
1628 * VLAN/WDS support we will need a different mechanism (which
1629 * likely isn't going to be monitor interfaces).
1630 */
1631 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1632
1633 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
1634 if (!ieee80211_sdata_running(tmp_sdata))
1635 continue;
1636 if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1637 tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1638 tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
1639 continue;
1640 if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
1641 sdata = tmp_sdata;
1642 break;
1643 }
1644 }
1645
1646 ieee80211_xmit(sdata, skb);
1647 rcu_read_unlock();
1665 1648
1666 /* pass the radiotap header up to xmit */
1667 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1668 return NETDEV_TX_OK; 1649 return NETDEV_TX_OK;
1669 1650
1670fail: 1651fail:
@@ -1703,8 +1684,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1703 int encaps_len, skip_header_bytes; 1684 int encaps_len, skip_header_bytes;
1704 int nh_pos, h_pos; 1685 int nh_pos, h_pos;
1705 struct sta_info *sta = NULL; 1686 struct sta_info *sta = NULL;
1706 u32 sta_flags = 0; 1687 bool wme_sta = false, authorized = false, tdls_auth = false;
1707 struct sk_buff *tmp_skb; 1688 struct sk_buff *tmp_skb;
1689 bool tdls_direct = false;
1708 1690
1709 if (unlikely(skb->len < ETH_HLEN)) { 1691 if (unlikely(skb->len < ETH_HLEN)) {
1710 ret = NETDEV_TX_OK; 1692 ret = NETDEV_TX_OK;
@@ -1728,7 +1710,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1728 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1710 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1729 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1711 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1730 hdrlen = 30; 1712 hdrlen = 30;
1731 sta_flags = get_sta_flags(sta); 1713 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1714 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1732 } 1715 }
1733 rcu_read_unlock(); 1716 rcu_read_unlock();
1734 if (sta) 1717 if (sta)
@@ -1816,11 +1799,50 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1816 break; 1799 break;
1817#endif 1800#endif
1818 case NL80211_IFTYPE_STATION: 1801 case NL80211_IFTYPE_STATION:
1819 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); 1802 if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1820 if (sdata->u.mgd.use_4addr && 1803 bool tdls_peer = false;
1821 cpu_to_be16(ethertype) != sdata->control_port_protocol) { 1804
1822 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1805 rcu_read_lock();
1806 sta = sta_info_get(sdata, skb->data);
1807 if (sta) {
1808 authorized = test_sta_flag(sta,
1809 WLAN_STA_AUTHORIZED);
1810 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1811 tdls_peer = test_sta_flag(sta,
1812 WLAN_STA_TDLS_PEER);
1813 tdls_auth = test_sta_flag(sta,
1814 WLAN_STA_TDLS_PEER_AUTH);
1815 }
1816 rcu_read_unlock();
1817
1818 /*
1819 * If the TDLS link is enabled, send everything
1820 * directly. Otherwise, allow TDLS setup frames
1821 * to be transmitted indirectly.
1822 */
1823 tdls_direct = tdls_peer && (tdls_auth ||
1824 !(ethertype == ETH_P_TDLS && skb->len > 14 &&
1825 skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
1826 }
1827
1828 if (tdls_direct) {
1829 /* link during setup - throw out frames to peer */
1830 if (!tdls_auth) {
1831 ret = NETDEV_TX_OK;
1832 goto fail;
1833 }
1834
1835 /* DA SA BSSID */
1836 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1837 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1838 memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
1839 hdrlen = 24;
1840 } else if (sdata->u.mgd.use_4addr &&
1841 cpu_to_be16(ethertype) != sdata->control_port_protocol) {
1842 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
1843 IEEE80211_FCTL_TODS);
1823 /* RA TA DA SA */ 1844 /* RA TA DA SA */
1845 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1824 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); 1846 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1825 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1847 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1826 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1848 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
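The station-mode branch above chooses between the direct TDLS path (DA SA BSSID, hdrlen 24) and the usual through-the-AP path. Reduced to a predicate over hypothetical booleans, the routing rule it implements is:

    #include <stdbool.h>

    /* direct if the peer link is authorised, or if the frame is not a
     * TDLS setup frame (setup frames may still travel via the AP) */
    static bool tdls_direct_example(bool tdls_peer, bool tdls_auth,
                                    bool is_setup_frame)
    {
            return tdls_peer && (tdls_auth || !is_setup_frame);
    }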
@@ -1828,6 +1850,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1828 } else { 1850 } else {
1829 fc |= cpu_to_le16(IEEE80211_FCTL_TODS); 1851 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1830 /* BSSID SA DA */ 1852 /* BSSID SA DA */
1853 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1831 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1854 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1832 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1855 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1833 hdrlen = 24; 1856 hdrlen = 24;
@@ -1853,13 +1876,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1853 if (!is_multicast_ether_addr(hdr.addr1)) { 1876 if (!is_multicast_ether_addr(hdr.addr1)) {
1854 rcu_read_lock(); 1877 rcu_read_lock();
1855 sta = sta_info_get(sdata, hdr.addr1); 1878 sta = sta_info_get(sdata, hdr.addr1);
1856 if (sta) 1879 if (sta) {
1857 sta_flags = get_sta_flags(sta); 1880 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1881 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1882 }
1858 rcu_read_unlock(); 1883 rcu_read_unlock();
1859 } 1884 }
1860 1885
1886 /* For mesh, the use of the QoS header is mandatory */
1887 if (ieee80211_vif_is_mesh(&sdata->vif))
1888 wme_sta = true;
1889
1861 /* receiver and we are QoS enabled, use a QoS type frame */ 1890 /* receiver and we are QoS enabled, use a QoS type frame */
1862 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { 1891 if (wme_sta && local->hw.queues >= 4) {
1863 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1892 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1864 hdrlen += 2; 1893 hdrlen += 2;
1865 } 1894 }
@@ -1868,12 +1897,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1868 * Drop unicast frames to unauthorised stations unless they are 1897 * Drop unicast frames to unauthorised stations unless they are
1869 * EAPOL frames from the local station. 1898 * EAPOL frames from the local station.
1870 */ 1899 */
1871 if (!ieee80211_vif_is_mesh(&sdata->vif) && 1900 if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
1872 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1901 !is_multicast_ether_addr(hdr.addr1) && !authorized &&
1873 !(sta_flags & WLAN_STA_AUTHORIZED) && 1902 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1874 !(cpu_to_be16(ethertype) == sdata->control_port_protocol && 1903 compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1875 compare_ether_addr(sdata->vif.addr,
1876 skb->data + ETH_ALEN) == 0))) {
1877#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1904#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1878 if (net_ratelimit()) 1905 if (net_ratelimit())
1879 printk(KERN_DEBUG "%s: dropped frame to %pM" 1906 printk(KERN_DEBUG "%s: dropped frame to %pM"
@@ -2275,13 +2302,23 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2275 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 2302 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2276 mgmt->u.beacon.beacon_int = 2303 mgmt->u.beacon.beacon_int =
2277 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2304 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2278 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2305 mgmt->u.beacon.capab_info |= cpu_to_le16(
2306 sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
2279 2307
2280 pos = skb_put(skb, 2); 2308 pos = skb_put(skb, 2);
2281 *pos++ = WLAN_EID_SSID; 2309 *pos++ = WLAN_EID_SSID;
2282 *pos++ = 0x0; 2310 *pos++ = 0x0;
2283 2311
2284 mesh_mgmt_ies_add(skb, sdata); 2312 if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
2313 mesh_add_ds_params_ie(skb, sdata) ||
2314 ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
2315 mesh_add_rsn_ie(skb, sdata) ||
2316 mesh_add_meshid_ie(skb, sdata) ||
2317 mesh_add_meshconf_ie(skb, sdata) ||
2318 mesh_add_vendor_ies(skb, sdata)) {
2319 pr_err("o11s: couldn't add ies!\n");
2320 goto out;
2321 }
2285 } else { 2322 } else {
2286 WARN_ON(1); 2323 WARN_ON(1);
2287 goto out; 2324 goto out;
@@ -2335,11 +2372,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2335 local = sdata->local; 2372 local = sdata->local;
2336 2373
2337 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 2374 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2338 if (!skb) { 2375 if (!skb)
2339 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2340 "pspoll template\n", sdata->name);
2341 return NULL; 2376 return NULL;
2342 } 2377
2343 skb_reserve(skb, local->hw.extra_tx_headroom); 2378 skb_reserve(skb, local->hw.extra_tx_headroom);
2344 2379
2345 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); 2380 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
@@ -2375,11 +2410,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2375 local = sdata->local; 2410 local = sdata->local;
2376 2411
2377 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); 2412 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2378 if (!skb) { 2413 if (!skb)
2379 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2380 "template\n", sdata->name);
2381 return NULL; 2414 return NULL;
2382 } 2415
2383 skb_reserve(skb, local->hw.extra_tx_headroom); 2416 skb_reserve(skb, local->hw.extra_tx_headroom);
2384 2417
2385 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, 2418 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
@@ -2414,11 +2447,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2414 2447
2415 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + 2448 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2416 ie_ssid_len + ie_len); 2449 ie_ssid_len + ie_len);
2417 if (!skb) { 2450 if (!skb)
2418 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2419 "request template\n", sdata->name);
2420 return NULL; 2451 return NULL;
2421 }
2422 2452
2423 skb_reserve(skb, local->hw.extra_tx_headroom); 2453 skb_reserve(skb, local->hw.extra_tx_headroom);
2424 2454
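With the validation moved into ieee80211_monitor_start_xmit(), an injected packet must carry a well-formed radiotap header immediately followed by a complete 802.11 header. A hedged userspace sketch of the smallest such buffer -- an empty radiotap header plus a null-data frame; all field values are illustrative and buf is assumed large enough:

    #include <stdint.h>
    #include <string.h>

    static size_t build_injected_frame_example(uint8_t *buf)
    {
            static const uint8_t rthdr[8] = {
                    0x00, 0x00,             /* version 0, pad           */
                    0x08, 0x00,             /* header length 8 (LE)     */
                    0x00, 0x00, 0x00, 0x00, /* no present fields        */
            };
            static const uint8_t dot11[24] = {
                    0x48, 0x00,             /* frame control: null data */
                    /* duration, addr1-3, seq ctl left zeroed here      */
            };

            memcpy(buf, rthdr, sizeof(rthdr));
            memcpy(buf + sizeof(rthdr), dot11, sizeof(dot11));
            return sizeof(rthdr) + sizeof(dot11);
    }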
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ddeb1b998383..51e256c5fb78 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -13,13 +13,13 @@
13 13
14#include <net/mac80211.h> 14#include <net/mac80211.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/export.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 21#include <linux/if_arp.h>
21#include <linux/bitmap.h> 22#include <linux/bitmap.h>
22#include <linux/crc32.h>
23#include <net/net_namespace.h> 23#include <net/net_namespace.h>
24#include <net/cfg80211.h> 24#include <net/cfg80211.h>
25#include <net/rtnetlink.h> 25#include <net/rtnetlink.h>
@@ -368,14 +368,14 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
368 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 368 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
369} 369}
370 370
371int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 371void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
372 struct sk_buff_head *skbs, 372 struct sk_buff_head *skbs,
373 void (*fn)(void *data), void *data) 373 void (*fn)(void *data), void *data)
374{ 374{
375 struct ieee80211_hw *hw = &local->hw; 375 struct ieee80211_hw *hw = &local->hw;
376 struct sk_buff *skb; 376 struct sk_buff *skb;
377 unsigned long flags; 377 unsigned long flags;
378 int queue, ret = 0, i; 378 int queue, i;
379 379
380 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 380 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
381 for (i = 0; i < hw->queues; i++) 381 for (i = 0; i < hw->queues; i++)
@@ -390,7 +390,6 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
390 continue; 390 continue;
391 } 391 }
392 392
393 ret++;
394 queue = skb_get_queue_mapping(skb); 393 queue = skb_get_queue_mapping(skb);
395 __skb_queue_tail(&local->pending[queue], skb); 394 __skb_queue_tail(&local->pending[queue], skb);
396 } 395 }
@@ -402,14 +401,12 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
402 __ieee80211_wake_queue(hw, i, 401 __ieee80211_wake_queue(hw, i,
403 IEEE80211_QUEUE_STOP_REASON_SKB_ADD); 402 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
404 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 403 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
405
406 return ret;
407} 404}
408 405
409int ieee80211_add_pending_skbs(struct ieee80211_local *local, 406void ieee80211_add_pending_skbs(struct ieee80211_local *local,
410 struct sk_buff_head *skbs) 407 struct sk_buff_head *skbs)
411{ 408{
412 return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); 409 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
413} 410}
414 411
415void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 412void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
@@ -573,172 +570,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
573 ieee802_11_parse_elems_crc(start, len, elems, 0, 0); 570 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
574} 571}
575 572
576u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
577 struct ieee802_11_elems *elems,
578 u64 filter, u32 crc)
579{
580 size_t left = len;
581 u8 *pos = start;
582 bool calc_crc = filter != 0;
583
584 memset(elems, 0, sizeof(*elems));
585 elems->ie_start = start;
586 elems->total_len = len;
587
588 while (left >= 2) {
589 u8 id, elen;
590
591 id = *pos++;
592 elen = *pos++;
593 left -= 2;
594
595 if (elen > left)
596 break;
597
598 if (calc_crc && id < 64 && (filter & (1ULL << id)))
599 crc = crc32_be(crc, pos - 2, elen + 2);
600
601 switch (id) {
602 case WLAN_EID_SSID:
603 elems->ssid = pos;
604 elems->ssid_len = elen;
605 break;
606 case WLAN_EID_SUPP_RATES:
607 elems->supp_rates = pos;
608 elems->supp_rates_len = elen;
609 break;
610 case WLAN_EID_FH_PARAMS:
611 elems->fh_params = pos;
612 elems->fh_params_len = elen;
613 break;
614 case WLAN_EID_DS_PARAMS:
615 elems->ds_params = pos;
616 elems->ds_params_len = elen;
617 break;
618 case WLAN_EID_CF_PARAMS:
619 elems->cf_params = pos;
620 elems->cf_params_len = elen;
621 break;
622 case WLAN_EID_TIM:
623 if (elen >= sizeof(struct ieee80211_tim_ie)) {
624 elems->tim = (void *)pos;
625 elems->tim_len = elen;
626 }
627 break;
628 case WLAN_EID_IBSS_PARAMS:
629 elems->ibss_params = pos;
630 elems->ibss_params_len = elen;
631 break;
632 case WLAN_EID_CHALLENGE:
633 elems->challenge = pos;
634 elems->challenge_len = elen;
635 break;
636 case WLAN_EID_VENDOR_SPECIFIC:
637 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
638 pos[2] == 0xf2) {
639 /* Microsoft OUI (00:50:F2) */
640
641 if (calc_crc)
642 crc = crc32_be(crc, pos - 2, elen + 2);
643
644 if (pos[3] == 1) {
645 /* OUI Type 1 - WPA IE */
646 elems->wpa = pos;
647 elems->wpa_len = elen;
648 } else if (elen >= 5 && pos[3] == 2) {
649 /* OUI Type 2 - WMM IE */
650 if (pos[4] == 0) {
651 elems->wmm_info = pos;
652 elems->wmm_info_len = elen;
653 } else if (pos[4] == 1) {
654 elems->wmm_param = pos;
655 elems->wmm_param_len = elen;
656 }
657 }
658 }
659 break;
660 case WLAN_EID_RSN:
661 elems->rsn = pos;
662 elems->rsn_len = elen;
663 break;
664 case WLAN_EID_ERP_INFO:
665 elems->erp_info = pos;
666 elems->erp_info_len = elen;
667 break;
668 case WLAN_EID_EXT_SUPP_RATES:
669 elems->ext_supp_rates = pos;
670 elems->ext_supp_rates_len = elen;
671 break;
672 case WLAN_EID_HT_CAPABILITY:
673 if (elen >= sizeof(struct ieee80211_ht_cap))
674 elems->ht_cap_elem = (void *)pos;
675 break;
676 case WLAN_EID_HT_INFORMATION:
677 if (elen >= sizeof(struct ieee80211_ht_info))
678 elems->ht_info_elem = (void *)pos;
679 break;
680 case WLAN_EID_MESH_ID:
681 elems->mesh_id = pos;
682 elems->mesh_id_len = elen;
683 break;
684 case WLAN_EID_MESH_CONFIG:
685 if (elen >= sizeof(struct ieee80211_meshconf_ie))
686 elems->mesh_config = (void *)pos;
687 break;
688 case WLAN_EID_PEER_LINK:
689 elems->peer_link = pos;
690 elems->peer_link_len = elen;
691 break;
692 case WLAN_EID_PREQ:
693 elems->preq = pos;
694 elems->preq_len = elen;
695 break;
696 case WLAN_EID_PREP:
697 elems->prep = pos;
698 elems->prep_len = elen;
699 break;
700 case WLAN_EID_PERR:
701 elems->perr = pos;
702 elems->perr_len = elen;
703 break;
704 case WLAN_EID_RANN:
705 if (elen >= sizeof(struct ieee80211_rann_ie))
706 elems->rann = (void *)pos;
707 break;
708 case WLAN_EID_CHANNEL_SWITCH:
709 elems->ch_switch_elem = pos;
710 elems->ch_switch_elem_len = elen;
711 break;
712 case WLAN_EID_QUIET:
713 if (!elems->quiet_elem) {
714 elems->quiet_elem = pos;
715 elems->quiet_elem_len = elen;
716 }
717 elems->num_of_quiet_elem++;
718 break;
719 case WLAN_EID_COUNTRY:
720 elems->country_elem = pos;
721 elems->country_elem_len = elen;
722 break;
723 case WLAN_EID_PWR_CONSTRAINT:
724 elems->pwr_constr_elem = pos;
725 elems->pwr_constr_elem_len = elen;
726 break;
727 case WLAN_EID_TIMEOUT_INTERVAL:
728 elems->timeout_int = pos;
729 elems->timeout_int_len = elen;
730 break;
731 default:
732 break;
733 }
734
735 left -= elen;
736 pos += elen;
737 }
738
739 return crc;
740}
741
742void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 573void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
743{ 574{
744 struct ieee80211_local *local = sdata->local; 575 struct ieee80211_local *local = sdata->local;
@@ -799,8 +630,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
799 630
800 qparam.uapsd = false; 631 qparam.uapsd = false;
801 632
802 local->tx_conf[queue] = qparam; 633 sdata->tx_conf[queue] = qparam;
803 drv_conf_tx(local, queue, &qparam); 634 drv_conf_tx(local, sdata, queue, &qparam);
804 } 635 }
805 636
806 /* after reinitialize QoS TX queues setting to default, 637 /* after reinitialize QoS TX queues setting to default,
@@ -874,11 +705,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
874 705
875 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 706 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
876 sizeof(*mgmt) + 6 + extra_len); 707 sizeof(*mgmt) + 6 + extra_len);
877 if (!skb) { 708 if (!skb)
878 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
879 "frame\n", sdata->name);
880 return; 709 return;
881 } 710
882 skb_reserve(skb, local->hw.extra_tx_headroom); 711 skb_reserve(skb, local->hw.extra_tx_headroom);
883 712
884 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 713 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
@@ -1031,11 +860,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1031 860
1032 /* FIXME: come up with a proper value */ 861 /* FIXME: come up with a proper value */
1033 buf = kmalloc(200 + ie_len, GFP_KERNEL); 862 buf = kmalloc(200 + ie_len, GFP_KERNEL);
1034 if (!buf) { 863 if (!buf)
1035 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1036 "buffer\n", sdata->name);
1037 return NULL; 864 return NULL;
1038 }
1039 865
1040 /* 866 /*
1041 * Do not send DS Channel parameter for directed probe requests 867 * Do not send DS Channel parameter for directed probe requests
@@ -1071,14 +897,18 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1071void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 897void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1072 const u8 *ssid, size_t ssid_len, 898 const u8 *ssid, size_t ssid_len,
1073 const u8 *ie, size_t ie_len, 899 const u8 *ie, size_t ie_len,
1074 u32 ratemask, bool directed) 900 u32 ratemask, bool directed, bool no_cck)
1075{ 901{
1076 struct sk_buff *skb; 902 struct sk_buff *skb;
1077 903
1078 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len, 904 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
1079 ie, ie_len, directed); 905 ie, ie_len, directed);
1080 if (skb) 906 if (skb) {
907 if (no_cck)
908 IEEE80211_SKB_CB(skb)->flags |=
909 IEEE80211_TX_CTL_NO_CCK_RATE;
1081 ieee80211_tx_skb(sdata, skb); 910 ieee80211_tx_skb(sdata, skb);
911 }
1082} 912}
1083 913
1084u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 914u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1205,14 +1035,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1205 struct ieee80211_sub_if_data, 1035 struct ieee80211_sub_if_data,
1206 u.ap); 1036 u.ap);
1207 1037
1038 memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
1208 WARN_ON(drv_sta_add(local, sdata, &sta->sta)); 1039 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1209 } 1040 }
1210 } 1041 }
1211 mutex_unlock(&local->sta_mtx); 1042 mutex_unlock(&local->sta_mtx);
1212 1043
1213 /* reconfigure tx conf */ 1044 /* reconfigure tx conf */
1214 for (i = 0; i < hw->queues; i++) 1045 list_for_each_entry(sdata, &local->interfaces, list) {
1215 drv_conf_tx(local, i, &local->tx_conf[i]); 1046 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1047 sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1048 !ieee80211_sdata_running(sdata))
1049 continue;
1050
1051 for (i = 0; i < hw->queues; i++)
1052 drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
1053 }
1216 1054
1217 /* reconfigure hardware */ 1055 /* reconfigure hardware */
1218 ieee80211_hw_config(local, ~0); 1056 ieee80211_hw_config(local, ~0);
@@ -1248,6 +1086,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1248 changed |= BSS_CHANGED_IBSS; 1086 changed |= BSS_CHANGED_IBSS;
1249 /* fall through */ 1087 /* fall through */
1250 case NL80211_IFTYPE_AP: 1088 case NL80211_IFTYPE_AP:
1089 changed |= BSS_CHANGED_SSID;
1090 /* fall through */
1251 case NL80211_IFTYPE_MESH_POINT: 1091 case NL80211_IFTYPE_MESH_POINT:
1252 changed |= BSS_CHANGED_BEACON | 1092 changed |= BSS_CHANGED_BEACON |
1253 BSS_CHANGED_BEACON_ENABLED; 1093 BSS_CHANGED_BEACON_ENABLED;
@@ -1283,7 +1123,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1283 1123
1284 list_for_each_entry(sta, &local->sta_list, list) { 1124 list_for_each_entry(sta, &local->sta_list, list) {
1285 ieee80211_sta_tear_down_BA_sessions(sta, true); 1125 ieee80211_sta_tear_down_BA_sessions(sta, true);
1286 clear_sta_flags(sta, WLAN_STA_BLOCK_BA); 1126 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
1287 } 1127 }
1288 1128
1289 mutex_unlock(&local->sta_mtx); 1129 mutex_unlock(&local->sta_mtx);
@@ -1522,3 +1362,60 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
1522 _ieee80211_enable_rssi_reports(sdata, 0, 0); 1362 _ieee80211_enable_rssi_reports(sdata, 0, 0);
1523} 1363}
1524EXPORT_SYMBOL(ieee80211_disable_rssi_reports); 1364EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
1365
1366int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1367{
1368 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1369 struct ieee80211_local *local = sdata->local;
1370 struct ieee80211_supported_band *sband;
1371 int rate;
1372 u8 i, rates, *pos;
1373
1374 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1375 rates = sband->n_bitrates;
1376 if (rates > 8)
1377 rates = 8;
1378
1379 if (skb_tailroom(skb) < rates + 2)
1380 return -ENOMEM;
1381
1382 pos = skb_put(skb, rates + 2);
1383 *pos++ = WLAN_EID_SUPP_RATES;
1384 *pos++ = rates;
1385 for (i = 0; i < rates; i++) {
1386 rate = sband->bitrates[i].bitrate;
1387 *pos++ = (u8) (rate / 5);
1388 }
1389
1390 return 0;
1391}
1392
1393int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1394{
1395 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1396 struct ieee80211_local *local = sdata->local;
1397 struct ieee80211_supported_band *sband;
1398 int rate;
1399 u8 i, exrates, *pos;
1400
1401 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1402 exrates = sband->n_bitrates;
1403 if (exrates > 8)
1404 exrates -= 8;
1405 else
1406 exrates = 0;
1407
1408 if (skb_tailroom(skb) < exrates + 2)
1409 return -ENOMEM;
1410
1411 if (exrates) {
1412 pos = skb_put(skb, exrates + 2);
1413 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1414 *pos++ = exrates;
1415 for (i = 8; i < sband->n_bitrates; i++) {
1416 rate = sband->bitrates[i].bitrate;
1417 *pos++ = (u8) (rate / 5);
1418 }
1419 }
1420 return 0;
1421}
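The two helpers added above split the rate set the way 802.11 requires: at most eight rates in the Supported Rates IE, the remainder in Extended Supported Rates. The "rate / 5" conversion exists because cfg80211 bitrates are in 100 kbit/s units while the IE encodes 500 kbit/s units; a one-line sketch:

    #include <stdint.h>

    /* 100 kbit/s units -> 500 kbit/s IE units, e.g. 540 -> 108 (54 Mb/s) */
    static uint8_t rate_to_ie_unit_example(int bitrate_100kbps)
    {
            return (uint8_t)(bitrate_100kbps / 5);
    }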
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 7a49532f14cb..fd52e695c071 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -72,7 +72,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
72 case NL80211_IFTYPE_AP_VLAN: 72 case NL80211_IFTYPE_AP_VLAN:
73 sta = rcu_dereference(sdata->u.vlan.sta); 73 sta = rcu_dereference(sdata->u.vlan.sta);
74 if (sta) { 74 if (sta) {
75 qos = get_sta_flags(sta) & WLAN_STA_WME; 75 qos = test_sta_flag(sta, WLAN_STA_WME);
76 break; 76 break;
77 } 77 }
78 case NL80211_IFTYPE_AP: 78 case NL80211_IFTYPE_AP:
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
83 break; 83 break;
84#ifdef CONFIG_MAC80211_MESH 84#ifdef CONFIG_MAC80211_MESH
85 case NL80211_IFTYPE_MESH_POINT: 85 case NL80211_IFTYPE_MESH_POINT:
86 /* 86 ra = skb->data;
87 * XXX: This is clearly broken ... but already was before,
88 * because ieee80211_fill_mesh_addresses() would clear A1
89 * except for multicast addresses.
90 */
91 break; 87 break;
92#endif 88#endif
93 case NL80211_IFTYPE_STATION: 89 case NL80211_IFTYPE_STATION:
@@ -103,7 +99,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
103 if (!sta && ra && !is_multicast_ether_addr(ra)) { 99 if (!sta && ra && !is_multicast_ether_addr(ra)) {
104 sta = sta_info_get(sdata, ra); 100 sta = sta_info_get(sdata, ra);
105 if (sta) 101 if (sta)
106 qos = get_sta_flags(sta) & WLAN_STA_WME; 102 qos = test_sta_flag(sta, WLAN_STA_WME);
107 } 103 }
108 rcu_read_unlock(); 104 rcu_read_unlock();
109 105
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
139 return ieee802_1d_to_ac[skb->priority]; 135 return ieee802_1d_to_ac[skb->priority];
140} 136}
141 137
142void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) 138void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
139 struct sk_buff *skb)
143{ 140{
144 struct ieee80211_hdr *hdr = (void *)skb->data; 141 struct ieee80211_hdr *hdr = (void *)skb->data;
145 142
@@ -150,10 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
150 147
151 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 148 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
152 149
153 if (unlikely(local->wifi_wme_noack_test)) 150 if (unlikely(sdata->local->wifi_wme_noack_test))
154 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; 151 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
155 /* qos header is 2 bytes, second reserved */ 152 /* qos header is 2 bytes */
156 *p++ = ack_policy | tid; 153 *p++ = ack_policy | tid;
157 *p = 0; 154 *p = ieee80211_vif_is_mesh(&sdata->vif) ?
155 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
158 } 156 }
159} 157}
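After this change the second QoS Control byte is no longer always reserved: mesh interfaces set the Mesh Control Present bit. A sketch of the 16-bit field being assembled; the constants mirror the usual 802.11 values but are named as local assumptions:

    #include <stdint.h>

    #define QOS_TID_MASK_EX       0x0007  /* TAG1D/TID bits       */
    #define QOS_ACK_NOACK_EX      0x0020  /* no-ack ack policy    */
    #define QOS_MESH_CTL_PRES_EX  0x0100  /* mesh control present */

    static uint16_t build_qos_ctl_example(uint8_t tid, int noack, int mesh)
    {
            uint16_t qc = tid & QOS_TID_MASK_EX;

            if (noack)
                    qc |= QOS_ACK_NOACK_EX;
            if (mesh)
                    qc |= QOS_MESH_CTL_PRES_EX;
            return qc;
    }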
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index faead6d02026..34e166fbf4d4 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -17,7 +17,8 @@ extern const int ieee802_1d_to_ac[8];
17 17
18u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, 18u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
19 struct sk_buff *skb); 19 struct sk_buff *skb);
20void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); 20void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
21 struct sk_buff *skb);
21u16 ieee80211_downgrade_queue(struct ieee80211_local *local, 22u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
22 struct sk_buff *skb); 23 struct sk_buff *skb);
23 24
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 380b9a7462b6..6c53b6d1002b 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -229,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
229 wk->ie_len + /* extra IEs */ 229 wk->ie_len + /* extra IEs */
230 9, /* WMM */ 230 9, /* WMM */
231 GFP_KERNEL); 231 GFP_KERNEL);
232 if (!skb) { 232 if (!skb)
233 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
234 "frame\n", sdata->name);
235 return; 233 return;
236 } 234
237 skb_reserve(skb, local->hw.extra_tx_headroom); 235 skb_reserve(skb, local->hw.extra_tx_headroom);
238 236
239 capab = WLAN_CAPABILITY_ESS; 237 capab = WLAN_CAPABILITY_ESS;
@@ -460,7 +458,7 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
460 */ 458 */
461 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid, 459 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
462 wk->probe_auth.ssid_len, NULL, 0, 460 wk->probe_auth.ssid_len, NULL, 0,
463 (u32) -1, true); 461 (u32) -1, true, false);
464 462
465 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 463 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
466 run_again(local, wk->timeout); 464 run_again(local, wk->timeout);
@@ -579,7 +577,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
579 /* 577 /*
580 * After this, offchan_tx.frame remains but now is no 578 * After this, offchan_tx.frame remains but now is no
581 * longer a valid pointer -- we still need it as the 579 * longer a valid pointer -- we still need it as the
582 * cookie for canceling this work. 580 * cookie for canceling this work/status matching.
583 */ 581 */
584 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame); 582 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
585 583
@@ -1086,14 +1084,13 @@ static void ieee80211_work_work(struct work_struct *work)
1086 continue; 1084 continue;
1087 if (wk->chan != local->tmp_channel) 1085 if (wk->chan != local->tmp_channel)
1088 continue; 1086 continue;
1089 if (ieee80211_work_ct_coexists(wk->chan_type, 1087 if (!ieee80211_work_ct_coexists(wk->chan_type,
1090 local->tmp_channel_type)) 1088 local->tmp_channel_type))
1091 continue; 1089 continue;
1092 remain_off_channel = true; 1090 remain_off_channel = true;
1093 } 1091 }
1094 1092
1095 if (!remain_off_channel && local->tmp_channel) { 1093 if (!remain_off_channel && local->tmp_channel) {
1096 bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
1097 local->tmp_channel = NULL; 1094 local->tmp_channel = NULL;
1098 /* If tmp_channel wasn't operating channel, then 1095 /* If tmp_channel wasn't operating channel, then
1099 * we need to go back on-channel. 1096 * we need to go back on-channel.
@@ -1103,7 +1100,7 @@ static void ieee80211_work_work(struct work_struct *work)
1103 * we still need to do a hardware config. Currently, 1100 * we still need to do a hardware config. Currently,
1104 * we cannot be here while scanning, however. 1101 * we cannot be here while scanning, however.
1105 */ 1102 */
1106 if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan) 1103 if (!ieee80211_cfg_on_oper_channel(local))
1107 ieee80211_hw_config(local, 0); 1104 ieee80211_hw_config(local, 0);
1108 1105
1109 /* At the least, we need to disable offchannel_ps, 1106 /* At the least, we need to disable offchannel_ps,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7bc8702808fa..f614ce7bb6e3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -53,7 +53,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
53 } 53 }
54 54
55 if (info->control.hw_key && 55 if (info->control.hw_key &&
56 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 56 (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
57 tx->local->ops->set_frag_threshold) &&
57 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { 58 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
58 /* hwaccel - with no need for SW-generated MMIC */ 59 /* hwaccel - with no need for SW-generated MMIC */
59 return TX_CONTINUE; 60 return TX_CONTINUE;
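The reworked condition above lets TKIP skip the software Michael MIC only when hardware holds the key, no stack fragmentation can occur (either DONTFRAG is set or the driver fragments), and the driver did not request a software-generated MMIC. As a boolean sketch over hypothetical inputs:

    #include <stdbool.h>

    static bool skip_sw_mmic_example(bool hw_key, bool dontfrag,
                                     bool drv_frag, bool want_sw_mmic)
    {
            return hw_key && (dontfrag || drv_frag) && !want_sw_mmic;
    }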
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c0ff5d..afca6c78948c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo)
37 err = mutex_lock_interruptible(&afinfo_mutex); 37 err = mutex_lock_interruptible(&afinfo_mutex);
38 if (err < 0) 38 if (err < 0)
39 return err; 39 return err;
40 rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); 40 RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
41 mutex_unlock(&afinfo_mutex); 41 mutex_unlock(&afinfo_mutex);
42 return 0; 42 return 0;
43} 43}
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo);
46void nf_unregister_afinfo(const struct nf_afinfo *afinfo) 46void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
47{ 47{
48 mutex_lock(&afinfo_mutex); 48 mutex_lock(&afinfo_mutex);
49 rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); 49 RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
50 mutex_unlock(&afinfo_mutex); 50 mutex_unlock(&afinfo_mutex);
51 synchronize_rcu(); 51 synchronize_rcu();
52} 52}
@@ -180,17 +180,16 @@ next_hook:
180 if (ret == 0) 180 if (ret == 0)
181 ret = -EPERM; 181 ret = -EPERM;
182 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 182 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
183 ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, 183 int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
184 verdict >> NF_VERDICT_QBITS); 184 verdict >> NF_VERDICT_QBITS);
185 if (ret < 0) { 185 if (err < 0) {
186 if (ret == -ECANCELED) 186 if (err == -ECANCELED)
187 goto next_hook; 187 goto next_hook;
188 if (ret == -ESRCH && 188 if (err == -ESRCH &&
189 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) 189 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
190 goto next_hook; 190 goto next_hook;
191 kfree_skb(skb); 191 kfree_skb(skb);
192 } 192 }
193 ret = 0;
194 } 193 }
195 rcu_read_unlock(); 194 rcu_read_unlock();
196 return ret; 195 return ret;
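The rcu_assign_pointer() -> RCU_INIT_POINTER() swaps above rely on the rule that the release barrier is only needed when handing a freshly initialised object to concurrent readers; storing NULL, or publishing before any reader can reach the pointer, may skip it. A kernel-style sketch (gp_example and struct foo_example are hypothetical):

    #include <linux/rcupdate.h>

    struct foo_example;
    static struct foo_example __rcu *gp_example;

    static void publish_examples(struct foo_example *p)
    {
            /* storing NULL: nothing for a reader to see half-built */
            RCU_INIT_POINTER(gp_example, NULL);

            /* handing out a just-built object: keep the barrier */
            rcu_assign_pointer(gp_example, p);
    }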
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d7e86ef9d23a..86137b558f45 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1699,10 +1699,8 @@ ip_set_init(void)
1699 1699
1700 ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max, 1700 ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
1701 GFP_KERNEL); 1701 GFP_KERNEL);
1702 if (!ip_set_list) { 1702 if (!ip_set_list)
1703 pr_err("ip_set: Unable to create ip_set_list\n");
1704 return -ENOMEM; 1703 return -ENOMEM;
1705 }
1706 1704
1707 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); 1705 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
1708 if (ret != 0) { 1706 if (ret != 0) {
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 757143b2240a..052579fe389a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -17,6 +17,7 @@
17#include <net/ipv6.h> 17#include <net/ipv6.h>
18 18
19#include <linux/netfilter/ipset/ip_set_getport.h> 19#include <linux/netfilter/ipset/ip_set_getport.h>
20#include <linux/export.h>
20 21
21/* We must handle non-linear skbs */ 22/* We must handle non-linear skbs */
22static bool 23static bool
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
index bd13d66220f1..4f29fa97044b 100644
--- a/net/netfilter/ipset/pfxlen.c
+++ b/net/netfilter/ipset/pfxlen.c
@@ -1,3 +1,4 @@
1#include <linux/export.h>
1#include <linux/netfilter/ipset/pfxlen.h> 2#include <linux/netfilter/ipset/pfxlen.h>
2 3
3/* 4/*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f77bb16d22a..093cc327020f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -188,14 +188,13 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
188} 188}
189 189
190 190
191static inline int 191static inline void
192ip_vs_set_state(struct ip_vs_conn *cp, int direction, 192ip_vs_set_state(struct ip_vs_conn *cp, int direction,
193 const struct sk_buff *skb, 193 const struct sk_buff *skb,
194 struct ip_vs_proto_data *pd) 194 struct ip_vs_proto_data *pd)
195{ 195{
196 if (unlikely(!pd->pp->state_transition)) 196 if (likely(pd->pp->state_transition))
197 return 0; 197 pd->pp->state_transition(cp, direction, skb, pd);
198 return pd->pp->state_transition(cp, direction, skb, pd);
199} 198}
200 199
201static inline int 200static inline int
@@ -530,7 +529,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
530 a cache_bypass connection entry */ 529 a cache_bypass connection entry */
531 ipvs = net_ipvs(net); 530 ipvs = net_ipvs(net);
532 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) { 531 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
533 int ret, cs; 532 int ret;
534 struct ip_vs_conn *cp; 533 struct ip_vs_conn *cp;
535 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && 534 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
536 iph.protocol == IPPROTO_UDP)? 535 iph.protocol == IPPROTO_UDP)?
@@ -557,7 +556,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
557 ip_vs_in_stats(cp, skb); 556 ip_vs_in_stats(cp, skb);
558 557
559 /* set state */ 558 /* set state */
560 cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 559 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
561 560
562 /* transmit the first SYN packet */ 561 /* transmit the first SYN packet */
563 ret = cp->packet_xmit(skb, cp, pd->pp); 562 ret = cp->packet_xmit(skb, cp, pd->pp);
@@ -1490,7 +1489,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1490 struct ip_vs_protocol *pp; 1489 struct ip_vs_protocol *pp;
1491 struct ip_vs_proto_data *pd; 1490 struct ip_vs_proto_data *pd;
1492 struct ip_vs_conn *cp; 1491 struct ip_vs_conn *cp;
1493 int ret, restart, pkts; 1492 int ret, pkts;
1494 struct netns_ipvs *ipvs; 1493 struct netns_ipvs *ipvs;
1495 1494
1496 /* Already marked as IPVS request or reply? */ 1495 /* Already marked as IPVS request or reply? */
@@ -1591,7 +1590,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1591 } 1590 }
1592 1591
1593 ip_vs_in_stats(cp, skb); 1592 ip_vs_in_stats(cp, skb);
1594 restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 1593 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1595 if (cp->packet_xmit) 1594 if (cp->packet_xmit)
1596 ret = cp->packet_xmit(skb, cp, pp); 1595 ret = cp->packet_xmit(skb, cp, pp);
1597 /* do not touch skb anymore */ 1596 /* do not touch skb anymore */
@@ -1878,10 +1877,9 @@ static int __net_init __ip_vs_init(struct net *net)
1878 struct netns_ipvs *ipvs; 1877 struct netns_ipvs *ipvs;
1879 1878
1880 ipvs = net_generic(net, ip_vs_net_id); 1879 ipvs = net_generic(net, ip_vs_net_id);
1881 if (ipvs == NULL) { 1880 if (ipvs == NULL)
1882 pr_err("%s(): no memory.\n", __func__);
1883 return -ENOMEM; 1881 return -ENOMEM;
1884 } 1882
1885 /* Hold the beast until a service is registered */ 1883
1886 ipvs->enable = 0; 1884 ipvs->enable = 0;
1887 ipvs->net = net; 1885 ipvs->net = net;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e3be48bf4dcd..008bf97cc91a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -856,15 +856,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
856 } 856 }
857 857
858 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL); 858 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
859 if (dest == NULL) { 859 if (dest == NULL)
860 pr_err("%s(): no memory.\n", __func__);
861 return -ENOMEM; 860 return -ENOMEM;
862 } 861
863 dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 862 dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
864 if (!dest->stats.cpustats) { 863 if (!dest->stats.cpustats)
865 pr_err("%s() alloc_percpu failed\n", __func__);
866 goto err_alloc; 864 goto err_alloc;
867 }
868 865
869 dest->af = svc->af; 866 dest->af = svc->af;
870 dest->protocol = svc->protocol; 867 dest->protocol = svc->protocol;
@@ -1168,10 +1165,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1168 goto out_err; 1165 goto out_err;
1169 } 1166 }
1170 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 1167 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
1171 if (!svc->stats.cpustats) { 1168 if (!svc->stats.cpustats)
1172 pr_err("%s() alloc_percpu failed\n", __func__);
1173 goto out_err; 1169 goto out_err;
1174 }
1175 1170
1176 /* I'm the first user of the service */ 1171 /* I'm the first user of the service */
1177 atomic_set(&svc->usecnt, 0); 1172 atomic_set(&svc->usecnt, 0);
@@ -3326,10 +3321,8 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3326 int ret = 0, cmd; 3321 int ret = 0, cmd;
3327 int need_full_svc = 0, need_full_dest = 0; 3322 int need_full_svc = 0, need_full_dest = 0;
3328 struct net *net; 3323 struct net *net;
3329 struct netns_ipvs *ipvs;
3330 3324
3331 net = skb_sknet(skb); 3325 net = skb_sknet(skb);
3332 ipvs = net_ipvs(net);
3333 cmd = info->genlhdr->cmd; 3326 cmd = info->genlhdr->cmd;
3334 3327
3335 mutex_lock(&__ip_vs_mutex); 3328 mutex_lock(&__ip_vs_mutex);
@@ -3421,10 +3414,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3421 void *reply; 3414 void *reply;
3422 int ret, cmd, reply_cmd; 3415 int ret, cmd, reply_cmd;
3423 struct net *net; 3416 struct net *net;
3424 struct netns_ipvs *ipvs;
3425 3417
3426 net = skb_sknet(skb); 3418 net = skb_sknet(skb);
3427 ipvs = net_ipvs(net);
3428 cmd = info->genlhdr->cmd; 3419 cmd = info->genlhdr->cmd;
3429 3420
3430 if (cmd == IPVS_CMD_GET_SERVICE) 3421 if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3720,10 +3711,9 @@ int __net_init ip_vs_control_net_init(struct net *net)
3720 3711
3721 /* procfs stats */ 3712 /* procfs stats */
3722 ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 3713 ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
3723 if (!ipvs->tot_stats.cpustats) { 3714 if (!ipvs->tot_stats.cpustats)
3724 pr_err("%s(): alloc_percpu.\n", __func__);
3725 return -ENOMEM; 3715 return -ENOMEM;
3726 } 3716
3727 spin_lock_init(&ipvs->tot_stats.lock); 3717 spin_lock_init(&ipvs->tot_stats.lock);
3728 3718
3729 proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops); 3719 proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 95fd0d14200b..1c269e56200a 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -150,10 +150,9 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
 	/* allocate the DH table for this service */
 	tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
 		      GFP_ATOMIC);
-	if (tbl == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (tbl == NULL)
 		return -ENOMEM;
-	}
+
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for "
 		  "current service\n",
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 4490a32ad5b2..538d74ee4f68 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -52,8 +52,9 @@
  * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
  * First port is set to the default port.
  */
+static unsigned int ports_count = 1;
 static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0};
-module_param_array(ports, ushort, NULL, 0);
+module_param_array(ports, ushort, &ports_count, 0444);
 MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands");
 
 
@@ -449,7 +450,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 	if (ret)
 		goto err_exit;
 
-	for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
+	for (i = 0; i < ports_count; i++) {
 		if (!ports[i])
 			continue;
 		ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
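module_param_array() takes a pointer to a counter as its third argument; at load time the module loader stores there how many comma-separated values the user actually passed. Initializing ports_count to 1 matches the single default port, so the registration loop can stop at ports_count instead of walking all IP_VS_APP_MAX_PORTS slots. A hedged sketch of the idiom in a standalone module (my_ports and my_count are illustrative names):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static unsigned short my_ports[8] = { 21, 0 };
	static unsigned int my_count = 1;	/* one default entry */
	/* "insmod mod.ko my_ports=21,2121" would set my_count to 2 */
	module_param_array(my_ports, ushort, &my_count, 0444);
	MODULE_PARM_DESC(my_ports, "ports handled by the helper");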
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 87e40ea77a95..0f16283fd058 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -202,10 +202,8 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
 	en = ip_vs_lblc_get(dest->af, tbl, daddr);
 	if (!en) {
 		en = kmalloc(sizeof(*en), GFP_ATOMIC);
-		if (!en) {
-			pr_err("%s(): no memory\n", __func__);
+		if (!en)
 			return NULL;
-		}
 
 		en->af = dest->af;
 		ip_vs_addr_copy(dest->af, &en->addr, daddr);
@@ -345,10 +343,9 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
 	 *    Allocate the ip_vs_lblc_table for this service
 	 */
 	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
-	if (tbl == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (tbl == NULL)
 		return -ENOMEM;
-	}
+
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
 		  "current service\n", sizeof(*tbl));
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 90f618ab6dda..eec797f8cce7 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -112,10 +112,8 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
 	}
 
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
-	if (e == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (e == NULL)
 		return NULL;
-	}
 
 	atomic_inc(&dest->refcnt);
 	e->dest = dest;
@@ -373,10 +371,8 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
 	en = ip_vs_lblcr_get(dest->af, tbl, daddr);
 	if (!en) {
 		en = kmalloc(sizeof(*en), GFP_ATOMIC);
-		if (!en) {
-			pr_err("%s(): no memory\n", __func__);
+		if (!en)
 			return NULL;
-		}
 
 		en->af = dest->af;
 		ip_vs_addr_copy(dest->af, &en->addr, daddr);
@@ -516,10 +512,9 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
 	 *    Allocate the ip_vs_lblcr_table for this service
 	 */
 	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
-	if (tbl == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (tbl == NULL)
 		return -ENOMEM;
-	}
+
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
 		  "current service\n", sizeof(*tbl));
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index f454c80df0a7..022e77e1e766 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -127,7 +127,7 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
 	nf_conntrack_alter_reply(ct, &new_tuple);
 }
 
-int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+int ip_vs_confirm_conntrack(struct sk_buff *skb)
 {
 	return nf_conntrack_confirm(skb);
 }
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 52d073c105e9..85312939695f 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -74,10 +74,9 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
 	struct ip_vs_proto_data *pd =
 			kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
 
-	if (!pd) {
-		pr_err("%s(): no memory.\n", __func__);
+	if (!pd)
 		return -ENOMEM;
-	}
+
 	pd->pp = pp;	/* For speed issues */
 	pd->next = ipvs->proto_data_table[hash];
 	ipvs->proto_data_table[hash] = pd;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index d12ed53ec95f..1fbf7a2816f5 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ static const char *sctp_state_name(int state)
 	return "?";
 }
 
-static inline int
+static inline void
 set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 		int direction, const struct sk_buff *skb)
 {
@@ -924,7 +924,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
 				sizeof(_sctpch), &_sctpch);
 	if (sch == NULL)
-		return 0;
+		return;
 
 	chunk_type = sch->type;
 	/*
@@ -993,21 +993,15 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 		cp->timeout = pd->timeout_table[cp->state = next_state];
 	else	/* What to do ? */
 		cp->timeout = sctp_timeouts[cp->state = next_state];
-
-	return 1;
 }
 
-static int
+static void
 sctp_state_transition(struct ip_vs_conn *cp, int direction,
 		const struct sk_buff *skb, struct ip_vs_proto_data *pd)
 {
-	int ret = 0;
-
 	spin_lock(&cp->lock);
-	ret = set_sctp_state(pd, cp, direction, skb);
+	set_sctp_state(pd, cp, direction, skb);
 	spin_unlock(&cp->lock);
-
-	return ret;
 }
 
 static inline __u16 sctp_app_hashkey(__be16 port)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index c0cc341b840d..ef8641f7af83 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -546,7 +546,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 /*
  *	Handle state transitions
  */
-static int
+static void
 tcp_state_transition(struct ip_vs_conn *cp, int direction,
 		     const struct sk_buff *skb,
 		     struct ip_vs_proto_data *pd)
@@ -561,13 +561,11 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
 
 	th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
 	if (th == NULL)
-		return 0;
+		return;
 
 	spin_lock(&cp->lock);
 	set_tcp_state(pd, cp, direction, th);
 	spin_unlock(&cp->lock);
-
-	return 1;
 }
 
 static inline __u16 tcp_app_hashkey(__be16 port)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f1282cbe6fe3..f4b7262896bb 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -454,18 +454,17 @@ static const char * udp_state_name(int state)
 	return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
 }
 
-static int
+static void
 udp_state_transition(struct ip_vs_conn *cp, int direction,
 		     const struct sk_buff *skb,
 		     struct ip_vs_proto_data *pd)
 {
 	if (unlikely(!pd)) {
 		pr_err("UDP no ns data\n");
-		return 0;
+		return;
 	}
 
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
-	return 1;
 }
 
 static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
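The sctp, tcp and udp hunks above are one mechanical conversion: the int returned by the ->state_transition handlers was ignored by the caller, so the handlers and the set_*_state() helpers become void and the ret plumbing disappears. A hedged sketch of the resulting handler shape (example_state_transition is an illustrative name; the ip_vs types are the ones used above):

	static void example_state_transition(struct ip_vs_conn *cp, int direction,
					     const struct sk_buff *skb,
					     struct ip_vs_proto_data *pd)
	{
		spin_lock(&cp->lock);
		/* update cp->state and cp->timeout from the packet */
		spin_unlock(&cp->lock);
	}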
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index b5e2556c581a..33815f4fb451 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -147,10 +147,9 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
 	/* allocate the SH table for this service */
 	tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
 		      GFP_ATOMIC);
-	if (tbl == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (tbl == NULL)
 		return -ENOMEM;
-	}
+
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for "
 		  "current service\n",
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 1ef41f50723c..fd0d4e09876a 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -85,10 +85,9 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
 	 *    Allocate the mark variable for WRR scheduling
 	 */
 	mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
-	if (mark == NULL) {
-		pr_err("%s(): no memory\n", __func__);
+	if (mark == NULL)
 		return -ENOMEM;
-	}
+
 	mark->cl = &svc->destinations;
 	mark->cw = 0;
 	mark->mw = ip_vs_wrr_max_weight(svc);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index ee319a4338b0..aa2d7206ee8a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -339,7 +339,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
 								\
 	(skb)->ipvs_property = 1;				\
 	if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT))		\
-		__ret = ip_vs_confirm_conntrack(skb, cp);	\
+		__ret = ip_vs_confirm_conntrack(skb);		\
 	if (__ret == NF_ACCEPT) {				\
 		nf_reset(skb);					\
 		skb_forward_csum(skb);				\
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 5178c691ecbf..369df3f08d42 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
+#include <linux/export.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b866017..7202b0631cd6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -661,7 +661,6 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 	 */
 	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
-		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -749,10 +748,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
 				  hash);
-	if (IS_ERR(ct)) {
-		pr_debug("Can't allocate conntrack.\n");
+	if (IS_ERR(ct))
 		return (struct nf_conntrack_tuple_hash *)ct;
-	}
 
 	if (!l4proto->new(ct, skb, dataoff)) {
 		nf_conntrack_free(ct);
@@ -779,7 +776,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		if (exp->helper) {
 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
 			if (help)
-				rcu_assign_pointer(help->helper, exp->helper);
+				RCU_INIT_POINTER(help->helper, exp->helper);
 		}
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
@@ -1317,7 +1314,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 void nf_conntrack_cleanup(struct net *net)
 {
 	if (net_eq(net, &init_net))
-		rcu_assign_pointer(ip_ct_attach, NULL);
+		RCU_INIT_POINTER(ip_ct_attach, NULL);
 
 	/* This makes sure all current packets have passed through
 	   netfilter framework.  Roll on, two-stage module
@@ -1327,7 +1324,7 @@ void nf_conntrack_cleanup(struct net *net)
 	nf_conntrack_cleanup_net(net);
 
 	if (net_eq(net, &init_net)) {
-		rcu_assign_pointer(nf_ct_destroy, NULL);
+		RCU_INIT_POINTER(nf_ct_destroy, NULL);
 		nf_conntrack_cleanup_init_net();
 	}
 }
@@ -1576,11 +1573,11 @@ int nf_conntrack_init(struct net *net)
 
 	if (net_eq(net, &init_net)) {
 		/* For use by REJECT target */
-		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
-		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+		RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+		RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
 
 		/* Howto get NAT offsets */
-		rcu_assign_pointer(nf_ct_nat_offset, NULL);
+		RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
 	}
 	return 0;
 
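The rcu_assign_pointer() -> RCU_INIT_POINTER() conversions here and in the files below rely on the rule that the write barrier in rcu_assign_pointer() is only needed when publishing an initialized structure to concurrent readers; it can be skipped when storing NULL, or when readers cannot race with the store (for example at init time, or under a lock that also excludes them). A minimal sketch of the distinction (gp and p are illustrative names):

	/* publish a newly initialized object to running readers:
	 * the barrier orders the p->a store before the pointer store */
	p->a = 1;
	rcu_assign_pointer(gp, p);

	/* NULL, or no readers yet: plain initialization is enough */
	RCU_INIT_POINTER(gp, NULL);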
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 63a1b915a7e4..6b368be937c6 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
@@ -94,7 +95,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
 		ret = -EBUSY;
 		goto out_unlock;
 	}
-	rcu_assign_pointer(nf_conntrack_event_cb, new);
+	RCU_INIT_POINTER(nf_conntrack_event_cb, new);
 	mutex_unlock(&nf_ct_ecache_mutex);
 	return ret;
 
@@ -112,7 +113,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
 	notify = rcu_dereference_protected(nf_conntrack_event_cb,
 					   lockdep_is_held(&nf_ct_ecache_mutex));
 	BUG_ON(notify != new);
-	rcu_assign_pointer(nf_conntrack_event_cb, NULL);
+	RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
 	mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -129,7 +130,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
 		ret = -EBUSY;
 		goto out_unlock;
 	}
-	rcu_assign_pointer(nf_expect_event_cb, new);
+	RCU_INIT_POINTER(nf_expect_event_cb, new);
 	mutex_unlock(&nf_ct_ecache_mutex);
 	return ret;
 
@@ -147,7 +148,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
 	notify = rcu_dereference_protected(nf_expect_event_cb,
 					   lockdep_is_held(&nf_ct_ecache_mutex));
 	BUG_ON(notify != new);
-	rcu_assign_pointer(nf_expect_event_cb, NULL);
+	RCU_INIT_POINTER(nf_expect_event_cb, NULL);
 	mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index cd1e8e0970f2..340c80d968d4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -20,6 +20,8 @@
 #include <linux/percpu.h>
 #include <linux/kernel.h>
 #include <linux/jhash.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
 #include <net/net_namespace.h>
 
 #include <net/netfilter/nf_conntrack.h>
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 05ecdc281a53..4605c947dcc4 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
 	   before updating alloc_size */
 	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
 			   + type->len;
-	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
+	RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
 	update_alloc_size(type);
 out:
 	mutex_unlock(&nf_ct_ext_type_mutex);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register);
 void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
 {
 	mutex_lock(&nf_ct_ext_type_mutex);
-	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
+	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
 	update_alloc_size(type);
 	mutex_unlock(&nf_ct_ext_type_mutex);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 1bdfea357955..93c4bdbfc1ae 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 	helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	if (helper == NULL) {
 		if (help)
-			rcu_assign_pointer(help->helper, NULL);
+			RCU_INIT_POINTER(help->helper, NULL);
 		goto out;
 	}
 
@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 		memset(&help->help, 0, sizeof(help->help));
 	}
 
-	rcu_assign_pointer(help->helper, helper);
+	RCU_INIT_POINTER(help->helper, helper);
 out:
 	return ret;
 }
@@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
 			lockdep_is_held(&nf_conntrack_lock)
 			) == me) {
 		nf_conntrack_event(IPCT_HELPER, ct);
-		rcu_assign_pointer(help->helper, NULL);
+		RCU_INIT_POINTER(help->helper, NULL);
 	}
 	return 0;
 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7dec88a1755b..e58aa9b1fe8a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
 	if (help && help->helper) {
 		/* we had a helper before ... */
 		nf_ct_remove_expectations(ct);
-		rcu_assign_pointer(help->helper, NULL);
+		RCU_INIT_POINTER(help->helper, NULL);
 	}
 
 	return 0;
@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
 			return -EOPNOTSUPP;
 	}
 
-	rcu_assign_pointer(help->helper, helper);
+	RCU_INIT_POINTER(help->helper, helper);
 
 	return 0;
 }
@@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 			}
 
 			/* not in hash table yet so not strictly necessary */
-			rcu_assign_pointer(help->helper, helper);
+			RCU_INIT_POINTER(help->helper, helper);
 		}
 	} else {
 		/* try an implicit helper assignation */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20714edf6cd2..ce0c406f58a8 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
 		llog = rcu_dereference_protected(nf_loggers[pf],
 						 lockdep_is_held(&nf_log_mutex));
 		if (llog == NULL)
-			rcu_assign_pointer(nf_loggers[pf], logger);
+			RCU_INIT_POINTER(nf_loggers[pf], logger);
 	}
 
 	mutex_unlock(&nf_log_mutex);
@@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger)
 		c_logger = rcu_dereference_protected(nf_loggers[i],
 						     lockdep_is_held(&nf_log_mutex));
 		if (c_logger == logger)
-			rcu_assign_pointer(nf_loggers[i], NULL);
+			RCU_INIT_POINTER(nf_loggers[i], NULL);
 		list_del(&logger->list[i]);
 	}
 	mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 		mutex_unlock(&nf_log_mutex);
 		return -ENOENT;
 	}
-	rcu_assign_pointer(nf_loggers[pf], logger);
+	RCU_INIT_POINTER(nf_loggers[pf], logger);
 	mutex_unlock(&nf_log_mutex);
 	return 0;
 }
@@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf)
 	if (pf >= ARRAY_SIZE(nf_loggers))
 		return;
 	mutex_lock(&nf_log_mutex);
-	rcu_assign_pointer(nf_loggers[pf], NULL);
+	RCU_INIT_POINTER(nf_loggers[pf], NULL);
 	mutex_unlock(&nf_log_mutex);
 }
 EXPORT_SYMBOL(nf_log_unbind_pf);
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
 			mutex_unlock(&nf_log_mutex);
 			return -ENOENT;
 		}
-		rcu_assign_pointer(nf_loggers[tindex], logger);
+		RCU_INIT_POINTER(nf_loggers[tindex], logger);
 		mutex_unlock(&nf_log_mutex);
 	} else {
 		mutex_lock(&nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 84d0fd47636a..99ffd2885088 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 	else if (old)
 		ret = -EBUSY;
 	else {
-		rcu_assign_pointer(queue_handler[pf], qh);
+		RCU_INIT_POINTER(queue_handler[pf], qh);
 		ret = 0;
 	}
 	mutex_unlock(&queue_handler_mutex);
@@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 		return -EINVAL;
 	}
 
-	rcu_assign_pointer(queue_handler[pf], NULL);
+	RCU_INIT_POINTER(queue_handler[pf], NULL);
 	mutex_unlock(&queue_handler_mutex);
 
 	synchronize_rcu();
@@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 				      queue_handler[pf],
 				      lockdep_is_held(&queue_handler_mutex)
 				      ) == qh)
-			rcu_assign_pointer(queue_handler[pf], NULL);
+			RCU_INIT_POINTER(queue_handler[pf], NULL);
 	}
 	mutex_unlock(&queue_handler_mutex);
 
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 1905976b5135..c879c1a2370e 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
 		nfnl_unlock();
 		return -EBUSY;
 	}
-	rcu_assign_pointer(subsys_table[n->subsys_id], n);
+	RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
 	nfnl_unlock();
 
 	return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
 	if (!nfnl)
 		return -ENOMEM;
 	net->nfnl_stash = nfnl;
-	rcu_assign_pointer(net->nfnl, nfnl);
+	RCU_INIT_POINTER(net->nfnl, nfnl);
 	return 0;
 }
 
@@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
 	struct net *net;
 
 	list_for_each_entry(net, net_exit_list, exit_list)
-		rcu_assign_pointer(net->nfnl, NULL);
+		RCU_INIT_POINTER(net->nfnl, NULL);
 	synchronize_net();
 	list_for_each_entry(net, net_exit_list, exit_list)
 		netlink_kernel_release(net->nfnl_stash);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 2d8158acf6fa..66b2c54c544f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -307,17 +307,14 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
 	n = max(inst_size, pkt_size);
 	skb = alloc_skb(n, GFP_ATOMIC);
 	if (!skb) {
-		pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
-			inst_size);
-
 		if (n > pkt_size) {
 			/* try to allocate only as much as we need for current
 			 * packet */
 
 			skb = alloc_skb(pkt_size, GFP_ATOMIC);
 			if (!skb)
-				pr_err("nfnetlink_log: can't even alloc %u "
-				       "bytes\n", pkt_size);
+				pr_err("nfnetlink_log: can't even alloc %u bytes\n",
+				       pkt_size);
 		}
 	}
 
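nfulnl_alloc_skb() keeps its two-step strategy: first try a buffer sized for a whole logging instance (inst_size), then fall back to just the current packet. The patch only drops the noisy first message and joins the remaining format string onto one line so it stays greppable. A sketch of the fallback idiom, with the variable names from the hunk above:

	skb = alloc_skb(n, GFP_ATOMIC);			/* preferred, batched size */
	if (!skb && n > pkt_size)
		skb = alloc_skb(pkt_size, GFP_ATOMIC);	/* minimal fallback */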
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 71441b934ffd..8d987c3573fd 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -14,6 +14,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/socket.h>
 #include <linux/net.h>
 #include <linux/proc_fs.h>
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 3bdd443aaf15..f407ebc13481 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -122,14 +122,12 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 
 	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
 	if (!info->timer) {
-		pr_debug("couldn't alloc timer\n");
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
 	if (!info->timer->attr.attr.name) {
-		pr_debug("couldn't alloc attribute name\n");
 		ret = -ENOMEM;
 		goto out_free_timer;
 	}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9228ee0dc11a..dfd52bad1523 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -176,10 +176,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
 		ent = NULL;
 	} else
 		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
-	if (!ent) {
-		if (net_ratelimit())
-			pr_err("cannot allocate dsthash_ent\n");
-	} else {
+	if (ent) {
 		memcpy(&ent->dst, dst, sizeof(ent->dst));
 		spin_lock_init(&ent->lock);
 
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 70eb2b4984dd..44c8eb4c9d66 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -9,6 +9,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_quota.h>
+#include <linux/module.h>
 
 struct xt_quota_priv {
 	spinlock_t	lock;
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 42ecb71d445f..4fe4fb4276d0 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/xt_statistic.h>
 #include <linux/netfilter/x_tables.h>
+#include <linux/module.h>
 
 struct xt_statistic_priv {
 	atomic_t count;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7d8083cde34f..3f905e5370c2 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
 		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
 
 	spin_lock(&netlbl_domhsh_lock);
-	rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
+	RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
 	spin_unlock(&netlbl_domhsh_lock);
 
 	return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 			     &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
 	} else {
 		INIT_LIST_HEAD(&entry->list);
-		rcu_assign_pointer(netlbl_domhsh_def, entry);
+		RCU_INIT_POINTER(netlbl_domhsh_def, entry);
 	}
 
 	if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
@@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 		if (entry != rcu_dereference(netlbl_domhsh_def))
 			list_del_rcu(&entry->list);
 		else
-			rcu_assign_pointer(netlbl_domhsh_def, NULL);
+			RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
 	} else
 		ret_val = -ENOENT;
 	spin_unlock(&netlbl_domhsh_lock);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e6e823656f9d..e251c2c88521 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
 		INIT_LIST_HEAD(&iface->list);
 		if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
 			goto add_iface_failure;
-		rcu_assign_pointer(netlbl_unlhsh_def, iface);
+		RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
 	}
 	spin_unlock(&netlbl_unlhsh_lock);
 
@@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
 	if (iface->ifindex > 0)
 		list_del_rcu(&iface->list);
 	else
-		rcu_assign_pointer(netlbl_unlhsh_def, NULL);
+		RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
 	spin_unlock(&netlbl_unlhsh_lock);
 
 	call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size)
 
 	rcu_read_lock();
 	spin_lock(&netlbl_unlhsh_lock);
-	rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
+	RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
 	spin_unlock(&netlbl_unlhsh_lock);
 	rcu_read_unlock();
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a4db0211da0..1201b6d4183d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1324,10 +1324,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (NULL == siocb->scm) {
+	if (NULL == siocb->scm)
 		siocb->scm = &scm;
-		memset(&scm, 0, sizeof(scm));
-	}
+
 	err = scm_send(sock, msg, siocb->scm);
 	if (err < 0)
 		return err;
@@ -1578,7 +1577,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 	new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
-	old = rcu_dereference_raw(tbl->listeners);
+	old = rcu_dereference_protected(tbl->listeners, 1);
 	memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
 	rcu_assign_pointer(tbl->listeners, new);
 
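rcu_dereference_protected() is the update-side counterpart of rcu_dereference(): it asserts, rather than orders, that the access is safe, and lockdep can check the supplied condition (here a literal 1, since the caller already guarantees exclusion). A hedged sketch of the usual pattern, with my_lock and my_ptr as illustrative names:

	spin_lock(&my_lock);
	old = rcu_dereference_protected(my_ptr,
					lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_ptr, new);
	spin_unlock(&my_lock);
	/* free 'old' only after a grace period */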
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index cd5ddb2ebc43..915a87ba23e1 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -37,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <net/netrom.h>
 #include <linux/seq_file.h>
+#include <linux/export.h>
 
 static unsigned int nr_neigh_no = 1;
 
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 33e095b124b3..58cddadf8e8e 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -13,4 +13,6 @@ menuconfig NFC
 	  To compile this support as a module, choose M here: the module will
 	  be called nfc.
 
+source "net/nfc/nci/Kconfig"
+
 source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 16250c353851..fbb550f2377b 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -3,5 +3,6 @@
 #
 
 obj-$(CONFIG_NFC) += nfc.o
+obj-$(CONFIG_NFC_NCI) += nci/
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index e982cef8f49d..da67756425ce 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/nfc.h>
+#include <linux/module.h>
 
 #include "nfc.h"
 
diff --git a/net/nfc/core.c b/net/nfc/core.c
index b6fd4e1f2057..47e02c1b8c02 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -53,6 +53,80 @@ int nfc_printk(const char *level, const char *format, ...)
 EXPORT_SYMBOL(nfc_printk);
 
 /**
+ * nfc_dev_up - turn on the NFC device
+ *
+ * @dev: The nfc device to be turned on
+ *
+ * The device remains up until the nfc_dev_down function is called.
+ */
+int nfc_dev_up(struct nfc_dev *dev)
+{
+	int rc = 0;
+
+	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+	device_lock(&dev->dev);
+
+	if (!device_is_registered(&dev->dev)) {
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (dev->dev_up) {
+		rc = -EALREADY;
+		goto error;
+	}
+
+	if (dev->ops->dev_up)
+		rc = dev->ops->dev_up(dev);
+
+	if (!rc)
+		dev->dev_up = true;
+
+error:
+	device_unlock(&dev->dev);
+	return rc;
+}
+
+/**
+ * nfc_dev_down - turn off the NFC device
+ *
+ * @dev: The nfc device to be turned off
+ */
+int nfc_dev_down(struct nfc_dev *dev)
+{
+	int rc = 0;
+
+	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+	device_lock(&dev->dev);
+
+	if (!device_is_registered(&dev->dev)) {
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (!dev->dev_up) {
+		rc = -EALREADY;
+		goto error;
+	}
+
+	if (dev->polling || dev->remote_activated) {
+		rc = -EBUSY;
+		goto error;
+	}
+
+	if (dev->ops->dev_down)
+		dev->ops->dev_down(dev);
+
+	dev->dev_up = false;
+
+error:
+	device_unlock(&dev->dev);
+	return rc;
+}
+
+/**
  * nfc_start_poll - start polling for nfc targets
  *
  * @dev: The nfc device that must start polling
@@ -144,6 +218,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
 	}
 
 	rc = dev->ops->activate_target(dev, target_idx, protocol);
+	if (!rc)
+		dev->remote_activated = true;
 
 error:
 	device_unlock(&dev->dev);
@@ -170,6 +246,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
 	}
 
 	dev->ops->deactivate_target(dev, target_idx);
+	dev->remote_activated = false;
 
 error:
 	device_unlock(&dev->dev);
@@ -322,7 +399,9 @@ struct nfc_dev *nfc_get_device(unsigned idx)
  * @supported_protocols: NFC protocols supported by the device
  */
 struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
-					u32 supported_protocols)
+					u32 supported_protocols,
+					int tx_headroom,
+					int tx_tailroom)
 {
 	static atomic_t dev_no = ATOMIC_INIT(0);
 	struct nfc_dev *dev;
@@ -345,6 +424,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 
 	dev->ops = ops;
 	dev->supported_protocols = supported_protocols;
+	dev->tx_headroom = tx_headroom;
+	dev->tx_tailroom = tx_tailroom;
 
 	spin_lock_init(&dev->targets_lock);
 	nfc_genl_data_init(&dev->genl_data);
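The two extra nfc_allocate_device() arguments let a driver declare how much skb headroom and tailroom its transport needs, so frames can later be built without reallocation; the NCI core below passes its own tx_headroom plus NCI_DATA_HDR_SIZE. A hedged driver-side sketch (the MY_DRV_* constants and my_nfc_ops are illustrative, not from this patch):

	#define MY_DRV_HDR_SIZE	4	/* illustrative transport header */
	#define MY_DRV_CRC_SIZE	2	/* illustrative trailer */

	nfc_dev = nfc_allocate_device(&my_nfc_ops, protocols,
				      MY_DRV_HDR_SIZE, MY_DRV_CRC_SIZE);
	if (!nfc_dev)
		return -ENOMEM;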
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
new file mode 100644
index 000000000000..decdc49b26d8
--- /dev/null
+++ b/net/nfc/nci/Kconfig
@@ -0,0 +1,10 @@
+config NFC_NCI
+	depends on NFC && EXPERIMENTAL
+	tristate "NCI protocol support (EXPERIMENTAL)"
+	default n
+	help
+	  NCI (NFC Controller Interface) is a communication protocol between
+	  an NFC Controller (NFCC) and a Device Host (DH).
+
+	  Say Y here to compile NCI support into the kernel or say M to
+	  compile it as module (nci).
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
new file mode 100644
index 000000000000..cdb3a2e44471
--- /dev/null
+++ b/net/nfc/nci/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux NFC NCI layer.
+#
+
+obj-$(CONFIG_NFC_NCI) += nci.o
+
+nci-objs := core.o data.o lib.o ntf.o rsp.o
\ No newline at end of file
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
new file mode 100644
index 000000000000..3925c6578767
--- /dev/null
+++ b/net/nfc/nci/core.c
@@ -0,0 +1,798 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+static void nci_cmd_work(struct work_struct *work);
+static void nci_rx_work(struct work_struct *work);
+static void nci_tx_work(struct work_struct *work);
+
+/* ---- NCI requests ---- */
+
+void nci_req_complete(struct nci_dev *ndev, int result)
+{
+	if (ndev->req_status == NCI_REQ_PEND) {
+		ndev->req_result = result;
+		ndev->req_status = NCI_REQ_DONE;
+		complete(&ndev->req_completion);
+	}
+}
+
+static void nci_req_cancel(struct nci_dev *ndev, int err)
+{
+	if (ndev->req_status == NCI_REQ_PEND) {
+		ndev->req_result = err;
+		ndev->req_status = NCI_REQ_CANCELED;
+		complete(&ndev->req_completion);
+	}
+}
+
+/* Execute request and wait for completion. */
+static int __nci_request(struct nci_dev *ndev,
+	void (*req)(struct nci_dev *ndev, unsigned long opt),
+	unsigned long opt,
+	__u32 timeout)
+{
+	int rc = 0;
+	unsigned long completion_rc;
+
+	ndev->req_status = NCI_REQ_PEND;
+
+	init_completion(&ndev->req_completion);
+	req(ndev, opt);
+	completion_rc = wait_for_completion_interruptible_timeout(
+							&ndev->req_completion,
+							timeout);
+
+	nfc_dbg("wait_for_completion return %ld", completion_rc);
+
+	if (completion_rc > 0) {
+		switch (ndev->req_status) {
+		case NCI_REQ_DONE:
+			rc = nci_to_errno(ndev->req_result);
+			break;
+
+		case NCI_REQ_CANCELED:
+			rc = -ndev->req_result;
+			break;
+
+		default:
+			rc = -ETIMEDOUT;
+			break;
+		}
+	} else {
+		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
+			completion_rc);
+
+		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
+	}
+
+	ndev->req_status = ndev->req_result = 0;
+
+	return rc;
+}
+
+static inline int nci_request(struct nci_dev *ndev,
+		void (*req)(struct nci_dev *ndev, unsigned long opt),
+		unsigned long opt, __u32 timeout)
+{
+	int rc;
+
+	if (!test_bit(NCI_UP, &ndev->flags))
+		return -ENETDOWN;
+
+	/* Serialize all requests */
+	mutex_lock(&ndev->req_lock);
+	rc = __nci_request(ndev, req, opt, timeout);
+	mutex_unlock(&ndev->req_lock);
+
+	return rc;
+}
+
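__nci_request() is a command/completion handshake: the req callback fires the command, then the caller sleeps on req_completion until nci_req_complete() (driven by the response path) or nci_req_cancel() wakes it, or the timeout expires; nci_request() adds the NCI_UP check and the req_lock mutex so requests are strictly serialized. A sketch of a caller, reusing nci_reset_req and NCI_RESET_TIMEOUT from this file:

	/* process context only: sleeps on the completion */
	rc = nci_request(ndev, nci_reset_req, 0,
			 msecs_to_jiffies(NCI_RESET_TIMEOUT));
	if (rc)
		nfc_err("reset failed %d", rc);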
127static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
128{
129 nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
130}
131
132static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
133{
134 nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
135}
136
137static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
138{
139 struct nci_core_conn_create_cmd conn_cmd;
140 struct nci_rf_disc_map_cmd cmd;
141 struct disc_map_config *cfg = cmd.mapping_configs;
142 __u8 *num = &cmd.num_mapping_configs;
143 int i;
144
145 /* create static rf connection */
146 conn_cmd.target_handle = 0;
147 conn_cmd.num_target_specific_params = 0;
148 nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
149
150 /* set rf mapping configurations */
151 *num = 0;
152
153 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
154 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
155 if (ndev->supported_rf_interfaces[i] ==
156 NCI_RF_INTERFACE_ISO_DEP) {
157 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
158 cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
159 cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
160 (*num)++;
161 } else if (ndev->supported_rf_interfaces[i] ==
162 NCI_RF_INTERFACE_NFC_DEP) {
163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
164 cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
165 cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
166 (*num)++;
167 }
168
169 if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
170 break;
171 }
172
173 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
174 (1 + ((*num)*sizeof(struct disc_map_config))),
175 &cmd);
176}
177
178static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
179{
180 struct nci_rf_disc_cmd cmd;
181 __u32 protocols = opt;
182
183 cmd.num_disc_configs = 0;
184
185 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
186 (protocols & NFC_PROTO_JEWEL_MASK
187 || protocols & NFC_PROTO_MIFARE_MASK
188 || protocols & NFC_PROTO_ISO14443_MASK
189 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
190 cmd.disc_configs[cmd.num_disc_configs].type =
191 NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
192 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
193 cmd.num_disc_configs++;
194 }
195
196 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
197 (protocols & NFC_PROTO_ISO14443_MASK)) {
198 cmd.disc_configs[cmd.num_disc_configs].type =
199 NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
200 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
201 cmd.num_disc_configs++;
202 }
203
204 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
205 (protocols & NFC_PROTO_FELICA_MASK
206 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
207 cmd.disc_configs[cmd.num_disc_configs].type =
208 NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
209 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
210 cmd.num_disc_configs++;
211 }
212
213 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
214 (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
215 &cmd);
216}
217
218static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
219{
220 struct nci_rf_deactivate_cmd cmd;
221
222 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
223
224 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
225 sizeof(struct nci_rf_deactivate_cmd),
226 &cmd);
227}
228
229static int nci_open_device(struct nci_dev *ndev)
230{
231 int rc = 0;
232
233 mutex_lock(&ndev->req_lock);
234
235 if (test_bit(NCI_UP, &ndev->flags)) {
236 rc = -EALREADY;
237 goto done;
238 }
239
240 if (ndev->ops->open(ndev)) {
241 rc = -EIO;
242 goto done;
243 }
244
245 atomic_set(&ndev->cmd_cnt, 1);
246
247 set_bit(NCI_INIT, &ndev->flags);
248
249 rc = __nci_request(ndev, nci_reset_req, 0,
250 msecs_to_jiffies(NCI_RESET_TIMEOUT));
251
252 if (!rc) {
253 rc = __nci_request(ndev, nci_init_req, 0,
254 msecs_to_jiffies(NCI_INIT_TIMEOUT));
255 }
256
257 if (!rc) {
258 rc = __nci_request(ndev, nci_init_complete_req, 0,
259 msecs_to_jiffies(NCI_INIT_TIMEOUT));
260 }
261
262 clear_bit(NCI_INIT, &ndev->flags);
263
264 if (!rc) {
265 set_bit(NCI_UP, &ndev->flags);
266 } else {
267 /* Init failed, cleanup */
268 skb_queue_purge(&ndev->cmd_q);
269 skb_queue_purge(&ndev->rx_q);
270 skb_queue_purge(&ndev->tx_q);
271
272 ndev->ops->close(ndev);
273 ndev->flags = 0;
274 }
275
276done:
277 mutex_unlock(&ndev->req_lock);
278 return rc;
279}
280
281static int nci_close_device(struct nci_dev *ndev)
282{
283 nci_req_cancel(ndev, ENODEV);
284 mutex_lock(&ndev->req_lock);
285
286 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
287 del_timer_sync(&ndev->cmd_timer);
288 mutex_unlock(&ndev->req_lock);
289 return 0;
290 }
291
292 /* Drop RX and TX queues */
293 skb_queue_purge(&ndev->rx_q);
294 skb_queue_purge(&ndev->tx_q);
295
296 /* Flush RX and TX wq */
297 flush_workqueue(ndev->rx_wq);
298 flush_workqueue(ndev->tx_wq);
299
300 /* Reset device */
301 skb_queue_purge(&ndev->cmd_q);
302 atomic_set(&ndev->cmd_cnt, 1);
303
304 set_bit(NCI_INIT, &ndev->flags);
305 __nci_request(ndev, nci_reset_req, 0,
306 msecs_to_jiffies(NCI_RESET_TIMEOUT));
307 clear_bit(NCI_INIT, &ndev->flags);
308
309 /* Flush cmd wq */
310 flush_workqueue(ndev->cmd_wq);
311
312 /* After this point our queues are empty
313 * and no works are scheduled. */
314 ndev->ops->close(ndev);
315
316 /* Clear flags */
317 ndev->flags = 0;
318
319 mutex_unlock(&ndev->req_lock);
320
321 return 0;
322}
323
324/* NCI command timer function */
325static void nci_cmd_timer(unsigned long arg)
326{
327 struct nci_dev *ndev = (void *) arg;
328
329 nfc_dbg("entry");
330
331 atomic_set(&ndev->cmd_cnt, 1);
332 queue_work(ndev->cmd_wq, &ndev->cmd_work);
333}
334
335static int nci_dev_up(struct nfc_dev *nfc_dev)
336{
337 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
338
339 nfc_dbg("entry");
340
341 return nci_open_device(ndev);
342}
343
344static int nci_dev_down(struct nfc_dev *nfc_dev)
345{
346 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
347
348 nfc_dbg("entry");
349
350 return nci_close_device(ndev);
351}
352
353static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
354{
355 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
356 int rc;
357
358 nfc_dbg("entry");
359
360 if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
361 nfc_err("unable to start poll, since poll is already active");
362 return -EBUSY;
363 }
364
365 if (ndev->target_active_prot) {
366 nfc_err("there is an active target");
367 return -EBUSY;
368 }
369
370 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
371 nfc_dbg("target is active, implicitly deactivate...");
372
373 rc = nci_request(ndev, nci_rf_deactivate_req, 0,
374 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
375 if (rc)
376 return -EBUSY;
377 }
378
379 rc = nci_request(ndev, nci_rf_discover_req, protocols,
380 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
381
382 if (!rc)
383 ndev->poll_prots = protocols;
384
385 return rc;
386}
387
388static void nci_stop_poll(struct nfc_dev *nfc_dev)
389{
390 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
391
392 nfc_dbg("entry");
393
394 if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
395 nfc_err("unable to stop poll, since poll is not active");
396 return;
397 }
398
399 nci_request(ndev, nci_rf_deactivate_req, 0,
400 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
401}
402
403static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
404 __u32 protocol)
405{
406 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
407
408 nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
409
410 if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
411 nfc_err("there is no available target to activate");
412 return -EINVAL;
413 }
414
415 if (ndev->target_active_prot) {
416 nfc_err("there is already an active target");
417 return -EBUSY;
418 }
419
420 if (!(ndev->target_available_prots & (1 << protocol))) {
421 nfc_err("target does not support the requested protocol 0x%x",
422 protocol);
423 return -EINVAL;
424 }
425
426 ndev->target_active_prot = protocol;
427 ndev->target_available_prots = 0;
428
429 return 0;
430}
431
432static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
433{
434 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
435
436 nfc_dbg("entry, target_idx %d", target_idx);
437
438 if (!ndev->target_active_prot) {
439 nfc_err("unable to deactivate target, no active target");
440 return;
441 }
442
443 ndev->target_active_prot = 0;
444
445 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
446 nci_request(ndev, nci_rf_deactivate_req, 0,
447 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
448 }
449}
450
451static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
452 struct sk_buff *skb,
453 data_exchange_cb_t cb,
454 void *cb_context)
455{
456 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
457 int rc;
458
459 nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
460
461 if (!ndev->target_active_prot) {
462 nfc_err("unable to exchange data, no active target");
463 return -EINVAL;
464 }
465
466 if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
467 return -EBUSY;
468
469 /* store cb and context to be used on receiving data */
470 ndev->data_exchange_cb = cb;
471 ndev->data_exchange_cb_context = cb_context;
472
473 rc = nci_send_data(ndev, ndev->conn_id, skb);
474 if (rc)
475 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
476
477 return rc;
478}
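
The exchange path above serializes transfers with the NCI_DATA_EXCHANGE bit and parks the callback/context pair for the rx side, releasing the gate immediately if the send fails. A minimal user-space sketch of that gate (function names, the boolean send result, and the string payload are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

typedef void (*exchange_cb_t)(void *ctx, const char *resp);

static bool in_exchange;
static exchange_cb_t pending_cb;
static void *pending_ctx;

/* mirrors nci_data_exchange(): refuse a second exchange, stash the
 * callback, release the gate at once if the send fails */
static int start_exchange(exchange_cb_t cb, void *ctx, bool send_ok)
{
    if (in_exchange)
        return -1;                       /* like -EBUSY above */
    in_exchange = true;
    pending_cb = cb;
    pending_ctx = ctx;
    if (!send_ok) {
        in_exchange = false;             /* like clear_bit() on error */
        return -2;
    }
    return 0;
}

/* mirrors nci_data_exchange_complete(): deliver to the stored callback */
static void rx_complete(const char *resp)
{
    exchange_cb_t cb = pending_cb;
    void *ctx = pending_ctx;

    pending_cb = NULL;
    pending_ctx = NULL;
    in_exchange = false;
    if (cb)
        cb(ctx, resp);
}

static void print_cb(void *ctx, const char *resp)
{
    printf("%s: %s\n", (const char *)ctx, resp);
}

int main(void)
{
    start_exchange(print_cb, (void *)"tag", true);
    rx_complete("response bytes");       /* prints "tag: response bytes" */
    return 0;
}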
479
480static struct nfc_ops nci_nfc_ops = {
481 .dev_up = nci_dev_up,
482 .dev_down = nci_dev_down,
483 .start_poll = nci_start_poll,
484 .stop_poll = nci_stop_poll,
485 .activate_target = nci_activate_target,
486 .deactivate_target = nci_deactivate_target,
487 .data_exchange = nci_data_exchange,
488};
489
490/* ---- Interface to NCI drivers ---- */
491
492/**
493 * nci_allocate_device - allocate a new nci device
494 *
495 * @ops: device operations
496 * @supported_protocols: NFC protocols supported by the device
497 */
498struct nci_dev *nci_allocate_device(struct nci_ops *ops,
499 __u32 supported_protocols,
500 int tx_headroom,
501 int tx_tailroom)
502{
503 struct nci_dev *ndev;
504
505 nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
506
507 if (!ops->open || !ops->close || !ops->send)
508 return NULL;
509
510 if (!supported_protocols)
511 return NULL;
512
513 ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
514 if (!ndev)
515 return NULL;
516
517 ndev->ops = ops;
518 ndev->tx_headroom = tx_headroom;
519 ndev->tx_tailroom = tx_tailroom;
520
521 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
522 supported_protocols,
523 tx_headroom + NCI_DATA_HDR_SIZE,
524 tx_tailroom);
525 if (!ndev->nfc_dev)
526 goto free_exit;
527
528 nfc_set_drvdata(ndev->nfc_dev, ndev);
529
530 return ndev;
531
532free_exit:
533 kfree(ndev);
534 return NULL;
535}
536EXPORT_SYMBOL(nci_allocate_device);
537
538/**
539 * nci_free_device - deallocate nci device
540 *
541 * @ndev: The nci device to deallocate
542 */
543void nci_free_device(struct nci_dev *ndev)
544{
545 nfc_dbg("entry");
546
547 nfc_free_device(ndev->nfc_dev);
548 kfree(ndev);
549}
550EXPORT_SYMBOL(nci_free_device);
551
552/**
553 * nci_register_device - register an nci device in the nfc subsystem
554 *
555 * @ndev: The nci device to register
556 */
557int nci_register_device(struct nci_dev *ndev)
558{
559 int rc;
560 struct device *dev = &ndev->nfc_dev->dev;
561 char name[32];
562
563 nfc_dbg("entry");
564
565 rc = nfc_register_device(ndev->nfc_dev);
566 if (rc)
567 goto exit;
568
569 ndev->flags = 0;
570
571 INIT_WORK(&ndev->cmd_work, nci_cmd_work);
572 snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
573 ndev->cmd_wq = create_singlethread_workqueue(name);
574 if (!ndev->cmd_wq) {
575 rc = -ENOMEM;
576 goto unreg_exit;
577 }
578
579 INIT_WORK(&ndev->rx_work, nci_rx_work);
580 snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
581 ndev->rx_wq = create_singlethread_workqueue(name);
582 if (!ndev->rx_wq) {
583 rc = -ENOMEM;
584 goto destroy_cmd_wq_exit;
585 }
586
587 INIT_WORK(&ndev->tx_work, nci_tx_work);
588 snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
589 ndev->tx_wq = create_singlethread_workqueue(name);
590 if (!ndev->tx_wq) {
591 rc = -ENOMEM;
592 goto destroy_rx_wq_exit;
593 }
594
595 skb_queue_head_init(&ndev->cmd_q);
596 skb_queue_head_init(&ndev->rx_q);
597 skb_queue_head_init(&ndev->tx_q);
598
599 setup_timer(&ndev->cmd_timer, nci_cmd_timer,
600 (unsigned long) ndev);
601
602 mutex_init(&ndev->req_lock);
603
604 goto exit;
605
606destroy_rx_wq_exit:
607 destroy_workqueue(ndev->rx_wq);
608
609destroy_cmd_wq_exit:
610 destroy_workqueue(ndev->cmd_wq);
611
612unreg_exit:
613 nfc_unregister_device(ndev->nfc_dev);
614
615exit:
616 return rc;
617}
618EXPORT_SYMBOL(nci_register_device);
619
620/**
621 * nci_unregister_device - unregister an nci device in the nfc subsystem
622 *
623 * @ndev: The nci device to unregister
624 */
625void nci_unregister_device(struct nci_dev *ndev)
626{
627 nfc_dbg("entry");
628
629 nci_close_device(ndev);
630
631 destroy_workqueue(ndev->cmd_wq);
632 destroy_workqueue(ndev->rx_wq);
633 destroy_workqueue(ndev->tx_wq);
634
635 nfc_unregister_device(ndev->nfc_dev);
636}
637EXPORT_SYMBOL(nci_unregister_device);
638
639/**
640 * nci_recv_frame - receive frame from NCI drivers
641 *
642 * @skb: The sk_buff to receive
643 */
644int nci_recv_frame(struct sk_buff *skb)
645{
646 struct nci_dev *ndev = (struct nci_dev *) skb->dev;
647
648 nfc_dbg("entry, len %d", skb->len);
649
650 if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
651 && !test_bit(NCI_INIT, &ndev->flags))) {
652 kfree_skb(skb);
653 return -ENXIO;
654 }
655
656 /* Queue frame for rx worker thread */
657 skb_queue_tail(&ndev->rx_q, skb);
658 queue_work(ndev->rx_wq, &ndev->rx_work);
659
660 return 0;
661}
662EXPORT_SYMBOL(nci_recv_frame);
663
664static int nci_send_frame(struct sk_buff *skb)
665{
666 struct nci_dev *ndev = (struct nci_dev *) skb->dev;
667
668 nfc_dbg("entry, len %d", skb->len);
669
670 if (!ndev) {
671 kfree_skb(skb);
672 return -ENODEV;
673 }
674
675 /* Get rid of skb owner, prior to sending to the driver. */
676 skb_orphan(skb);
677
678 return ndev->ops->send(skb);
679}
680
681/* Send NCI command */
682int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
683{
684 struct nci_ctrl_hdr *hdr;
685 struct sk_buff *skb;
686
687 nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
688
689 skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
690 if (!skb) {
691 nfc_err("no memory for command");
692 return -ENOMEM;
693 }
694
695 hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
696 hdr->gid = nci_opcode_gid(opcode);
697 hdr->oid = nci_opcode_oid(opcode);
698 hdr->plen = plen;
699
700 nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
701 nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
702
703 if (plen)
704 memcpy(skb_put(skb, plen), payload, plen);
705
706 skb->dev = (void *) ndev;
707
708 skb_queue_tail(&ndev->cmd_q, skb);
709 queue_work(ndev->cmd_wq, &ndev->cmd_work);
710
711 return 0;
712}
713
714/* ---- NCI TX Data worker thread ---- */
715
716static void nci_tx_work(struct work_struct *work)
717{
718 struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
719 struct sk_buff *skb;
720
721 nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
722
723 /* Send queued tx data */
724 while (atomic_read(&ndev->credits_cnt)) {
725 skb = skb_dequeue(&ndev->tx_q);
726 if (!skb)
727 return;
728
729 atomic_dec(&ndev->credits_cnt);
730
731 nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
732 nci_pbf(skb->data),
733 nci_conn_id(skb->data),
734 nci_plen(skb->data));
735
736 nci_send_frame(skb);
737 }
738}
739
740/* ----- NCI RX worker thread (data & control) ----- */
741
742static void nci_rx_work(struct work_struct *work)
743{
744 struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
745 struct sk_buff *skb;
746
747 while ((skb = skb_dequeue(&ndev->rx_q))) {
748 /* Process frame */
749 switch (nci_mt(skb->data)) {
750 case NCI_MT_RSP_PKT:
751 nci_rsp_packet(ndev, skb);
752 break;
753
754 case NCI_MT_NTF_PKT:
755 nci_ntf_packet(ndev, skb);
756 break;
757
758 case NCI_MT_DATA_PKT:
759 nci_rx_data_packet(ndev, skb);
760 break;
761
762 default:
763 nfc_err("unknown MT 0x%x", nci_mt(skb->data));
764 kfree_skb(skb);
765 break;
766 }
767 }
768}
769
770/* ----- NCI TX CMD worker thread ----- */
771
772static void nci_cmd_work(struct work_struct *work)
773{
774 struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
775 struct sk_buff *skb;
776
777 nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
778
779 /* Send queued command */
780 if (atomic_read(&ndev->cmd_cnt)) {
781 skb = skb_dequeue(&ndev->cmd_q);
782 if (!skb)
783 return;
784
785 atomic_dec(&ndev->cmd_cnt);
786
787 nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
788 nci_pbf(skb->data),
789 nci_opcode_gid(nci_opcode(skb->data)),
790 nci_opcode_oid(nci_opcode(skb->data)),
791 nci_plen(skb->data));
792
793 nci_send_frame(skb);
794
795 mod_timer(&ndev->cmd_timer,
796 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
797 }
798}
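
The command path above is one-credit flow control: nci_send_cmd() only queues, nci_cmd_work() transmits while cmd_cnt is non-zero and arms cmd_timer, and the credit is re-armed either by the response (see nci_rsp_packet() in rsp.c below) or, on timeout, by nci_cmd_timer(). A condensed single-threaded user-space sketch of the same scheme (opcode values and queue size are made up):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cmd_cnt = 1;           /* one outstanding command allowed */
static int cmd_q[8], q_head, q_tail;

/* stand-in for nci_cmd_work(): send only while the credit is available */
static void cmd_work(void)
{
    if (atomic_load(&cmd_cnt) && q_head != q_tail) {
        atomic_fetch_sub(&cmd_cnt, 1);
        printf("tx cmd 0x%x\n", cmd_q[q_head++ & 7]);
        /* the kernel code also arms cmd_timer here */
    }
}

/* stand-in for the tail of nci_rsp_packet(): re-arm and kick the queue */
static void rsp_received(void)
{
    atomic_store(&cmd_cnt, 1);
    cmd_work();
}

int main(void)
{
    cmd_q[q_tail++ & 7] = 0x2000;        /* invented opcode values */
    cmd_q[q_tail++ & 7] = 0x2001;
    cmd_work();                          /* first goes out, second waits */
    rsp_received();                      /* response releases the second */
    return 0;
}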
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
new file mode 100644
index 000000000000..e5ed90fc1a9c
--- /dev/null
+++ b/net/nfc/nci/data.c
@@ -0,0 +1,247 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/types.h>
25#include <linux/interrupt.h>
26#include <linux/wait.h>
27#include <linux/bitops.h>
28#include <linux/skbuff.h>
29
30#include "../nfc.h"
31#include <net/nfc/nci.h>
32#include <net/nfc/nci_core.h>
33#include <linux/nfc.h>
34
35/* Complete data exchange transaction and forward skb to nfc core */
36void nci_data_exchange_complete(struct nci_dev *ndev,
37 struct sk_buff *skb,
38 int err)
39{
40 data_exchange_cb_t cb = ndev->data_exchange_cb;
41 void *cb_context = ndev->data_exchange_cb_context;
42
43 nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
44
45 if (cb) {
46 ndev->data_exchange_cb = NULL;
47 ndev->data_exchange_cb_context = NULL;
48
49 /* forward skb to nfc core */
50 cb(cb_context, skb, err);
51 } else if (skb) {
52 nfc_err("no rx callback, dropping rx data...");
53
54 /* no waiting callback, free skb */
55 kfree_skb(skb);
56 }
57
58 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
59}
60
61/* ----------------- NCI TX Data ----------------- */
62
63static inline void nci_push_data_hdr(struct nci_dev *ndev,
64 __u8 conn_id,
65 struct sk_buff *skb,
66 __u8 pbf)
67{
68 struct nci_data_hdr *hdr;
69 int plen = skb->len;
70
71 hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE);
72 hdr->conn_id = conn_id;
73 hdr->rfu = 0;
74 hdr->plen = plen;
75
76 nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
77 nci_pbf_set((__u8 *)hdr, pbf);
78
79 skb->dev = (void *) ndev;
80}
81
82static int nci_queue_tx_data_frags(struct nci_dev *ndev,
83 __u8 conn_id,
84 struct sk_buff *skb) {
85 int total_len = skb->len;
86 unsigned char *data = skb->data;
87 unsigned long flags;
88 struct sk_buff_head frags_q;
89 struct sk_buff *skb_frag;
90 int frag_len;
91 int rc = 0;
92
93 nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
94
95 __skb_queue_head_init(&frags_q);
96
97 while (total_len) {
98 frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
99
100 skb_frag = nci_skb_alloc(ndev,
101 (NCI_DATA_HDR_SIZE + frag_len),
102 GFP_KERNEL);
103 if (skb_frag == NULL) {
104 rc = -ENOMEM;
105 goto free_exit;
106 }
107 skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
108
109 /* first, copy the data */
110 memcpy(skb_put(skb_frag, frag_len), data, frag_len);
111
112 /* second, set the header */
113 nci_push_data_hdr(ndev, conn_id, skb_frag,
114 ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));
115
116 __skb_queue_tail(&frags_q, skb_frag);
117
118 data += frag_len;
119 total_len -= frag_len;
120
121 nfc_dbg("frag_len %d, remaining total_len %d",
122 frag_len, total_len);
123 }
124
125 /* queue all fragments atomically */
126 spin_lock_irqsave(&ndev->tx_q.lock, flags);
127
128 while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
129 __skb_queue_tail(&ndev->tx_q, skb_frag);
130
131 spin_unlock_irqrestore(&ndev->tx_q.lock, flags);
132
133 /* free the original skb */
134 kfree_skb(skb);
135
136 goto exit;
137
138free_exit:
139 while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
140 kfree_skb(skb_frag);
141
142exit:
143 return rc;
144}
145
146/* Send NCI data */
147int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
148{
149 int rc = 0;
150
151 nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
152
153 /* check if the packet needs to be fragmented */
154 if (skb->len <= ndev->max_pkt_payload_size) {
155 /* no need to fragment packet */
156 nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
157
158 skb_queue_tail(&ndev->tx_q, skb);
159 } else {
160 /* fragment packet and queue the fragments */
161 rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
162 if (rc) {
163 nfc_err("failed to fragment tx data packet");
164 goto free_exit;
165 }
166 }
167
168 queue_work(ndev->tx_wq, &ndev->tx_work);
169
170 goto exit;
171
172free_exit:
173 kfree_skb(skb);
174
175exit:
176 return rc;
177}
178
179/* ----------------- NCI RX Data ----------------- */
180
181static void nci_add_rx_data_frag(struct nci_dev *ndev,
182 struct sk_buff *skb,
183 __u8 pbf)
184{
185 int reassembly_len;
186 int err = 0;
187
188 if (ndev->rx_data_reassembly) {
189 reassembly_len = ndev->rx_data_reassembly->len;
190
191 /* first, make enough room for the already accumulated data */
192 if (skb_cow_head(skb, reassembly_len)) {
193 nfc_err("error adding room for accumulated rx data");
194
195 kfree_skb(skb);
196 skb = NULL;
197
198 kfree_skb(ndev->rx_data_reassembly);
199 ndev->rx_data_reassembly = NULL;
200
201 err = -ENOMEM;
202 goto exit;
203 }
204
205 /* second, combine the two fragments */
206 memcpy(skb_push(skb, reassembly_len),
207 ndev->rx_data_reassembly->data,
208 reassembly_len);
209
210 /* third, free old reassembly */
211 kfree_skb(ndev->rx_data_reassembly);
212 ndev->rx_data_reassembly = NULL;
213 }
214
215 if (pbf == NCI_PBF_CONT) {
216 /* need to wait for next fragment, store skb and exit */
217 ndev->rx_data_reassembly = skb;
218 return;
219 }
220
221exit:
222 nci_data_exchange_complete(ndev, skb, err);
223}
224
225/* Rx Data packet */
226void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
227{
228 __u8 pbf = nci_pbf(skb->data);
229
230 nfc_dbg("entry, len %d", skb->len);
231
232 nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
233 nci_pbf(skb->data),
234 nci_conn_id(skb->data),
235 nci_plen(skb->data));
236
237 /* strip the nci data header */
238 skb_pull(skb, NCI_DATA_HDR_SIZE);
239
240 if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
241 /* frame I/F => remove the status byte */
242 nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
243 skb_trim(skb, (skb->len - 1));
244 }
245
246 nci_add_rx_data_frag(ndev, skb, pbf);
247}
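
nci_queue_tx_data_frags() above carves an oversized payload into frames of at most max_pkt_payload_size bytes, tagging every fragment except the last with the continuation PBF. A stand-alone sketch of that loop (the 255-byte limit and 700-byte payload are example values):

#include <stdio.h>
#include <string.h>

#define PBF_CONT 1
#define PBF_LAST 0

static void fragment(const unsigned char *data, int total_len, int max_payload)
{
    while (total_len) {
        int frag_len = total_len < max_payload ? total_len : max_payload;
        /* only the final fragment is marked LAST, as in the code above */
        int pbf = (total_len == frag_len) ? PBF_LAST : PBF_CONT;

        printf("frame: plen=%d pbf=%s\n", frag_len,
               pbf == PBF_CONT ? "CONT" : "LAST");
        data += frag_len;
        total_len -= frag_len;
    }
}

int main(void)
{
    unsigned char payload[700];

    memset(payload, 0xab, sizeof(payload));
    fragment(payload, sizeof(payload), 255);  /* 255 CONT, 255 CONT, 190 LAST */
    return 0;
}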
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
new file mode 100644
index 000000000000..b19dc2fa90e1
--- /dev/null
+++ b/net/nfc/nci/lib.c
@@ -0,0 +1,94 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on lib.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/errno.h>
32
33#include <net/nfc/nci.h>
34
35/* NCI status codes to Unix errno mapping */
36int nci_to_errno(__u8 code)
37{
38 switch (code) {
39 case NCI_STATUS_OK:
40 return 0;
41
42 case NCI_STATUS_REJECTED:
43 return -EBUSY;
44
45 case NCI_STATUS_MESSAGE_CORRUPTED:
46 return -EBADMSG;
47
48 case NCI_STATUS_BUFFER_FULL:
49 return -ENOBUFS;
50
51 case NCI_STATUS_NOT_INITIALIZED:
52 return -EHOSTDOWN;
53
54 case NCI_STATUS_SYNTAX_ERROR:
55 case NCI_STATUS_SEMANTIC_ERROR:
56 case NCI_STATUS_INVALID_PARAM:
57 case NCI_STATUS_RF_PROTOCOL_ERROR:
58 case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
59 return -EPROTO;
60
61 case NCI_STATUS_UNKNOWN_GID:
62 case NCI_STATUS_UNKNOWN_OID:
63 return -EBADRQC;
64
65 case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
66 return -EMSGSIZE;
67
68 case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
69 return -EALREADY;
70
71 case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
72 case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
73 return -ECONNREFUSED;
74
75 case NCI_STATUS_RF_TRANSMISSION_ERROR:
76 case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
77 return -ECOMM;
78
79 case NCI_STATUS_RF_TIMEOUT_ERROR:
80 case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
81 return -ETIMEDOUT;
82
83 case NCI_STATUS_RF_LINK_LOSS_ERROR:
84 return -ENOLINK;
85
86 case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
87 return -EDQUOT;
88
89 case NCI_STATUS_FAILED:
90 default:
91 return -ENOSYS;
92 }
93}
94EXPORT_SYMBOL(nci_to_errno);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
new file mode 100644
index 000000000000..96633f5cda4f
--- /dev/null
+++ b/net/nfc/nci/ntf.c
@@ -0,0 +1,258 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_event.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/bitops.h>
31#include <linux/skbuff.h>
32
33#include "../nfc.h"
34#include <net/nfc/nci.h>
35#include <net/nfc/nci_core.h>
36#include <linux/nfc.h>
37
38/* Handle NCI Notification packets */
39
40static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
41 struct sk_buff *skb)
42{
43 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
44 int i;
45
46 nfc_dbg("entry, num_entries %d", ntf->num_entries);
47
48 if (ntf->num_entries > NCI_MAX_NUM_CONN)
49 ntf->num_entries = NCI_MAX_NUM_CONN;
50
51 /* update the credits */
52 for (i = 0; i < ntf->num_entries; i++) {
53 nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
54 ntf->conn_entries[i].conn_id,
55 ntf->conn_entries[i].credits);
56
57 if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
58 /* found static rf connection */
59 atomic_add(ntf->conn_entries[i].credits,
60 &ndev->credits_cnt);
61 }
62 }
63
64 /* trigger the next tx */
65 if (!skb_queue_empty(&ndev->tx_q))
66 queue_work(ndev->tx_wq, &ndev->tx_work);
67}
68
69static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
70 struct sk_buff *skb)
71{
72 struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
73
74 nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
75}
76
77static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
78 struct nci_rf_activate_ntf *ntf, __u8 *data)
79{
80 struct rf_tech_specific_params_nfca_poll *nfca_poll;
81 struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
82
83 nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
84 nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
85
86 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
87 data += 2;
88
89 nfca_poll->nfcid1_len = *data++;
90
91 nfc_dbg("sens_res 0x%x, nfcid1_len %d",
92 nfca_poll->sens_res,
93 nfca_poll->nfcid1_len);
94
95 memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
96 data += nfca_poll->nfcid1_len;
97
98 nfca_poll->sel_res_len = *data++;
99
100 if (nfca_poll->sel_res_len != 0)
101 nfca_poll->sel_res = *data++;
102
103 ntf->rf_interface_type = *data++;
104 ntf->activation_params_len = *data++;
105
106 nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
107 nfca_poll->sel_res_len,
108 nfca_poll->sel_res,
109 ntf->rf_interface_type,
110 ntf->activation_params_len);
111
112 switch (ntf->rf_interface_type) {
113 case NCI_RF_INTERFACE_ISO_DEP:
114 nfca_poll_iso_dep->rats_res_len = *data++;
115 if (nfca_poll_iso_dep->rats_res_len > 0) {
116 memcpy(nfca_poll_iso_dep->rats_res,
117 data,
118 nfca_poll_iso_dep->rats_res_len);
119 }
120 break;
121
122 case NCI_RF_INTERFACE_FRAME:
123 /* no activation params */
124 break;
125
126 default:
127 nfc_err("unsupported rf_interface_type 0x%x",
128 ntf->rf_interface_type);
129 return -EPROTO;
130 }
131
132 return 0;
133}
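
nci_rf_activate_nfca_passive_poll() above is a cursor-style parse: each field is read and the data pointer advanced, with two length-prefixed fields in the middle. A self-contained sketch of the same walk (the byte layout mirrors the code above; the sample values are invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t buf[] = { 0x44, 0x00,                   /* sens_res, LE */
                      0x04, 0xde, 0xad, 0xbe, 0xef, /* nfcid1_len + nfcid1 */
                      0x01, 0x20 };                 /* sel_res_len + sel_res */
    const uint8_t *data = buf;
    uint8_t nfcid1[10];
    uint16_t sens_res;
    uint8_t nfcid1_len, sel_res_len, sel_res = 0;

    sens_res = data[0] | (data[1] << 8);            /* __le16_to_cpu */
    data += 2;
    nfcid1_len = *data++;                           /* length-prefixed id */
    memcpy(nfcid1, data, nfcid1_len);
    data += nfcid1_len;
    sel_res_len = *data++;                          /* optional sel_res */
    if (sel_res_len != 0)
        sel_res = *data++;

    printf("sens_res=0x%04x nfcid1_len=%d sel_res=0x%02x\n",
           sens_res, nfcid1_len, sel_res);
    return 0;
}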
134
135static void nci_target_found(struct nci_dev *ndev,
136 struct nci_rf_activate_ntf *ntf)
137{
138 struct nfc_target nfc_tgt = {}; /* rf_protocol may match neither branch */
139
140 if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */
141 nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
142 else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
143 nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
144
145 nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
146 nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
147
148 if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
149 nfc_dbg("the target found does not have the desired protocol");
150 return;
151 }
152
153 nfc_dbg("new target found, supported_protocols 0x%x",
154 nfc_tgt.supported_protocols);
155
156 ndev->target_available_prots = nfc_tgt.supported_protocols;
157
158 nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
159}
160
161static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
162 struct sk_buff *skb)
163{
164 struct nci_rf_activate_ntf ntf;
165 __u8 *data = skb->data;
166 int rc = -1;
167
168 clear_bit(NCI_DISCOVERY, &ndev->flags);
169 set_bit(NCI_POLL_ACTIVE, &ndev->flags);
170
171 ntf.target_handle = *data++;
172 ntf.rf_protocol = *data++;
173 ntf.rf_tech_and_mode = *data++;
174 ntf.rf_tech_specific_params_len = *data++;
175
176 nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
177 ntf.target_handle,
178 ntf.rf_protocol,
179 ntf.rf_tech_and_mode,
180 ntf.rf_tech_specific_params_len);
181
182 switch (ntf.rf_tech_and_mode) {
183 case NCI_NFC_A_PASSIVE_POLL_MODE:
184 rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
185 data);
186 break;
187
188 default:
189 nfc_err("unsupported rf_tech_and_mode 0x%x",
190 ntf.rf_tech_and_mode);
191 return;
192 }
193
194 if (!rc)
195 nci_target_found(ndev, &ntf);
196}
197
198static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
199 struct sk_buff *skb)
200{
201 __u8 type = skb->data[0];
202
203 nfc_dbg("entry, type 0x%x", type);
204
205 clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
206 ndev->target_active_prot = 0;
207
208 /* drop tx data queue */
209 skb_queue_purge(&ndev->tx_q);
210
211 /* drop partial rx data packet */
212 if (ndev->rx_data_reassembly) {
213 kfree_skb(ndev->rx_data_reassembly);
214 ndev->rx_data_reassembly = NULL;
215 }
216
217 /* complete the data exchange transaction, if one exists */
218 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
219 nci_data_exchange_complete(ndev, NULL, -EIO);
220}
221
222void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
223{
224 __u16 ntf_opcode = nci_opcode(skb->data);
225
226 nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
227 nci_pbf(skb->data),
228 nci_opcode_gid(ntf_opcode),
229 nci_opcode_oid(ntf_opcode),
230 nci_plen(skb->data));
231
232 /* strip the nci control header */
233 skb_pull(skb, NCI_CTRL_HDR_SIZE);
234
235 switch (ntf_opcode) {
236 case NCI_OP_CORE_CONN_CREDITS_NTF:
237 nci_core_conn_credits_ntf_packet(ndev, skb);
238 break;
239
240 case NCI_OP_RF_FIELD_INFO_NTF:
241 nci_rf_field_info_ntf_packet(ndev, skb);
242 break;
243
244 case NCI_OP_RF_ACTIVATE_NTF:
245 nci_rf_activate_ntf_packet(ndev, skb);
246 break;
247
248 case NCI_OP_RF_DEACTIVATE_NTF:
249 nci_rf_deactivate_ntf_packet(ndev, skb);
250 break;
251
252 default:
253 nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
254 break;
255 }
256
257 kfree_skb(skb);
258}
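
The credits notification above feeds the TX flow control in core.c: each CORE_CONN_CREDITS_NTF tops up credits_cnt, and nci_tx_work() sends one queued frame per credit, stalling at zero until the next grant. A user-space sketch of that scheme (queue size and frame ids are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int credits;
static int tx_q[8], q_head, q_tail;

/* stand-in for nci_tx_work(): one frame per credit, stall at zero */
static void tx_work(void)
{
    while (atomic_load(&credits) && q_head != q_tail) {
        atomic_fetch_sub(&credits, 1);
        printf("tx data frame %d\n", tx_q[q_head++ & 7]);
    }
}

/* stand-in for nci_core_conn_credits_ntf_packet(): top up, kick tx */
static void credits_ntf(int granted)
{
    atomic_fetch_add(&credits, granted);
    tx_work();
}

int main(void)
{
    int i;

    for (i = 0; i < 3; i++)
        tx_q[q_tail++ & 7] = i;
    credits_ntf(2);                      /* frames 0 and 1 go out */
    credits_ntf(1);                      /* frame 2 goes out */
    return 0;
}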
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
new file mode 100644
index 000000000000..0403d4cd0917
--- /dev/null
+++ b/net/nfc/nci/rsp.c
@@ -0,0 +1,226 @@
1/*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_event.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/bitops.h>
31#include <linux/skbuff.h>
32
33#include "../nfc.h"
34#include <net/nfc/nci.h>
35#include <net/nfc/nci_core.h>
36
37/* Handle NCI Response packets */
38
39static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
40{
41 struct nci_core_reset_rsp *rsp = (void *) skb->data;
42
43 nfc_dbg("entry, status 0x%x", rsp->status);
44
45 if (rsp->status == NCI_STATUS_OK)
46 ndev->nci_ver = rsp->nci_ver;
47
48 nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
49
50 nci_req_complete(ndev, rsp->status);
51}
52
53static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
54{
55 struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
56 struct nci_core_init_rsp_2 *rsp_2;
57
58 nfc_dbg("entry, status 0x%x", rsp_1->status);
59
60 if (rsp_1->status != NCI_STATUS_OK)
61 return;
62
63 ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
64 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
65
66 if (ndev->num_supported_rf_interfaces >
67 NCI_MAX_SUPPORTED_RF_INTERFACES) {
68 ndev->num_supported_rf_interfaces =
69 NCI_MAX_SUPPORTED_RF_INTERFACES;
70 }
71
72 memcpy(ndev->supported_rf_interfaces,
73 rsp_1->supported_rf_interfaces,
74 ndev->num_supported_rf_interfaces);
75
76 rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces); /* past 6-byte rsp_1 + interface list */
77
78 ndev->max_logical_connections =
79 rsp_2->max_logical_connections;
80 ndev->max_routing_table_size =
81 __le16_to_cpu(rsp_2->max_routing_table_size);
82 ndev->max_control_packet_payload_length =
83 rsp_2->max_control_packet_payload_length;
84 ndev->rf_sending_buffer_size =
85 __le16_to_cpu(rsp_2->rf_sending_buffer_size);
86 ndev->rf_receiving_buffer_size =
87 __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
88 ndev->manufacturer_id =
89 __le16_to_cpu(rsp_2->manufacturer_id);
90
91 nfc_dbg("nfcc_features 0x%x",
92 ndev->nfcc_features);
93 nfc_dbg("num_supported_rf_interfaces %d",
94 ndev->num_supported_rf_interfaces);
95 nfc_dbg("supported_rf_interfaces[0] 0x%x",
96 ndev->supported_rf_interfaces[0]);
97 nfc_dbg("supported_rf_interfaces[1] 0x%x",
98 ndev->supported_rf_interfaces[1]);
99 nfc_dbg("supported_rf_interfaces[2] 0x%x",
100 ndev->supported_rf_interfaces[2]);
101 nfc_dbg("supported_rf_interfaces[3] 0x%x",
102 ndev->supported_rf_interfaces[3]);
103 nfc_dbg("max_logical_connections %d",
104 ndev->max_logical_connections);
105 nfc_dbg("max_routing_table_size %d",
106 ndev->max_routing_table_size);
107 nfc_dbg("max_control_packet_payload_length %d",
108 ndev->max_control_packet_payload_length);
109 nfc_dbg("rf_sending_buffer_size %d",
110 ndev->rf_sending_buffer_size);
111 nfc_dbg("rf_receiving_buffer_size %d",
112 ndev->rf_receiving_buffer_size);
113 nfc_dbg("manufacturer_id 0x%x",
114 ndev->manufacturer_id);
115
116 nci_req_complete(ndev, rsp_1->status);
117}
118
119static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
120 struct sk_buff *skb)
121{
122 struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
123
124 nfc_dbg("entry, status 0x%x", rsp->status);
125
126 if (rsp->status != NCI_STATUS_OK)
127 return;
128
129 ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
130 ndev->initial_num_credits = rsp->initial_num_credits;
131 ndev->conn_id = rsp->conn_id;
132
133 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
134
135 nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
136 nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
137 nfc_dbg("conn_id %d", ndev->conn_id);
138}
139
140static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
141 struct sk_buff *skb)
142{
143 __u8 status = skb->data[0];
144
145 nfc_dbg("entry, status 0x%x", status);
146
147 nci_req_complete(ndev, status);
148}
149
150static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
151{
152 __u8 status = skb->data[0];
153
154 nfc_dbg("entry, status 0x%x", status);
155
156 if (status == NCI_STATUS_OK)
157 set_bit(NCI_DISCOVERY, &ndev->flags);
158
159 nci_req_complete(ndev, status);
160}
161
162static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
163 struct sk_buff *skb)
164{
165 __u8 status = skb->data[0];
166
167 nfc_dbg("entry, status 0x%x", status);
168
169 clear_bit(NCI_DISCOVERY, &ndev->flags);
170
171 nci_req_complete(ndev, status);
172}
173
174void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
175{
176 __u16 rsp_opcode = nci_opcode(skb->data);
177
178 /* we got a rsp, stop the cmd timer */
179 del_timer(&ndev->cmd_timer);
180
181 nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
182 nci_pbf(skb->data),
183 nci_opcode_gid(rsp_opcode),
184 nci_opcode_oid(rsp_opcode),
185 nci_plen(skb->data));
186
187 /* strip the nci control header */
188 skb_pull(skb, NCI_CTRL_HDR_SIZE);
189
190 switch (rsp_opcode) {
191 case NCI_OP_CORE_RESET_RSP:
192 nci_core_reset_rsp_packet(ndev, skb);
193 break;
194
195 case NCI_OP_CORE_INIT_RSP:
196 nci_core_init_rsp_packet(ndev, skb);
197 break;
198
199 case NCI_OP_CORE_CONN_CREATE_RSP:
200 nci_core_conn_create_rsp_packet(ndev, skb);
201 break;
202
203 case NCI_OP_RF_DISCOVER_MAP_RSP:
204 nci_rf_disc_map_rsp_packet(ndev, skb);
205 break;
206
207 case NCI_OP_RF_DISCOVER_RSP:
208 nci_rf_disc_rsp_packet(ndev, skb);
209 break;
210
211 case NCI_OP_RF_DEACTIVATE_RSP:
212 nci_rf_deactivate_rsp_packet(ndev, skb);
213 break;
214
215 default:
216 nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
217 break;
218 }
219
220 kfree_skb(skb);
221
222 /* trigger the next cmd */
223 atomic_set(&ndev->cmd_cnt, 1);
224 if (!skb_queue_empty(&ndev->cmd_q))
225 queue_work(ndev->cmd_wq, &ndev->cmd_work);
226}
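
nci_core_init_rsp_packet() above reads a response whose second fixed part sits at a variable offset: the 6 in the code corresponds to the fixed rsp_1 bytes (1 status + 4 feature bytes + 1 interface count), followed by the variable-length interface list. A minimal sketch of that offset computation (the sample bytes and the one-field rsp_2 struct are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* one illustrative field; the real rsp_2 carries several more */
struct init_rsp_2 {
    uint8_t max_logical_connections;
};

int main(void)
{
    uint8_t buf[] = { 0x00,                    /* status */
                      0x01, 0x00, 0x00, 0x00,  /* nfcc_features, LE */
                      0x02,                    /* num rf interfaces */
                      0x01, 0x02,              /* interface list */
                      0x05 };                  /* start of rsp_2 */
    uint8_t n = buf[5];
    const struct init_rsp_2 *rsp_2 =
        (const struct init_rsp_2 *)(buf + 6 + n);

    printf("interfaces=%d max_logical_connections=%d\n",
           n, rsp_2->max_logical_connections);
    return 0;
}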
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ccdff7953f7d..03f8818e1f16 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -367,6 +367,52 @@ out_putdev:
367 return rc; 367 return rc;
368} 368}
369 369
370static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
371{
372 struct nfc_dev *dev;
373 int rc;
374 u32 idx;
375
376 nfc_dbg("entry");
377
378 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
379 return -EINVAL;
380
381 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
382
383 dev = nfc_get_device(idx);
384 if (!dev)
385 return -ENODEV;
386
387 rc = nfc_dev_up(dev);
388
389 nfc_put_device(dev);
390 return rc;
391}
392
393static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
394{
395 struct nfc_dev *dev;
396 int rc;
397 u32 idx;
398
399 nfc_dbg("entry");
400
401 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
402 return -EINVAL;
403
404 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
405
406 dev = nfc_get_device(idx);
407 if (!dev)
408 return -ENODEV;
409
410 rc = nfc_dev_down(dev);
411
412 nfc_put_device(dev);
413 return rc;
414}
415
370static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) 416static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
371{ 417{
372 struct nfc_dev *dev; 418 struct nfc_dev *dev;
@@ -441,6 +487,16 @@ static struct genl_ops nfc_genl_ops[] = {
441 .policy = nfc_genl_policy, 487 .policy = nfc_genl_policy,
442 }, 488 },
443 { 489 {
490 .cmd = NFC_CMD_DEV_UP,
491 .doit = nfc_genl_dev_up,
492 .policy = nfc_genl_policy,
493 },
494 {
495 .cmd = NFC_CMD_DEV_DOWN,
496 .doit = nfc_genl_dev_down,
497 .policy = nfc_genl_policy,
498 },
499 {
444 .cmd = NFC_CMD_START_POLL, 500 .cmd = NFC_CMD_START_POLL,
445 .doit = nfc_genl_start_poll, 501 .doit = nfc_genl_start_poll,
446 .policy = nfc_genl_policy, 502 .policy = nfc_genl_policy,
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index aaf9832298f3..d86583f4831d 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -24,10 +24,10 @@
24#ifndef __LOCAL_NFC_H 24#ifndef __LOCAL_NFC_H
25#define __LOCAL_NFC_H 25#define __LOCAL_NFC_H
26 26
27#include <net/nfc.h> 27#include <net/nfc/nfc.h>
28#include <net/sock.h> 28#include <net/sock.h>
29 29
30__attribute__((format (printf, 2, 3))) 30__printf(2, 3)
31int nfc_printk(const char *level, const char *fmt, ...); 31int nfc_printk(const char *level, const char *fmt, ...);
32 32
33#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg) 33#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
@@ -101,6 +101,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
101 class_dev_iter_exit(iter); 101 class_dev_iter_exit(iter);
102} 102}
103 103
104int nfc_dev_up(struct nfc_dev *dev);
105
106int nfc_dev_down(struct nfc_dev *dev);
107
104int nfc_start_poll(struct nfc_dev *dev, u32 protocols); 108int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
105 109
106int nfc_stop_poll(struct nfc_dev *dev); 110int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 52de84a55115..ee7b2b365ef2 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -23,6 +23,7 @@
23 23
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <linux/nfc.h> 25#include <linux/nfc.h>
26#include <linux/export.h>
26 27
27#include "nfc.h" 28#include "nfc.h"
28 29
@@ -123,11 +124,7 @@ error:
123 124
124static int rawsock_add_header(struct sk_buff *skb) 125static int rawsock_add_header(struct sk_buff *skb)
125{ 126{
126 127 *skb_push(skb, NFC_HEADER_SIZE) = 0;
127 if (skb_cow_head(skb, 1))
128 return -ENOMEM;
129
130 *skb_push(skb, 1) = 0;
131 128
132 return 0; 129 return 0;
133} 130}
@@ -197,6 +194,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
197 struct msghdr *msg, size_t len) 194 struct msghdr *msg, size_t len)
198{ 195{
199 struct sock *sk = sock->sk; 196 struct sock *sk = sock->sk;
197 struct nfc_dev *dev = nfc_rawsock(sk)->dev;
200 struct sk_buff *skb; 198 struct sk_buff *skb;
201 int rc; 199 int rc;
202 200
@@ -208,11 +206,13 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
208 if (sock->state != SS_CONNECTED) 206 if (sock->state != SS_CONNECTED)
209 return -ENOTCONN; 207 return -ENOTCONN;
210 208
211 skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, 209 skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
212 &rc); 210 msg->msg_flags & MSG_DONTWAIT, &rc);
213 if (!skb) 211 if (!skb)
214 return rc; 212 return rc;
215 213
214 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
215
216 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 216 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
217 if (rc < 0) { 217 if (rc < 0) {
218 kfree_skb(skb); 218 kfree_skb(skb);
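
The rawsock change above reserves dev->tx_headroom plus NFC_HEADER_SIZE at allocation time, so the later skb_push() of the header stays within the buffer and the old skb_cow_head() reallocation path becomes unnecessary. A toy model of that discipline with a plain char buffer (the headroom value is an assumption):

#include <stdio.h>
#include <string.h>

#define HEADROOM 4        /* assumed dev->tx_headroom for the sketch */
#define HDR_SIZE 1        /* NFC_HEADER_SIZE */

int main(void)
{
    char buf[64];
    char *data = buf + HEADROOM + HDR_SIZE;   /* skb_reserve() */
    size_t len;

    memcpy(data, "payload", 7);               /* skb_put() + copy */
    len = 7;

    data -= HDR_SIZE;                         /* skb_push(NFC_HEADER_SIZE) */
    data[0] = 0;                              /* the header byte */
    len += HDR_SIZE;

    printf("frame of %zu bytes, %d bytes of headroom left\n",
           len, (int)(data - buf));
    return 0;
}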
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fabb4fafa281..82a6f34d39d0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -40,6 +40,10 @@
40 * byte arrays at the end of sockaddr_ll 40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq. 41 * and packet_mreq.
42 * Johann Baudy : Added TX RING. 42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
43 * 47 *
44 * This program is free software; you can redistribute it and/or 48 * This program is free software; you can redistribute it and/or
45 * modify it under the terms of the GNU General Public License 49 * modify it under the terms of the GNU General Public License
@@ -161,9 +165,56 @@ struct packet_mreq_max {
161 unsigned char mr_address[MAX_ADDR_LEN]; 165 unsigned char mr_address[MAX_ADDR_LEN];
162}; 166};
163 167
164static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 168static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
165 int closing, int tx_ring); 169 int closing, int tx_ring);
166 170
171
172#define V3_ALIGNMENT (8)
173
174#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
175
176#define BLK_PLUS_PRIV(sz_of_priv) \
177 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
178
179/* kbdq - kernel block descriptor queue */
180struct tpacket_kbdq_core {
181 struct pgv *pkbdq;
182 unsigned int feature_req_word;
183 unsigned int hdrlen;
184 unsigned char reset_pending_on_curr_blk;
185 unsigned char delete_blk_timer;
186 unsigned short kactive_blk_num;
187 unsigned short blk_sizeof_priv;
188
189 /* last_kactive_blk_num:
190 * trick to see if user-space has caught up
191 * in order to avoid refreshing timer when every single pkt arrives.
192 */
193 unsigned short last_kactive_blk_num;
194
195 char *pkblk_start;
196 char *pkblk_end;
197 int kblk_size;
198 unsigned int knum_blocks;
199 uint64_t knxt_seq_num;
200 char *prev;
201 char *nxt_offset;
202 struct sk_buff *skb;
203
204 atomic_t blk_fill_in_prog;
205
206 /* Default is set to 8ms */
207#define DEFAULT_PRB_RETIRE_TOV (8)
208
209 unsigned short retire_blk_tov;
210 unsigned short version;
211 unsigned long tov_in_jiffies;
212
213 /* timer to retire an outstanding block */
214 struct timer_list retire_blk_timer;
215};
216
217#define PGV_FROM_VMALLOC 1
167struct pgv { 218struct pgv {
168 char *buffer; 219 char *buffer;
169}; 220};
@@ -179,12 +230,44 @@ struct packet_ring_buffer {
179 unsigned int pg_vec_pages; 230 unsigned int pg_vec_pages;
180 unsigned int pg_vec_len; 231 unsigned int pg_vec_len;
181 232
233 struct tpacket_kbdq_core prb_bdqc;
182 atomic_t pending; 234 atomic_t pending;
183}; 235};
184 236
237#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
238#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
239#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
240#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
241#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
242#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
243#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
244
185struct packet_sock; 245struct packet_sock;
186static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 246static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
187 247
248static void *packet_previous_frame(struct packet_sock *po,
249 struct packet_ring_buffer *rb,
250 int status);
251static void packet_increment_head(struct packet_ring_buffer *buff);
252static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
253 struct tpacket_block_desc *);
254static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
255 struct packet_sock *);
256static void prb_retire_current_block(struct tpacket_kbdq_core *,
257 struct packet_sock *, unsigned int status);
258static int prb_queue_frozen(struct tpacket_kbdq_core *);
259static void prb_open_block(struct tpacket_kbdq_core *,
260 struct tpacket_block_desc *);
261static void prb_retire_rx_blk_timer_expired(unsigned long);
262static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
263static void prb_init_blk_timer(struct packet_sock *,
264 struct tpacket_kbdq_core *,
265 void (*func) (unsigned long));
266static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
267static void prb_clear_rxhash(struct tpacket_kbdq_core *,
268 struct tpacket3_hdr *);
269static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
270 struct tpacket3_hdr *);
188static void packet_flush_mclist(struct sock *sk); 271static void packet_flush_mclist(struct sock *sk);
189 272
190struct packet_fanout; 273struct packet_fanout;
@@ -193,6 +276,7 @@ struct packet_sock {
193 struct sock sk; 276 struct sock sk;
194 struct packet_fanout *fanout; 277 struct packet_fanout *fanout;
195 struct tpacket_stats stats; 278 struct tpacket_stats stats;
279 union tpacket_stats_u stats_u;
196 struct packet_ring_buffer rx_ring; 280 struct packet_ring_buffer rx_ring;
197 struct packet_ring_buffer tx_ring; 281 struct packet_ring_buffer tx_ring;
198 int copy_thresh; 282 int copy_thresh;
@@ -242,7 +326,16 @@ struct packet_skb_cb {
242 326
243#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 327#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
244 328
245static inline struct packet_sock *pkt_sk(struct sock *sk) 329#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
330#define GET_PBLOCK_DESC(x, bid) \
331 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
332#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
333 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
334#define GET_NEXT_PRB_BLK_NUM(x) \
335 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
336 ((x)->kactive_blk_num+1) : 0)
337
338static struct packet_sock *pkt_sk(struct sock *sk)
246{ 339{
247 return (struct packet_sock *)sk; 340 return (struct packet_sock *)sk;
248} 341}
@@ -325,8 +418,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
325 h.h2->tp_status = status; 418 h.h2->tp_status = status;
326 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 419 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
327 break; 420 break;
421 case TPACKET_V3:
328 default: 422 default:
329 pr_err("TPACKET version not supported\n"); 423 WARN(1, "TPACKET version not supported.\n");
330 BUG(); 424 BUG();
331 } 425 }
332 426
@@ -351,8 +445,9 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
351 case TPACKET_V2: 445 case TPACKET_V2:
352 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 446 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
353 return h.h2->tp_status; 447 return h.h2->tp_status;
448 case TPACKET_V3:
354 default: 449 default:
355 pr_err("TPACKET version not supported\n"); 450 WARN(1, "TPACKET version not supported.\n");
356 BUG(); 451 BUG();
357 return 0; 452 return 0;
358 } 453 }
@@ -382,14 +477,678 @@ static void *packet_lookup_frame(struct packet_sock *po,
382 return h.raw; 477 return h.raw;
383} 478}
384 479
385static inline void *packet_current_frame(struct packet_sock *po, 480static void *packet_current_frame(struct packet_sock *po,
386 struct packet_ring_buffer *rb, 481 struct packet_ring_buffer *rb,
387 int status) 482 int status)
388{ 483{
389 return packet_lookup_frame(po, rb, rb->head, status); 484 return packet_lookup_frame(po, rb, rb->head, status);
390} 485}
391 486
392static inline void *packet_previous_frame(struct packet_sock *po, 487static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
488{
489 del_timer_sync(&pkc->retire_blk_timer);
490}
491
492static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
493 int tx_ring,
494 struct sk_buff_head *rb_queue)
495{
496 struct tpacket_kbdq_core *pkc;
497
498 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
499
500 spin_lock(&rb_queue->lock);
501 pkc->delete_blk_timer = 1;
502 spin_unlock(&rb_queue->lock);
503
504 prb_del_retire_blk_timer(pkc);
505}
506
507static void prb_init_blk_timer(struct packet_sock *po,
508 struct tpacket_kbdq_core *pkc,
509 void (*func) (unsigned long))
510{
511 init_timer(&pkc->retire_blk_timer);
512 pkc->retire_blk_timer.data = (long)po;
513 pkc->retire_blk_timer.function = func;
514 pkc->retire_blk_timer.expires = jiffies;
515}
516
517static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
518{
519 struct tpacket_kbdq_core *pkc;
520
521 if (tx_ring)
522 BUG();
523
524 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
525 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
526}
527
528static int prb_calc_retire_blk_tmo(struct packet_sock *po,
529 int blk_size_in_bytes)
530{
531 struct net_device *dev;
532 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
533 struct ethtool_cmd ecmd;
534 int err;
535
536 rtnl_lock();
537 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
538 if (unlikely(!dev)) {
539 rtnl_unlock();
540 return DEFAULT_PRB_RETIRE_TOV;
541 }
542 err = __ethtool_get_settings(dev, &ecmd);
543 rtnl_unlock();
544 if (!err) {
545 switch (ecmd.speed) {
546 case SPEED_10000:
547 msec = 1;
548 div = 10000/1000;
549 break;
550 case SPEED_1000:
551 msec = 1;
552 div = 1000/1000;
553 break;
554 /*
555 * If the link speed is so slow you don't really
556 * need to worry about perf anyway
557 */
558 case SPEED_100:
559 case SPEED_10:
560 default:
561 return DEFAULT_PRB_RETIRE_TOV;
562 }
563 }
564
565 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
566
567 if (div)
568 mbits /= div;
569
570 tmo = mbits * msec;
571
572 if (div)
573 return tmo+1;
574 return tmo;
575}
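
As a worked example of the derivation above, take a 1 MiB block on a 1 Gbit/s link (SPEED_1000, so msec = 1 and div = 1):

    mbits = (1048576 * 8) / (1024 * 1024)  = 8
    tmo   = (mbits / div) * msec           = 8
    return value: tmo + 1                  = 9 ms

which lines up with the "~8 ms to fill a block" estimate in the timer-logic comment below, plus a millisecond of slack. On a 10 Gbit/s link the integer division (8 / 10) collapses to 0 and the function returns 1 ms.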
576
577static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
578 union tpacket_req_u *req_u)
579{
580 p1->feature_req_word = req_u->req3.tp_feature_req_word;
581}
582
583static void init_prb_bdqc(struct packet_sock *po,
584 struct packet_ring_buffer *rb,
585 struct pgv *pg_vec,
586 union tpacket_req_u *req_u, int tx_ring)
587{
588 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
589 struct tpacket_block_desc *pbd;
590
591 memset(p1, 0x0, sizeof(*p1));
592
593 p1->knxt_seq_num = 1;
594 p1->pkbdq = pg_vec;
595 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
596 p1->pkblk_start = (char *)pg_vec[0].buffer;
597 p1->kblk_size = req_u->req3.tp_block_size;
598 p1->knum_blocks = req_u->req3.tp_block_nr;
599 p1->hdrlen = po->tp_hdrlen;
600 p1->version = po->tp_version;
601 p1->last_kactive_blk_num = 0;
602 po->stats_u.stats3.tp_freeze_q_cnt = 0;
603 if (req_u->req3.tp_retire_blk_tov)
604 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
605 else
606 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
607 req_u->req3.tp_block_size);
608 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
609 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
610
611 prb_init_ft_ops(p1, req_u);
612 prb_setup_retire_blk_timer(po, tx_ring);
613 prb_open_block(p1, pbd);
614}
615
616/* Do NOT update the last_blk_num first.
617 * Assumes sk_buff_head lock is held.
618 */
619static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
620{
621 mod_timer(&pkc->retire_blk_timer,
622 jiffies + pkc->tov_in_jiffies);
623 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
624}
625
626/*
627 * Timer logic:
628 * 1) We refresh the timer only when we open a block.
629 * By doing this we don't waste cycles refreshing the timer
630 * on a packet-by-packet basis.
631 *
632 * With a 1MB block-size, on a 1Gbps line, it will take
633 * i) ~8 ms to fill a block + ii) memcpy etc.
634 * In this cut we are not accounting for the memcpy time.
635 *
636 * So, if the user sets the 'tmo' to 10ms then the timer
637 * will never fire while the block is still getting filled
638 * (which is what we want). However, the user could choose
639 * to close a block early and that's fine.
640 *
641 * But when the timer does fire, we check whether or not to refresh it.
642 * Since the tmo granularity is in msecs, it is not too expensive
643 * to refresh the timer, let's say every '8' msecs.
644 * Either the user can set the 'tmo' or we can derive it based on
645 * a) line-speed and b) block-size.
646 * prb_calc_retire_blk_tmo() calculates the tmo.
647 *
648 */
649static void prb_retire_rx_blk_timer_expired(unsigned long data)
650{
651 struct packet_sock *po = (struct packet_sock *)data;
652 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
653 unsigned int frozen;
654 struct tpacket_block_desc *pbd;
655
656 spin_lock(&po->sk.sk_receive_queue.lock);
657
658 frozen = prb_queue_frozen(pkc);
659 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
660
661 if (unlikely(pkc->delete_blk_timer))
662 goto out;
663
664 /* We only need to plug the race when the block is partially filled.
665 * tpacket_rcv:
666 * lock(); increment BLOCK_NUM_PKTS; unlock()
667 * copy_bits() is in progress ...
668 * timer fires on other cpu:
669 * we can't retire the current block because copy_bits
670 * is in progress.
671 *
672 */
673 if (BLOCK_NUM_PKTS(pbd)) {
674 while (atomic_read(&pkc->blk_fill_in_prog)) {
675 /* Waiting for skb_copy_bits to finish... */
676 cpu_relax();
677 }
678 }
679
680 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
681 if (!frozen) {
682 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
683 if (!prb_dispatch_next_block(pkc, po))
684 goto refresh_timer;
685 else
686 goto out;
687 } else {
688 /* Case 1. Queue was frozen because user-space was
689 * lagging behind.
690 */
691 if (prb_curr_blk_in_use(pkc, pbd)) {
692 /*
693 * Ok, user-space is still behind.
694 * So just refresh the timer.
695 */
696 goto refresh_timer;
697 } else {
698 /* Case 2. Queue was frozen, user-space caught up,
699 * now the link went idle && the timer fired.
700 * We don't have a block to close. So we open this
701 * block and restart the timer.
702 * Opening a block thaws the queue and restarts the timer;
703 * thawing/timer-refresh is a side effect.
704 */
705 prb_open_block(pkc, pbd);
706 goto out;
707 }
708 }
709 }
710
711refresh_timer:
712 _prb_refresh_rx_retire_blk_timer(pkc);
713
714out:
715 spin_unlock(&po->sk.sk_receive_queue.lock);
716}
717
718static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
719 struct tpacket_block_desc *pbd1, __u32 status)
720{
721 /* Flush everything minus the block header */
722
723#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
724 u8 *start, *end;
725
726 start = (u8 *)pbd1;
727
728 /* Skip the block header (we know the header WILL fit in 4K) */
729 start += PAGE_SIZE;
730
731 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
732 for (; start < end; start += PAGE_SIZE)
733 flush_dcache_page(pgv_to_page(start));
734
735 smp_wmb();
736#endif
737
738 /* Now update the block status. */
739
740 BLOCK_STATUS(pbd1) = status;
741
742 /* Flush the block header */
743
744#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
745 start = (u8 *)pbd1;
746 flush_dcache_page(pgv_to_page(start));
747
748 smp_wmb();
749#endif
750}
751
752/*
753 * Side effect:
754 *
755 * 1) flush the block
756 * 2) Increment active_blk_num
757 *
758 * Note: we DON'T refresh the timer on purpose,
759 * because almost always the next block will be opened.
760 */
761static void prb_close_block(struct tpacket_kbdq_core *pkc1,
762 struct tpacket_block_desc *pbd1,
763 struct packet_sock *po, unsigned int stat)
764{
765 __u32 status = TP_STATUS_USER | stat;
766
767 struct tpacket3_hdr *last_pkt;
768 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
769
770 if (po->stats.tp_drops)
771 status |= TP_STATUS_LOSING;
772
773 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
774 last_pkt->tp_next_offset = 0;
775
776 /* Get the ts of the last pkt */
777 if (BLOCK_NUM_PKTS(pbd1)) {
778 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
779 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
780 } else {
781 /* Ok, we tmo'd - so get the current time */
782 struct timespec ts;
783 getnstimeofday(&ts);
784 h1->ts_last_pkt.ts_sec = ts.tv_sec;
785 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
786 }
787
788 smp_wmb();
789
790 /* Flush the block */
791 prb_flush_block(pkc1, pbd1, status);
792
793 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
794}
795
796static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
797{
798 pkc->reset_pending_on_curr_blk = 0;
799}
800
801/*
802 * Side effect of opening a block:
803 *
804 * 1) prb_queue is thawed.
805 * 2) retire_blk_timer is refreshed.
806 *
807 */
808static void prb_open_block(struct tpacket_kbdq_core *pkc1,
809 struct tpacket_block_desc *pbd1)
810{
811 struct timespec ts;
812 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
813
814 smp_rmb();
815
816 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
817
818 /* We could have just memset this but we will lose the
819 * flexibility of making the priv area sticky
820 */
821 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
822 BLOCK_NUM_PKTS(pbd1) = 0;
823 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
824 getnstimeofday(&ts);
825 h1->ts_first_pkt.ts_sec = ts.tv_sec;
826 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
827 pkc1->pkblk_start = (char *)pbd1;
828 pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
829 BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
830 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
831 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
832 pbd1->version = pkc1->version;
833 pkc1->prev = pkc1->nxt_offset;
834 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
835 prb_thaw_queue(pkc1);
836 _prb_refresh_rx_retire_blk_timer(pkc1);
837
838 smp_wmb();
839
840 return;
841 }
842
843 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
844 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
845 dump_stack();
846 BUG();
847}
848
849/*
850 * Queue freeze logic:
851 * 1) Assume tp_block_nr = 8 blocks.
852 * 2) At time 't0', user opens Rx ring.
853 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
854 * 4) user-space is either sleeping or processing block '0'.
855 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
856 * it will close block-7,loop around and try to fill block '0'.
857 * call-flow:
858 * __packet_lookup_frame_in_block
859 * prb_retire_current_block()
860 * prb_dispatch_next_block()
861 * |->(BLOCK_STATUS == USER) evaluates to true
862 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
863 * 6) Now there are two cases:
864 * 6.1) Link goes idle right after the queue is frozen.
865 * But remember, the last open_block() refreshed the timer.
866 * When this timer expires, it will refresh itself so that we can
867 * re-open block-0 in the near future.
868 * 6.2) Link is busy and keeps on receiving packets. This is a simple
869 * case and __packet_lookup_frame_in_block will check if block-0
870 * is free and can now be re-used.
871 */
872static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
873 struct packet_sock *po)
874{
875 pkc->reset_pending_on_curr_blk = 1;
876 po->stats_u.stats3.tp_freeze_q_cnt++;
877}
878
879#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
880
881/*
882 * If the next block is free then we will dispatch it
883 * and return a good offset.
884 * Else, we will freeze the queue.
885 * So, caller must check the return value.
886 */
887static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
888 struct packet_sock *po)
889{
890 struct tpacket_block_desc *pbd;
891
892 smp_rmb();
893
894 /* 1. Get current block num */
895 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
896
897 /* 2. If this block is currently in_use then freeze the queue */
898 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
899 prb_freeze_queue(pkc, po);
900 return NULL;
901 }
902
903 /*
904 * 3.
905 * open this block and return the offset where the first packet
906 * needs to get stored.
907 */
908 prb_open_block(pkc, pbd);
909 return (void *)pkc->nxt_offset;
910}
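
prb_dispatch_next_block() above implements the decision described in the "Queue freeze logic" comment: if the next block in the ring is still owned by user space, the queue is frozen; otherwise the block is opened, which also thaws the queue. A toy user-space model of that state machine (ring size and status values are simplified):

#include <stdio.h>

enum { ST_KERNEL, ST_USER };                 /* block ownership */

static int status[8];                        /* 8-block ring */
static int active;                           /* kactive_blk_num */
static int frozen;                           /* reset_pending_on_curr_blk */

/* stand-in for prb_dispatch_next_block() */
static int dispatch_next_block(void)
{
    if (status[active] == ST_USER) {
        frozen = 1;                          /* user space is lagging */
        return -1;
    }
    frozen = 0;                              /* opening a block thaws */
    printf("opened block %d\n", active);
    return active;
}

int main(void)
{
    status[0] = ST_USER;                     /* user space holds block 0 */
    if (dispatch_next_block() < 0)
        printf("queue frozen\n");

    status[0] = ST_KERNEL;                   /* user space releases it */
    dispatch_next_block();                   /* thaws and opens block 0 */
    return 0;
}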
911
912static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
913 struct packet_sock *po, unsigned int status)
914{
915 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
916
917 /* retire/close the current block */
918 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
919 /*
 920 * Plug the case where skb_copy_bits() is in progress on
 921 * cpu-0, tpacket_rcv() got invoked on cpu-1, didn't
 922 * have space to copy the pkt in the current block, and
 923 * called prb_retire_current_block().
924 *
925 * We don't need to worry about the TMO case because
926 * the timer-handler already handled this case.
927 */
928 if (!(status & TP_STATUS_BLK_TMO)) {
929 while (atomic_read(&pkc->blk_fill_in_prog)) {
930 /* Waiting for skb_copy_bits to finish... */
931 cpu_relax();
932 }
933 }
934 prb_close_block(pkc, pbd, po, status);
935 return;
936 }
937
938 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
939 dump_stack();
940 BUG();
941}
942
943static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
944 struct tpacket_block_desc *pbd)
945{
946 return TP_STATUS_USER & BLOCK_STATUS(pbd);
947}
948
949static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
950{
951 return pkc->reset_pending_on_curr_blk;
952}
953
954static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
955{
956 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
957 atomic_dec(&pkc->blk_fill_in_prog);
958}
959
960static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
961 struct tpacket3_hdr *ppd)
962{
963 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
964}
965
966static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
967 struct tpacket3_hdr *ppd)
968{
969 ppd->hv1.tp_rxhash = 0;
970}
971
972static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
973 struct tpacket3_hdr *ppd)
974{
975 if (vlan_tx_tag_present(pkc->skb)) {
976 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
977 ppd->tp_status = TP_STATUS_VLAN_VALID;
978 } else {
979 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
980 }
981}
982
983static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
984 struct tpacket3_hdr *ppd)
985{
986 prb_fill_vlan_info(pkc, ppd);
987
988 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
989 prb_fill_rxhash(pkc, ppd);
990 else
991 prb_clear_rxhash(pkc, ppd);
992}
993
994static void prb_fill_curr_block(char *curr,
995 struct tpacket_kbdq_core *pkc,
996 struct tpacket_block_desc *pbd,
997 unsigned int len)
998{
999 struct tpacket3_hdr *ppd;
1000
1001 ppd = (struct tpacket3_hdr *)curr;
1002 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1003 pkc->prev = curr;
1004 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1005 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1006 BLOCK_NUM_PKTS(pbd) += 1;
1007 atomic_inc(&pkc->blk_fill_in_prog);
1008 prb_run_all_ft_ops(pkc, ppd);
1009}
1010
 1011/* Assumes caller holds sk->sk_receive_queue.lock */
1012static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1013 struct sk_buff *skb,
1014 int status,
1015 unsigned int len
1016 )
1017{
1018 struct tpacket_kbdq_core *pkc;
1019 struct tpacket_block_desc *pbd;
1020 char *curr, *end;
1021
1022 pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
1023 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1024
1025 /* Queue is frozen when user space is lagging behind */
1026 if (prb_queue_frozen(pkc)) {
1027 /*
 1028 * Check if the last block, which caused the queue to freeze,
 1029 * is still in use by user-space.
1030 */
1031 if (prb_curr_blk_in_use(pkc, pbd)) {
1032 /* Can't record this packet */
1033 return NULL;
1034 } else {
1035 /*
1036 * Ok, the block was released by user-space.
1037 * Now let's open that block.
 1038 * Opening a block also thaws the queue;
 1039 * thawing is a side effect.
1040 */
1041 prb_open_block(pkc, pbd);
1042 }
1043 }
1044
1045 smp_mb();
1046 curr = pkc->nxt_offset;
1047 pkc->skb = skb;
1048 end = (char *) ((char *)pbd + pkc->kblk_size);
1049
1050 /* first try the current block */
1051 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1052 prb_fill_curr_block(curr, pkc, pbd, len);
1053 return (void *)curr;
1054 }
1055
1056 /* Ok, close the current block */
1057 prb_retire_current_block(pkc, po, 0);
1058
1059 /* Now, try to dispatch the next block */
1060 curr = (char *)prb_dispatch_next_block(pkc, po);
1061 if (curr) {
1062 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1063 prb_fill_curr_block(curr, pkc, pbd, len);
1064 return (void *)curr;
1065 }
1066
1067 /*
 1068 * No free blocks are available. User-space hasn't caught up yet.
1069 * Queue was just frozen and now this packet will get dropped.
1070 */
1071 return NULL;
1072}
1073
1074static void *packet_current_rx_frame(struct packet_sock *po,
1075 struct sk_buff *skb,
1076 int status, unsigned int len)
1077{
1078 char *curr = NULL;
1079 switch (po->tp_version) {
1080 case TPACKET_V1:
1081 case TPACKET_V2:
1082 curr = packet_lookup_frame(po, &po->rx_ring,
1083 po->rx_ring.head, status);
1084 return curr;
1085 case TPACKET_V3:
1086 return __packet_lookup_frame_in_block(po, skb, status, len);
1087 default:
1088 WARN(1, "TPACKET version not supported\n");
1089 BUG();
1090 return 0;
1091 }
1092}
1093
1094static void *prb_lookup_block(struct packet_sock *po,
1095 struct packet_ring_buffer *rb,
1096 unsigned int previous,
1097 int status)
1098{
1099 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1100 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
1101
1102 if (status != BLOCK_STATUS(pbd))
1103 return NULL;
1104 return pbd;
1105}
1106
1107static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1108{
1109 unsigned int prev;
1110 if (rb->prb_bdqc.kactive_blk_num)
1111 prev = rb->prb_bdqc.kactive_blk_num-1;
1112 else
1113 prev = rb->prb_bdqc.knum_blocks-1;
1114 return prev;
1115}
1116
 1117/* Assumes caller holds the sk_receive_queue.lock */
1118static void *__prb_previous_block(struct packet_sock *po,
1119 struct packet_ring_buffer *rb,
1120 int status)
1121{
1122 unsigned int previous = prb_previous_blk_num(rb);
1123 return prb_lookup_block(po, rb, previous, status);
1124}
1125
1126static void *packet_previous_rx_frame(struct packet_sock *po,
1127 struct packet_ring_buffer *rb,
1128 int status)
1129{
1130 if (po->tp_version <= TPACKET_V2)
1131 return packet_previous_frame(po, rb, status);
1132
1133 return __prb_previous_block(po, rb, status);
1134}
1135
1136static void packet_increment_rx_head(struct packet_sock *po,
1137 struct packet_ring_buffer *rb)
1138{
1139 switch (po->tp_version) {
1140 case TPACKET_V1:
1141 case TPACKET_V2:
1142 return packet_increment_head(rb);
1143 case TPACKET_V3:
1144 default:
1145 WARN(1, "TPACKET version not supported.\n");
1146 BUG();
1147 return;
1148 }
1149}
1150
1151static void *packet_previous_frame(struct packet_sock *po,
393 struct packet_ring_buffer *rb, 1152 struct packet_ring_buffer *rb,
394 int status) 1153 int status)
395{ 1154{
@@ -397,7 +1156,7 @@ static inline void *packet_previous_frame(struct packet_sock *po,
397 return packet_lookup_frame(po, rb, previous, status); 1156 return packet_lookup_frame(po, rb, previous, status);
398} 1157}
399 1158
400static inline void packet_increment_head(struct packet_ring_buffer *buff) 1159static void packet_increment_head(struct packet_ring_buffer *buff)
401{ 1160{
402 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1161 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
403} 1162}
@@ -454,43 +1213,6 @@ static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *sk
454 return f->arr[cpu % num]; 1213 return f->arr[cpu % num];
455} 1214}
456 1215
457static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
458{
459#ifdef CONFIG_INET
460 const struct iphdr *iph;
461 u32 len;
462
463 if (skb->protocol != htons(ETH_P_IP))
464 return skb;
465
466 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
467 return skb;
468
469 iph = ip_hdr(skb);
470 if (iph->ihl < 5 || iph->version != 4)
471 return skb;
472 if (!pskb_may_pull(skb, iph->ihl*4))
473 return skb;
474 iph = ip_hdr(skb);
475 len = ntohs(iph->tot_len);
476 if (skb->len < len || len < (iph->ihl * 4))
477 return skb;
478
479 if (ip_is_fragment(ip_hdr(skb))) {
480 skb = skb_share_check(skb, GFP_ATOMIC);
481 if (skb) {
482 if (pskb_trim_rcsum(skb, len))
483 return skb;
484 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
485 if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
486 return NULL;
487 skb->rxhash = 0;
488 }
489 }
490#endif
491 return skb;
492}
493
494static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1216static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
495 struct packet_type *pt, struct net_device *orig_dev) 1217 struct packet_type *pt, struct net_device *orig_dev)
496{ 1218{
@@ -509,7 +1231,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
509 case PACKET_FANOUT_HASH: 1231 case PACKET_FANOUT_HASH:
510 default: 1232 default:
511 if (f->defrag) { 1233 if (f->defrag) {
512 skb = fanout_check_defrag(skb); 1234 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
513 if (!skb) 1235 if (!skb)
514 return 0; 1236 return 0;
515 } 1237 }
@@ -836,7 +1558,7 @@ out_free:
836 return err; 1558 return err;
837} 1559}
838 1560
839static inline unsigned int run_filter(const struct sk_buff *skb, 1561static unsigned int run_filter(const struct sk_buff *skb,
840 const struct sock *sk, 1562 const struct sock *sk,
841 unsigned int res) 1563 unsigned int res)
842{ 1564{
@@ -985,12 +1707,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
985 union { 1707 union {
986 struct tpacket_hdr *h1; 1708 struct tpacket_hdr *h1;
987 struct tpacket2_hdr *h2; 1709 struct tpacket2_hdr *h2;
1710 struct tpacket3_hdr *h3;
988 void *raw; 1711 void *raw;
989 } h; 1712 } h;
990 u8 *skb_head = skb->data; 1713 u8 *skb_head = skb->data;
991 int skb_len = skb->len; 1714 int skb_len = skb->len;
992 unsigned int snaplen, res; 1715 unsigned int snaplen, res;
993 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; 1716 unsigned long status = TP_STATUS_USER;
994 unsigned short macoff, netoff, hdrlen; 1717 unsigned short macoff, netoff, hdrlen;
995 struct sk_buff *copy_skb = NULL; 1718 struct sk_buff *copy_skb = NULL;
996 struct timeval tv; 1719 struct timeval tv;
@@ -1036,37 +1759,46 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1036 po->tp_reserve; 1759 po->tp_reserve;
1037 macoff = netoff - maclen; 1760 macoff = netoff - maclen;
1038 } 1761 }
1039 1762 if (po->tp_version <= TPACKET_V2) {
1040 if (macoff + snaplen > po->rx_ring.frame_size) { 1763 if (macoff + snaplen > po->rx_ring.frame_size) {
1041 if (po->copy_thresh && 1764 if (po->copy_thresh &&
1042 atomic_read(&sk->sk_rmem_alloc) + skb->truesize < 1765 atomic_read(&sk->sk_rmem_alloc) + skb->truesize
1043 (unsigned)sk->sk_rcvbuf) { 1766 < (unsigned)sk->sk_rcvbuf) {
1044 if (skb_shared(skb)) { 1767 if (skb_shared(skb)) {
1045 copy_skb = skb_clone(skb, GFP_ATOMIC); 1768 copy_skb = skb_clone(skb, GFP_ATOMIC);
1046 } else { 1769 } else {
1047 copy_skb = skb_get(skb); 1770 copy_skb = skb_get(skb);
1048 skb_head = skb->data; 1771 skb_head = skb->data;
1772 }
1773 if (copy_skb)
1774 skb_set_owner_r(copy_skb, sk);
1049 } 1775 }
1050 if (copy_skb) 1776 snaplen = po->rx_ring.frame_size - macoff;
1051 skb_set_owner_r(copy_skb, sk); 1777 if ((int)snaplen < 0)
1778 snaplen = 0;
1052 } 1779 }
1053 snaplen = po->rx_ring.frame_size - macoff;
1054 if ((int)snaplen < 0)
1055 snaplen = 0;
1056 } 1780 }
1057
1058 spin_lock(&sk->sk_receive_queue.lock); 1781 spin_lock(&sk->sk_receive_queue.lock);
1059 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL); 1782 h.raw = packet_current_rx_frame(po, skb,
1783 TP_STATUS_KERNEL, (macoff+snaplen));
1060 if (!h.raw) 1784 if (!h.raw)
1061 goto ring_is_full; 1785 goto ring_is_full;
1062 packet_increment_head(&po->rx_ring); 1786 if (po->tp_version <= TPACKET_V2) {
1787 packet_increment_rx_head(po, &po->rx_ring);
1788 /*
 1789 * LOSING will be reported until you read the stats,
 1790 * because they are COR - Clear On Read.
 1791 * Anyway, move it for V1/V2 only, as V3 doesn't need this
 1792 * at the packet level.
1793 */
1794 if (po->stats.tp_drops)
1795 status |= TP_STATUS_LOSING;
1796 }
1063 po->stats.tp_packets++; 1797 po->stats.tp_packets++;
1064 if (copy_skb) { 1798 if (copy_skb) {
1065 status |= TP_STATUS_COPY; 1799 status |= TP_STATUS_COPY;
1066 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 1800 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1067 } 1801 }
1068 if (!po->stats.tp_drops)
1069 status &= ~TP_STATUS_LOSING;
1070 spin_unlock(&sk->sk_receive_queue.lock); 1802 spin_unlock(&sk->sk_receive_queue.lock);
1071 1803
1072 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 1804 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
@@ -1117,6 +1849,29 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1117 h.h2->tp_padding = 0; 1849 h.h2->tp_padding = 0;
1118 hdrlen = sizeof(*h.h2); 1850 hdrlen = sizeof(*h.h2);
1119 break; 1851 break;
1852 case TPACKET_V3:
 1853 /* tp_next_offset and vlan are already populated above,
 1854 * so DON'T clear those fields here.
1855 */
1856 h.h3->tp_status |= status;
1857 h.h3->tp_len = skb->len;
1858 h.h3->tp_snaplen = snaplen;
1859 h.h3->tp_mac = macoff;
1860 h.h3->tp_net = netoff;
1861 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1862 && shhwtstamps->syststamp.tv64)
1863 ts = ktime_to_timespec(shhwtstamps->syststamp);
1864 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1865 && shhwtstamps->hwtstamp.tv64)
1866 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1867 else if (skb->tstamp.tv64)
1868 ts = ktime_to_timespec(skb->tstamp);
1869 else
1870 getnstimeofday(&ts);
1871 h.h3->tp_sec = ts.tv_sec;
1872 h.h3->tp_nsec = ts.tv_nsec;
1873 hdrlen = sizeof(*h.h3);
1874 break;
1120 default: 1875 default:
1121 BUG(); 1876 BUG();
1122 } 1877 }
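The V3 header timestamp above falls back from system-hardware to raw-hardware to software stamps. As a hedged user-space sketch (fd is an assumed PF_PACKET socket), hardware stamping is requested with the existing PACKET_TIMESTAMP option:

	int req = SOF_TIMESTAMPING_RAW_HARDWARE;

	if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req)))
		perror("PACKET_TIMESTAMP");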
@@ -1137,13 +1892,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1137 { 1892 {
1138 u8 *start, *end; 1893 u8 *start, *end;
1139 1894
1140 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen); 1895 if (po->tp_version <= TPACKET_V2) {
1141 for (start = h.raw; start < end; start += PAGE_SIZE) 1896 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1142 flush_dcache_page(pgv_to_page(start)); 1897 + macoff + snaplen);
1898 for (start = h.raw; start < end; start += PAGE_SIZE)
1899 flush_dcache_page(pgv_to_page(start));
1900 }
1143 smp_wmb(); 1901 smp_wmb();
1144 } 1902 }
1145#endif 1903#endif
1146 __packet_set_status(po, h.raw, status); 1904 if (po->tp_version <= TPACKET_V2)
1905 __packet_set_status(po, h.raw, status);
1906 else
1907 prb_clear_blk_fill_status(&po->rx_ring);
1147 1908
1148 sk->sk_data_ready(sk, 0); 1909 sk->sk_data_ready(sk, 0);
1149 1910
@@ -1170,8 +1931,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
1170 struct packet_sock *po = pkt_sk(skb->sk); 1931 struct packet_sock *po = pkt_sk(skb->sk);
1171 void *ph; 1932 void *ph;
1172 1933
1173 BUG_ON(skb == NULL);
1174
1175 if (likely(po->tx_ring.pg_vec)) { 1934 if (likely(po->tx_ring.pg_vec)) {
1176 ph = skb_shinfo(skb)->destructor_arg; 1935 ph = skb_shinfo(skb)->destructor_arg;
1177 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); 1936 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
@@ -1408,10 +2167,10 @@ out:
1408 return err; 2167 return err;
1409} 2168}
1410 2169
1411static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2170static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1412 size_t reserve, size_t len, 2171 size_t reserve, size_t len,
1413 size_t linear, int noblock, 2172 size_t linear, int noblock,
1414 int *err) 2173 int *err)
1415{ 2174{
1416 struct sk_buff *skb; 2175 struct sk_buff *skb;
1417 2176
@@ -1634,7 +2393,7 @@ static int packet_release(struct socket *sock)
1634 struct sock *sk = sock->sk; 2393 struct sock *sk = sock->sk;
1635 struct packet_sock *po; 2394 struct packet_sock *po;
1636 struct net *net; 2395 struct net *net;
1637 struct tpacket_req req; 2396 union tpacket_req_u req_u;
1638 2397
1639 if (!sk) 2398 if (!sk)
1640 return 0; 2399 return 0;
@@ -1657,13 +2416,13 @@ static int packet_release(struct socket *sock)
1657 2416
1658 packet_flush_mclist(sk); 2417 packet_flush_mclist(sk);
1659 2418
1660 memset(&req, 0, sizeof(req)); 2419 memset(&req_u, 0, sizeof(req_u));
1661 2420
1662 if (po->rx_ring.pg_vec) 2421 if (po->rx_ring.pg_vec)
1663 packet_set_ring(sk, &req, 1, 0); 2422 packet_set_ring(sk, &req_u, 1, 0);
1664 2423
1665 if (po->tx_ring.pg_vec) 2424 if (po->tx_ring.pg_vec)
1666 packet_set_ring(sk, &req, 1, 1); 2425 packet_set_ring(sk, &req_u, 1, 1);
1667 2426
1668 fanout_release(sk); 2427 fanout_release(sk);
1669 2428
@@ -2283,15 +3042,27 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2283 case PACKET_RX_RING: 3042 case PACKET_RX_RING:
2284 case PACKET_TX_RING: 3043 case PACKET_TX_RING:
2285 { 3044 {
2286 struct tpacket_req req; 3045 union tpacket_req_u req_u;
3046 int len;
2287 3047
2288 if (optlen < sizeof(req)) 3048 switch (po->tp_version) {
3049 case TPACKET_V1:
3050 case TPACKET_V2:
3051 len = sizeof(req_u.req);
3052 break;
3053 case TPACKET_V3:
3054 default:
3055 len = sizeof(req_u.req3);
3056 break;
3057 }
3058 if (optlen < len)
2289 return -EINVAL; 3059 return -EINVAL;
2290 if (pkt_sk(sk)->has_vnet_hdr) 3060 if (pkt_sk(sk)->has_vnet_hdr)
2291 return -EINVAL; 3061 return -EINVAL;
2292 if (copy_from_user(&req, optval, sizeof(req))) 3062 if (copy_from_user(&req_u.req, optval, len))
2293 return -EFAULT; 3063 return -EFAULT;
2294 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 3064 return packet_set_ring(sk, &req_u, 0,
3065 optname == PACKET_TX_RING);
2295 } 3066 }
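For reference, a hedged user-space sketch of configuring a TPACKET_V3 rx ring through this path (fd is an assumed PF_PACKET socket, sizes are illustrative; the kernel still checks that tp_frame_nr == tp_block_size / tp_frame_size * tp_block_nr):

	struct tpacket_req3 req3 = {
		.tp_block_size		= 1 << 22,	/* 4 MiB per block */
		.tp_block_nr		= 64,
		.tp_frame_size		= 1 << 11,
		.tp_frame_nr		= (1 << 22) / (1 << 11) * 64,
		.tp_retire_blk_tov	= 60,		/* block timeout, ms */
		.tp_feature_req_word	= TP_FT_REQ_FILL_RXHASH,
	};
	int ver = TPACKET_V3;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req3, sizeof(req3));

PACKET_VERSION must be set before PACKET_RX_RING, since the optlen check above depends on po->tp_version.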
2296 case PACKET_COPY_THRESH: 3067 case PACKET_COPY_THRESH:
2297 { 3068 {
@@ -2318,6 +3089,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2318 switch (val) { 3089 switch (val) {
2319 case TPACKET_V1: 3090 case TPACKET_V1:
2320 case TPACKET_V2: 3091 case TPACKET_V2:
3092 case TPACKET_V3:
2321 po->tp_version = val; 3093 po->tp_version = val;
2322 return 0; 3094 return 0;
2323 default: 3095 default:
@@ -2427,6 +3199,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2427 struct packet_sock *po = pkt_sk(sk); 3199 struct packet_sock *po = pkt_sk(sk);
2428 void *data; 3200 void *data;
2429 struct tpacket_stats st; 3201 struct tpacket_stats st;
3202 union tpacket_stats_u st_u;
2430 3203
2431 if (level != SOL_PACKET) 3204 if (level != SOL_PACKET)
2432 return -ENOPROTOOPT; 3205 return -ENOPROTOOPT;
@@ -2439,15 +3212,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2439 3212
2440 switch (optname) { 3213 switch (optname) {
2441 case PACKET_STATISTICS: 3214 case PACKET_STATISTICS:
2442 if (len > sizeof(struct tpacket_stats)) 3215 if (po->tp_version == TPACKET_V3) {
2443 len = sizeof(struct tpacket_stats); 3216 len = sizeof(struct tpacket_stats_v3);
3217 } else {
3218 if (len > sizeof(struct tpacket_stats))
3219 len = sizeof(struct tpacket_stats);
3220 }
2444 spin_lock_bh(&sk->sk_receive_queue.lock); 3221 spin_lock_bh(&sk->sk_receive_queue.lock);
2445 st = po->stats; 3222 if (po->tp_version == TPACKET_V3) {
3223 memcpy(&st_u.stats3, &po->stats,
3224 sizeof(struct tpacket_stats));
3225 st_u.stats3.tp_freeze_q_cnt =
3226 po->stats_u.stats3.tp_freeze_q_cnt;
3227 st_u.stats3.tp_packets += po->stats.tp_drops;
3228 data = &st_u.stats3;
3229 } else {
3230 st = po->stats;
3231 st.tp_packets += st.tp_drops;
3232 data = &st;
3233 }
2446 memset(&po->stats, 0, sizeof(st)); 3234 memset(&po->stats, 0, sizeof(st));
2447 spin_unlock_bh(&sk->sk_receive_queue.lock); 3235 spin_unlock_bh(&sk->sk_receive_queue.lock);
2448 st.tp_packets += st.tp_drops;
2449
2450 data = &st;
2451 break; 3236 break;
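Reading these counters from user space is then a plain getsockopt; a hedged sketch (fd assumed as above). Note the counters are clear-on-read, as the kernel memsets po->stats under the queue lock:

	struct tpacket_stats_v3 st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("pkts %u drops %u freezes %u\n",
		       st.tp_packets, st.tp_drops, st.tp_freeze_q_cnt);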
2452 case PACKET_AUXDATA: 3237 case PACKET_AUXDATA:
2453 if (len > sizeof(int)) 3238 if (len > sizeof(int))
@@ -2488,6 +3273,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2488 case TPACKET_V2: 3273 case TPACKET_V2:
2489 val = sizeof(struct tpacket2_hdr); 3274 val = sizeof(struct tpacket2_hdr);
2490 break; 3275 break;
3276 case TPACKET_V3:
3277 val = sizeof(struct tpacket3_hdr);
3278 break;
2491 default: 3279 default:
2492 return -EINVAL; 3280 return -EINVAL;
2493 } 3281 }
@@ -2644,7 +3432,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
2644 3432
2645 spin_lock_bh(&sk->sk_receive_queue.lock); 3433 spin_lock_bh(&sk->sk_receive_queue.lock);
2646 if (po->rx_ring.pg_vec) { 3434 if (po->rx_ring.pg_vec) {
2647 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) 3435 if (!packet_previous_rx_frame(po, &po->rx_ring,
3436 TP_STATUS_KERNEL))
2648 mask |= POLLIN | POLLRDNORM; 3437 mask |= POLLIN | POLLRDNORM;
2649 } 3438 }
2650 spin_unlock_bh(&sk->sk_receive_queue.lock); 3439 spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2705,7 +3494,7 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
2705 kfree(pg_vec); 3494 kfree(pg_vec);
2706} 3495}
2707 3496
2708static inline char *alloc_one_pg_vec_page(unsigned long order) 3497static char *alloc_one_pg_vec_page(unsigned long order)
2709{ 3498{
2710 char *buffer = NULL; 3499 char *buffer = NULL;
2711 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 3500 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
@@ -2763,7 +3552,7 @@ out_free_pgvec:
2763 goto out; 3552 goto out;
2764} 3553}
2765 3554
2766static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 3555static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2767 int closing, int tx_ring) 3556 int closing, int tx_ring)
2768{ 3557{
2769 struct pgv *pg_vec = NULL; 3558 struct pgv *pg_vec = NULL;
@@ -2772,7 +3561,15 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2772 struct packet_ring_buffer *rb; 3561 struct packet_ring_buffer *rb;
2773 struct sk_buff_head *rb_queue; 3562 struct sk_buff_head *rb_queue;
2774 __be16 num; 3563 __be16 num;
2775 int err; 3564 int err = -EINVAL;
 3565 /* Added to minimize code churn */
3566 struct tpacket_req *req = &req_u->req;
3567
3568 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3569 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3570 WARN(1, "Tx-ring is not supported.\n");
3571 goto out;
3572 }
2776 3573
2777 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 3574 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
2778 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 3575 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@ -2798,6 +3595,9 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2798 case TPACKET_V2: 3595 case TPACKET_V2:
2799 po->tp_hdrlen = TPACKET2_HDRLEN; 3596 po->tp_hdrlen = TPACKET2_HDRLEN;
2800 break; 3597 break;
3598 case TPACKET_V3:
3599 po->tp_hdrlen = TPACKET3_HDRLEN;
3600 break;
2801 } 3601 }
2802 3602
2803 err = -EINVAL; 3603 err = -EINVAL;
@@ -2823,6 +3623,17 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2823 pg_vec = alloc_pg_vec(req, order); 3623 pg_vec = alloc_pg_vec(req, order);
2824 if (unlikely(!pg_vec)) 3624 if (unlikely(!pg_vec))
2825 goto out; 3625 goto out;
3626 switch (po->tp_version) {
3627 case TPACKET_V3:
3628 /* Transmit path is not supported. We checked
 3629 * it above, but just being paranoid.
3630 */
3631 if (!tx_ring)
3632 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3633 break;
3634 default:
3635 break;
3636 }
2826 } 3637 }
2827 /* Done */ 3638 /* Done */
2828 else { 3639 else {
@@ -2875,7 +3686,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2875 register_prot_hook(sk); 3686 register_prot_hook(sk);
2876 } 3687 }
2877 spin_unlock(&po->bind_lock); 3688 spin_unlock(&po->bind_lock);
2878 3689 if (closing && (po->tp_version > TPACKET_V2)) {
3690 /* Because we don't support block-based V3 on tx-ring */
3691 if (!tx_ring)
3692 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3693 }
2879 release_sock(sk); 3694 release_sock(sk);
2880 3695
2881 if (pg_vec) 3696 if (pg_vec)
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index c6fffd946d42..bf10ea8fbbf9 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
480 if (proto_tab[protocol]) 480 if (proto_tab[protocol])
481 err = -EBUSY; 481 err = -EBUSY;
482 else 482 else
483 rcu_assign_pointer(proto_tab[protocol], pp); 483 RCU_INIT_POINTER(proto_tab[protocol], pp);
484 mutex_unlock(&proto_tab_lock); 484 mutex_unlock(&proto_tab_lock);
485 485
486 return err; 486 return err;
@@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
491{ 491{
492 mutex_lock(&proto_tab_lock); 492 mutex_lock(&proto_tab_lock);
493 BUG_ON(proto_tab[protocol] != pp); 493 BUG_ON(proto_tab[protocol] != pp);
494 rcu_assign_pointer(proto_tab[protocol], NULL); 494 RCU_INIT_POINTER(proto_tab[protocol], NULL);
495 mutex_unlock(&proto_tab_lock); 495 mutex_unlock(&proto_tab_lock);
496 synchronize_rcu(); 496 synchronize_rcu();
497 proto_unregister(pp->prot); 497 proto_unregister(pp->prot);
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 2f032381bd45..bf35b4e1a14c 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -30,6 +30,7 @@
30#include <net/sock.h> 30#include <net/sock.h>
31 31
32#include <linux/phonet.h> 32#include <linux/phonet.h>
33#include <linux/export.h>
33#include <net/phonet/phonet.h> 34#include <net/phonet/phonet.h>
34 35
35static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb); 36static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index f17fd841f948..2ba6e9fb4cbc 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -30,6 +30,7 @@
30#include <asm/ioctls.h> 30#include <asm/ioctls.h>
31 31
32#include <linux/phonet.h> 32#include <linux/phonet.h>
33#include <linux/module.h>
33#include <net/phonet/phonet.h> 34#include <net/phonet/phonet.h>
34#include <net/phonet/pep.h> 35#include <net/phonet/pep.h>
35#include <net/phonet/gprs.h> 36#include <net/phonet/gprs.h>
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index d2df8f33160b..c5827614376b 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev)
276 mutex_lock(&pnn->routes.lock); 276 mutex_lock(&pnn->routes.lock);
277 for (i = 0; i < 64; i++) 277 for (i = 0; i < 64; i++)
278 if (dev == pnn->routes.table[i]) { 278 if (dev == pnn->routes.table[i]) {
279 rcu_assign_pointer(pnn->routes.table[i], NULL); 279 RCU_INIT_POINTER(pnn->routes.table[i], NULL);
280 set_bit(i, deleted); 280 set_bit(i, deleted);
281 } 281 }
282 mutex_unlock(&pnn->routes.lock); 282 mutex_unlock(&pnn->routes.lock);
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
390 daddr = daddr >> 2; 390 daddr = daddr >> 2;
391 mutex_lock(&routes->lock); 391 mutex_lock(&routes->lock);
392 if (routes->table[daddr] == NULL) { 392 if (routes->table[daddr] == NULL) {
393 rcu_assign_pointer(routes->table[daddr], dev); 393 RCU_INIT_POINTER(routes->table[daddr], dev);
394 dev_hold(dev); 394 dev_hold(dev);
395 err = 0; 395 err = 0;
396 } 396 }
@@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
406 daddr = daddr >> 2; 406 daddr = daddr >> 2;
407 mutex_lock(&routes->lock); 407 mutex_lock(&routes->lock);
408 if (dev == routes->table[daddr]) 408 if (dev == routes->table[daddr])
409 rcu_assign_pointer(routes->table[daddr], NULL); 409 RCU_INIT_POINTER(routes->table[daddr], NULL);
410 else 410 else
411 dev = NULL; 411 dev = NULL;
412 mutex_unlock(&routes->lock); 412 mutex_unlock(&routes->lock);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ab07711cf2f4..3f8d0b1603b9 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -31,6 +31,7 @@
31#include <net/tcp_states.h> 31#include <net/tcp_states.h>
32 32
33#include <linux/phonet.h> 33#include <linux/phonet.h>
34#include <linux/export.h>
34#include <net/phonet/phonet.h> 35#include <net/phonet/phonet.h>
35#include <net/phonet/pep.h> 36#include <net/phonet/pep.h>
36#include <net/phonet/pn_dev.h> 37#include <net/phonet/pn_dev.h>
@@ -679,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
679 mutex_lock(&resource_mutex); 680 mutex_lock(&resource_mutex);
680 if (pnres.sk[res] == NULL) { 681 if (pnres.sk[res] == NULL) {
681 sock_hold(sk); 682 sock_hold(sk);
682 rcu_assign_pointer(pnres.sk[res], sk); 683 RCU_INIT_POINTER(pnres.sk[res], sk);
683 ret = 0; 684 ret = 0;
684 } 685 }
685 mutex_unlock(&resource_mutex); 686 mutex_unlock(&resource_mutex);
@@ -695,7 +696,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
695 696
696 mutex_lock(&resource_mutex); 697 mutex_lock(&resource_mutex);
697 if (pnres.sk[res] == sk) { 698 if (pnres.sk[res] == sk) {
698 rcu_assign_pointer(pnres.sk[res], NULL); 699 RCU_INIT_POINTER(pnres.sk[res], NULL);
699 ret = 0; 700 ret = 0;
700 } 701 }
701 mutex_unlock(&resource_mutex); 702 mutex_unlock(&resource_mutex);
@@ -714,7 +715,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
714 mutex_lock(&resource_mutex); 715 mutex_lock(&resource_mutex);
715 for (res = 0; res < 256; res++) { 716 for (res = 0; res < 256; res++) {
716 if (pnres.sk[res] == sk) { 717 if (pnres.sk[res] == sk) {
717 rcu_assign_pointer(pnres.sk[res], NULL); 718 RCU_INIT_POINTER(pnres.sk[res], NULL);
718 match++; 719 match++;
719 } 720 }
720 } 721 }
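The phonet conversions above replace rcu_assign_pointer() where the write barrier it implies buys nothing: the stored value is either NULL, or points to an object whose initialization cannot race with the store. A hedged illustration (struct foo and global_foo are made-up names, not from this patch):

	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	p->val = 42;
	rcu_assign_pointer(global_foo, p);	/* barrier: init before publish */

	RCU_INIT_POINTER(global_foo, NULL);	/* storing NULL: no barrier needed */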
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index ec753b3ae72a..4cf6dc7910e4 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,6 +9,7 @@ config RDS
9 9
10config RDS_RDMA 10config RDS_RDMA
11 tristate "RDS over Infiniband and iWARP" 11 tristate "RDS over Infiniband and iWARP"
12 select LLIST
12 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 13 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
13 ---help--- 14 ---help---
14 Allow RDS to use Infiniband and iWARP as a transport. 15 Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 6daaa49d133f..e5b65acd650b 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -34,6 +34,7 @@
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/rbtree.h> 35#include <linux/rbtree.h>
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/export.h>
37 38
38#include "rds.h" 39#include "rds.h"
39 40
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9334d892366e..9e07c756d1f9 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -33,6 +33,7 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/export.h>
36#include <net/inet_hashtables.h> 37#include <net/inet_hashtables.h>
37 38
38#include "rds.h" 39#include "rds.h"
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 3b83086bcc30..b4c8b0022fee 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -38,6 +38,7 @@
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/module.h>
41 42
42#include "rds.h" 43#include "rds.h"
43#include "ib.h" 44#include "ib.h"
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 819c35a0d9cb..e8fdb172adbb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -33,10 +33,10 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/rculist.h> 35#include <linux/rculist.h>
36#include <linux/llist.h>
36 37
37#include "rds.h" 38#include "rds.h"
38#include "ib.h" 39#include "ib.h"
39#include "xlist.h"
40 40
41static DEFINE_PER_CPU(unsigned long, clean_list_grace); 41static DEFINE_PER_CPU(unsigned long, clean_list_grace);
42#define CLEAN_LIST_BUSY_BIT 0 42#define CLEAN_LIST_BUSY_BIT 0
@@ -49,7 +49,7 @@ struct rds_ib_mr {
49 struct rds_ib_mr_pool *pool; 49 struct rds_ib_mr_pool *pool;
50 struct ib_fmr *fmr; 50 struct ib_fmr *fmr;
51 51
52 struct xlist_head xlist; 52 struct llist_node llnode;
53 53
54 /* unmap_list is for freeing */ 54 /* unmap_list is for freeing */
55 struct list_head unmap_list; 55 struct list_head unmap_list;
@@ -71,9 +71,9 @@ struct rds_ib_mr_pool {
71 atomic_t item_count; /* total # of MRs */ 71 atomic_t item_count; /* total # of MRs */
72 atomic_t dirty_count; /* # dirty of MRs */ 72 atomic_t dirty_count; /* # dirty of MRs */
73 73
74 struct xlist_head drop_list; /* MRs that have reached their max_maps limit */ 74 struct llist_head drop_list; /* MRs that have reached their max_maps limit */
75 struct xlist_head free_list; /* unused MRs */ 75 struct llist_head free_list; /* unused MRs */
 76 struct xlist_head clean_list; /* global unused & unmapped MRs */ 76 struct llist_head clean_list; /* global unused & unmapped MRs */
77 wait_queue_head_t flush_wait; 77 wait_queue_head_t flush_wait;
78 78
79 atomic_t free_pinned; /* memory pinned by free MRs */ 79 atomic_t free_pinned; /* memory pinned by free MRs */
@@ -220,9 +220,9 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
220 if (!pool) 220 if (!pool)
221 return ERR_PTR(-ENOMEM); 221 return ERR_PTR(-ENOMEM);
222 222
223 INIT_XLIST_HEAD(&pool->free_list); 223 init_llist_head(&pool->free_list);
224 INIT_XLIST_HEAD(&pool->drop_list); 224 init_llist_head(&pool->drop_list);
225 INIT_XLIST_HEAD(&pool->clean_list); 225 init_llist_head(&pool->clean_list);
226 mutex_init(&pool->flush_lock); 226 mutex_init(&pool->flush_lock);
227 init_waitqueue_head(&pool->flush_wait); 227 init_waitqueue_head(&pool->flush_wait);
228 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); 228 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
@@ -260,26 +260,18 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
260 kfree(pool); 260 kfree(pool);
261} 261}
262 262
263static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
264 struct rds_ib_mr **ibmr_ret)
265{
266 struct xlist_head *ibmr_xl;
267 ibmr_xl = xlist_del_head_fast(xl);
268 *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
269}
270
271static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) 263static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
272{ 264{
273 struct rds_ib_mr *ibmr = NULL; 265 struct rds_ib_mr *ibmr = NULL;
274 struct xlist_head *ret; 266 struct llist_node *ret;
275 unsigned long *flag; 267 unsigned long *flag;
276 268
277 preempt_disable(); 269 preempt_disable();
278 flag = &__get_cpu_var(clean_list_grace); 270 flag = &__get_cpu_var(clean_list_grace);
279 set_bit(CLEAN_LIST_BUSY_BIT, flag); 271 set_bit(CLEAN_LIST_BUSY_BIT, flag);
280 ret = xlist_del_head(&pool->clean_list); 272 ret = llist_del_first(&pool->clean_list);
281 if (ret) 273 if (ret)
282 ibmr = list_entry(ret, struct rds_ib_mr, xlist); 274 ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
283 275
284 clear_bit(CLEAN_LIST_BUSY_BIT, flag); 276 clear_bit(CLEAN_LIST_BUSY_BIT, flag);
285 preempt_enable(); 277 preempt_enable();
@@ -529,46 +521,44 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
529} 521}
530 522
531/* 523/*
532 * given an xlist of mrs, put them all into the list_head for more processing 524 * given an llist of mrs, put them all into the list_head for more processing
533 */ 525 */
534static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list) 526static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
535{ 527{
536 struct rds_ib_mr *ibmr; 528 struct rds_ib_mr *ibmr;
537 struct xlist_head splice; 529 struct llist_node *node;
538 struct xlist_head *cur; 530 struct llist_node *next;
539 struct xlist_head *next; 531
540 532 node = llist_del_all(llist);
541 splice.next = NULL; 533 while (node) {
542 xlist_splice(xlist, &splice); 534 next = node->next;
543 cur = splice.next; 535 ibmr = llist_entry(node, struct rds_ib_mr, llnode);
544 while (cur) {
545 next = cur->next;
546 ibmr = list_entry(cur, struct rds_ib_mr, xlist);
547 list_add_tail(&ibmr->unmap_list, list); 536 list_add_tail(&ibmr->unmap_list, list);
548 cur = next; 537 node = next;
549 } 538 }
550} 539}
551 540
552/* 541/*
553 * this takes a list head of mrs and turns it into an xlist of clusters. 542 * this takes a list head of mrs and turns it into linked llist nodes
554 * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for 543 * of clusters. Each cluster has linked llist nodes of
555 * reuse. 544 * MR_CLUSTER_SIZE mrs that are ready for reuse.
556 */ 545 */
557static void list_append_to_xlist(struct rds_ib_mr_pool *pool, 546static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
558 struct list_head *list, struct xlist_head *xlist, 547 struct list_head *list,
559 struct xlist_head **tail_ret) 548 struct llist_node **nodes_head,
549 struct llist_node **nodes_tail)
560{ 550{
561 struct rds_ib_mr *ibmr; 551 struct rds_ib_mr *ibmr;
562 struct xlist_head *cur_mr = xlist; 552 struct llist_node *cur = NULL;
563 struct xlist_head *tail_mr = NULL; 553 struct llist_node **next = nodes_head;
564 554
565 list_for_each_entry(ibmr, list, unmap_list) { 555 list_for_each_entry(ibmr, list, unmap_list) {
566 tail_mr = &ibmr->xlist; 556 cur = &ibmr->llnode;
567 tail_mr->next = NULL; 557 *next = cur;
568 cur_mr->next = tail_mr; 558 next = &cur->next;
569 cur_mr = tail_mr;
570 } 559 }
571 *tail_ret = tail_mr; 560 *next = NULL;
561 *nodes_tail = cur;
572} 562}
573 563
574/* 564/*
@@ -581,8 +571,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
581 int free_all, struct rds_ib_mr **ibmr_ret) 571 int free_all, struct rds_ib_mr **ibmr_ret)
582{ 572{
583 struct rds_ib_mr *ibmr, *next; 573 struct rds_ib_mr *ibmr, *next;
584 struct xlist_head clean_xlist; 574 struct llist_node *clean_nodes;
585 struct xlist_head *clean_tail; 575 struct llist_node *clean_tail;
586 LIST_HEAD(unmap_list); 576 LIST_HEAD(unmap_list);
587 LIST_HEAD(fmr_list); 577 LIST_HEAD(fmr_list);
588 unsigned long unpinned = 0; 578 unsigned long unpinned = 0;
@@ -603,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
603 593
604 prepare_to_wait(&pool->flush_wait, &wait, 594 prepare_to_wait(&pool->flush_wait, &wait,
605 TASK_UNINTERRUPTIBLE); 595 TASK_UNINTERRUPTIBLE);
606 if (xlist_empty(&pool->clean_list)) 596 if (llist_empty(&pool->clean_list))
607 schedule(); 597 schedule();
608 598
609 ibmr = rds_ib_reuse_fmr(pool); 599 ibmr = rds_ib_reuse_fmr(pool);
@@ -628,10 +618,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
628 /* Get the list of all MRs to be dropped. Ordering matters - 618 /* Get the list of all MRs to be dropped. Ordering matters -
629 * we want to put drop_list ahead of free_list. 619 * we want to put drop_list ahead of free_list.
630 */ 620 */
631 xlist_append_to_list(&pool->drop_list, &unmap_list); 621 llist_append_to_list(&pool->drop_list, &unmap_list);
632 xlist_append_to_list(&pool->free_list, &unmap_list); 622 llist_append_to_list(&pool->free_list, &unmap_list);
633 if (free_all) 623 if (free_all)
634 xlist_append_to_list(&pool->clean_list, &unmap_list); 624 llist_append_to_list(&pool->clean_list, &unmap_list);
635 625
636 free_goal = rds_ib_flush_goal(pool, free_all); 626 free_goal = rds_ib_flush_goal(pool, free_all);
637 627
@@ -663,22 +653,22 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
663 if (!list_empty(&unmap_list)) { 653 if (!list_empty(&unmap_list)) {
664 /* we have to make sure that none of the things we're about 654 /* we have to make sure that none of the things we're about
665 * to put on the clean list would race with other cpus trying 655 * to put on the clean list would race with other cpus trying
666 * to pull items off. The xlist would explode if we managed to 656 * to pull items off. The llist would explode if we managed to
667 * remove something from the clean list and then add it back again 657 * remove something from the clean list and then add it back again
668 * while another CPU was spinning on that same item in xlist_del_head. 658 * while another CPU was spinning on that same item in llist_del_first.
669 * 659 *
670 * This is pretty unlikely, but just in case wait for an xlist grace period 660 * This is pretty unlikely, but just in case wait for an llist grace period
671 * here before adding anything back into the clean list. 661 * here before adding anything back into the clean list.
672 */ 662 */
673 wait_clean_list_grace(); 663 wait_clean_list_grace();
674 664
675 list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail); 665 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
676 if (ibmr_ret) 666 if (ibmr_ret)
677 refill_local(pool, &clean_xlist, ibmr_ret); 667 *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
678 668
679 /* refill_local may have emptied our list */ 669 /* more than one entry in llist nodes */
680 if (!xlist_empty(&clean_xlist)) 670 if (clean_nodes->next)
681 xlist_add(clean_xlist.next, clean_tail, &pool->clean_list); 671 llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
682 672
683 } 673 }
684 674
@@ -711,9 +701,9 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
711 701
712 /* Return it to the pool's free list */ 702 /* Return it to the pool's free list */
713 if (ibmr->remap_count >= pool->fmr_attr.max_maps) 703 if (ibmr->remap_count >= pool->fmr_attr.max_maps)
714 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list); 704 llist_add(&ibmr->llnode, &pool->drop_list);
715 else 705 else
716 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list); 706 llist_add(&ibmr->llnode, &pool->free_list);
717 707
718 atomic_add(ibmr->sg_len, &pool->free_pinned); 708 atomic_add(ibmr->sg_len, &pool->free_pinned);
719 atomic_inc(&pool->dirty_count); 709 atomic_inc(&pool->dirty_count);
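The conversion above leans on three llist primitives: lock-free push (llist_add), detach-all (llist_del_all), and batch re-insert (llist_add_batch). A hedged, self-contained sketch of the same producer/consumer pattern (struct item and process() are made-up names):

	struct item {
		int v;
		struct llist_node node;
	};
	static LLIST_HEAD(pending);

	static void producer(struct item *it)
	{
		llist_add(&it->node, &pending);		/* lock-free push */
	}

	static void consumer(void)
	{
		struct llist_node *n = llist_del_all(&pending);

		while (n) {				/* walk the detached list */
			struct item *it = llist_entry(n, struct item, node);

			n = n->next;
			process(it);
		}
	}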
diff --git a/net/rds/info.c b/net/rds/info.c
index 4fdf1b6e84ff..f1c016c4146e 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -34,6 +34,7 @@
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/export.h>
37 38
38#include "rds.h" 39#include "rds.h"
39 40
diff --git a/net/rds/iw.c b/net/rds/iw.c
index f7474844f096..7826d46baa70 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -38,6 +38,7 @@
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/module.h>
41 42
42#include "rds.h" 43#include "rds.h"
43#include "iw.h" 44#include "iw.h"
diff --git a/net/rds/message.c b/net/rds/message.c
index 1fd3d29023d7..f0a4658f3273 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h>
35 36
36#include "rds.h" 37#include "rds.h"
37 38
diff --git a/net/rds/page.c b/net/rds/page.c
index b82d63e77b03..2499cd108421 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -33,6 +33,7 @@
33#include <linux/highmem.h> 33#include <linux/highmem.h>
34#include <linux/gfp.h> 34#include <linux/gfp.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/export.h>
36 37
37#include "rds.h" 38#include "rds.h"
38 39
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index f8760e1b6688..c2be901d19ee 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -30,6 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 * 31 *
32 */ 32 */
33#include <linux/module.h>
33#include <rdma/rdma_cm.h> 34#include <rdma/rdma_cm.h>
34 35
35#include "rdma_transport.h" 36#include "rdma_transport.h"
diff --git a/net/rds/rds.h b/net/rds/rds.h
index da8adac2bf06..7eaba1831f0d 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -36,8 +36,8 @@
36#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args) 36#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
37#else 37#else
38/* sigh, pr_debug() causes unused variable warnings */ 38/* sigh, pr_debug() causes unused variable warnings */
39static inline void __attribute__ ((format (printf, 1, 2))) 39static inline __printf(1, 2)
40rdsdebug(char *fmt, ...) 40void rdsdebug(char *fmt, ...)
41{ 41{
42} 42}
43#endif 43#endif
@@ -625,8 +625,8 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
625 struct rds_info_lengths *lens, 625 struct rds_info_lengths *lens,
626 int (*visitor)(struct rds_connection *, void *), 626 int (*visitor)(struct rds_connection *, void *),
627 size_t item_len); 627 size_t item_len);
628void __rds_conn_error(struct rds_connection *conn, const char *, ...) 628__printf(2, 3)
629 __attribute__ ((format (printf, 2, 3))); 629void __rds_conn_error(struct rds_connection *conn, const char *, ...);
630#define rds_conn_error(conn, fmt...) \ 630#define rds_conn_error(conn, fmt...) \
631 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt) 631 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
632 632
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 596689e59272..bc3f8cd6d070 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/sock.h> 35#include <net/sock.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/export.h>
37 38
38#include "rds.h" 39#include "rds.h"
39 40
diff --git a/net/rds/send.c b/net/rds/send.c
index aa57e22539ef..e2d63c59e7c2 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -31,11 +31,13 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/moduleparam.h>
34#include <linux/gfp.h> 35#include <linux/gfp.h>
35#include <net/sock.h> 36#include <net/sock.h>
36#include <linux/in.h> 37#include <linux/in.h>
37#include <linux/list.h> 38#include <linux/list.h>
38#include <linux/ratelimit.h> 39#include <linux/ratelimit.h>
40#include <linux/export.h>
39 41
40#include "rds.h" 42#include "rds.h"
41 43
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 10c759ccac0c..7be790d60b90 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -33,6 +33,7 @@
33#include <linux/percpu.h> 33#include <linux/percpu.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/export.h>
36 37
37#include "rds.h" 38#include "rds.h"
38 39
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 8e0a32001c90..edac9ef2bc8b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -33,6 +33,7 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/module.h>
36#include <net/tcp.h> 37#include <net/tcp.h>
37 38
38#include "rds.h" 39#include "rds.h"
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 0fd90f8c5f59..65eaefcab241 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <linux/export.h>
35 36
36#include "rds.h" 37#include "rds.h"
37 38
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
deleted file mode 100644
index e6b5190daddd..000000000000
--- a/net/rds/xlist.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef _LINUX_XLIST_H
2#define _LINUX_XLIST_H
3
4#include <linux/stddef.h>
5#include <linux/poison.h>
6#include <linux/prefetch.h>
7#include <asm/system.h>
8
9struct xlist_head {
10 struct xlist_head *next;
11};
12
13static inline void INIT_XLIST_HEAD(struct xlist_head *list)
14{
15 list->next = NULL;
16}
17
18static inline int xlist_empty(struct xlist_head *head)
19{
20 return head->next == NULL;
21}
22
23static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
24 struct xlist_head *head)
25{
26 struct xlist_head *cur;
27 struct xlist_head *check;
28
29 while (1) {
30 cur = head->next;
31 tail->next = cur;
32 check = cmpxchg(&head->next, cur, new);
33 if (check == cur)
34 break;
35 }
36}
37
38static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
39{
40 struct xlist_head *cur;
41 struct xlist_head *check;
42 struct xlist_head *next;
43
44 while (1) {
45 cur = head->next;
46 if (!cur)
47 goto out;
48
49 next = cur->next;
50 check = cmpxchg(&head->next, cur, next);
51 if (check == cur)
52 goto out;
53 }
54out:
55 return cur;
56}
57
58static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
59{
60 struct xlist_head *cur;
61
62 cur = head->next;
63 if (!cur)
64 return NULL;
65
66 head->next = cur->next;
67 return cur;
68}
69
70static inline void xlist_splice(struct xlist_head *list,
71 struct xlist_head *head)
72{
73 struct xlist_head *cur;
74
75 WARN_ON(head->next);
76 cur = xchg(&list->next, NULL);
77 head->next = cur;
78}
79
80#endif
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a2774..5be19575c340 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -235,7 +235,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
235 else 235 else
236 rfkill->state &= ~RFKILL_BLOCK_HW; 236 rfkill->state &= ~RFKILL_BLOCK_HW;
237 *change = prev != blocked; 237 *change = prev != blocked;
238 any = rfkill->state & RFKILL_BLOCK_ANY; 238 any = !!(rfkill->state & RFKILL_BLOCK_ANY);
239 spin_unlock_irqrestore(&rfkill->lock, flags); 239 spin_unlock_irqrestore(&rfkill->lock, flags);
240 240
241 rfkill_led_trigger_event(rfkill); 241 rfkill_led_trigger_event(rfkill);
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 1bca6d49ec96..24c55c53e6a2 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/input.h> 16#include <linux/input.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/moduleparam.h>
18#include <linux/workqueue.h> 19#include <linux/workqueue.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/rfkill.h> 21#include <linux/rfkill.h>
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 256c5ddd2d72..128677d69056 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -101,6 +101,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
101 if (!rfkill) 101 if (!rfkill)
102 return -ENOMEM; 102 return -ENOMEM;
103 103
104 if (pdata->gpio_runtime_setup) {
105 ret = pdata->gpio_runtime_setup(pdev);
106 if (ret) {
107 pr_warn("%s: can't set up gpio\n", __func__);
108 return ret;
109 }
110 }
111
104 rfkill->pdata = pdata; 112 rfkill->pdata = pdata;
105 113
106 len = strlen(pdata->name); 114 len = strlen(pdata->name);
@@ -182,7 +190,10 @@ fail_alloc:
182static int rfkill_gpio_remove(struct platform_device *pdev) 190static int rfkill_gpio_remove(struct platform_device *pdev)
183{ 191{
184 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); 192 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
193 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
185 194
195 if (pdata->gpio_runtime_close)
196 pdata->gpio_runtime_close(pdev);
186 rfkill_unregister(rfkill->rfkill_dev); 197 rfkill_unregister(rfkill->rfkill_dev);
187 rfkill_destroy(rfkill->rfkill_dev); 198 rfkill_destroy(rfkill->rfkill_dev);
188 if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) 199 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 18dc512a10f3..3ca7277a3c36 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -90,7 +90,6 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
90 pdata->type, 90 pdata->type,
91 &rfkill_regulator_ops, rfkill_data); 91 &rfkill_regulator_ops, rfkill_data);
92 if (rf_kill == NULL) { 92 if (rf_kill == NULL) {
93 dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
94 ret = -ENOMEM; 93 ret = -ENOMEM;
95 goto err_rfkill_alloc; 94 goto err_rfkill_alloc;
96 } 95 }
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index d389de197089..cd9b7ee60f3e 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <net/rose.h> 37#include <net/rose.h>
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39#include <linux/export.h>
39 40
40static unsigned int rose_neigh_no = 1; 41static unsigned int rose_neigh_no = 1;
41 42
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 5f22e263eda7..338d793c7113 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -13,6 +13,7 @@
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
16#include <linux/export.h>
16#include <net/sock.h> 17#include <net/sock.h>
17#include <net/af_rxrpc.h> 18#include <net/af_rxrpc.h>
18#include "ar-internal.h" 19#include "ar-internal.h"
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 0c65013e3bfe..4b48687c3890 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/net.h> 12#include <linux/net.h>
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/export.h>
14#include <net/sock.h> 15#include <net/sock.h>
15#include <net/af_rxrpc.h> 16#include <net/af_rxrpc.h>
16#include "ar-internal.h" 17#include "ar-internal.h"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f2fb67e701a3..93fdf131bd75 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kmod.h> 21#include <linux/kmod.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/module.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/sock.h> 25#include <net/sock.h>
25#include <net/sch_generic.h> 26#include <net/sch_generic.h>
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6994214db8f8..7b582300d051 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -21,6 +21,7 @@
21#include <linux/ipv6.h> 21#include <linux/ipv6.h>
22#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
24 25
25#include <net/pkt_cls.h> 26#include <net/pkt_cls.h>
26#include <net/ip.h> 27#include <net/ip.h>
@@ -65,132 +66,134 @@ static inline u32 addr_fold(void *addr)
65 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); 66 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
66} 67}
67 68
68static u32 flow_get_src(struct sk_buff *skb) 69static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
69{ 70{
71 __be32 *data = NULL, hdata;
72
70 switch (skb->protocol) { 73 switch (skb->protocol) {
71 case htons(ETH_P_IP): 74 case htons(ETH_P_IP):
72 if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 75 data = skb_header_pointer(skb,
73 return ntohl(ip_hdr(skb)->saddr); 76 nhoff + offsetof(struct iphdr,
77 saddr),
78 4, &hdata);
74 break; 79 break;
75 case htons(ETH_P_IPV6): 80 case htons(ETH_P_IPV6):
76 if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 81 data = skb_header_pointer(skb,
77 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 82 nhoff + offsetof(struct ipv6hdr,
83 saddr.s6_addr32[3]),
84 4, &hdata);
78 break; 85 break;
79 } 86 }
80 87
88 if (data)
89 return ntohl(*data);
81 return addr_fold(skb->sk); 90 return addr_fold(skb->sk);
82} 91}
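The skb_header_pointer() pattern used throughout this conversion either returns a pointer straight into the skb's linear data or copies the requested bytes into the caller's stack buffer, returning NULL when the offset is out of bounds; unlike pskb_network_may_pull(), it never modifies the skb. A hedged distillation of the call shape:

	__be32 hdata, *data;

	data = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
				  sizeof(hdata), &hdata);
	if (data)
		return ntohl(*data);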
83 92
84static u32 flow_get_dst(struct sk_buff *skb) 93static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
85{ 94{
95 __be32 *data = NULL, hdata;
96
86 switch (skb->protocol) { 97 switch (skb->protocol) {
87 case htons(ETH_P_IP): 98 case htons(ETH_P_IP):
88 if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 99 data = skb_header_pointer(skb,
89 return ntohl(ip_hdr(skb)->daddr); 100 nhoff + offsetof(struct iphdr,
101 daddr),
102 4, &hdata);
90 break; 103 break;
91 case htons(ETH_P_IPV6): 104 case htons(ETH_P_IPV6):
92 if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 105 data = skb_header_pointer(skb,
93 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 106 nhoff + offsetof(struct ipv6hdr,
107 daddr.s6_addr32[3]),
108 4, &hdata);
94 break; 109 break;
95 } 110 }
96 111
112 if (data)
113 return ntohl(*data);
97 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 114 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
98} 115}
99 116
100static u32 flow_get_proto(struct sk_buff *skb) 117static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
101{ 118{
119 __u8 *data = NULL, hdata;
120
102 switch (skb->protocol) { 121 switch (skb->protocol) {
103 case htons(ETH_P_IP): 122 case htons(ETH_P_IP):
104 return pskb_network_may_pull(skb, sizeof(struct iphdr)) ? 123 data = skb_header_pointer(skb,
105 ip_hdr(skb)->protocol : 0; 124 nhoff + offsetof(struct iphdr,
125 protocol),
126 1, &hdata);
127 break;
106 case htons(ETH_P_IPV6): 128 case htons(ETH_P_IPV6):
107 return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ? 129 data = skb_header_pointer(skb,
108 ipv6_hdr(skb)->nexthdr : 0; 130 nhoff + offsetof(struct ipv6hdr,
109 default: 131 nexthdr),
110 return 0; 132 1, &hdata);
133 break;
111 } 134 }
135 if (data)
136 return *data;
137 return 0;
112} 138}
113 139
114static u32 flow_get_proto_src(struct sk_buff *skb) 140/* helper function to get either src or dst port */
141static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
142 __be16 *_port, int dst)
115{ 143{
144 __be16 *port = NULL;
145 int poff;
146
116 switch (skb->protocol) { 147 switch (skb->protocol) {
117 case htons(ETH_P_IP): { 148 case htons(ETH_P_IP): {
118 struct iphdr *iph; 149 struct iphdr *iph, _iph;
119 int poff;
120 150
121 if (!pskb_network_may_pull(skb, sizeof(*iph))) 151 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
152 if (!iph)
122 break; 153 break;
123 iph = ip_hdr(skb);
124 if (ip_is_fragment(iph)) 154 if (ip_is_fragment(iph))
125 break; 155 break;
126 poff = proto_ports_offset(iph->protocol); 156 poff = proto_ports_offset(iph->protocol);
127 if (poff >= 0 && 157 if (poff >= 0)
128 pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) { 158 port = skb_header_pointer(skb,
129 iph = ip_hdr(skb); 159 nhoff + iph->ihl * 4 + poff + dst,
130 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 160 sizeof(*_port), _port);
131 poff));
132 }
133 break; 161 break;
134 } 162 }
135 case htons(ETH_P_IPV6): { 163 case htons(ETH_P_IPV6): {
136 struct ipv6hdr *iph; 164 struct ipv6hdr *iph, _iph;
137 int poff;
138 165
139 if (!pskb_network_may_pull(skb, sizeof(*iph))) 166 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
167 if (!iph)
140 break; 168 break;
141 iph = ipv6_hdr(skb);
142 poff = proto_ports_offset(iph->nexthdr); 169 poff = proto_ports_offset(iph->nexthdr);
143 if (poff >= 0 && 170 if (poff >= 0)
144 pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) { 171 port = skb_header_pointer(skb,
145 iph = ipv6_hdr(skb); 172 nhoff + sizeof(*iph) + poff + dst,
146 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) + 173 sizeof(*_port), _port);
147 poff));
148 }
149 break; 174 break;
150 } 175 }
151 } 176 }
152 177
153 return addr_fold(skb->sk); 178 return port;
154} 179}
155 180
156static u32 flow_get_proto_dst(struct sk_buff *skb) 181static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
157{ 182{
158 switch (skb->protocol) { 183 __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
159 case htons(ETH_P_IP): {
160 struct iphdr *iph;
161 int poff;
162 184
163 if (!pskb_network_may_pull(skb, sizeof(*iph))) 185 if (port)
164 break; 186 return ntohs(*port);
165 iph = ip_hdr(skb);
166 if (ip_is_fragment(iph))
167 break;
168 poff = proto_ports_offset(iph->protocol);
169 if (poff >= 0 &&
170 pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
171 iph = ip_hdr(skb);
172 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
173 2 + poff));
174 }
175 break;
176 }
177 case htons(ETH_P_IPV6): {
178 struct ipv6hdr *iph;
179 int poff;
180 187
181 if (!pskb_network_may_pull(skb, sizeof(*iph))) 188 return addr_fold(skb->sk);
182 break; 189}
183 iph = ipv6_hdr(skb); 190
184 poff = proto_ports_offset(iph->nexthdr); 191static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
185 if (poff >= 0 && 192{
186 pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) { 193 __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
187 iph = ipv6_hdr(skb); 194
188 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) + 195 if (port)
189 poff + 2)); 196 return ntohs(*port);
190 }
191 break;
192 }
193 }
194 197
195 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 198 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
196} 199}
@@ -223,7 +226,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
223#define CTTUPLE(skb, member) \ 226#define CTTUPLE(skb, member) \
224({ \ 227({ \
225 enum ip_conntrack_info ctinfo; \ 228 enum ip_conntrack_info ctinfo; \
226 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \ 229 const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
227 if (ct == NULL) \ 230 if (ct == NULL) \
228 goto fallback; \ 231 goto fallback; \
229 ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \ 232 ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
@@ -236,7 +239,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
236}) 239})
237#endif 240#endif
238 241
239static u32 flow_get_nfct_src(struct sk_buff *skb) 242static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
240{ 243{
241 switch (skb->protocol) { 244 switch (skb->protocol) {
242 case htons(ETH_P_IP): 245 case htons(ETH_P_IP):
@@ -245,10 +248,10 @@ static u32 flow_get_nfct_src(struct sk_buff *skb)
245 return ntohl(CTTUPLE(skb, src.u3.ip6[3])); 248 return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
246 } 249 }
247fallback: 250fallback:
248 return flow_get_src(skb); 251 return flow_get_src(skb, nhoff);
249} 252}
250 253
251static u32 flow_get_nfct_dst(struct sk_buff *skb) 254static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
252{ 255{
253 switch (skb->protocol) { 256 switch (skb->protocol) {
254 case htons(ETH_P_IP): 257 case htons(ETH_P_IP):
@@ -257,21 +260,21 @@ static u32 flow_get_nfct_dst(struct sk_buff *skb)
257 return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); 260 return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
258 } 261 }
259fallback: 262fallback:
260 return flow_get_dst(skb); 263 return flow_get_dst(skb, nhoff);
261} 264}
262 265
263static u32 flow_get_nfct_proto_src(struct sk_buff *skb) 266static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
264{ 267{
265 return ntohs(CTTUPLE(skb, src.u.all)); 268 return ntohs(CTTUPLE(skb, src.u.all));
266fallback: 269fallback:
267 return flow_get_proto_src(skb); 270 return flow_get_proto_src(skb, nhoff);
268} 271}
269 272
270static u32 flow_get_nfct_proto_dst(struct sk_buff *skb) 273static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
271{ 274{
272 return ntohs(CTTUPLE(skb, dst.u.all)); 275 return ntohs(CTTUPLE(skb, dst.u.all));
273fallback: 276fallback:
274 return flow_get_proto_dst(skb); 277 return flow_get_proto_dst(skb, nhoff);
275} 278}
276 279
277static u32 flow_get_rtclassid(const struct sk_buff *skb) 280static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -313,17 +316,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
313 316
314static u32 flow_key_get(struct sk_buff *skb, int key) 317static u32 flow_key_get(struct sk_buff *skb, int key)
315{ 318{
319 int nhoff = skb_network_offset(skb);
320
316 switch (key) { 321 switch (key) {
317 case FLOW_KEY_SRC: 322 case FLOW_KEY_SRC:
318 return flow_get_src(skb); 323 return flow_get_src(skb, nhoff);
319 case FLOW_KEY_DST: 324 case FLOW_KEY_DST:
320 return flow_get_dst(skb); 325 return flow_get_dst(skb, nhoff);
321 case FLOW_KEY_PROTO: 326 case FLOW_KEY_PROTO:
322 return flow_get_proto(skb); 327 return flow_get_proto(skb, nhoff);
323 case FLOW_KEY_PROTO_SRC: 328 case FLOW_KEY_PROTO_SRC:
324 return flow_get_proto_src(skb); 329 return flow_get_proto_src(skb, nhoff);
325 case FLOW_KEY_PROTO_DST: 330 case FLOW_KEY_PROTO_DST:
326 return flow_get_proto_dst(skb); 331 return flow_get_proto_dst(skb, nhoff);
327 case FLOW_KEY_IIF: 332 case FLOW_KEY_IIF:
328 return flow_get_iif(skb); 333 return flow_get_iif(skb);
329 case FLOW_KEY_PRIORITY: 334 case FLOW_KEY_PRIORITY:
@@ -333,13 +338,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
333 case FLOW_KEY_NFCT: 338 case FLOW_KEY_NFCT:
334 return flow_get_nfct(skb); 339 return flow_get_nfct(skb);
335 case FLOW_KEY_NFCT_SRC: 340 case FLOW_KEY_NFCT_SRC:
336 return flow_get_nfct_src(skb); 341 return flow_get_nfct_src(skb, nhoff);
337 case FLOW_KEY_NFCT_DST: 342 case FLOW_KEY_NFCT_DST:
338 return flow_get_nfct_dst(skb); 343 return flow_get_nfct_dst(skb, nhoff);
339 case FLOW_KEY_NFCT_PROTO_SRC: 344 case FLOW_KEY_NFCT_PROTO_SRC:
340 return flow_get_nfct_proto_src(skb); 345 return flow_get_nfct_proto_src(skb, nhoff);
341 case FLOW_KEY_NFCT_PROTO_DST: 346 case FLOW_KEY_NFCT_PROTO_DST:
342 return flow_get_nfct_proto_dst(skb); 347 return flow_get_nfct_proto_dst(skb, nhoff);
343 case FLOW_KEY_RTCLASSID: 348 case FLOW_KEY_RTCLASSID:
344 return flow_get_rtclassid(skb); 349 return flow_get_rtclassid(skb);
345 case FLOW_KEY_SKUID: 350 case FLOW_KEY_SKUID:
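
The cls_flow rework above reads classifier keys through skb_header_pointer() with the network-header offset passed down as nhoff, instead of pskb_network_may_pull() plus direct ip_hdr()/ipv6_hdr() dereferences. skb_header_pointer() either returns a pointer into the linear skb data or copies the requested bytes into the caller's stack buffer, so classification works on non-linear skbs without pulling (and potentially reallocating) them; the near-identical src/dst port getters also collapse into flow_get_proto_common(), where dst selects the second 16-bit port field at poff + 2. A minimal, runnable userspace sketch of the header-pointer contract follows; bounded_read() is a hypothetical stand-in for skb_header_pointer(), and the packet layout is assumed for illustration, not taken from the patch:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical analogue of skb_header_pointer(): return a pointer to
     * len bytes at offset, copying them into buf. Returns NULL when the
     * requested span is out of range, so the caller must fall back. */
    static const void *bounded_read(const uint8_t *pkt, size_t pkt_len,
                                    size_t offset, size_t len, void *buf)
    {
        if (len > pkt_len || offset > pkt_len - len)
            return NULL;
        memcpy(buf, pkt + offset, len);
        return buf;
    }

    int main(void)
    {
        uint8_t pkt[20] = {0x45};            /* bare IPv4 header, rest zeroed */
        uint32_t saddr = htonl(0xC0000201);  /* 192.0.2.1 at offset 12 */
        uint32_t hdata;
        const uint32_t *data;

        memcpy(pkt + 12, &saddr, 4);
        data = bounded_read(pkt, sizeof(pkt), 12, 4, &hdata);
        if (data)                            /* mirrors "if (data) return ntohl(*data);" */
            printf("src key: %08x\n", (unsigned)ntohl(*data));
        return 0;
    }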
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ec5cbc848963..0a4b2f9a0094 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/export.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ea17cbed29ef..f88256cbacbf 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/module.h>
17#include <net/netlink.h> 18#include <net/netlink.h>
18#include <net/pkt_sched.h> 19#include <net/pkt_sched.h>
19#include <net/sch_generic.h> 20#include <net/sch_generic.h>
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0c1f61..e83c272c0325 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
287 u32 r, slot, salt, sfbhash; 287 u32 r, slot, salt, sfbhash;
288 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 288 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
289 289
290 if (unlikely(sch->q.qlen >= q->limit)) {
291 sch->qstats.overlimits++;
292 q->stats.queuedrop++;
293 goto drop;
294 }
295
290 if (q->rehash_interval > 0) { 296 if (q->rehash_interval > 0) {
291 unsigned long limit = q->rehash_time + q->rehash_interval; 297 unsigned long limit = q->rehash_time + q->rehash_interval;
292 298
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
332 slot ^= 1; 338 slot ^= 1;
333 sfb_skb_cb(skb)->hashes[slot] = 0; 339 sfb_skb_cb(skb)->hashes[slot] = 0;
334 340
335 if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) { 341 if (unlikely(minqlen >= q->max)) {
336 sch->qstats.overlimits++; 342 sch->qstats.overlimits++;
337 if (minqlen >= q->max) 343 q->stats.bucketdrop++;
338 q->stats.bucketdrop++;
339 else
340 q->stats.queuedrop++;
341 goto drop; 344 goto drop;
342 } 345 }
343 346
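
In sch_sfb the overall queue-limit test moves to the very top of sfb_enqueue(): a packet that exceeds q->limit is now counted as queuedrop and dropped before any rehash bookkeeping or per-bucket hashing is spent on it, and the remaining check deals only with a saturated bucket (bucketdrop), untangling the two drop statistics.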
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dc16b90ddb6f..152b5b3c3fff 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -282,6 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
282 asoc->peer.asconf_capable = 1; 282 asoc->peer.asconf_capable = 1;
283 asoc->asconf_addr_del_pending = NULL; 283 asoc->asconf_addr_del_pending = NULL;
284 asoc->src_out_of_asoc_ok = 0; 284 asoc->src_out_of_asoc_ok = 0;
285 asoc->new_transport = NULL;
285 286
286 /* Create an input queue. */ 287 /* Create an input queue. */
287 sctp_inq_init(&asoc->base.inqueue); 288 sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index aabaee41dd3e..810427833bcd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -243,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
243 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 243 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
244 skb->local_df = 1; 244 skb->local_df = 1;
245 245
246 return ip6_xmit(sk, skb, &fl6, np->opt); 246 return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
247} 247}
248 248
249/* Returns the dst cache entry for the given source and destination ip 249/* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a6d27bf563a5..14c2b06028ff 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -917,6 +917,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
917 * current cwnd). 917 * current cwnd).
918 */ 918 */
919 if (!list_empty(&q->retransmit)) { 919 if (!list_empty(&q->retransmit)) {
920 if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
921 goto sctp_flush_out;
920 if (transport == asoc->peer.retran_path) 922 if (transport == asoc->peer.retran_path)
921 goto retran; 923 goto retran;
922 924
@@ -989,6 +991,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
989 ((new_transport->state == SCTP_INACTIVE) || 991 ((new_transport->state == SCTP_INACTIVE) ||
990 (new_transport->state == SCTP_UNCONFIRMED))) 992 (new_transport->state == SCTP_UNCONFIRMED)))
991 new_transport = asoc->peer.active_path; 993 new_transport = asoc->peer.active_path;
994 if (new_transport->state == SCTP_UNCONFIRMED)
995 continue;
992 996
993 /* Change packets if necessary. */ 997 /* Change packets if necessary. */
994 if (new_transport != transport) { 998 if (new_transport != transport) {
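
The sctp_outq_flush() changes keep traffic off unconfirmed paths: retransmissions are skipped entirely while the retransmit path is still SCTP_UNCONFIRMED, and chunks whose chosen transport is unconfirmed are left queued (continue) rather than sent, matching the rule that data must not be sent to a peer address before a heartbeat has confirmed it.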
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 05a6ce214714..1e2eee88c3ea 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -37,6 +37,7 @@
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/export.h>
40#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
41#include <net/ip.h> /* for snmp_fold_field */ 42#include <net/ip.h> /* for snmp_fold_field */
42 43
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 91784f44a2e2..61b9fca5a173 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1299,7 +1299,7 @@ SCTP_STATIC __init int sctp_init(void)
1299 max_share = min(4UL*1024*1024, limit); 1299 max_share = min(4UL*1024*1024, limit);
1300 1300
1301 sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ 1301 sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
1302 sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); 1302 sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
1303 sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); 1303 sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
1304 1304
1305 sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; 1305 sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
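
The sysctl_sctp_rmem[1] change replaces an open-coded estimate with SKB_TRUESIZE(1), which in this kernel also charges the aligned struct sk_buff and struct skb_shared_info overhead per skb, so the default receive floor reflects what 1500 one-byte skbs actually cost in truesize terms instead of undercounting.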
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 81db4e385352..0121e0ab0351 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3015,6 +3015,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3015 /* Start the heartbeat timer. */ 3015 /* Start the heartbeat timer. */
3016 if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) 3016 if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
3017 sctp_transport_hold(peer); 3017 sctp_transport_hold(peer);
3018 asoc->new_transport = peer;
3018 break; 3019 break;
3019 case SCTP_PARAM_DEL_IP: 3020 case SCTP_PARAM_DEL_IP:
3020 /* ADDIP 4.3 D7) If a request is received to delete the 3021 /* ADDIP 4.3 D7) If a request is received to delete the
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0f31e6c1c63..891f5db8cc31 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3618,6 +3618,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3618 */ 3618 */
3619 asconf_ack->dest = chunk->source; 3619 asconf_ack->dest = chunk->source;
3620 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); 3620 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
3621 if (asoc->new_transport) {
3622 sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
3623 commands);
3624 ((struct sctp_association *)asoc)->new_transport = NULL;
3625 }
3621 3626
3622 return SCTP_DISPOSITION_CONSUME; 3627 return SCTP_DISPOSITION_CONSUME;
3623} 3628}
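
These three sctp hunks work together: associola.c initializes asoc->new_transport, sctp_process_asconf_param() records the transport created for an ADD_IP request there, and sctp_sf_do_asconf() fires an immediate heartbeat on it right after queueing the ASCONF-ACK, so the freshly added address is confirmed (and becomes usable) without waiting for the normal heartbeat timer. The cast merely strips the const that the state-function signature imposes on asoc.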
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 836aa63ee121..13bf5fcdbff1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -78,6 +78,7 @@
78#include <net/inet_common.h> 78#include <net/inet_common.h>
79 79
80#include <linux/socket.h> /* for sa_family_t */ 80#include <linux/socket.h> /* for sa_family_t */
81#include <linux/export.h>
81#include <net/sock.h> 82#include <net/sock.h>
82#include <net/sctp/sctp.h> 83#include <net/sctp/sctp.h>
83#include <net/sctp/sm.h> 84#include <net/sctp/sm.h>
diff --git a/net/socket.c b/net/socket.c
index ffe92ca32f2a..2877647f347b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto_family *ops)
2472 lockdep_is_held(&net_family_lock))) 2472 lockdep_is_held(&net_family_lock)))
2473 err = -EEXIST; 2473 err = -EEXIST;
2474 else { 2474 else {
2475 rcu_assign_pointer(net_families[ops->family], ops); 2475 RCU_INIT_POINTER(net_families[ops->family], ops);
2476 err = 0; 2476 err = 0;
2477 } 2477 }
2478 spin_unlock(&net_family_lock); 2478 spin_unlock(&net_family_lock);
@@ -2500,7 +2500,7 @@ void sock_unregister(int family)
2500 BUG_ON(family < 0 || family >= NPROTO); 2500 BUG_ON(family < 0 || family >= NPROTO);
2501 2501
2502 spin_lock(&net_family_lock); 2502 spin_lock(&net_family_lock);
2503 rcu_assign_pointer(net_families[family], NULL); 2503 RCU_INIT_POINTER(net_families[family], NULL);
2504 spin_unlock(&net_family_lock); 2504 spin_unlock(&net_family_lock);
2505 2505
2506 synchronize_rcu(); 2506 synchronize_rcu();
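
rcu_assign_pointer() contains a write memory barrier so that a reader who sees the new pointer is guaranteed to also see the pointee's prior initialization. Neither store converted here needs that ordering: sock_unregister() publishes NULL, which carries no payload to order, and sock_register() stores an ops table assumed fully initialized by its caller long before registration, so RCU_INIT_POINTER() drops the barrier. A runnable C11-atomics analogue of the distinction, using hypothetical names:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ops { int family; };

    static _Atomic(struct ops *) gp;    /* stands in for net_families[f] */

    int main(void)
    {
        static struct ops inet_ops;

        inet_ops.family = 2;
        /* rcu_assign_pointer() analogue: the release store guarantees a
         * reader that sees the pointer also sees inet_ops.family == 2. */
        atomic_store_explicit(&gp, &inet_ops, memory_order_release);

        struct ops *o = atomic_load_explicit(&gp, memory_order_acquire);
        if (o)
            printf("family %d\n", o->family);

        /* RCU_INIT_POINTER() analogue: NULL publishes nothing, so a
         * plain (relaxed) store suffices and the barrier is pure waste. */
        atomic_store_explicit(&gp, NULL, memory_order_relaxed);
        return 0;
    }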
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 4195233c4914..67a655ee82a9 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -19,6 +19,7 @@
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20#include <linux/sunrpc/clnt.h> 20#include <linux/sunrpc/clnt.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/export.h>
22 23
23#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 24#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
24 25
@@ -255,12 +256,13 @@ EXPORT_SYMBOL_GPL(rpc_pton);
255/** 256/**
256 * rpc_sockaddr2uaddr - Construct a universal address string from @sap. 257 * rpc_sockaddr2uaddr - Construct a universal address string from @sap.
257 * @sap: socket address 258 * @sap: socket address
259 * @gfp_flags: allocation mode
258 * 260 *
259 * Returns a %NUL-terminated string in dynamically allocated memory; 261 * Returns a %NUL-terminated string in dynamically allocated memory;
260 * otherwise NULL is returned if an error occurred. Caller must 262 * otherwise NULL is returned if an error occurred. Caller must
261 * free the returned string. 263 * free the returned string.
262 */ 264 */
263char *rpc_sockaddr2uaddr(const struct sockaddr *sap) 265char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
264{ 266{
265 char portbuf[RPCBIND_MAXUADDRPLEN]; 267 char portbuf[RPCBIND_MAXUADDRPLEN];
266 char addrbuf[RPCBIND_MAXUADDRLEN]; 268 char addrbuf[RPCBIND_MAXUADDRLEN];
@@ -288,9 +290,8 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
288 if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf)) 290 if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
289 return NULL; 291 return NULL;
290 292
291 return kstrdup(addrbuf, GFP_KERNEL); 293 return kstrdup(addrbuf, gfp_flags);
292} 294}
293EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
294 295
295/** 296/**
296 * rpc_uaddr2sockaddr - convert a universal address to a socket address. 297 * rpc_uaddr2sockaddr - convert a universal address to a socket address.
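
rpc_sockaddr2uaddr() now takes its allocation mode from the caller: the rpcb_register paths keep GFP_KERNEL, while rpcb_getport_async() (further down) runs from the RPC state machine and passes GFP_ATOMIC, which the old hard-coded GFP_KERNEL kstrdup() made impossible. With all users now inside sunrpc, the EXPORT_SYMBOL_GPL goes away as well.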
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 364eb45e989d..afb56553dfe7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
122 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) 122 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
123 return; 123 return;
124 gss_get_ctx(ctx); 124 gss_get_ctx(ctx);
125 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 125 RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
126 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 126 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
127 smp_mb__before_clear_bit(); 127 smp_mb__before_clear_bit();
128 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 128 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -603,26 +603,6 @@ out:
603 return err; 603 return err;
604} 604}
605 605
606static ssize_t
607gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
608 char __user *dst, size_t buflen)
609{
610 char *data = (char *)msg->data + msg->copied;
611 size_t mlen = min(msg->len, buflen);
612 unsigned long left;
613
614 left = copy_to_user(dst, data, mlen);
615 if (left == mlen) {
616 msg->errno = -EFAULT;
617 return -EFAULT;
618 }
619
620 mlen -= left;
621 msg->copied += mlen;
622 msg->errno = 0;
623 return mlen;
624}
625
626#define MSG_BUF_MAXSIZE 1024 606#define MSG_BUF_MAXSIZE 1024
627 607
628static ssize_t 608static ssize_t
@@ -970,7 +950,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
970 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 950 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
971 struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 951 struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
972 952
973 rcu_assign_pointer(gss_cred->gc_ctx, NULL); 953 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
974 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 954 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
975 if (ctx) 955 if (ctx)
976 gss_put_ctx(ctx); 956 gss_put_ctx(ctx);
@@ -1590,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
1590}; 1570};
1591 1571
1592static const struct rpc_pipe_ops gss_upcall_ops_v0 = { 1572static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
1593 .upcall = gss_pipe_upcall, 1573 .upcall = rpc_pipe_generic_upcall,
1594 .downcall = gss_pipe_downcall, 1574 .downcall = gss_pipe_downcall,
1595 .destroy_msg = gss_pipe_destroy_msg, 1575 .destroy_msg = gss_pipe_destroy_msg,
1596 .open_pipe = gss_pipe_open_v0, 1576 .open_pipe = gss_pipe_open_v0,
@@ -1598,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
1598}; 1578};
1599 1579
1600static const struct rpc_pipe_ops gss_upcall_ops_v1 = { 1580static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
1601 .upcall = gss_pipe_upcall, 1581 .upcall = rpc_pipe_generic_upcall,
1602 .downcall = gss_pipe_downcall, 1582 .downcall = gss_pipe_downcall,
1603 .destroy_msg = gss_pipe_destroy_msg, 1583 .destroy_msg = gss_pipe_destroy_msg,
1604 .open_pipe = gss_pipe_open_v1, 1584 .open_pipe = gss_pipe_open_v1,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4cb70dc6e7ad..e50502d8ceb7 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
129 for (i = 0; i < groups ; i++) 129 for (i = 0; i < groups ; i++)
130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i)) 130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
131 return 0; 131 return 0;
132 if (groups < NFS_NGROUPS &&
133 cred->uc_gids[groups] != NOGROUP)
134 return 0;
132 return 1; 135 return 1;
133} 136}
134 137
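
The unx_match() fix closes a false positive: the old loop compared only the first 'groups' gids, so a cached cred carrying additional groups beyond acred's list could still match. Checking that uc_gids[groups] is the NOGROUP terminator ensures the cached cred has exactly the same group count.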
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 91eaa26e4c42..3ad435a14ada 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -24,6 +24,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/sunrpc/xprt.h> 26#include <linux/sunrpc/xprt.h>
27#include <linux/export.h>
27 28
28#ifdef RPC_DEBUG 29#ifdef RPC_DEBUG
29#define RPCDBG_FACILITY RPCDBG_TRANS 30#define RPCDBG_FACILITY RPCDBG_TRANS
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c5347d29cfb7..f0268ea7e711 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -850,7 +850,9 @@ rpc_restart_call_prepare(struct rpc_task *task)
850{ 850{
851 if (RPC_ASSASSINATED(task)) 851 if (RPC_ASSASSINATED(task))
852 return 0; 852 return 0;
853 task->tk_action = rpc_prepare_task; 853 task->tk_action = call_start;
854 if (task->tk_ops->rpc_call_prepare != NULL)
855 task->tk_action = rpc_prepare_task;
854 return 1; 856 return 1;
855} 857}
856EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); 858EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
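
rpc_restart_call_prepare() used to jump straight to rpc_prepare_task(), which unconditionally invokes tk_ops->rpc_call_prepare and would therefore oops for tasks whose ops provide no prepare callback. Restarting from call_start, and detouring through rpc_prepare_task() only when the callback exists, makes the helper safe for all callers.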
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index b181e3441323..bfddd68b31d3 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
77 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); 77 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
78} 78}
79 79
80ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
81 char __user *dst, size_t buflen)
82{
83 char *data = (char *)msg->data + msg->copied;
84 size_t mlen = min(msg->len - msg->copied, buflen);
85 unsigned long left;
86
87 left = copy_to_user(dst, data, mlen);
88 if (left == mlen) {
89 msg->errno = -EFAULT;
90 return -EFAULT;
91 }
92
93 mlen -= left;
94 msg->copied += mlen;
95 msg->errno = 0;
96 return mlen;
97}
98EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
99
80/** 100/**
81 * rpc_queue_upcall - queue an upcall message to userspace 101 * rpc_queue_upcall - queue an upcall message to userspace
82 * @inode: inode of upcall pipe on which to queue given message 102 * @inode: inode of upcall pipe on which to queue given message
@@ -1084,3 +1104,6 @@ void unregister_rpc_pipefs(void)
1084 kmem_cache_destroy(rpc_inode_cachep); 1104 kmem_cache_destroy(rpc_inode_cachep);
1085 unregister_filesystem(&rpc_pipe_fs_type); 1105 unregister_filesystem(&rpc_pipe_fs_type);
1086} 1106}
1107
1108/* Make 'mount -t rpc_pipefs ...' autoload this module. */
1109MODULE_ALIAS("rpc_pipefs");
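
The gss hunks above and this one are two halves of a single cleanup: the duplicated gss_pipe_upcall() is hoisted into rpc_pipe_generic_upcall() and both pipe-ops tables point at it. The generic copy also fixes the resume-after-partial-read arithmetic: with msg->len = 100, msg->copied = 60 and buflen = 64, the old min(msg->len, buflen) would copy 64 bytes starting at offset 60 and run 24 bytes past the message, while min(msg->len - msg->copied, buflen) correctly copies the remaining 40. The new MODULE_ALIAS lets 'mount -t rpc_pipefs' autoload the module.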
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index e45d2fbbe5a8..8761bf8e36fc 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -114,6 +114,9 @@ static struct rpc_program rpcb_program;
114static struct rpc_clnt * rpcb_local_clnt; 114static struct rpc_clnt * rpcb_local_clnt;
115static struct rpc_clnt * rpcb_local_clnt4; 115static struct rpc_clnt * rpcb_local_clnt4;
116 116
117DEFINE_SPINLOCK(rpcb_clnt_lock);
118unsigned int rpcb_users;
119
117struct rpcbind_args { 120struct rpcbind_args {
118 struct rpc_xprt * r_xprt; 121 struct rpc_xprt * r_xprt;
119 122
@@ -161,6 +164,56 @@ static void rpcb_map_release(void *data)
161 kfree(map); 164 kfree(map);
162} 165}
163 166
167static int rpcb_get_local(void)
168{
169 int cnt;
170
171 spin_lock(&rpcb_clnt_lock);
172 if (rpcb_users)
173 rpcb_users++;
174 cnt = rpcb_users;
175 spin_unlock(&rpcb_clnt_lock);
176
177 return cnt;
178}
179
180void rpcb_put_local(void)
181{
182 struct rpc_clnt *clnt = rpcb_local_clnt;
183 struct rpc_clnt *clnt4 = rpcb_local_clnt4;
184 int shutdown;
185
186 spin_lock(&rpcb_clnt_lock);
187 if (--rpcb_users == 0) {
188 rpcb_local_clnt = NULL;
189 rpcb_local_clnt4 = NULL;
190 }
191 shutdown = !rpcb_users;
192 spin_unlock(&rpcb_clnt_lock);
193
194 if (shutdown) {
195 /*
196 * cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister
197 */
198 if (clnt4)
199 rpc_shutdown_client(clnt4);
200 if (clnt)
201 rpc_shutdown_client(clnt);
202 }
203}
204
205static void rpcb_set_local(struct rpc_clnt *clnt, struct rpc_clnt *clnt4)
206{
207 /* Protected by rpcb_create_local_mutex */
208 rpcb_local_clnt = clnt;
209 rpcb_local_clnt4 = clnt4;
210 smp_wmb();
211 rpcb_users = 1;
212 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
213 "%p, rpcb_local_clnt4: %p)\n", rpcb_local_clnt,
214 rpcb_local_clnt4);
215}
216
164/* 217/*
165 * Returns zero on success, otherwise a negative errno value 218 * Returns zero on success, otherwise a negative errno value
166 * is returned. 219 * is returned.
@@ -205,9 +258,7 @@ static int rpcb_create_local_unix(void)
205 clnt4 = NULL; 258 clnt4 = NULL;
206 } 259 }
207 260
208 /* Protected by rpcb_create_local_mutex */ 261 rpcb_set_local(clnt, clnt4);
209 rpcb_local_clnt = clnt;
210 rpcb_local_clnt4 = clnt4;
211 262
212out: 263out:
213 return result; 264 return result;
@@ -259,9 +310,7 @@ static int rpcb_create_local_net(void)
259 clnt4 = NULL; 310 clnt4 = NULL;
260 } 311 }
261 312
262 /* Protected by rpcb_create_local_mutex */ 313 rpcb_set_local(clnt, clnt4);
263 rpcb_local_clnt = clnt;
264 rpcb_local_clnt4 = clnt4;
265 314
266out: 315out:
267 return result; 316 return result;
@@ -271,16 +320,16 @@ out:
271 * Returns zero on success, otherwise a negative errno value 320 * Returns zero on success, otherwise a negative errno value
272 * is returned. 321 * is returned.
273 */ 322 */
274static int rpcb_create_local(void) 323int rpcb_create_local(void)
275{ 324{
276 static DEFINE_MUTEX(rpcb_create_local_mutex); 325 static DEFINE_MUTEX(rpcb_create_local_mutex);
277 int result = 0; 326 int result = 0;
278 327
279 if (rpcb_local_clnt) 328 if (rpcb_get_local())
280 return result; 329 return result;
281 330
282 mutex_lock(&rpcb_create_local_mutex); 331 mutex_lock(&rpcb_create_local_mutex);
283 if (rpcb_local_clnt) 332 if (rpcb_get_local())
284 goto out; 333 goto out;
285 334
286 if (rpcb_create_local_unix() != 0) 335 if (rpcb_create_local_unix() != 0)
@@ -382,11 +431,6 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
382 struct rpc_message msg = { 431 struct rpc_message msg = {
383 .rpc_argp = &map, 432 .rpc_argp = &map,
384 }; 433 };
385 int error;
386
387 error = rpcb_create_local();
388 if (error)
389 return error;
390 434
391 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 435 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
392 "rpcbind\n", (port ? "" : "un"), 436 "rpcbind\n", (port ? "" : "un"),
@@ -410,7 +454,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
410 unsigned short port = ntohs(sin->sin_port); 454 unsigned short port = ntohs(sin->sin_port);
411 int result; 455 int result;
412 456
413 map->r_addr = rpc_sockaddr2uaddr(sap); 457 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
414 458
415 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with " 459 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
416 "local rpcbind\n", (port ? "" : "un"), 460 "local rpcbind\n", (port ? "" : "un"),
@@ -437,7 +481,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
437 unsigned short port = ntohs(sin6->sin6_port); 481 unsigned short port = ntohs(sin6->sin6_port);
438 int result; 482 int result;
439 483
440 map->r_addr = rpc_sockaddr2uaddr(sap); 484 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
441 485
442 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with " 486 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
443 "local rpcbind\n", (port ? "" : "un"), 487 "local rpcbind\n", (port ? "" : "un"),
@@ -522,11 +566,7 @@ int rpcb_v4_register(const u32 program, const u32 version,
522 struct rpc_message msg = { 566 struct rpc_message msg = {
523 .rpc_argp = &map, 567 .rpc_argp = &map,
524 }; 568 };
525 int error;
526 569
527 error = rpcb_create_local();
528 if (error)
529 return error;
530 if (rpcb_local_clnt4 == NULL) 570 if (rpcb_local_clnt4 == NULL)
531 return -EPROTONOSUPPORT; 571 return -EPROTONOSUPPORT;
532 572
@@ -686,7 +726,7 @@ void rpcb_getport_async(struct rpc_task *task)
686 case RPCBVERS_4: 726 case RPCBVERS_4:
687 case RPCBVERS_3: 727 case RPCBVERS_3:
688 map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID); 728 map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
689 map->r_addr = rpc_sockaddr2uaddr(sap); 729 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
690 map->r_owner = ""; 730 map->r_owner = "";
691 break; 731 break;
692 case RPCBVERS_2: 732 case RPCBVERS_2:
@@ -1060,15 +1100,3 @@ static struct rpc_program rpcb_program = {
1060 .version = rpcb_version, 1100 .version = rpcb_version,
1061 .stats = &rpcb_stats, 1101 .stats = &rpcb_stats,
1062}; 1102};
1063
1064/**
1065 * cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister
1066 *
1067 */
1068void cleanup_rpcb_clnt(void)
1069{
1070 if (rpcb_local_clnt4)
1071 rpc_shutdown_client(rpcb_local_clnt4);
1072 if (rpcb_local_clnt)
1073 rpc_shutdown_client(rpcb_local_clnt);
1074}
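
The rpcbind hunks convert the local registration clients to reference counting: rpcb_get_local() takes a reference only when a client already exists, the first creator installs both clients and sets rpcb_users to 1 under rpcb_create_local_mutex, and rpcb_put_local() shuts them down when the last reference drops, replacing the module-exit cleanup_rpcb_clnt() removed here and in sunrpc_syms.c below. A small, runnable userspace analogue of the counting scheme, with hypothetical names and a plain mutex standing in for rpcb_clnt_lock:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int users;
    static void *local_clnt;            /* stands in for rpcb_local_clnt */

    static int get_local(void)          /* rpcb_get_local() analogue */
    {
        int cnt;

        pthread_mutex_lock(&lock);
        if (users)                      /* only piggy-back on a live client */
            users++;
        cnt = users;
        pthread_mutex_unlock(&lock);
        return cnt;                     /* 0 means: caller must create one */
    }

    static void set_local(void *clnt)   /* rpcb_set_local() analogue;      */
    {                                   /* serialized by a creation mutex  */
        local_clnt = clnt;
        users = 1;                      /* first reference */
    }

    static void put_local(void)         /* rpcb_put_local() analogue */
    {
        void *dead = NULL;

        pthread_mutex_lock(&lock);
        if (--users == 0) {
            dead = local_clnt;
            local_clnt = NULL;
        }
        pthread_mutex_unlock(&lock);
        free(dead);                     /* "shutdown" outside the lock */
    }

    int main(void)
    {
        if (!get_local())               /* no client yet: create one */
            set_local(malloc(64));
        put_local();                    /* last user tears it down */
        return 0;
    }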
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 10b4319ebbca..145e6784f508 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -14,6 +14,7 @@
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/udp.h> 15#include <linux/udp.h>
16#include <linux/sunrpc/xdr.h> 16#include <linux/sunrpc/xdr.h>
17#include <linux/export.h>
17 18
18 19
19/** 20/**
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 9d0809160994..8ec9778c3f4a 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -61,8 +61,6 @@ static struct pernet_operations sunrpc_net_ops = {
61 61
62extern struct cache_detail unix_gid_cache; 62extern struct cache_detail unix_gid_cache;
63 63
64extern void cleanup_rpcb_clnt(void);
65
66static int __init 64static int __init
67init_sunrpc(void) 65init_sunrpc(void)
68{ 66{
@@ -102,7 +100,6 @@ out:
102static void __exit 100static void __exit
103cleanup_sunrpc(void) 101cleanup_sunrpc(void)
104{ 102{
105 cleanup_rpcb_clnt();
106 rpcauth_remove_module(); 103 rpcauth_remove_module();
107 cleanup_socket_xprt(); 104 cleanup_socket_xprt();
108 svc_cleanup_xprt_sock(); 105 svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6a69a1131fb7..6e038884ae0c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -295,6 +295,18 @@ svc_pool_map_put(void)
295} 295}
296 296
297 297
298static int svc_pool_map_get_node(unsigned int pidx)
299{
300 const struct svc_pool_map *m = &svc_pool_map;
301
302 if (m->count) {
303 if (m->mode == SVC_POOL_PERCPU)
304 return cpu_to_node(m->pool_to[pidx]);
305 if (m->mode == SVC_POOL_PERNODE)
306 return m->pool_to[pidx];
307 }
308 return NUMA_NO_NODE;
309}
298/* 310/*
299 * Set the given thread's cpus_allowed mask so that it 311 * Set the given thread's cpus_allowed mask so that it
300 * will only run on cpus in the given pool. 312 * will only run on cpus in the given pool.
@@ -354,6 +366,42 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
354 return &serv->sv_pools[pidx % serv->sv_nrpools]; 366 return &serv->sv_pools[pidx % serv->sv_nrpools];
355} 367}
356 368
369static int svc_rpcb_setup(struct svc_serv *serv)
370{
371 int err;
372
373 err = rpcb_create_local();
374 if (err)
375 return err;
376
377 /* Remove any stale portmap registrations */
378 svc_unregister(serv);
379 return 0;
380}
381
382void svc_rpcb_cleanup(struct svc_serv *serv)
383{
384 svc_unregister(serv);
385 rpcb_put_local();
386}
387EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
388
389static int svc_uses_rpcbind(struct svc_serv *serv)
390{
391 struct svc_program *progp;
392 unsigned int i;
393
394 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
395 for (i = 0; i < progp->pg_nvers; i++) {
396 if (progp->pg_vers[i] == NULL)
397 continue;
398 if (progp->pg_vers[i]->vs_hidden == 0)
399 return 1;
400 }
401 }
402
403 return 0;
404}
357 405
358/* 406/*
359 * Create an RPC service 407 * Create an RPC service
@@ -419,8 +467,15 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
419 spin_lock_init(&pool->sp_lock); 467 spin_lock_init(&pool->sp_lock);
420 } 468 }
421 469
422 /* Remove any stale portmap registrations */ 470 if (svc_uses_rpcbind(serv)) {
423 svc_unregister(serv); 471 if (svc_rpcb_setup(serv) < 0) {
472 kfree(serv->sv_pools);
473 kfree(serv);
474 return NULL;
475 }
476 if (!serv->sv_shutdown)
477 serv->sv_shutdown = svc_rpcb_cleanup;
478 }
424 479
425 return serv; 480 return serv;
426} 481}
@@ -488,7 +543,6 @@ svc_destroy(struct svc_serv *serv)
488 if (svc_serv_is_pooled(serv)) 543 if (svc_serv_is_pooled(serv))
489 svc_pool_map_put(); 544 svc_pool_map_put();
490 545
491 svc_unregister(serv);
492 kfree(serv->sv_pools); 546 kfree(serv->sv_pools);
493 kfree(serv); 547 kfree(serv);
494} 548}
@@ -499,7 +553,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
499 * We allocate pages and place them in rq_argpages. 553 * We allocate pages and place them in rq_argpages.
500 */ 554 */
501static int 555static int
502svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) 556svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
503{ 557{
504 unsigned int pages, arghi; 558 unsigned int pages, arghi;
505 559
@@ -513,7 +567,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
513 arghi = 0; 567 arghi = 0;
514 BUG_ON(pages > RPCSVC_MAXPAGES); 568 BUG_ON(pages > RPCSVC_MAXPAGES);
515 while (pages) { 569 while (pages) {
516 struct page *p = alloc_page(GFP_KERNEL); 570 struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
517 if (!p) 571 if (!p)
518 break; 572 break;
519 rqstp->rq_pages[arghi++] = p; 573 rqstp->rq_pages[arghi++] = p;
@@ -536,11 +590,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
536} 590}
537 591
538struct svc_rqst * 592struct svc_rqst *
539svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool) 593svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
540{ 594{
541 struct svc_rqst *rqstp; 595 struct svc_rqst *rqstp;
542 596
543 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 597 rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
544 if (!rqstp) 598 if (!rqstp)
545 goto out_enomem; 599 goto out_enomem;
546 600
@@ -554,15 +608,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
554 rqstp->rq_server = serv; 608 rqstp->rq_server = serv;
555 rqstp->rq_pool = pool; 609 rqstp->rq_pool = pool;
556 610
557 rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 611 rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
558 if (!rqstp->rq_argp) 612 if (!rqstp->rq_argp)
559 goto out_thread; 613 goto out_thread;
560 614
561 rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 615 rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
562 if (!rqstp->rq_resp) 616 if (!rqstp->rq_resp)
563 goto out_thread; 617 goto out_thread;
564 618
565 if (!svc_init_buffer(rqstp, serv->sv_max_mesg)) 619 if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
566 goto out_thread; 620 goto out_thread;
567 621
568 return rqstp; 622 return rqstp;
@@ -647,6 +701,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
647 struct svc_pool *chosen_pool; 701 struct svc_pool *chosen_pool;
648 int error = 0; 702 int error = 0;
649 unsigned int state = serv->sv_nrthreads-1; 703 unsigned int state = serv->sv_nrthreads-1;
704 int node;
650 705
651 if (pool == NULL) { 706 if (pool == NULL) {
652 /* The -1 assumes caller has done a svc_get() */ 707 /* The -1 assumes caller has done a svc_get() */
@@ -662,14 +717,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
662 nrservs--; 717 nrservs--;
663 chosen_pool = choose_pool(serv, pool, &state); 718 chosen_pool = choose_pool(serv, pool, &state);
664 719
665 rqstp = svc_prepare_thread(serv, chosen_pool); 720 node = svc_pool_map_get_node(chosen_pool->sp_id);
721 rqstp = svc_prepare_thread(serv, chosen_pool, node);
666 if (IS_ERR(rqstp)) { 722 if (IS_ERR(rqstp)) {
667 error = PTR_ERR(rqstp); 723 error = PTR_ERR(rqstp);
668 break; 724 break;
669 } 725 }
670 726
671 __module_get(serv->sv_module); 727 __module_get(serv->sv_module);
672 task = kthread_create(serv->sv_function, rqstp, serv->sv_name); 728 task = kthread_create_on_node(serv->sv_function, rqstp,
729 node, serv->sv_name);
673 if (IS_ERR(task)) { 730 if (IS_ERR(task)) {
674 error = PTR_ERR(task); 731 error = PTR_ERR(task);
675 module_put(serv->sv_module); 732 module_put(serv->sv_module);
@@ -956,9 +1013,8 @@ static void svc_unregister(const struct svc_serv *serv)
956/* 1013/*
957 * Printk the given error with the address of the client that caused it. 1014 * Printk the given error with the address of the client that caused it.
958 */ 1015 */
959static int 1016static __printf(2, 3)
960__attribute__ ((format (printf, 2, 3))) 1017int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
961svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
962{ 1018{
963 va_list args; 1019 va_list args;
964 int r; 1020 int r;
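
Two threads of change meet in svc.c. First, rpcbind client lifetime moves into the service layer: __svc_create() calls svc_rpcb_setup() (create or reference the local clients, clear stale registrations) only when some program version is visible to rpcbind (vs_hidden == 0), and installs svc_rpcb_cleanup() as the sv_shutdown hook, so rpcb_register()/rpcb_v4_register() no longer lazily create clients and svc_destroy() no longer unregisters unconditionally. Second, thread setup becomes NUMA-aware: svc_pool_map_get_node() maps a pool to its node, svc_prepare_thread() switches to kzalloc_node()/kmalloc_node()/alloc_pages_node(), and kthread_create_on_node() keeps each server thread's stack and task_struct on the node it will serve.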
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bd31208bbb61..447cd0eb415c 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -14,6 +14,7 @@
14#include <linux/sunrpc/svc_xprt.h> 14#include <linux/sunrpc/svc_xprt.h>
15#include <linux/sunrpc/svcsock.h> 15#include <linux/sunrpc/svcsock.h>
16#include <linux/sunrpc/xprt.h> 16#include <linux/sunrpc/xprt.h>
17#include <linux/module.h>
17 18
18#define RPCDBG_FACILITY RPCDBG_SVCXPRT 19#define RPCDBG_FACILITY RPCDBG_SVCXPRT
19 20
@@ -254,8 +255,6 @@ EXPORT_SYMBOL_GPL(svc_create_xprt);
254 */ 255 */
255void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt) 256void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
256{ 257{
257 struct sockaddr *sin;
258
259 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen); 258 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
260 rqstp->rq_addrlen = xprt->xpt_remotelen; 259 rqstp->rq_addrlen = xprt->xpt_remotelen;
261 260
@@ -263,15 +262,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
263 * Destination address in request is needed for binding the 262 * Destination address in request is needed for binding the
264 * source address in RPC replies/callbacks later. 263 * source address in RPC replies/callbacks later.
265 */ 264 */
266 sin = (struct sockaddr *)&xprt->xpt_local; 265 memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
267 switch (sin->sa_family) { 266 rqstp->rq_daddrlen = xprt->xpt_locallen;
268 case AF_INET:
269 rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
270 break;
271 case AF_INET6:
272 rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
273 break;
274 }
275} 267}
276EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs); 268EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
277 269
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 767d494de7a2..71bed1c1c77a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/module.h>
24#include <linux/errno.h> 25#include <linux/errno.h>
25#include <linux/fcntl.h> 26#include <linux/fcntl.h>
26#include <linux/net.h> 27#include <linux/net.h>
@@ -143,19 +144,20 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
143 cmh->cmsg_level = SOL_IP; 144 cmh->cmsg_level = SOL_IP;
144 cmh->cmsg_type = IP_PKTINFO; 145 cmh->cmsg_type = IP_PKTINFO;
145 pki->ipi_ifindex = 0; 146 pki->ipi_ifindex = 0;
146 pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr; 147 pki->ipi_spec_dst.s_addr =
148 svc_daddr_in(rqstp)->sin_addr.s_addr;
147 cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); 149 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
148 } 150 }
149 break; 151 break;
150 152
151 case AF_INET6: { 153 case AF_INET6: {
152 struct in6_pktinfo *pki = CMSG_DATA(cmh); 154 struct in6_pktinfo *pki = CMSG_DATA(cmh);
155 struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
153 156
154 cmh->cmsg_level = SOL_IPV6; 157 cmh->cmsg_level = SOL_IPV6;
155 cmh->cmsg_type = IPV6_PKTINFO; 158 cmh->cmsg_type = IPV6_PKTINFO;
156 pki->ipi6_ifindex = 0; 159 pki->ipi6_ifindex = daddr->sin6_scope_id;
157 ipv6_addr_copy(&pki->ipi6_addr, 160 ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
158 &rqstp->rq_daddr.addr6);
159 cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); 161 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
160 } 162 }
161 break; 163 break;
@@ -498,9 +500,13 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
498 struct cmsghdr *cmh) 500 struct cmsghdr *cmh)
499{ 501{
500 struct in_pktinfo *pki = CMSG_DATA(cmh); 502 struct in_pktinfo *pki = CMSG_DATA(cmh);
503 struct sockaddr_in *daddr = svc_daddr_in(rqstp);
504
501 if (cmh->cmsg_type != IP_PKTINFO) 505 if (cmh->cmsg_type != IP_PKTINFO)
502 return 0; 506 return 0;
503 rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; 507
508 daddr->sin_family = AF_INET;
509 daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
504 return 1; 510 return 1;
505} 511}
506 512
@@ -511,9 +517,14 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
511 struct cmsghdr *cmh) 517 struct cmsghdr *cmh)
512{ 518{
513 struct in6_pktinfo *pki = CMSG_DATA(cmh); 519 struct in6_pktinfo *pki = CMSG_DATA(cmh);
520 struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
521
514 if (cmh->cmsg_type != IPV6_PKTINFO) 522 if (cmh->cmsg_type != IPV6_PKTINFO)
515 return 0; 523 return 0;
516 ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); 524
525 daddr->sin6_family = AF_INET6;
526 ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
527 daddr->sin6_scope_id = pki->ipi6_ifindex;
517 return 1; 528 return 1;
518} 529}
519 530
@@ -614,6 +625,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
614 skb_free_datagram_locked(svsk->sk_sk, skb); 625 skb_free_datagram_locked(svsk->sk_sk, skb);
615 return 0; 626 return 0;
616 } 627 }
628 rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
617 629
618 if (skb_is_nonlinear(skb)) { 630 if (skb_is_nonlinear(skb)) {
619 /* we have to copy */ 631 /* we have to copy */
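
rq_daddr grows from a bare address union into full sockaddr storage: svc_xprt_copy_addrs() now copies xpt_local wholesale (with rq_daddrlen), the UDP receive helpers fill in the address family and, for IPv6, the sin6_scope_id from IPV6_PKTINFO, and the reply path reads back through the svc_daddr_in()/svc_daddr_in6() accessors. Preserving the scope id is the point: replies to link-local IPv6 clients must go out the interface the request arrived on.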
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index a385430c722a..ba1296d88de0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -50,6 +50,7 @@
50#include <rdma/ib_verbs.h> 50#include <rdma/ib_verbs.h>
51#include <rdma/rdma_cm.h> 51#include <rdma/rdma_cm.h>
52#include <linux/sunrpc/svc_rdma.h> 52#include <linux/sunrpc/svc_rdma.h>
53#include <linux/export.h>
53 54
54#define RPCDBG_FACILITY RPCDBG_SVCXPRT 55#define RPCDBG_FACILITY RPCDBG_SVCXPRT
55 56
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index ca84212cfbfe..e75813904f26 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/export.h>
15#include <linux/sysctl.h> 16#include <linux/sysctl.h>
16#include <linux/nsproxy.h> 17#include <linux/nsproxy.h>
17 18
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 759b318b5ffb..28908f54459e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -39,6 +39,7 @@
39#include "link.h" 39#include "link.h"
40#include "port.h" 40#include "port.h"
41#include "bcast.h" 41#include "bcast.h"
42#include "name_distr.h"
42 43
43#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
44 45
@@ -298,14 +299,9 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
298 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 299 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
299 msg_set_bcast_tag(msg, tipc_own_tag); 300 msg_set_bcast_tag(msg, tipc_own_tag);
300 301
301 if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) { 302 tipc_bearer_send(&bcbearer->bearer, buf, NULL);
302 bcl->stats.sent_nacks++; 303 bcl->stats.sent_nacks++;
303 buf_discard(buf); 304 buf_discard(buf);
304 } else {
305 tipc_bearer_schedule(bcl->b_ptr, bcl);
306 bcl->proto_msg_queue = buf;
307 bcl->stats.bearer_congs++;
308 }
309 305
310 /* 306 /*
311 * Ensure we don't send another NACK msg to the node 307
@@ -426,20 +422,28 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
426void tipc_bclink_recv_pkt(struct sk_buff *buf) 422void tipc_bclink_recv_pkt(struct sk_buff *buf)
427{ 423{
428 struct tipc_msg *msg = buf_msg(buf); 424 struct tipc_msg *msg = buf_msg(buf);
429 struct tipc_node *node = tipc_node_find(msg_prevnode(msg)); 425 struct tipc_node *node;
430 u32 next_in; 426 u32 next_in;
431 u32 seqno; 427 u32 seqno;
432 struct sk_buff *deferred; 428 struct sk_buff *deferred;
433 429
434 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || 430 /* Screen out unwanted broadcast messages */
435 (msg_mc_netid(msg) != tipc_net_id))) { 431
436 buf_discard(buf); 432 if (msg_mc_netid(msg) != tipc_net_id)
437 return; 433 goto exit;
438 } 434
435 node = tipc_node_find(msg_prevnode(msg));
436 if (unlikely(!node))
437 goto exit;
438
439 tipc_node_lock(node);
440 if (unlikely(!node->bclink.supported))
441 goto unlock;
439 442
440 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 443 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
444 if (msg_type(msg) != STATE_MSG)
445 goto unlock;
441 if (msg_destnode(msg) == tipc_own_addr) { 446 if (msg_destnode(msg) == tipc_own_addr) {
442 tipc_node_lock(node);
443 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 447 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
444 tipc_node_unlock(node); 448 tipc_node_unlock(node);
445 spin_lock_bh(&bc_lock); 449 spin_lock_bh(&bc_lock);
@@ -449,18 +453,18 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
449 msg_bcgap_to(msg)); 453 msg_bcgap_to(msg));
450 spin_unlock_bh(&bc_lock); 454 spin_unlock_bh(&bc_lock);
451 } else { 455 } else {
456 tipc_node_unlock(node);
452 tipc_bclink_peek_nack(msg_destnode(msg), 457 tipc_bclink_peek_nack(msg_destnode(msg),
453 msg_bcast_tag(msg), 458 msg_bcast_tag(msg),
454 msg_bcgap_after(msg), 459 msg_bcgap_after(msg),
455 msg_bcgap_to(msg)); 460 msg_bcgap_to(msg));
456 } 461 }
457 buf_discard(buf); 462 goto exit;
458 return;
459 } 463 }
460 464
461 tipc_node_lock(node); 465 /* Handle in-sequence broadcast message */
466
462receive: 467receive:
463 deferred = node->bclink.deferred_head;
464 next_in = mod(node->bclink.last_in + 1); 468 next_in = mod(node->bclink.last_in + 1);
465 seqno = msg_seqno(msg); 469 seqno = msg_seqno(msg);
466 470
@@ -474,7 +478,10 @@ receive:
474 } 478 }
475 if (likely(msg_isdata(msg))) { 479 if (likely(msg_isdata(msg))) {
476 tipc_node_unlock(node); 480 tipc_node_unlock(node);
477 tipc_port_recv_mcast(buf, NULL); 481 if (likely(msg_mcast(msg)))
482 tipc_port_recv_mcast(buf, NULL);
483 else
484 buf_discard(buf);
478 } else if (msg_user(msg) == MSG_BUNDLER) { 485 } else if (msg_user(msg) == MSG_BUNDLER) {
479 bcl->stats.recv_bundles++; 486 bcl->stats.recv_bundles++;
480 bcl->stats.recv_bundled += msg_msgcnt(msg); 487 bcl->stats.recv_bundled += msg_msgcnt(msg);
@@ -487,18 +494,22 @@ receive:
487 bcl->stats.recv_fragmented++; 494 bcl->stats.recv_fragmented++;
488 tipc_node_unlock(node); 495 tipc_node_unlock(node);
489 tipc_net_route_msg(buf); 496 tipc_net_route_msg(buf);
497 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
498 tipc_node_unlock(node);
499 tipc_named_recv(buf);
490 } else { 500 } else {
491 tipc_node_unlock(node); 501 tipc_node_unlock(node);
492 tipc_net_route_msg(buf); 502 buf_discard(buf);
493 } 503 }
504 buf = NULL;
505 tipc_node_lock(node);
506 deferred = node->bclink.deferred_head;
494 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 507 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
495 tipc_node_lock(node);
496 buf = deferred; 508 buf = deferred;
497 msg = buf_msg(buf); 509 msg = buf_msg(buf);
498 node->bclink.deferred_head = deferred->next; 510 node->bclink.deferred_head = deferred->next;
499 goto receive; 511 goto receive;
500 } 512 }
501 return;
502 } else if (less(next_in, seqno)) { 513 } else if (less(next_in, seqno)) {
503 u32 gap_after = node->bclink.gap_after; 514 u32 gap_after = node->bclink.gap_after;
504 u32 gap_to = node->bclink.gap_to; 515 u32 gap_to = node->bclink.gap_to;
@@ -513,6 +524,7 @@ receive:
513 else if (less(gap_after, seqno) && less(seqno, gap_to)) 524 else if (less(gap_after, seqno) && less(seqno, gap_to))
514 node->bclink.gap_to = seqno; 525 node->bclink.gap_to = seqno;
515 } 526 }
527 buf = NULL;
516 if (bclink_ack_allowed(node->bclink.nack_sync)) { 528 if (bclink_ack_allowed(node->bclink.nack_sync)) {
517 if (gap_to != gap_after) 529 if (gap_to != gap_after)
518 bclink_send_nack(node); 530 bclink_send_nack(node);
@@ -520,9 +532,11 @@ receive:
520 } 532 }
521 } else { 533 } else {
522 bcl->stats.duplicates++; 534 bcl->stats.duplicates++;
523 buf_discard(buf);
524 } 535 }
536unlock:
525 tipc_node_unlock(node); 537 tipc_node_unlock(node);
538exit:
539 buf_discard(buf);
526} 540}
527 541
528u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 542u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
@@ -535,10 +549,11 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
535/** 549/**
536 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer 550 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
537 * 551 *
538 * Send through as many bearers as necessary to reach all nodes 552 * Send packet over as many bearers as necessary to reach all nodes
539 * that support TIPC multicasting. 553 * that have joined the broadcast link.
540 * 554 *
541 * Returns 0 if packet sent successfully, non-zero if not 555 * Returns 0 (packet sent successfully) under all circumstances,
556 * since the broadcast link's pseudo-bearer never blocks
542 */ 557 */
543 558
544static int tipc_bcbearer_send(struct sk_buff *buf, 559static int tipc_bcbearer_send(struct sk_buff *buf,
@@ -547,7 +562,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
547{ 562{
548 int bp_index; 563 int bp_index;
549 564
550 /* Prepare buffer for broadcasting (if first time trying to send it) */ 565 /*
566 * Prepare broadcast link message for reliable transmission,
567 * if first time trying to send it;
568 * preparation is skipped for broadcast link protocol messages
569 * since they are sent in an unreliable manner and don't need it
570 */
551 571
552 if (likely(!msg_non_seq(buf_msg(buf)))) { 572 if (likely(!msg_non_seq(buf_msg(buf)))) {
553 struct tipc_msg *msg; 573 struct tipc_msg *msg;
@@ -596,18 +616,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
596 } 616 }
597 617
598 if (bcbearer->remains_new.count == 0) 618 if (bcbearer->remains_new.count == 0)
599 return 0; 619 break; /* all targets reached */
600 620
601 bcbearer->remains = bcbearer->remains_new; 621 bcbearer->remains = bcbearer->remains_new;
602 } 622 }
603 623
604 /* 624 return 0;
605 * Unable to reach all targets (indicate success, since currently
606 * there isn't code in place to properly block & unblock the
607 * pseudo-bearer used by the broadcast link)
608 */
609
610 return TIPC_OK;
611} 625}
612 626
613/** 627/**
@@ -667,27 +681,6 @@ void tipc_bcbearer_sort(void)
667 spin_unlock_bh(&bc_lock); 681 spin_unlock_bh(&bc_lock);
668} 682}
669 683
670/**
671 * tipc_bcbearer_push - resolve bearer congestion
672 *
673 * Forces bclink to push out any unsent packets, until all packets are gone
674 * or congestion reoccurs.
675 * No locks set when function called
676 */
677
678void tipc_bcbearer_push(void)
679{
680 struct tipc_bearer *b_ptr;
681
682 spin_lock_bh(&bc_lock);
683 b_ptr = &bcbearer->bearer;
684 if (b_ptr->blocked) {
685 b_ptr->blocked = 0;
686 tipc_bearer_lock_push(b_ptr);
687 }
688 spin_unlock_bh(&bc_lock);
689}
690
691 684
692int tipc_bclink_stats(char *buf, const u32 buf_size) 685int tipc_bclink_stats(char *buf, const u32 buf_size)
693{ 686{
@@ -764,7 +757,7 @@ int tipc_bclink_init(void)
764 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); 757 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
765 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC); 758 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
766 if (!bcbearer || !bclink) { 759 if (!bcbearer || !bclink) {
767 warn("Multicast link creation failed, no memory\n"); 760 warn("Broadcast link creation failed, no memory\n");
768 kfree(bcbearer); 761 kfree(bcbearer);
769 bcbearer = NULL; 762 bcbearer = NULL;
770 kfree(bclink); 763 kfree(bclink);
@@ -775,7 +768,7 @@ int tipc_bclink_init(void)
775 INIT_LIST_HEAD(&bcbearer->bearer.cong_links); 768 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
776 bcbearer->bearer.media = &bcbearer->media; 769 bcbearer->bearer.media = &bcbearer->media;
777 bcbearer->media.send_msg = tipc_bcbearer_send; 770 bcbearer->media.send_msg = tipc_bcbearer_send;
778 sprintf(bcbearer->media.name, "tipc-multicast"); 771 sprintf(bcbearer->media.name, "tipc-broadcast");
779 772
780 bcl = &bclink->link; 773 bcl = &bclink->link;
781 INIT_LIST_HEAD(&bcl->waiting_ports); 774 INIT_LIST_HEAD(&bcl->waiting_ports);
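
The tipc broadcast-link receive path is restructured around proper locking and a single exit: the node lock is taken before any bclink state is inspected, protocol messages other than STATE_MSG are dropped, NAME_DISTRIBUTOR messages are delivered to tipc_named_recv() instead of being blindly routed, and every path that consumes the buffer sets buf to NULL so the shared buf_discard(buf) at the exit label frees exactly the unconsumed cases. On the send side the pseudo-bearer stops pretending to handle congestion: bclink_send_nack() no longer queues on "congestion", tipc_bcbearer_send() always reports success, and tipc_bcbearer_push() disappears, alongside the multicast-to-broadcast renaming of the warning text and bearer name.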
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 500c97f1c859..06740da5ae61 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -101,6 +101,5 @@ int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
101int tipc_bclink_reset_stats(void); 101int tipc_bclink_reset_stats(void);
102int tipc_bclink_set_queue_limits(u32 limit); 102int tipc_bclink_set_queue_limits(u32 limit);
103void tipc_bcbearer_sort(void); 103void tipc_bcbearer_sort(void);
104void tipc_bcbearer_push(void);
105 104
106#endif 105#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 85eba9c08ee9..e2202de3d93e 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -385,13 +385,9 @@ static int bearer_push(struct tipc_bearer *b_ptr)
385 385
386void tipc_bearer_lock_push(struct tipc_bearer *b_ptr) 386void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
387{ 387{
388 int res;
389
390 spin_lock_bh(&b_ptr->lock); 388 spin_lock_bh(&b_ptr->lock);
391 res = bearer_push(b_ptr); 389 bearer_push(b_ptr);
392 spin_unlock_bh(&b_ptr->lock); 390 spin_unlock_bh(&b_ptr->lock);
393 if (res)
394 tipc_bcbearer_push();
395} 391}
396 392
397 393
@@ -608,6 +604,7 @@ int tipc_block_bearer(const char *name)
608 info("Blocking bearer <%s>\n", name); 604 info("Blocking bearer <%s>\n", name);
609 spin_lock_bh(&b_ptr->lock); 605 spin_lock_bh(&b_ptr->lock);
610 b_ptr->blocked = 1; 606 b_ptr->blocked = 1;
607 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
611 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 608 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
612 struct tipc_node *n_ptr = l_ptr->owner; 609 struct tipc_node *n_ptr = l_ptr->owner;
613 610
@@ -635,6 +632,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
635 spin_lock_bh(&b_ptr->lock); 632 spin_lock_bh(&b_ptr->lock);
636 b_ptr->blocked = 1; 633 b_ptr->blocked = 1;
637 b_ptr->media->disable_bearer(b_ptr); 634 b_ptr->media->disable_bearer(b_ptr);
635 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
638 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 636 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
639 tipc_link_delete(l_ptr); 637 tipc_link_delete(l_ptr);
640 } 638 }
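Both hunks above add the same fix: links waiting on the bearer's cong_links queue are spliced back onto the main links list before that list is walked, so congested links get reset (or deleted) rather than silently skipped. list_splice_init(src, dst) moves every entry of src onto dst and leaves src empty; a self-contained illustration of the primitive:

	LIST_HEAD(cong_links);	/* links parked while the bearer was congested */
	LIST_HEAD(links);	/* all other links on the bearer */

	/* ... entries added to both lists with list_add_tail() ... */

	/* after this call cong_links is empty and its former entries sit
	 * at the head of links, so a single list_for_each_entry_safe()
	 * pass over links now covers the formerly congested links too
	 */
	list_splice_init(&cong_links, &links);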
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5ad70eff1ebf..d696f9e414e3 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,8 +39,8 @@
39 39
40#include "bcast.h" 40#include "bcast.h"
41 41
42#define MAX_BEARERS 8 42#define MAX_BEARERS 2
43#define MAX_MEDIA 4 43#define MAX_MEDIA 2
44 44
45/* 45/*
46 * Identifiers of supported TIPC media types 46 * Identifiers of supported TIPC media types
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 443159a166fd..80da6ebc2785 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -65,7 +65,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
65 const void *req_tlv_area, int req_tlv_space, 65 const void *req_tlv_area, int req_tlv_space,
66 int headroom); 66 int headroom);
67 67
68void tipc_cfg_link_event(u32 addr, char *name, int up);
69int tipc_cfg_init(void); 68int tipc_cfg_init(void);
70void tipc_cfg_stop(void); 69void tipc_cfg_stop(void);
71 70
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 943b6af84265..c21331d58fdb 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,6 +34,8 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <linux/module.h>
38
37#include "core.h" 39#include "core.h"
38#include "ref.h" 40#include "ref.h"
39#include "name_table.h" 41#include "name_table.h"
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 0987933155b9..f2fb96e86ee8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -159,12 +159,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
159 } 159 }
160 tipc_node_lock(n_ptr); 160 tipc_node_lock(n_ptr);
161 161
162 /* Don't talk to neighbor during cleanup after last session */
163 if (n_ptr->cleanup_required) {
164 tipc_node_unlock(n_ptr);
165 return;
166 }
167
168 link = n_ptr->links[b_ptr->identity]; 162 link = n_ptr->links[b_ptr->identity];
169 163
170 /* Create a link endpoint for this bearer, if necessary */ 164 /* Create a link endpoint for this bearer, if necessary */
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index b69092eb95d8..e728d4ce2a1b 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2007, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2008, 2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
37#include "core.h" 37#include "core.h"
38#include "bearer.h" 38#include "bearer.h"
39 39
40#define MAX_ETH_BEARERS 2 40#define MAX_ETH_BEARERS MAX_BEARERS
41#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI 41#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
42#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL 42#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
43#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN 43#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
@@ -144,31 +144,27 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
144 144
145 /* Find device with specified name */ 145 /* Find device with specified name */
146 146
147 read_lock(&dev_base_lock);
147 for_each_netdev(&init_net, pdev) { 148 for_each_netdev(&init_net, pdev) {
148 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { 149 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
149 dev = pdev; 150 dev = pdev;
151 dev_hold(dev);
150 break; 152 break;
151 } 153 }
152 } 154 }
155 read_unlock(&dev_base_lock);
153 if (!dev) 156 if (!dev)
154 return -ENODEV; 157 return -ENODEV;
155 158
156 /* Find Ethernet bearer for device (or create one) */ 159 /* Create Ethernet bearer for device */
157 160
158 while ((eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev)) 161 eb_ptr->dev = dev;
159 eb_ptr++; 162 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
160 if (eb_ptr == stop) 163 eb_ptr->tipc_packet_type.dev = dev;
161 return -EDQUOT; 164 eb_ptr->tipc_packet_type.func = recv_msg;
162 if (!eb_ptr->dev) { 165 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
163 eb_ptr->dev = dev; 166 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
164 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); 167 dev_add_pack(&eb_ptr->tipc_packet_type);
165 eb_ptr->tipc_packet_type.dev = dev;
166 eb_ptr->tipc_packet_type.func = recv_msg;
167 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
168 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
169 dev_hold(dev);
170 dev_add_pack(&eb_ptr->tipc_packet_type);
171 }
172 168
173 /* Associate TIPC bearer with Ethernet bearer */ 169 /* Associate TIPC bearer with Ethernet bearer */
174 170
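The enable_bearer() rework above replaces the scan of a static bearer array with a straightforward find-and-pin of the net_device: the lookup runs under dev_base_lock and takes a reference with dev_hold() before the lock is dropped, so the device cannot vanish while the bearer is using it. The same effect is conventionally written with dev_get_by_name(), which bundles the locking, name compare, and dev_hold() (a sketch, error handling trimmed):

	struct net_device *dev;

	dev = dev_get_by_name(&init_net, driver_name);	/* lookup + dev_hold() */
	if (!dev)
		return -ENODEV;

	/* ... use dev; balance with dev_put(dev) when the bearer is disabled */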
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f89570c54f54..ae98a72da11a 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -332,15 +332,16 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
332 332
333 l_ptr->addr = peer; 333 l_ptr->addr = peer;
334 if_name = strchr(b_ptr->name, ':') + 1; 334 if_name = strchr(b_ptr->name, ':') + 1;
335 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:", 335 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
336 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 336 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
337 tipc_node(tipc_own_addr), 337 tipc_node(tipc_own_addr),
338 if_name, 338 if_name,
339 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 339 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
340 /* note: peer i/f is appended to link name by reset/activate */ 340 /* note: peer i/f name is updated by reset/activate message */
341 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr)); 341 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
342 l_ptr->owner = n_ptr; 342 l_ptr->owner = n_ptr;
343 l_ptr->checkpoint = 1; 343 l_ptr->checkpoint = 1;
344 l_ptr->peer_session = INVALID_SESSION;
344 l_ptr->b_ptr = b_ptr; 345 l_ptr->b_ptr = b_ptr;
345 link_set_supervision_props(l_ptr, b_ptr->media->tolerance); 346 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
346 l_ptr->state = RESET_UNKNOWN; 347 l_ptr->state = RESET_UNKNOWN;
@@ -536,9 +537,6 @@ void tipc_link_stop(struct link *l_ptr)
536 l_ptr->proto_msg_queue = NULL; 537 l_ptr->proto_msg_queue = NULL;
537} 538}
538 539
539/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
540#define link_send_event(fcn, l_ptr, up) do { } while (0)
541
542void tipc_link_reset(struct link *l_ptr) 540void tipc_link_reset(struct link *l_ptr)
543{ 541{
544 struct sk_buff *buf; 542 struct sk_buff *buf;
@@ -596,10 +594,6 @@ void tipc_link_reset(struct link *l_ptr)
596 l_ptr->fsm_msg_cnt = 0; 594 l_ptr->fsm_msg_cnt = 0;
597 l_ptr->stale_count = 0; 595 l_ptr->stale_count = 0;
598 link_reset_statistics(l_ptr); 596 link_reset_statistics(l_ptr);
599
600 link_send_event(tipc_cfg_link_event, l_ptr, 0);
601 if (!in_own_cluster(l_ptr->addr))
602 link_send_event(tipc_disc_link_event, l_ptr, 0);
603} 597}
604 598
605 599
@@ -608,9 +602,6 @@ static void link_activate(struct link *l_ptr)
608 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 602 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
609 tipc_node_link_up(l_ptr->owner, l_ptr); 603 tipc_node_link_up(l_ptr->owner, l_ptr);
610 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 604 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
611 link_send_event(tipc_cfg_link_event, l_ptr, 1);
612 if (!in_own_cluster(l_ptr->addr))
613 link_send_event(tipc_disc_link_event, l_ptr, 1);
614} 605}
615 606
616/** 607/**
@@ -985,6 +976,51 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
985} 976}
986 977
987/* 978/*
979 * tipc_link_send_names - send name table entries to new neighbor
980 *
981 * Send routine for bulk delivery of name table messages when contact
982 * with a new neighbor occurs. No link congestion checking is performed
983 * because name table messages *must* be delivered. The messages must be
984 * small enough not to require fragmentation.
985 * Called without any locks held.
986 */
987
988void tipc_link_send_names(struct list_head *message_list, u32 dest)
989{
990 struct tipc_node *n_ptr;
991 struct link *l_ptr;
992 struct sk_buff *buf;
993 struct sk_buff *temp_buf;
994
995 if (list_empty(message_list))
996 return;
997
998 read_lock_bh(&tipc_net_lock);
999 n_ptr = tipc_node_find(dest);
1000 if (n_ptr) {
1001 tipc_node_lock(n_ptr);
1002 l_ptr = n_ptr->active_links[0];
1003 if (l_ptr) {
1004 /* convert circular list to linear list */
1005 ((struct sk_buff *)message_list->prev)->next = NULL;
1006 link_add_chain_to_outqueue(l_ptr,
1007 (struct sk_buff *)message_list->next, 0);
1008 tipc_link_push_queue(l_ptr);
1009 INIT_LIST_HEAD(message_list);
1010 }
1011 tipc_node_unlock(n_ptr);
1012 }
1013 read_unlock_bh(&tipc_net_lock);
1014
1015 /* discard the messages if they couldn't be sent */
1016
1017 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1018 list_del((struct list_head *)buf);
1019 buf_discard(buf);
1020 }
1021}
1022
1023/*
988 * link_send_buf_fast: Entry for data messages where the 1024 * link_send_buf_fast: Entry for data messages where the
989 * destination link is known and the header is complete, 1025 * destination link is known and the header is complete,
990 * inclusive total message length. Very time critical. 1026 * inclusive total message length. Very time critical.
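The "convert circular list to linear list" step in tipc_link_send_names() above relies on the buffers having been chained through their embedded next/prev pointers: clearing the last buffer's next pointer turns the circular list_head ring into a NULL-terminated sk_buff chain that the link's out-queue code consumes directly. Annotated, under the assumption (visible in name_distr.c below) that the sk_buffs were queued with list_add_tail():

	/* message_list is a circular doubly-linked ring:
	 *   head -> buf1 -> buf2 -> ... -> bufN -> head
	 * message_list->prev is the last buffer; nulling its next pointer
	 * yields the linear chain buf1 -> ... -> bufN -> NULL
	 */
	((struct sk_buff *)message_list->prev)->next = NULL;
	link_add_chain_to_outqueue(l_ptr,
				   (struct sk_buff *)message_list->next, 0);
	tipc_link_push_queue(l_ptr);
	INIT_LIST_HEAD(message_list);	/* ring is empty again */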
@@ -1031,9 +1067,6 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1031 u32 selector = msg_origport(buf_msg(buf)) & 1; 1067 u32 selector = msg_origport(buf_msg(buf)) & 1;
1032 u32 dummy; 1068 u32 dummy;
1033 1069
1034 if (destnode == tipc_own_addr)
1035 return tipc_port_recv_msg(buf);
1036
1037 read_lock_bh(&tipc_net_lock); 1070 read_lock_bh(&tipc_net_lock);
1038 n_ptr = tipc_node_find(destnode); 1071 n_ptr = tipc_node_find(destnode);
1039 if (likely(n_ptr)) { 1072 if (likely(n_ptr)) {
@@ -1658,19 +1691,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1658 continue; 1691 continue;
1659 } 1692 }
1660 1693
1694 /* Discard unicast link messages destined for another node */
1695
1661 if (unlikely(!msg_short(msg) && 1696 if (unlikely(!msg_short(msg) &&
1662 (msg_destnode(msg) != tipc_own_addr))) 1697 (msg_destnode(msg) != tipc_own_addr)))
1663 goto cont; 1698 goto cont;
1664 1699
1665 /* Discard non-routeable messages destined for another node */
1666
1667 if (unlikely(!msg_isdata(msg) &&
1668 (msg_destnode(msg) != tipc_own_addr))) {
1669 if ((msg_user(msg) != CONN_MANAGER) &&
1670 (msg_user(msg) != MSG_FRAGMENTER))
1671 goto cont;
1672 }
1673
1674 /* Locate neighboring node that sent message */ 1700 /* Locate neighboring node that sent message */
1675 1701
1676 n_ptr = tipc_node_find(msg_prevnode(msg)); 1702 n_ptr = tipc_node_find(msg_prevnode(msg));
@@ -1678,17 +1704,24 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1678 goto cont; 1704 goto cont;
1679 tipc_node_lock(n_ptr); 1705 tipc_node_lock(n_ptr);
1680 1706
1681 /* Don't talk to neighbor during cleanup after last session */ 1707 /* Locate unicast link endpoint that should handle message */
1682 1708
1683 if (n_ptr->cleanup_required) { 1709 l_ptr = n_ptr->links[b_ptr->identity];
1710 if (unlikely(!l_ptr)) {
1684 tipc_node_unlock(n_ptr); 1711 tipc_node_unlock(n_ptr);
1685 goto cont; 1712 goto cont;
1686 } 1713 }
1687 1714
1688 /* Locate unicast link endpoint that should handle message */ 1715 /* Verify that communication with node is currently allowed */
1689 1716
1690 l_ptr = n_ptr->links[b_ptr->identity]; 1717 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1691 if (unlikely(!l_ptr)) { 1718 msg_user(msg) == LINK_PROTOCOL &&
1719 (msg_type(msg) == RESET_MSG ||
1720 msg_type(msg) == ACTIVATE_MSG) &&
1721 !msg_redundant_link(msg))
1722 n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1723
1724 if (n_ptr->block_setup) {
1692 tipc_node_unlock(n_ptr); 1725 tipc_node_unlock(n_ptr);
1693 goto cont; 1726 goto cont;
1694 } 1727 }
@@ -1923,6 +1956,12 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1923 1956
1924 if (link_blocked(l_ptr)) 1957 if (link_blocked(l_ptr))
1925 return; 1958 return;
1959
1960 /* Abort non-RESET send if communication with node is prohibited */
1961
1962 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1963 return;
1964
1926 msg_set_type(msg, msg_typ); 1965 msg_set_type(msg, msg_typ);
1927 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1966 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1928 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1967 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
@@ -2051,9 +2090,19 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2051 case RESET_MSG: 2090 case RESET_MSG:
2052 if (!link_working_unknown(l_ptr) && 2091 if (!link_working_unknown(l_ptr) &&
2053 (l_ptr->peer_session != INVALID_SESSION)) { 2092 (l_ptr->peer_session != INVALID_SESSION)) {
2054 if (msg_session(msg) == l_ptr->peer_session) 2093 if (less_eq(msg_session(msg), l_ptr->peer_session))
2055 break; /* duplicate: ignore */ 2094 break; /* duplicate or old reset: ignore */
2095 }
2096
2097 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2098 link_working_unknown(l_ptr))) {
2099 /*
2100 * peer has lost contact -- don't allow peer's links
2101 * to reactivate before we recognize loss & clean up
2102 */
2103 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2056 } 2104 }
2105
2057 /* fall thru' */ 2106 /* fall thru' */
2058 case ACTIVATE_MSG: 2107 case ACTIVATE_MSG:
2059 /* Update link settings according to the other endpoint's values */ 2108 /* Update link settings according to the other endpoint's values */
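The duplicate-detection change above widens the RESET filter from "exactly the current session" to "the current session or any older one", so stale resets delayed in the network are ignored as well. less_eq() is TIPC's wrapping 16-bit serial-number comparison; a sketch of the usual definitions (assumed here, matching the mod() arithmetic used elsewhere in link.c):

	static inline u32 mod(u32 x)
	{
		return x & 0xffff;	/* TIPC sequence space is 16 bits */
	}

	static inline int less_eq(u32 left, u32 right)
	{
		/* true when left precedes or equals right, modulo 2^16 */
		return mod(right - left) < 32768u;
	}

	/* e.g. peer_session = 3: sessions 2 and 3 satisfy less_eq() and are
	 * dropped as stale/duplicate; session 4 gives mod(3 - 4) = 0xffff,
	 * fails the test, and is handled as a genuine new reset
	 */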
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 74fbecab1ea0..e56cb532913e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -223,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
223struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space); 223struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
224void tipc_link_reset(struct link *l_ptr); 224void tipc_link_reset(struct link *l_ptr);
225int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector); 225int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
226void tipc_link_send_names(struct list_head *message_list, u32 dest);
226int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf); 227int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
227u32 tipc_link_get_max_pkt(u32 dest, u32 selector); 228u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
228int tipc_link_send_sections_fast(struct tipc_port *sender, 229int tipc_link_send_sections_fast(struct tipc_port *sender,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index cd356e504332..b7ca1bd7b151 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -173,18 +173,40 @@ void tipc_named_withdraw(struct publication *publ)
173 * tipc_named_node_up - tell specified node about all publications by this node 173 * tipc_named_node_up - tell specified node about all publications by this node
174 */ 174 */
175 175
176void tipc_named_node_up(unsigned long node) 176void tipc_named_node_up(unsigned long nodearg)
177{ 177{
178 struct tipc_node *n_ptr;
179 struct link *l_ptr;
178 struct publication *publ; 180 struct publication *publ;
179 struct distr_item *item = NULL; 181 struct distr_item *item = NULL;
180 struct sk_buff *buf = NULL; 182 struct sk_buff *buf = NULL;
183 struct list_head message_list;
184 u32 node = (u32)nodearg;
181 u32 left = 0; 185 u32 left = 0;
182 u32 rest; 186 u32 rest;
183 u32 max_item_buf; 187 u32 max_item_buf = 0;
188
189 /* compute maximum amount of publication data to send per message */
190
191 read_lock_bh(&tipc_net_lock);
192 n_ptr = tipc_node_find(node);
193 if (n_ptr) {
194 tipc_node_lock(n_ptr);
195 l_ptr = n_ptr->active_links[0];
196 if (l_ptr)
197 max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
198 ITEM_SIZE) * ITEM_SIZE;
199 tipc_node_unlock(n_ptr);
200 }
201 read_unlock_bh(&tipc_net_lock);
202 if (!max_item_buf)
203 return;
204
205 /* create list of publication messages, then send them as a unit */
206
207 INIT_LIST_HEAD(&message_list);
184 208
185 read_lock_bh(&tipc_nametbl_lock); 209 read_lock_bh(&tipc_nametbl_lock);
186 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
187 max_item_buf *= ITEM_SIZE;
188 rest = publ_cnt * ITEM_SIZE; 210 rest = publ_cnt * ITEM_SIZE;
189 211
190 list_for_each_entry(publ, &publ_root, local_list) { 212 list_for_each_entry(publ, &publ_root, local_list) {
@@ -202,13 +224,14 @@ void tipc_named_node_up(unsigned long node)
202 item++; 224 item++;
203 left -= ITEM_SIZE; 225 left -= ITEM_SIZE;
204 if (!left) { 226 if (!left) {
205 msg_set_link_selector(buf_msg(buf), node); 227 list_add_tail((struct list_head *)buf, &message_list);
206 tipc_link_send(buf, node, node);
207 buf = NULL; 228 buf = NULL;
208 } 229 }
209 } 230 }
210exit: 231exit:
211 read_unlock_bh(&tipc_nametbl_lock); 232 read_unlock_bh(&tipc_nametbl_lock);
233
234 tipc_link_send_names(&message_list, (u32)node);
212} 235}
213 236
214/** 237/**
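The reworked tipc_named_node_up() above derives its bulk-message capacity from the first active link's MTU instead of the old fixed TIPC_MAX_USER_MSG_SIZE budget: subtract the internal header, then round down to a whole number of publication items so no message carries a fractional entry. With illustrative values (an Ethernet-sized max_pkt of 1500, with INT_H_SIZE = 40 and a 20-byte distr_item assumed):

	u32 max_item_buf;

	max_item_buf = ((1500 - 40) / 20) * 20;
	/* (1460 / 20) * 20 = 73 * 20 = 1460 bytes: 73 whole items per
	 * message, never a truncated trailing item
	 */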
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 68b3dd637291..fafef6c3c0f6 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -141,17 +141,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
141 return; 141 return;
142 msg = buf_msg(buf); 142 msg = buf_msg(buf);
143 143
144 msg_incr_reroute_cnt(msg);
145 if (msg_reroute_cnt(msg) > 6) {
146 if (msg_errcode(msg)) {
147 buf_discard(buf);
148 } else {
149 tipc_reject_msg(buf, msg_destport(msg) ?
150 TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
151 }
152 return;
153 }
154
155 /* Handle message for this node */ 144 /* Handle message for this node */
156 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); 145 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
157 if (tipc_in_scope(dnode, tipc_own_addr)) { 146 if (tipc_in_scope(dnode, tipc_own_addr)) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2d106ef4fa4c..27b4bb0cca6c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -112,6 +112,7 @@ struct tipc_node *tipc_node_create(u32 addr)
112 break; 112 break;
113 } 113 }
114 list_add_tail(&n_ptr->list, &temp_node->list); 114 list_add_tail(&n_ptr->list, &temp_node->list);
115 n_ptr->block_setup = WAIT_PEER_DOWN;
115 116
116 tipc_num_nodes++; 117 tipc_num_nodes++;
117 118
@@ -312,7 +313,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
312 } 313 }
313} 314}
314 315
315static void node_cleanup_finished(unsigned long node_addr) 316static void node_name_purge_complete(unsigned long node_addr)
316{ 317{
317 struct tipc_node *n_ptr; 318 struct tipc_node *n_ptr;
318 319
@@ -320,7 +321,7 @@ static void node_cleanup_finished(unsigned long node_addr)
320 n_ptr = tipc_node_find(node_addr); 321 n_ptr = tipc_node_find(node_addr);
321 if (n_ptr) { 322 if (n_ptr) {
322 tipc_node_lock(n_ptr); 323 tipc_node_lock(n_ptr);
323 n_ptr->cleanup_required = 0; 324 n_ptr->block_setup &= ~WAIT_NAMES_GONE;
324 tipc_node_unlock(n_ptr); 325 tipc_node_unlock(n_ptr);
325 } 326 }
326 read_unlock_bh(&tipc_net_lock); 327 read_unlock_bh(&tipc_net_lock);
@@ -331,28 +332,32 @@ static void node_lost_contact(struct tipc_node *n_ptr)
331 char addr_string[16]; 332 char addr_string[16];
332 u32 i; 333 u32 i;
333 334
334 /* Clean up broadcast reception remains */ 335 info("Lost contact with %s\n",
335 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0; 336 tipc_addr_string_fill(addr_string, n_ptr->addr));
336 while (n_ptr->bclink.deferred_head) { 337
337 struct sk_buff *buf = n_ptr->bclink.deferred_head; 338 /* Flush broadcast link info associated with lost node */
338 n_ptr->bclink.deferred_head = buf->next;
339 buf_discard(buf);
340 }
341 if (n_ptr->bclink.defragm) {
342 buf_discard(n_ptr->bclink.defragm);
343 n_ptr->bclink.defragm = NULL;
344 }
345 339
346 if (n_ptr->bclink.supported) { 340 if (n_ptr->bclink.supported) {
341 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
342 while (n_ptr->bclink.deferred_head) {
343 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 n_ptr->bclink.deferred_head = buf->next;
345 buf_discard(buf);
346 }
347
348 if (n_ptr->bclink.defragm) {
349 buf_discard(n_ptr->bclink.defragm);
350 n_ptr->bclink.defragm = NULL;
351 }
352
353 tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
347 tipc_bclink_acknowledge(n_ptr, 354 tipc_bclink_acknowledge(n_ptr,
348 mod(n_ptr->bclink.acked + 10000)); 355 mod(n_ptr->bclink.acked + 10000));
349 tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
350 if (n_ptr->addr < tipc_own_addr) 356 if (n_ptr->addr < tipc_own_addr)
351 tipc_own_tag--; 357 tipc_own_tag--;
352 }
353 358
354 info("Lost contact with %s\n", 359 n_ptr->bclink.supported = 0;
355 tipc_addr_string_fill(addr_string, n_ptr->addr)); 360 }
356 361
357 /* Abort link changeover */ 362 /* Abort link changeover */
358 for (i = 0; i < MAX_BEARERS; i++) { 363 for (i = 0; i < MAX_BEARERS; i++) {
@@ -367,10 +372,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
367 /* Notify subscribers */ 372 /* Notify subscribers */
368 tipc_nodesub_notify(n_ptr); 373 tipc_nodesub_notify(n_ptr);
369 374
370 /* Prevent re-contact with node until all cleanup is done */ 375 /* Prevent re-contact with node until cleanup is done */
371 376
372 n_ptr->cleanup_required = 1; 377 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
373 tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr); 378 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
374} 379}
375 380
376struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 381struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5c61afc7a0b9..4f15cb40aaa4 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,6 +42,12 @@
42#include "net.h" 42#include "net.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* Flags used to block (re)establishment of contact with a neighboring node */
46
47#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
48#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
49#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
50
45/** 51/**
46 * struct tipc_node - TIPC node structure 52 * struct tipc_node - TIPC node structure
47 * @addr: network address of node 53 * @addr: network address of node
@@ -52,7 +58,7 @@
52 * @active_links: pointers to active links to node 58 * @active_links: pointers to active links to node
53 * @links: pointers to all links to node 59 * @links: pointers to all links to node
54 * @working_links: number of working links to node (both active and standby) 60 * @working_links: number of working links to node (both active and standby)
55 * @cleanup_required: non-zero if cleaning up after a prior loss of contact 61 * @block_setup: bit mask of conditions preventing link establishment to node
56 * @link_cnt: number of links to node 62 * @link_cnt: number of links to node
57 * @permit_changeover: non-zero if node has redundant links to this system 63 * @permit_changeover: non-zero if node has redundant links to this system
58 * @bclink: broadcast-related info 64 * @bclink: broadcast-related info
@@ -77,7 +83,7 @@ struct tipc_node {
77 struct link *links[MAX_BEARERS]; 83 struct link *links[MAX_BEARERS];
78 int link_cnt; 84 int link_cnt;
79 int working_links; 85 int working_links;
80 int cleanup_required; 86 int block_setup;
81 int permit_changeover; 87 int permit_changeover;
82 struct { 88 struct {
83 int supported; 89 int supported;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adb2eff4a102..42b8324ff2ee 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,6 +34,7 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <linux/export.h>
37#include <net/sock.h> 38#include <net/sock.h>
38 39
39#include "core.h" 40#include "core.h"
@@ -49,7 +50,7 @@ struct tipc_sock {
49 struct sock sk; 50 struct sock sk;
50 struct tipc_port *p; 51 struct tipc_port *p;
51 struct tipc_portid peer_name; 52 struct tipc_portid peer_name;
52 long conn_timeout; 53 unsigned int conn_timeout;
53}; 54};
54 55
55#define tipc_sk(sk) ((struct tipc_sock *)(sk)) 56#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -231,7 +232,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
231 sock_init_data(sock, sk); 232 sock_init_data(sock, sk);
232 sk->sk_backlog_rcv = backlog_rcv; 233 sk->sk_backlog_rcv = backlog_rcv;
233 tipc_sk(sk)->p = tp_ptr; 234 tipc_sk(sk)->p = tp_ptr;
234 tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); 235 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
235 236
236 spin_unlock_bh(tp_ptr->lock); 237 spin_unlock_bh(tp_ptr->lock);
237 238
@@ -525,6 +526,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
525 struct tipc_port *tport = tipc_sk_port(sk); 526 struct tipc_port *tport = tipc_sk_port(sk);
526 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 527 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
527 int needs_conn; 528 int needs_conn;
529 long timeout_val;
528 int res = -EINVAL; 530 int res = -EINVAL;
529 531
530 if (unlikely(!dest)) 532 if (unlikely(!dest))
@@ -564,6 +566,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
564 reject_rx_queue(sk); 566 reject_rx_queue(sk);
565 } 567 }
566 568
569 timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
570
567 do { 571 do {
568 if (dest->addrtype == TIPC_ADDR_NAME) { 572 if (dest->addrtype == TIPC_ADDR_NAME) {
569 res = dest_name_check(dest, m); 573 res = dest_name_check(dest, m);
@@ -600,16 +604,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
600 sock->state = SS_CONNECTING; 604 sock->state = SS_CONNECTING;
601 break; 605 break;
602 } 606 }
603 if (m->msg_flags & MSG_DONTWAIT) { 607 if (timeout_val <= 0L) {
604 res = -EWOULDBLOCK; 608 res = timeout_val ? timeout_val : -EWOULDBLOCK;
605 break; 609 break;
606 } 610 }
607 release_sock(sk); 611 release_sock(sk);
608 res = wait_event_interruptible(*sk_sleep(sk), 612 timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
609 !tport->congested); 613 !tport->congested, timeout_val);
610 lock_sock(sk); 614 lock_sock(sk);
611 if (res)
612 break;
613 } while (1); 615 } while (1);
614 616
615exit: 617exit:
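The send_msg()/send_packet() rework above replaces the binary MSG_DONTWAIT check with the standard socket send-timeout pattern: fetch the budget once with sock_sndtimeo() (which returns 0 for non-blocking callers), pass the remaining budget to wait_event_interruptible_timeout(), and map an exhausted budget to -EWOULDBLOCK while propagating the negative value a signal produces. The skeleton, abstracted from the diff; try_send_once() is a hypothetical stand-in for the actual tipc_send*() attempt:

	long timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	do {
		res = try_send_once();	/* hypothetical: one tipc_send() attempt */
		if (likely(res != -ELINKCONG))
			break;			/* sent, or a hard error */
		if (timeout_val <= 0L) {
			/* 0 means the budget is gone (or MSG_DONTWAIT);
			 * a negative value is -ERESTARTSYS from a signal
			 */
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
				!tport->congested, timeout_val);
		lock_sock(sk);
	} while (1);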
@@ -636,6 +638,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
636 struct sock *sk = sock->sk; 638 struct sock *sk = sock->sk;
637 struct tipc_port *tport = tipc_sk_port(sk); 639 struct tipc_port *tport = tipc_sk_port(sk);
638 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 640 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
641 long timeout_val;
639 int res; 642 int res;
640 643
641 /* Handle implied connection establishment */ 644 /* Handle implied connection establishment */
@@ -650,6 +653,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
650 if (iocb) 653 if (iocb)
651 lock_sock(sk); 654 lock_sock(sk);
652 655
656 timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
657
653 do { 658 do {
654 if (unlikely(sock->state != SS_CONNECTED)) { 659 if (unlikely(sock->state != SS_CONNECTED)) {
655 if (sock->state == SS_DISCONNECTING) 660 if (sock->state == SS_DISCONNECTING)
@@ -663,16 +668,14 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
663 total_len); 668 total_len);
664 if (likely(res != -ELINKCONG)) 669 if (likely(res != -ELINKCONG))
665 break; 670 break;
666 if (m->msg_flags & MSG_DONTWAIT) { 671 if (timeout_val <= 0L) {
667 res = -EWOULDBLOCK; 672 res = timeout_val ? timeout_val : -EWOULDBLOCK;
668 break; 673 break;
669 } 674 }
670 release_sock(sk); 675 release_sock(sk);
671 res = wait_event_interruptible(*sk_sleep(sk), 676 timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
672 (!tport->congested || !tport->connected)); 677 (!tport->congested || !tport->connected), timeout_val);
673 lock_sock(sk); 678 lock_sock(sk);
674 if (res)
675 break;
676 } while (1); 679 } while (1);
677 680
678 if (iocb) 681 if (iocb)
@@ -1369,7 +1372,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1369 struct msghdr m = {NULL,}; 1372 struct msghdr m = {NULL,};
1370 struct sk_buff *buf; 1373 struct sk_buff *buf;
1371 struct tipc_msg *msg; 1374 struct tipc_msg *msg;
1372 long timeout; 1375 unsigned int timeout;
1373 int res; 1376 int res;
1374 1377
1375 lock_sock(sk); 1378 lock_sock(sk);
@@ -1434,7 +1437,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1434 res = wait_event_interruptible_timeout(*sk_sleep(sk), 1437 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1435 (!skb_queue_empty(&sk->sk_receive_queue) || 1438 (!skb_queue_empty(&sk->sk_receive_queue) ||
1436 (sock->state != SS_CONNECTING)), 1439 (sock->state != SS_CONNECTING)),
1437 timeout ? timeout : MAX_SCHEDULE_TIMEOUT); 1440 timeout ? (long)msecs_to_jiffies(timeout)
1441 : MAX_SCHEDULE_TIMEOUT);
1438 lock_sock(sk); 1442 lock_sock(sk);
1439 1443
1440 if (res > 0) { 1444 if (res > 0) {
@@ -1480,9 +1484,7 @@ static int listen(struct socket *sock, int len)
1480 1484
1481 lock_sock(sk); 1485 lock_sock(sk);
1482 1486
1483 if (sock->state == SS_READY) 1487 if (sock->state != SS_UNCONNECTED)
1484 res = -EOPNOTSUPP;
1485 else if (sock->state != SS_UNCONNECTED)
1486 res = -EINVAL; 1488 res = -EINVAL;
1487 else { 1489 else {
1488 sock->state = SS_LISTENING; 1490 sock->state = SS_LISTENING;
@@ -1510,10 +1512,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1510 1512
1511 lock_sock(sk); 1513 lock_sock(sk);
1512 1514
1513 if (sock->state == SS_READY) {
1514 res = -EOPNOTSUPP;
1515 goto exit;
1516 }
1517 if (sock->state != SS_LISTENING) { 1515 if (sock->state != SS_LISTENING) {
1518 res = -EINVAL; 1516 res = -EINVAL;
1519 goto exit; 1517 goto exit;
@@ -1696,7 +1694,7 @@ static int setsockopt(struct socket *sock,
1696 res = tipc_set_portunreturnable(tport->ref, value); 1694 res = tipc_set_portunreturnable(tport->ref, value);
1697 break; 1695 break;
1698 case TIPC_CONN_TIMEOUT: 1696 case TIPC_CONN_TIMEOUT:
1699 tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value); 1697 tipc_sk(sk)->conn_timeout = value;
1700 /* no need to set "res", since already 0 at this point */ 1698 /* no need to set "res", since already 0 at this point */
1701 break; 1699 break;
1702 default: 1700 default:
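With conn_timeout now stored as the raw millisecond value from user space, the jiffies conversion happens only at its single point of use in connect(), and 0 keeps its old meaning of "wait forever". Condensed from the hunks above:

	/* setsockopt(TIPC_CONN_TIMEOUT): store the millisecond value as-is */
	tipc_sk(sk)->conn_timeout = value;

	/* connect(): convert once, at the only wait that uses it */
	long j = timeout ? (long)msecs_to_jiffies(timeout)
			 : MAX_SCHEDULE_TIMEOUT;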
@@ -1752,7 +1750,7 @@ static int getsockopt(struct socket *sock,
1752 res = tipc_portunreturnable(tport->ref, &value); 1750 res = tipc_portunreturnable(tport->ref, &value);
1753 break; 1751 break;
1754 case TIPC_CONN_TIMEOUT: 1752 case TIPC_CONN_TIMEOUT:
1755 value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout); 1753 value = tipc_sk(sk)->conn_timeout;
1756 /* no need to set "res", since already 0 at this point */ 1754 /* no need to set "res", since already 0 at this point */
1757 break; 1755 break;
1758 case TIPC_NODE_RECVQ_DEPTH: 1756 case TIPC_NODE_RECVQ_DEPTH:
@@ -1790,11 +1788,11 @@ static const struct proto_ops msg_ops = {
1790 .bind = bind, 1788 .bind = bind,
1791 .connect = connect, 1789 .connect = connect,
1792 .socketpair = sock_no_socketpair, 1790 .socketpair = sock_no_socketpair,
1793 .accept = accept, 1791 .accept = sock_no_accept,
1794 .getname = get_name, 1792 .getname = get_name,
1795 .poll = poll, 1793 .poll = poll,
1796 .ioctl = sock_no_ioctl, 1794 .ioctl = sock_no_ioctl,
1797 .listen = listen, 1795 .listen = sock_no_listen,
1798 .shutdown = shutdown, 1796 .shutdown = shutdown,
1799 .setsockopt = setsockopt, 1797 .setsockopt = setsockopt,
1800 .getsockopt = getsockopt, 1798 .getsockopt = getsockopt,
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 6cf726863485..198371723b41 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -151,7 +151,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
151 if (!must && !(sub->filter & TIPC_SUB_PORTS)) 151 if (!must && !(sub->filter & TIPC_SUB_PORTS))
152 return; 152 return;
153 153
154 sub->event_cb(sub, found_lower, found_upper, event, port_ref, node); 154 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
155} 155}
156 156
157/** 157/**
@@ -365,7 +365,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
365 subscr_terminate(subscriber); 365 subscr_terminate(subscriber);
366 return NULL; 366 return NULL;
367 } 367 }
368 sub->event_cb = subscr_send_event;
369 INIT_LIST_HEAD(&sub->nameseq_list); 368 INIT_LIST_HEAD(&sub->nameseq_list);
370 list_add(&sub->subscription_list, &subscriber->subscription_list); 369 list_add(&sub->subscription_list, &subscriber->subscription_list);
371 sub->server_ref = subscriber->port_ref; 370 sub->server_ref = subscriber->port_ref;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d202..4b06ef6f8401 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,16 +39,11 @@
39 39
40struct subscription; 40struct subscription;
41 41
42typedef void (*tipc_subscr_event) (struct subscription *sub,
43 u32 found_lower, u32 found_upper,
44 u32 event, u32 port_ref, u32 node);
45
46/** 42/**
47 * struct subscription - TIPC network topology subscription object 43 * struct subscription - TIPC network topology subscription object
48 * @seq: name sequence associated with subscription 44 * @seq: name sequence associated with subscription
49 * @timeout: duration of subscription (in ms) 45 * @timeout: duration of subscription (in ms)
50 * @filter: event filtering to be done for subscription 46 * @filter: event filtering to be done for subscription
51 * @event_cb: routine invoked when a subscription event is detected
52 * @timer: timer governing subscription duration (optional) 47 * @timer: timer governing subscription duration (optional)
53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 48 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
54 * @subscription_list: adjacent subscriptions in subscriber's subscription list 49 * @subscription_list: adjacent subscriptions in subscriber's subscription list
@@ -61,7 +56,6 @@ struct subscription {
61 struct tipc_name_seq seq; 56 struct tipc_name_seq seq;
62 u32 timeout; 57 u32 timeout;
63 u32 filter; 58 u32 filter;
64 tipc_subscr_event event_cb;
65 struct timer_list timer; 59 struct timer_list timer;
66 struct list_head nameseq_list; 60 struct list_head nameseq_list;
67 struct list_head subscription_list; 61 struct list_head subscription_list;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ec68e1c05b85..466fbcc5cf77 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1381,8 +1381,10 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) 1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1382{ 1382{
1383 int err = 0; 1383 int err = 0;
1384
1384 UNIXCB(skb).pid = get_pid(scm->pid); 1385 UNIXCB(skb).pid = get_pid(scm->pid);
1385 UNIXCB(skb).cred = get_cred(scm->cred); 1386 if (scm->cred)
1387 UNIXCB(skb).cred = get_cred(scm->cred);
1386 UNIXCB(skb).fp = NULL; 1388 UNIXCB(skb).fp = NULL;
1387 if (scm->fp && send_fds) 1389 if (scm->fp && send_fds)
1388 err = unix_attach_fds(scm, skb); 1390 err = unix_attach_fds(scm, skb);
@@ -1392,6 +1394,24 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
1392} 1394}
1393 1395
1394/* 1396/*
1397 * Some apps rely on write() giving SCM_CREDENTIALS.
1398 * We include credentials if source or destination socket
1399 * asserted SOCK_PASSCRED.
1400 */
1401static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1402 const struct sock *other)
1403{
1404 if (UNIXCB(skb).cred)
1405 return;
1406 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1407 !other->sk_socket ||
1408 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1409 UNIXCB(skb).pid = get_pid(task_tgid(current));
1410 UNIXCB(skb).cred = get_current_cred();
1411 }
1412}
1413
1414/*
1395 * Send AF_UNIX data. 1415 * Send AF_UNIX data.
1396 */ 1416 */
1397 1417
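Given the maybe_add_creds() helper above, a peer only sees SCM_CREDENTIALS when one side asserted SOCK_PASSCRED; from user space that assertion is the usual SO_PASSCRED option. A minimal receiving-side sketch using only the standard socket API (needs <sys/socket.h>, and _GNU_SOURCE on glibc for struct ucred):

	int on = 1;
	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

	char data[256];
	char cbuf[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};

	if (recvmsg(fd, &msg, 0) >= 0) {
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
		if (cm && cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_CREDENTIALS) {
			struct ucred *uc = (struct ucred *)CMSG_DATA(cm);
			/* uc->pid, uc->uid, uc->gid identify the sender */
		}
	}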
@@ -1538,6 +1558,7 @@ restart:
1538 1558
1539 if (sock_flag(other, SOCK_RCVTSTAMP)) 1559 if (sock_flag(other, SOCK_RCVTSTAMP))
1540 __net_timestamp(skb); 1560 __net_timestamp(skb);
1561 maybe_add_creds(skb, sock, other);
1541 skb_queue_tail(&other->sk_receive_queue, skb); 1562 skb_queue_tail(&other->sk_receive_queue, skb);
1542 if (max_level > unix_sk(other)->recursion_level) 1563 if (max_level > unix_sk(other)->recursion_level)
1543 unix_sk(other)->recursion_level = max_level; 1564 unix_sk(other)->recursion_level = max_level;
@@ -1652,6 +1673,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1652 (other->sk_shutdown & RCV_SHUTDOWN)) 1673 (other->sk_shutdown & RCV_SHUTDOWN))
1653 goto pipe_err_free; 1674 goto pipe_err_free;
1654 1675
1676 maybe_add_creds(skb, sock, other);
1655 skb_queue_tail(&other->sk_receive_queue, skb); 1677 skb_queue_tail(&other->sk_receive_queue, skb);
1656 if (max_level > unix_sk(other)->recursion_level) 1678 if (max_level > unix_sk(other)->recursion_level)
1657 unix_sk(other)->recursion_level = max_level; 1679 unix_sk(other)->recursion_level = max_level;
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index f346395314ba..c43612ee96bb 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -81,7 +81,6 @@ static struct proc_dir_entry *proc_router;
81 * Iterator 81 * Iterator
82 */ 82 */
83static void *r_start(struct seq_file *m, loff_t *pos) 83static void *r_start(struct seq_file *m, loff_t *pos)
84 __acquires(kernel_lock)
85{ 84{
86 struct wan_device *wandev; 85 struct wan_device *wandev;
87 loff_t l = *pos; 86 loff_t l = *pos;
@@ -103,7 +102,6 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
103} 102}
104 103
105static void r_stop(struct seq_file *m, void *v) 104static void r_stop(struct seq_file *m, void *v)
106 __releases(kernel_lock)
107{ 105{
108 mutex_unlock(&config_mutex); 106 mutex_unlock(&config_mutex);
109} 107}
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d5b7c3779c43..0694d62e4dbc 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -77,6 +77,7 @@
77#include <linux/netdevice.h> 77#include <linux/netdevice.h>
78#include <linux/wimax.h> 78#include <linux/wimax.h>
79#include <linux/security.h> 79#include <linux/security.h>
80#include <linux/export.h>
80#include "wimax-internal.h" 81#include "wimax-internal.h"
81 82
82 83
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 68bedf3e5443..7ceffe39d70e 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -32,6 +32,7 @@
32#include <net/genetlink.h> 32#include <net/genetlink.h>
33#include <linux/wimax.h> 33#include <linux/wimax.h>
34#include <linux/security.h> 34#include <linux/security.h>
35#include <linux/export.h>
35#include "wimax-internal.h" 36#include "wimax-internal.h"
36 37
37#define D_SUBMODULE op_reset 38#define D_SUBMODULE op_reset
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 2609e445fe7d..7ab60babdd22 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -65,6 +65,7 @@
65#include <linux/wimax.h> 65#include <linux/wimax.h>
66#include <linux/security.h> 66#include <linux/security.h>
67#include <linux/rfkill.h> 67#include <linux/rfkill.h>
68#include <linux/export.h>
68#include "wimax-internal.h" 69#include "wimax-internal.h"
69 70
70#define D_SUBMODULE op_rfkill 71#define D_SUBMODULE op_rfkill
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index ee99e7dfcdba..3c65eae701c4 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -55,6 +55,7 @@
55#include <net/genetlink.h> 55#include <net/genetlink.h>
56#include <linux/netdevice.h> 56#include <linux/netdevice.h>
57#include <linux/wimax.h> 57#include <linux/wimax.h>
58#include <linux/module.h>
58#include "wimax-internal.h" 59#include "wimax-internal.h"
59 60
60 61
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c14865172da7..220f3bd176f8 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -582,7 +582,7 @@ int wiphy_register(struct wiphy *wiphy)
582 } 582 }
583 583
584 /* set up regulatory info */ 584 /* set up regulatory info */
585 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 585 regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
586 586
587 list_add_rcu(&rdev->list, &cfg80211_rdev_list); 587 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
588 cfg80211_rdev_list_generation++; 588 cfg80211_rdev_list_generation++;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8672e028022f..b9ec3061ed72 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -279,8 +279,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
279 char *newname); 279 char *newname);
280 280
281void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 281void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
282void wiphy_update_regulatory(struct wiphy *wiphy,
283 enum nl80211_reg_initiator setby);
284 282
285void cfg80211_bss_expire(struct cfg80211_registered_device *dev); 283void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
286void cfg80211_bss_age(struct cfg80211_registered_device *dev, 284void cfg80211_bss_age(struct cfg80211_registered_device *dev,
@@ -377,7 +375,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
377 struct ieee80211_channel *chan, bool offchan, 375 struct ieee80211_channel *chan, bool offchan,
378 enum nl80211_channel_type channel_type, 376 enum nl80211_channel_type channel_type,
379 bool channel_type_valid, unsigned int wait, 377 bool channel_type_valid, unsigned int wait,
380 const u8 *buf, size_t len, u64 *cookie); 378 const u8 *buf, size_t len, bool no_cck,
379 u64 *cookie);
381 380
382/* SME */ 381/* SME */
383int __cfg80211_connect(struct cfg80211_registered_device *rdev, 382int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -408,6 +407,7 @@ void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
408bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); 407bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
409 408
410/* internal helpers */ 409/* internal helpers */
410bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
411int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, 411int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
412 struct key_params *params, int key_idx, 412 struct key_params *params, int key_idx,
413 bool pairwise, const u8 *mac_addr); 413 bool pairwise, const u8 *mac_addr);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index f33fbb79437c..30f20fe4a5fe 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -7,6 +7,7 @@
7#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
8#include <linux/if_arp.h> 8#include <linux/if_arp.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/export.h>
10#include <net/cfg80211.h> 11#include <net/cfg80211.h>
11#include "wext-compat.h" 12#include "wext-compat.h"
12#include "nl80211.h" 13#include "nl80211.h"
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 3268fac5ab22..a55c27b75ee5 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -41,6 +41,11 @@ struct lib80211_crypto_alg {
41static LIST_HEAD(lib80211_crypto_algs); 41static LIST_HEAD(lib80211_crypto_algs);
42static DEFINE_SPINLOCK(lib80211_crypto_lock); 42static DEFINE_SPINLOCK(lib80211_crypto_lock);
43 43
44static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
45 int force);
46static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
47static void lib80211_crypt_deinit_handler(unsigned long data);
48
44const char *print_ssid(char *buf, const char *ssid, u8 ssid_len) 49const char *print_ssid(char *buf, const char *ssid, u8 ssid_len)
45{ 50{
46 const char *s = ssid; 51 const char *s = ssid;
@@ -111,7 +116,8 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
111} 116}
112EXPORT_SYMBOL(lib80211_crypt_info_free); 117EXPORT_SYMBOL(lib80211_crypt_info_free);
113 118
114void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) 119static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
120 int force)
115{ 121{
116 struct lib80211_crypt_data *entry, *next; 122 struct lib80211_crypt_data *entry, *next;
117 unsigned long flags; 123 unsigned long flags;
@@ -131,10 +137,9 @@ void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
131 } 137 }
132 spin_unlock_irqrestore(info->lock, flags); 138 spin_unlock_irqrestore(info->lock, flags);
133} 139}
134EXPORT_SYMBOL(lib80211_crypt_deinit_entries);
135 140
136/* After this, crypt_deinit_list won't accept new members */ 141/* After this, crypt_deinit_list won't accept new members */
137void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) 142static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
138{ 143{
139 unsigned long flags; 144 unsigned long flags;
140 145
@@ -142,9 +147,8 @@ void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
142 info->crypt_quiesced = 1; 147 info->crypt_quiesced = 1;
143 spin_unlock_irqrestore(info->lock, flags); 148 spin_unlock_irqrestore(info->lock, flags);
144} 149}
145EXPORT_SYMBOL(lib80211_crypt_quiescing);
146 150
147void lib80211_crypt_deinit_handler(unsigned long data) 151static void lib80211_crypt_deinit_handler(unsigned long data)
148{ 152{
149 struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data; 153 struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
150 unsigned long flags; 154 unsigned long flags;
@@ -160,7 +164,6 @@ void lib80211_crypt_deinit_handler(unsigned long data)
160 } 164 }
161 spin_unlock_irqrestore(info->lock, flags); 165 spin_unlock_irqrestore(info->lock, flags);
162} 166}
163EXPORT_SYMBOL(lib80211_crypt_deinit_handler);
164 167
165void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, 168void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
166 struct lib80211_crypt_data **crypt) 169 struct lib80211_crypt_data **crypt)
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index dacb3b4b1bdb..755738d26bb4 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -77,8 +77,6 @@ static void *lib80211_ccmp_init(int key_idx)
77 77
78 priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 78 priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
79 if (IS_ERR(priv->tfm)) { 79 if (IS_ERR(priv->tfm)) {
80 printk(KERN_DEBUG "lib80211_crypt_ccmp: could not allocate "
81 "crypto API aes\n");
82 priv->tfm = NULL; 80 priv->tfm = NULL;
83 goto fail; 81 goto fail;
84 } 82 }
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 7ea4f2b0770e..38734846c19e 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -101,7 +101,6 @@ static void *lib80211_tkip_init(int key_idx)
101 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 101 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
102 CRYPTO_ALG_ASYNC); 102 CRYPTO_ALG_ASYNC);
103 if (IS_ERR(priv->tx_tfm_arc4)) { 103 if (IS_ERR(priv->tx_tfm_arc4)) {
104 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
105 priv->tx_tfm_arc4 = NULL; 104 priv->tx_tfm_arc4 = NULL;
106 goto fail; 105 goto fail;
107 } 106 }
@@ -109,7 +108,6 @@ static void *lib80211_tkip_init(int key_idx)
109 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 108 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
110 CRYPTO_ALG_ASYNC); 109 CRYPTO_ALG_ASYNC);
111 if (IS_ERR(priv->tx_tfm_michael)) { 110 if (IS_ERR(priv->tx_tfm_michael)) {
112 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
113 priv->tx_tfm_michael = NULL; 111 priv->tx_tfm_michael = NULL;
114 goto fail; 112 goto fail;
115 } 113 }
@@ -117,7 +115,6 @@ static void *lib80211_tkip_init(int key_idx)
117 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 115 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
118 CRYPTO_ALG_ASYNC); 116 CRYPTO_ALG_ASYNC);
119 if (IS_ERR(priv->rx_tfm_arc4)) { 117 if (IS_ERR(priv->rx_tfm_arc4)) {
120 printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
121 priv->rx_tfm_arc4 = NULL; 118 priv->rx_tfm_arc4 = NULL;
122 goto fail; 119 goto fail;
123 } 120 }
@@ -125,7 +122,6 @@ static void *lib80211_tkip_init(int key_idx)
125 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, 122 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
126 CRYPTO_ALG_ASYNC); 123 CRYPTO_ALG_ASYNC);
127 if (IS_ERR(priv->rx_tfm_michael)) { 124 if (IS_ERR(priv->rx_tfm_michael)) {
128 printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
129 priv->rx_tfm_michael = NULL; 125 priv->rx_tfm_michael = NULL;
130 goto fail; 126 goto fail;
131 } 127 }
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 2f265e033ae2..c1304018fc1c 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -50,16 +50,12 @@ static void *lib80211_wep_init(int keyidx)
50 50
51 priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 51 priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
52 if (IS_ERR(priv->tx_tfm)) { 52 if (IS_ERR(priv->tx_tfm)) {
53 printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
54 "crypto API arc4\n");
55 priv->tx_tfm = NULL; 53 priv->tx_tfm = NULL;
56 goto fail; 54 goto fail;
57 } 55 }
58 56
59 priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 57 priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
60 if (IS_ERR(priv->rx_tfm)) { 58 if (IS_ERR(priv->rx_tfm)) {
61 printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
62 "crypto API arc4\n");
63 priv->rx_tfm = NULL; 59 priv->rx_tfm = NULL;
64 goto fail; 60 goto fail;
65 } 61 }
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5c116083eeca..b7b7868f4128 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,4 +1,5 @@
1#include <linux/ieee80211.h> 1#include <linux/ieee80211.h>
2#include <linux/export.h>
2#include <net/cfg80211.h> 3#include <net/cfg80211.h>
3#include "nl80211.h" 4#include "nl80211.h"
4#include "core.h" 5#include "core.h"
@@ -12,6 +13,7 @@
12#define MESH_HOLD_T 100 13#define MESH_HOLD_T 100
13 14
14#define MESH_PATH_TIMEOUT 5000 15#define MESH_PATH_TIMEOUT 5000
16#define MESH_RANN_INTERVAL 5000
15 17
16/* 18/*
17 * Minimum interval between two consecutive PREQs originated by the same 19 * Minimum interval between two consecutive PREQs originated by the same
@@ -49,6 +51,8 @@ const struct mesh_config default_mesh_config = {
49 .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES, 51 .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
50 .path_refresh_time = MESH_PATH_REFRESH_TIME, 52 .path_refresh_time = MESH_PATH_REFRESH_TIME,
51 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, 53 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
54 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
55 .dot11MeshGateAnnouncementProtocol = false,
52}; 56};
53 57
54const struct mesh_setup default_mesh_setup = { 58const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 832f6574e4ed..21fc9702f81c 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -900,7 +900,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
900 struct ieee80211_channel *chan, bool offchan, 900 struct ieee80211_channel *chan, bool offchan,
901 enum nl80211_channel_type channel_type, 901 enum nl80211_channel_type channel_type,
902 bool channel_type_valid, unsigned int wait, 902 bool channel_type_valid, unsigned int wait,
903 const u8 *buf, size_t len, u64 *cookie) 903 const u8 *buf, size_t len, bool no_cck,
904 u64 *cookie)
904{ 905{
905 struct wireless_dev *wdev = dev->ieee80211_ptr; 906 struct wireless_dev *wdev = dev->ieee80211_ptr;
906 const struct ieee80211_mgmt *mgmt; 907 const struct ieee80211_mgmt *mgmt;
@@ -991,7 +992,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
991 /* Transmit the Action frame as requested by user space */ 992 /* Transmit the Action frame as requested by user space */
992 return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan, 993 return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
993 channel_type, channel_type_valid, 994 channel_type, channel_type_valid,
994 wait, buf, len, cookie); 995 wait, buf, len, no_cck, cookie);
995} 996}
996 997
997bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, 998bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1095,3 +1096,14 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
1095 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); 1096 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
1096} 1097}
1097EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); 1098EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
1099
1100void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
1101 const u8 *bssid, bool preauth, gfp_t gfp)
1102{
1103 struct wireless_dev *wdev = dev->ieee80211_ptr;
1104 struct wiphy *wiphy = wdev->wiphy;
1105 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1106
1107 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
1108}
1109EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ea40d540a990..48260c2d092a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -23,6 +23,12 @@
 #include "nl80211.h"
 #include "reg.h"

+static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
+static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+				   struct genl_info *info,
+				   struct cfg80211_crypto_settings *settings,
+				   int cipher_limit);
+
 static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
 			    struct genl_info *info);
 static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
@@ -178,6 +184,19 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
 	[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
 	[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
+	[NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
+	[NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
+					 .len = IEEE80211_MAX_DATA_LEN },
+	[NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
+					 .len = IEEE80211_MAX_DATA_LEN },
+	[NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
+	[NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
+	[NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
+	[NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 },
+	[NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 },
+	[NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
+	[NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
+	[NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
 };

 /* policy for the key attributes */
@@ -220,6 +239,12 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
 	[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
 };

+static const struct nla_policy
+nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
+	[NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+						 .len = IEEE80211_MAX_SSID_LEN },
+};
+
 /* ifidx get helper */
 static int nl80211_get_ifidx(struct netlink_callback *cb)
 {
@@ -703,11 +728,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		    dev->wiphy.max_scan_ie_len);
 	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
 		    dev->wiphy.max_sched_scan_ie_len);
+	NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+		   dev->wiphy.max_match_sets);

 	if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
 	if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
+	if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
+	if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);

 	NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
 		sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -850,6 +885,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	}
 	CMD(set_channel, SET_CHANNEL);
 	CMD(set_wds_peer, SET_WDS_PEER);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+		CMD(tdls_mgmt, TDLS_MGMT);
+		CMD(tdls_oper, TDLS_OPER);
+	}
 	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
 		CMD(sched_scan_start, START_SCHED_SCAN);

@@ -871,8 +910,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
 		    dev->wiphy.max_remain_on_channel_duration);

-	/* for now at least assume all drivers have it */
-	if (dev->ops->mgmt_tx)
+	if (dev->ops->mgmt_tx_cancel_wait)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);

 	if (mgmt_stypes) {
@@ -1210,6 +1248,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 			goto bad_res;
 		}

+		if (!netdev) {
+			result = -EINVAL;
+			goto bad_res;
+		}
+
 		nla_for_each_nested(nl_txq_params,
 				    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
 				    rem_txq_params) {
@@ -1222,6 +1265,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 				goto bad_res;

 			result = rdev->ops->set_txq_params(&rdev->wiphy,
+							   netdev,
 							   &txq_params);
 			if (result)
 				goto bad_res;
@@ -1985,7 +2029,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	struct beacon_parameters params;
 	int haveinfo = 0, err;

-	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
+	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
+	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
 		return -EINVAL;

 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
@@ -2011,6 +2058,49 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 		if (err)
 			return err;

+		/*
+		 * In theory, some of these attributes could be required for
+		 * NEW_BEACON, but since they were not used when the command was
+		 * originally added, keep them optional for old user space
+		 * programs to work with drivers that do not need the additional
+		 * information.
+		 */
+		if (info->attrs[NL80211_ATTR_SSID]) {
+			params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+			params.ssid_len =
+				nla_len(info->attrs[NL80211_ATTR_SSID]);
+			if (params.ssid_len == 0 ||
+			    params.ssid_len > IEEE80211_MAX_SSID_LEN)
+				return -EINVAL;
+		}
+
+		if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+			params.hidden_ssid = nla_get_u32(
+				info->attrs[NL80211_ATTR_HIDDEN_SSID]);
+			if (params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_NOT_IN_USE &&
+			    params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_ZERO_LEN &&
+			    params.hidden_ssid !=
+			    NL80211_HIDDEN_SSID_ZERO_CONTENTS)
+				return -EINVAL;
+		}
+
+		params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
+
+		if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+			params.auth_type = nla_get_u32(
+				info->attrs[NL80211_ATTR_AUTH_TYPE]);
+			if (!nl80211_valid_auth_type(params.auth_type))
+				return -EINVAL;
+		} else
+			params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+		err = nl80211_crypto_settings(rdev, info, &params.crypto,
+					      NL80211_MAX_NR_CIPHER_SUITES);
+		if (err)
+			return err;
+
 		call = rdev->ops->add_beacon;
 		break;
 	case NL80211_CMD_SET_BEACON:
@@ -2041,6 +2131,25 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	if (!haveinfo)
 		return -EINVAL;

+	if (info->attrs[NL80211_ATTR_IE]) {
+		params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
+		params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+	}
+
+	if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
+		params.proberesp_ies =
+			nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+		params.proberesp_ies_len =
+			nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+	}
+
+	if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
+		params.assocresp_ies =
+			nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+		params.assocresp_ies_len =
+			nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+	}
+
 	err = call(&rdev->wiphy, dev, &params);
 	if (!err && params.interval)
 		wdev->beacon_interval = params.interval;
@@ -2235,8 +2344,16 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,

 		nla_nest_end(msg, bss_param);
 	}
+	if (sinfo->filled & STATION_INFO_STA_FLAGS)
+		NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
+			sizeof(struct nl80211_sta_flag_update),
+			&sinfo->sta_flags);
 	nla_nest_end(msg, sinfoattr);

+	if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+			sinfo->assoc_req_ies);
+
 	return genlmsg_end(msg, hdr);

 nla_put_failure:
@@ -2264,6 +2381,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
 	}

 	while (1) {
+		memset(&sinfo, 0, sizeof(sinfo));
 		err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
 					     mac_addr, &sinfo);
 		if (err == -ENOENT)
@@ -2416,18 +2534,25 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		/* disallow everything but AUTHORIZED flag */
+		/* disallow things sta doesn't support */
 		if (params.plink_action)
 			err = -EINVAL;
 		if (params.vlan)
 			err = -EINVAL;
-		if (params.supported_rates)
+		if (params.supported_rates &&
+		    !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
 			err = -EINVAL;
 		if (params.ht_capa)
 			err = -EINVAL;
 		if (params.listen_interval >= 0)
 			err = -EINVAL;
-		if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+		if (params.sta_flags_mask &
+		    ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+		      BIT(NL80211_STA_FLAG_TDLS_PEER)))
+			err = -EINVAL;
+		/* can't change the TDLS bit */
+		if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+		    (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
 			err = -EINVAL;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
@@ -2465,6 +2590,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }

+static struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+	[NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
+	[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+};
+
 static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2510,10 +2641,50 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	if (parse_station_flags(info, &params))
 		return -EINVAL;

+	/* parse WME attributes if sta is WME capable */
+	if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+	    (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+	    info->attrs[NL80211_ATTR_STA_WME]) {
+		struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+		struct nlattr *nla;
+
+		nla = info->attrs[NL80211_ATTR_STA_WME];
+		err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+				       nl80211_sta_wme_policy);
+		if (err)
+			return err;
+
+		if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+			params.uapsd_queues =
+			     nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+		if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+			return -EINVAL;
+
+		if (tb[NL80211_STA_WME_MAX_SP])
+			params.max_sp =
+			     nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+		if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+			return -EINVAL;
+
+		params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+	}
+
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+	/*
+	 * Only managed stations can add TDLS peers, and only when the
+	 * wiphy supports external TDLS setup.
+	 */
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+	    !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+	      (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+	      (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
 		return -EINVAL;

 	err = get_vlan(info, rdev, &params.vlan);
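
For context, the nested NL80211_ATTR_STA_WME attribute parsed above would be built on the user-space side roughly as follows (a hedged libnl-3 sketch; sock, nl80211_id, ifindex and peer are assumed to be set up elsewhere, and the other mandatory NEW_STATION attributes are omitted):

	#include <netlink/genl/genl.h>
	#include <linux/nl80211.h>

	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *wme;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_NEW_STATION, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, peer);

	/* Nested WME attributes, validated in the kernel by
	 * nl80211_sta_wme_policy above. */
	wme = nla_nest_start(msg, NL80211_ATTR_STA_WME);
	nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES, 0x0f); /* all four ACs */
	nla_put_u8(msg, NL80211_STA_WME_MAX_SP, 2);
	nla_nest_end(msg, wme);

	nl_send_auto_complete(sock, msg);
	nlmsg_free(msg);
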
@@ -2955,6 +3126,10 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
 		    cur_params.dot11MeshHWMPnetDiameterTraversalTime);
 	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
 		   cur_params.dot11MeshHWMPRootMode);
+	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+		    cur_params.dot11MeshHWMPRannInterval);
+	NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+		   cur_params.dot11MeshGateAnnouncementProtocol);
 	nla_nest_end(msg, pinfoattr);
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -2982,6 +3157,9 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
 	[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
 	[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
+	[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
 };

 static const struct nla_policy
@@ -3060,6 +3238,14 @@ do {\
 				  dot11MeshHWMPRootMode, mask,
 				  NL80211_MESHCONF_HWMP_ROOTMODE,
 				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshHWMPRannInterval, mask,
+				  NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshGateAnnouncementProtocol, mask,
+				  NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+				  nla_get_u8);
 	if (mask_out)
 		*mask_out = mask;

@@ -3477,6 +3663,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 		}
 	}

+	request->no_cck =
+		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
 	request->dev = dev;
 	request->wiphy = &rdev->wiphy;

@@ -3503,10 +3692,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	struct net_device *dev = info->user_ptr[1];
 	struct nlattr *attr;
 	struct wiphy *wiphy;
-	int err, tmp, n_ssids = 0, n_channels, i;
+	int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
 	u32 interval;
 	enum ieee80211_band band;
 	size_t ie_len;
+	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];

 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_start)
@@ -3545,6 +3735,15 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (n_ssids > wiphy->max_sched_scan_ssids)
 		return -EINVAL;

+	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+		nla_for_each_nested(attr,
+				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+				    tmp)
+			n_match_sets++;
+
+	if (n_match_sets > wiphy->max_match_sets)
+		return -EINVAL;
+
 	if (info->attrs[NL80211_ATTR_IE])
 		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	else
@@ -3562,6 +3761,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,

 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
+			+ sizeof(*request->match_sets) * n_match_sets
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
 	if (!request) {
@@ -3579,6 +3779,18 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 		request->ie = (void *)(request->channels + n_channels);
 	}

+	if (n_match_sets) {
+		if (request->ie)
+			request->match_sets = (void *)(request->ie + ie_len);
+		else if (request->ssids)
+			request->match_sets =
+				(void *)(request->ssids + n_ssids);
+		else
+			request->match_sets =
+				(void *)(request->channels + n_channels);
+	}
+	request->n_match_sets = n_match_sets;
+
 	i = 0;
 	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
 		/* user specified, bail out if channel not found */
@@ -3643,6 +3855,31 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 		}
 	}

+	i = 0;
+	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+		nla_for_each_nested(attr,
+				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+				    tmp) {
+			struct nlattr *ssid;
+
+			nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+				  nla_data(attr), nla_len(attr),
+				  nl80211_match_policy);
+			ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+			if (ssid) {
+				if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
+					err = -EINVAL;
+					goto out_free;
+				}
+				memcpy(request->match_sets[i].ssid.ssid,
+				       nla_data(ssid), nla_len(ssid));
+				request->match_sets[i].ssid.ssid_len =
+					nla_len(ssid);
+			}
+			i++;
+		}
+	}
+
 	if (info->attrs[NL80211_ATTR_IE]) {
 		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 		memcpy((void *)request->ie,
@@ -3935,22 +4172,6 @@ static bool nl80211_valid_wpa_versions(u32 wpa_versions)
 			       NL80211_WPA_VERSION_2));
 }

-static bool nl80211_valid_akm_suite(u32 akm)
-{
-	return akm == WLAN_AKM_SUITE_8021X ||
-		akm == WLAN_AKM_SUITE_PSK;
-}
-
-static bool nl80211_valid_cipher_suite(u32 cipher)
-{
-	return cipher == WLAN_CIPHER_SUITE_WEP40 ||
-		cipher == WLAN_CIPHER_SUITE_WEP104 ||
-		cipher == WLAN_CIPHER_SUITE_TKIP ||
-		cipher == WLAN_CIPHER_SUITE_CCMP ||
-		cipher == WLAN_CIPHER_SUITE_AES_CMAC;
-}
-
-
 static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4083,7 +4304,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 		memcpy(settings->ciphers_pairwise, data, len);

 		for (i = 0; i < settings->n_ciphers_pairwise; i++)
-			if (!nl80211_valid_cipher_suite(
+			if (!cfg80211_supported_cipher_suite(
+					&rdev->wiphy,
 					settings->ciphers_pairwise[i]))
 				return -EINVAL;
 	}
@@ -4091,7 +4313,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 	if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
 		settings->cipher_group =
 			nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
-		if (!nl80211_valid_cipher_suite(settings->cipher_group))
+		if (!cfg80211_supported_cipher_suite(&rdev->wiphy,
+						     settings->cipher_group))
 			return -EINVAL;
 	}

@@ -4104,7 +4327,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,

 	if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
 		void *data;
-		int len, i;
+		int len;

 		data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
 		len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
@@ -4117,10 +4340,6 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 			return -EINVAL;

 		memcpy(settings->akm_suites, data, len);
-
-		for (i = 0; i < settings->n_akm_suites; i++)
-			if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
-				return -EINVAL;
 	}

 	return 0;
@@ -4339,8 +4558,12 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)

 	wiphy = &rdev->wiphy;

-	if (info->attrs[NL80211_ATTR_MAC])
+	if (info->attrs[NL80211_ATTR_MAC]) {
 		ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+		if (!is_valid_ether_addr(ibss.bssid))
+			return -EINVAL;
+	}
 	ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
 	ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

@@ -4777,6 +5000,57 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
 	return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
 }

+static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	u8 action_code, dialog_token;
+	u16 status_code;
+	u8 *peer;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+	    !rdev->ops->tdls_mgmt)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_TDLS_ACTION] ||
+	    !info->attrs[NL80211_ATTR_STATUS_CODE] ||
+	    !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] ||
+	    !info->attrs[NL80211_ATTR_IE] ||
+	    !info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+	action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
+	status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+	dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+
+	return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+				    dialog_token, status_code,
+				    nla_data(info->attrs[NL80211_ATTR_IE]),
+				    nla_len(info->attrs[NL80211_ATTR_IE]));
+}
+
+static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	enum nl80211_tdls_operation operation;
+	u8 *peer;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+	    !rdev->ops->tdls_oper)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] ||
+	    !info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
+	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation);
+}
+
 static int nl80211_remain_on_channel(struct sk_buff *skb,
 				     struct genl_info *info)
 {
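
The two handlers above map straight onto the new driver ops. From user space, tearing down a TDLS link would look roughly like this (hedged libnl-3 sketch, with the same setup assumptions as the previous example):

	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_TDLS_OPER, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, peer);
	nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, NL80211_TDLS_TEARDOWN);

	nl_send_auto_complete(sock, msg);
	nlmsg_free(msg);
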
@@ -4997,6 +5271,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *msg;
 	unsigned int wait = 0;
 	bool offchan;
+	bool no_cck;

 	if (!info->attrs[NL80211_ATTR_FRAME] ||
 	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5033,6 +5308,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)

 	offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];

+	no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 	chan = rdev_freq_to_chan(rdev, freq, channel_type);
 	if (chan == NULL)
@@ -5053,7 +5330,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 				       channel_type_valid, wait,
 				       nla_data(info->attrs[NL80211_ATTR_FRAME]),
 				       nla_len(info->attrs[NL80211_ATTR_FRAME]),
-				       &cookie);
+				       no_cck, &cookie);
 	if (err)
 		goto free_msg;

@@ -6089,6 +6366,22 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_TDLS_MGMT,
+		.doit = nl80211_tdls_mgmt,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_TDLS_OPER,
+		.doit = nl80211_tdls_oper,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };

 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7078,6 +7371,52 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
 	nlmsg_free(msg);
 }

+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+				    struct net_device *netdev, int index,
+				    const u8 *bssid, bool preauth, gfp_t gfp)
+{
+	struct sk_buff *msg;
+	struct nlattr *attr;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+	attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
+	if (!attr)
+		goto nla_put_failure;
+
+	NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
+	NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
+	if (preauth)
+		NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+
+	nla_nest_end(msg, attr);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
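
Throughout these handlers the NLA_PUT* macros jump to the local nla_put_failure label when the message buffer runs out of room; in kernels of this vintage the macro expands roughly to the following (quoted from memory of include/net/netlink.h, so treat it as a sketch):

	#define NLA_PUT(skb, attrtype, attrlen, data) \
		do { \
			if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
				goto nla_put_failure; \
		} while (0)
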
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 5d69c56400ae..f24a1fbeaf19 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -113,4 +113,8 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
 			      struct net_device *netdev, const u8 *bssid,
 			      const u8 *replay_ctr, gfp_t gfp);

+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+				    struct net_device *netdev, int index,
+				    const u8 *bssid, bool preauth, gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index dbe35e138e94..c4ad7958af52 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -15,6 +15,7 @@
  */

 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <asm/unaligned.h>
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 68a471ba193f..6acba9d18cc8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -36,12 +36,14 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/random.h>
 #include <linux/ctype.h>
 #include <linux/nl80211.h>
 #include <linux/platform_device.h>
+#include <linux/moduleparam.h>
 #include <net/cfg80211.h>
 #include "core.h"
 #include "reg.h"
@@ -49,10 +51,8 @@
 #include "nl80211.h"

 #ifdef CONFIG_CFG80211_REG_DEBUG
 #define REG_DBG_PRINT(format, args...) \
-	do { \
-		printk(KERN_DEBUG pr_fmt(format), ##args); \
-	} while (0)
+	printk(KERN_DEBUG pr_fmt(format), ##args)
 #else
 #define REG_DBG_PRINT(args...)
 #endif
@@ -753,9 +753,10 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
 		      chan->center_freq,
 		      KHZ_TO_MHZ(desired_bw_khz));

-	REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
+	REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
 		      freq_range->start_freq_khz,
 		      freq_range->end_freq_khz,
+		      freq_range->max_bandwidth_khz,
 		      max_antenna_gain,
 		      power_rule->max_eirp);
 }
@@ -891,7 +892,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
 		REG_DBG_PRINT("Ignoring regulatory request %s "
 			      "since the driver uses its own custom "
-			      "regulatory domain ",
+			      "regulatory domain\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -905,7 +906,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	    !is_world_regdom(last_request->alpha2)) {
 		REG_DBG_PRINT("Ignoring regulatory request %s "
 			      "since the driver requires its own regulatory "
-			      "domain to be set first",
+			      "domain to be set first\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -913,14 +914,6 @@ static bool ignore_reg_update(struct wiphy *wiphy,
 	return false;
 }

-static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
-{
-	struct cfg80211_registered_device *rdev;
-
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
-		wiphy_update_regulatory(&rdev->wiphy, initiator);
-}
-
 static void handle_reg_beacon(struct wiphy *wiphy,
 			      unsigned int chan_idx,
 			      struct reg_beacon *reg_beacon)
@@ -1120,11 +1113,13 @@ static void reg_process_ht_flags(struct wiphy *wiphy)

 }

-void wiphy_update_regulatory(struct wiphy *wiphy,
-			     enum nl80211_reg_initiator initiator)
+static void wiphy_update_regulatory(struct wiphy *wiphy,
+				    enum nl80211_reg_initiator initiator)
 {
 	enum ieee80211_band band;

+	assert_reg_lock();
+
 	if (ignore_reg_update(wiphy, initiator))
 		return;

@@ -1139,6 +1134,22 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
 		wiphy->reg_notifier(wiphy, last_request);
 }

+void regulatory_update(struct wiphy *wiphy,
+		       enum nl80211_reg_initiator setby)
+{
+	mutex_lock(&reg_mutex);
+	wiphy_update_regulatory(wiphy, setby);
+	mutex_unlock(&reg_mutex);
+}
+
+static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
+{
+	struct cfg80211_registered_device *rdev;
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+		wiphy_update_regulatory(&rdev->wiphy, initiator);
+}
+
 static void handle_channel_custom(struct wiphy *wiphy,
 				  enum ieee80211_band band,
 				  unsigned int chan_idx,
@@ -1475,7 +1486,7 @@ static void reg_process_pending_hints(void)
 	/* When last_request->processed becomes true this will be rescheduled */
 	if (last_request && !last_request->processed) {
 		REG_DBG_PRINT("Pending regulatory request, waiting "
-			      "for it to be processed...");
+			      "for it to be processed...\n");
 		goto out;
 	}

@@ -2188,7 +2199,7 @@ out:
 static void reg_timeout_work(struct work_struct *work)
 {
 	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
-		      "restoring regulatory settings");
+		      "restoring regulatory settings\n");
 	restore_regulatory_settings(true);
 }

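
The reshuffle above follows a common kernel locking idiom: the static worker assumes reg_mutex is held (and asserts it), while a small exported wrapper takes the lock. In miniature, and only as an illustration of the pattern (example_* names are hypothetical):

	static DEFINE_MUTEX(example_mutex);

	static void do_update_locked(void)
	{
		/* like assert_reg_lock(): catch callers without the lock */
		WARN_ON(!mutex_is_locked(&example_mutex));
		/* ... work that relies on the lock ... */
	}

	void do_update(void)	/* like regulatory_update() */
	{
		mutex_lock(&example_mutex);
		do_update_locked();
		mutex_unlock(&example_mutex);
	}
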
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b67d1c3a2fb9..4a56799d868d 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,6 +16,8 @@ void regulatory_exit(void);

 int set_regdom(const struct ieee80211_regdomain *rd);

+void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+
 /**
  * regulatory_hint_found_beacon - hints a beacon was found on a channel
  * @wiphy: the wireless device where the beacon was found on
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2936cb809152..0fb142410404 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -12,6 +12,7 @@
 #include <linux/etherdevice.h>
 #include <net/arp.h>
 #include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
 #include <net/iw_handler.h>
 #include "core.h"
 #include "nl80211.h"
@@ -227,6 +228,33 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
 }
 EXPORT_SYMBOL(cfg80211_find_ie);

+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+				  const u8 *ies, int len)
+{
+	struct ieee80211_vendor_ie *ie;
+	const u8 *pos = ies, *end = ies + len;
+	int ie_oui;
+
+	while (pos < end) {
+		pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
+				       end - pos);
+		if (!pos)
+			return NULL;
+
+		if (end - pos < sizeof(*ie))
+			return NULL;
+
+		ie = (struct ieee80211_vendor_ie *)pos;
+		ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
+		if (ie_oui == oui && ie->oui_type == oui_type)
+			return pos;
+
+		pos += 2 + ie->len;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(cfg80211_find_vendor_ie);
+
 static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
 {
 	const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
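
A usage sketch for the new helper: finding the WMM parameter element (Microsoft OUI 00:50:f2, OUI type 2) in a frame's IE buffer; ies and ies_len are assumed to point at the element area of the frame.

	const u8 *wmm = cfg80211_find_vendor_ie(0x0050f2, 2, ies, ies_len);
	if (wmm) {
		/* wmm points at the element header: wmm[0] is
		 * WLAN_EID_VENDOR_SPECIFIC, wmm[1] the element length,
		 * and the OUI/payload starts at wmm + 2. */
	}
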
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dec0fa28372e..0acfdc9beacf 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/wireless.h>
+#include <linux/export.h>
 #include <net/iw_handler.h>
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
@@ -110,17 +111,22 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 	else {
 		int i = 0, j;
 		enum ieee80211_band band;
+		struct ieee80211_supported_band *bands;
+		struct ieee80211_channel *channel;

 		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-			if (!wdev->wiphy->bands[band])
+			bands = wdev->wiphy->bands[band];
+			if (!bands)
 				continue;
-			for (j = 0; j < wdev->wiphy->bands[band]->n_channels;
-			     i++, j++)
-				request->channels[i] =
-					&wdev->wiphy->bands[band]->channels[j];
-			request->rates[band] =
-				(1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
+			for (j = 0; j < bands->n_channels; j++) {
+				channel = &bands->channels[j];
+				if (channel->flags & IEEE80211_CHAN_DISABLED)
+					continue;
+				request->channels[i++] = channel;
+			}
+			request->rates[band] = (1 << bands->n_bitrates) - 1;
 		}
+		n_channels = i;
 	}
 	request->n_channels = n_channels;
 	request->ssids = (void *)&request->channels[n_channels];
diff --git a/net/wireless/util.c b/net/wireless/util.c
index be75a3a0424e..4dde429441d2 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -3,9 +3,11 @@
3 * 3 *
4 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#include <linux/export.h>
6#include <linux/bitops.h> 7#include <linux/bitops.h>
7#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
8#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/crc32.h>
9#include <net/cfg80211.h> 11#include <net/cfg80211.h>
10#include <net/ip.h> 12#include <net/ip.h>
11#include "core.h" 13#include "core.h"
@@ -150,12 +152,19 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
150 set_mandatory_flags_band(wiphy->bands[band], band); 152 set_mandatory_flags_band(wiphy->bands[band], band);
151} 153}
152 154
155bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
156{
157 int i;
158 for (i = 0; i < wiphy->n_cipher_suites; i++)
159 if (cipher == wiphy->cipher_suites[i])
160 return true;
161 return false;
162}
163
153int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, 164int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
154 struct key_params *params, int key_idx, 165 struct key_params *params, int key_idx,
155 bool pairwise, const u8 *mac_addr) 166 bool pairwise, const u8 *mac_addr)
156{ 167{
157 int i;
158
159 if (key_idx > 5) 168 if (key_idx > 5)
160 return -EINVAL; 169 return -EINVAL;
161 170
@@ -225,10 +234,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
225 } 234 }
226 } 235 }
227 236
228 for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) 237 if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher))
229 if (params->cipher == rdev->wiphy.cipher_suites[i])
230 break;
231 if (i == rdev->wiphy.n_cipher_suites)
232 return -EINVAL; 238 return -EINVAL;
233 239
234 return 0; 240 return 0;
@@ -391,8 +397,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
391 } 397 }
392 break; 398 break;
393 case cpu_to_le16(0): 399 case cpu_to_le16(0):
394 if (iftype != NL80211_IFTYPE_ADHOC) 400 if (iftype != NL80211_IFTYPE_ADHOC &&
395 return -1; 401 iftype != NL80211_IFTYPE_STATION)
402 return -1;
396 break; 403 break;
397 } 404 }
398 405
@@ -512,10 +519,9 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
512 if (head_need) 519 if (head_need)
513 skb_orphan(skb); 520 skb_orphan(skb);
514 521
515 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) { 522 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
516 pr_err("failed to reallocate Tx buffer\n");
517 return -ENOMEM; 523 return -ENOMEM;
518 } 524
519 skb->truesize += head_need; 525 skb->truesize += head_need;
520 } 526 }
521 527
@@ -1044,3 +1050,170 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
1044 1050
1045 return 0; 1051 return 0;
1046} 1052}
1053
1054u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
1055 struct ieee802_11_elems *elems,
1056 u64 filter, u32 crc)
1057{
1058 size_t left = len;
1059 u8 *pos = start;
1060 bool calc_crc = filter != 0;
1061
1062 memset(elems, 0, sizeof(*elems));
1063 elems->ie_start = start;
1064 elems->total_len = len;
1065
1066 while (left >= 2) {
1067 u8 id, elen;
1068
1069 id = *pos++;
1070 elen = *pos++;
1071 left -= 2;
1072
1073 if (elen > left)
1074 break;
1075
1076 if (calc_crc && id < 64 && (filter & (1ULL << id)))
1077 crc = crc32_be(crc, pos - 2, elen + 2);
1078
1079 switch (id) {
1080 case WLAN_EID_SSID:
1081 elems->ssid = pos;
1082 elems->ssid_len = elen;
1083 break;
1084 case WLAN_EID_SUPP_RATES:
1085 elems->supp_rates = pos;
1086 elems->supp_rates_len = elen;
1087 break;
1088 case WLAN_EID_FH_PARAMS:
1089 elems->fh_params = pos;
1090 elems->fh_params_len = elen;
1091 break;
1092 case WLAN_EID_DS_PARAMS:
1093 elems->ds_params = pos;
1094 elems->ds_params_len = elen;
1095 break;
1096 case WLAN_EID_CF_PARAMS:
1097 elems->cf_params = pos;
1098 elems->cf_params_len = elen;
1099 break;
1100 case WLAN_EID_TIM:
1101 if (elen >= sizeof(struct ieee80211_tim_ie)) {
1102 elems->tim = (void *)pos;
1103 elems->tim_len = elen;
1104 }
1105 break;
1106 case WLAN_EID_IBSS_PARAMS:
1107 elems->ibss_params = pos;
1108 elems->ibss_params_len = elen;
1109 break;
1110 case WLAN_EID_CHALLENGE:
1111 elems->challenge = pos;
1112 elems->challenge_len = elen;
1113 break;
1114 case WLAN_EID_VENDOR_SPECIFIC:
1115 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
1116 pos[2] == 0xf2) {
1117 /* Microsoft OUI (00:50:F2) */
1118
1119 if (calc_crc)
1120 crc = crc32_be(crc, pos - 2, elen + 2);
1121
1122 if (pos[3] == 1) {
1123 /* OUI Type 1 - WPA IE */
1124 elems->wpa = pos;
1125 elems->wpa_len = elen;
1126 } else if (elen >= 5 && pos[3] == 2) {
1127 /* OUI Type 2 - WMM IE */
1128 if (pos[4] == 0) {
1129 elems->wmm_info = pos;
1130 elems->wmm_info_len = elen;
1131 } else if (pos[4] == 1) {
1132 elems->wmm_param = pos;
1133 elems->wmm_param_len = elen;
1134 }
1135 }
1136 }
1137 break;
1138 case WLAN_EID_RSN:
1139 elems->rsn = pos;
1140 elems->rsn_len = elen;
1141 break;
1142 case WLAN_EID_ERP_INFO:
1143 elems->erp_info = pos;
1144 elems->erp_info_len = elen;
1145 break;
1146 case WLAN_EID_EXT_SUPP_RATES:
1147 elems->ext_supp_rates = pos;
1148 elems->ext_supp_rates_len = elen;
1149 break;
1150 case WLAN_EID_HT_CAPABILITY:
1151 if (elen >= sizeof(struct ieee80211_ht_cap))
1152 elems->ht_cap_elem = (void *)pos;
1153 break;
1154 case WLAN_EID_HT_INFORMATION:
1155 if (elen >= sizeof(struct ieee80211_ht_info))
1156 elems->ht_info_elem = (void *)pos;
1157 break;
1158 case WLAN_EID_MESH_ID:
1159 elems->mesh_id = pos;
1160 elems->mesh_id_len = elen;
1161 break;
1162 case WLAN_EID_MESH_CONFIG:
1163 if (elen >= sizeof(struct ieee80211_meshconf_ie))
1164 elems->mesh_config = (void *)pos;
1165 break;
1166 case WLAN_EID_PEER_MGMT:
1167 elems->peering = pos;
1168 elems->peering_len = elen;
1169 break;
1170 case WLAN_EID_PREQ:
1171 elems->preq = pos;
1172 elems->preq_len = elen;
1173 break;
1174 case WLAN_EID_PREP:
1175 elems->prep = pos;
1176 elems->prep_len = elen;
1177 break;
1178 case WLAN_EID_PERR:
1179 elems->perr = pos;
1180 elems->perr_len = elen;
1181 break;
1182 case WLAN_EID_RANN:
1183 if (elen >= sizeof(struct ieee80211_rann_ie))
1184 elems->rann = (void *)pos;
1185 break;
1186 case WLAN_EID_CHANNEL_SWITCH:
1187 elems->ch_switch_elem = pos;
1188 elems->ch_switch_elem_len = elen;
1189 break;
1190 case WLAN_EID_QUIET:
1191 if (!elems->quiet_elem) {
1192 elems->quiet_elem = pos;
1193 elems->quiet_elem_len = elen;
1194 }
1195 elems->num_of_quiet_elem++;
1196 break;
1197 case WLAN_EID_COUNTRY:
1198 elems->country_elem = pos;
1199 elems->country_elem_len = elen;
1200 break;
1201 case WLAN_EID_PWR_CONSTRAINT:
1202 elems->pwr_constr_elem = pos;
1203 elems->pwr_constr_elem_len = elen;
1204 break;
1205 case WLAN_EID_TIMEOUT_INTERVAL:
1206 elems->timeout_int = pos;
1207 elems->timeout_int_len = elen;
1208 break;
1209 default:
1210 break;
1211 }
1212
1213 left -= elen;
1214 pos += elen;
1215 }
1216
1217 return crc;
1218}
1219EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
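
A hedged sketch of how a caller consumes the parser: extract the SSID from a beacon's variable-length IE area (mgmt, len and ssid_buf are assumed; passing filter = 0 skips the CRC accounting).

	struct ieee802_11_elems elems;
	u8 *ies = mgmt->u.beacon.variable;
	size_t ies_len = len - offsetof(struct ieee80211_mgmt,
					u.beacon.variable);

	ieee802_11_parse_elems_crc(ies, ies_len, &elems, 0, 0);
	if (elems.ssid && elems.ssid_len <= IEEE80211_MAX_SSID_LEN)
		/* elems.ssid points into the frame; copy it out before
		 * the buffer is freed. */
		memcpy(ssid_buf, elems.ssid, elems.ssid_len);
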
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0bf169bb770e..6897436b1d3f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -8,6 +8,7 @@
  * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
  */

+#include <linux/export.h>
 #include <linux/wireless.h>
 #include <linux/nl80211.h>
 #include <linux/if_arp.h>
@@ -15,6 +16,7 @@
 #include <linux/slab.h>
 #include <net/iw_handler.h>
 #include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
 #include "wext-compat.h"
 #include "core.h"

@@ -363,9 +365,9 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);

-int cfg80211_wext_siwretry(struct net_device *dev,
-			   struct iw_request_info *info,
-			   struct iw_param *retry, char *extra)
+static int cfg80211_wext_siwretry(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_param *retry, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -402,7 +404,6 @@ int cfg80211_wext_siwretry(struct net_device *dev,

 	return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);

 int cfg80211_wext_giwretry(struct net_device *dev,
 			   struct iw_request_info *info,
@@ -593,9 +594,9 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
 	return err;
 }

-int cfg80211_wext_siwencode(struct net_device *dev,
-			    struct iw_request_info *info,
-			    struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_siwencode(struct net_device *dev,
+				   struct iw_request_info *info,
+				   struct iw_point *erq, char *keybuf)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -652,11 +653,10 @@ int cfg80211_wext_siwencode(struct net_device *dev,
 			     wdev->wext.default_key == -1,
 			     idx, &params);
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);

-int cfg80211_wext_siwencodeext(struct net_device *dev,
-			       struct iw_request_info *info,
-			       struct iw_point *erq, char *extra)
+static int cfg80211_wext_siwencodeext(struct net_device *dev,
+				      struct iw_request_info *info,
+				      struct iw_point *erq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -744,11 +744,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
 			       ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
 			       idx, &params);
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);

-int cfg80211_wext_giwencode(struct net_device *dev,
-			    struct iw_request_info *info,
-			    struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_giwencode(struct net_device *dev,
+				   struct iw_request_info *info,
+				   struct iw_point *erq, char *keybuf)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int idx;
@@ -782,11 +781,10 @@ int cfg80211_wext_giwencode(struct net_device *dev,

 	return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);

-int cfg80211_wext_siwfreq(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_freq *wextfreq, char *extra)
+static int cfg80211_wext_siwfreq(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_freq *wextfreq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -815,11 +813,10 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
 		return -EOPNOTSUPP;
 	}
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);

-int cfg80211_wext_giwfreq(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_freq *freq, char *extra)
+static int cfg80211_wext_giwfreq(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_freq *freq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;

@@ -836,11 +833,10 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
 		return 0;
 	}
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq);

-int cfg80211_wext_siwtxpower(struct net_device *dev,
-			     struct iw_request_info *info,
-			     union iwreq_data *data, char *extra)
+static int cfg80211_wext_siwtxpower(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -889,11 +885,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,

 	return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);

-int cfg80211_wext_giwtxpower(struct net_device *dev,
-			     struct iw_request_info *info,
-			     union iwreq_data *data, char *extra)
+static int cfg80211_wext_giwtxpower(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -919,7 +914,6 @@ int cfg80211_wext_giwtxpower(struct net_device *dev,

 	return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);

 static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
 				 s32 auth_alg)
@@ -1070,9 +1064,9 @@ static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
 	return 0;
 }

-int cfg80211_wext_siwauth(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_param *data, char *extra)
+static int cfg80211_wext_siwauth(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;

@@ -1102,21 +1096,19 @@ int cfg80211_wext_siwauth(struct net_device *dev,
 		return -EOPNOTSUPP;
 	}
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth);

-int cfg80211_wext_giwauth(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_param *data, char *extra)
+static int cfg80211_wext_giwauth(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *data, char *extra)
 {
 	/* XXX: what do we need? */

 	return -EOPNOTSUPP;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth);

-int cfg80211_wext_siwpower(struct net_device *dev,
-			   struct iw_request_info *info,
-			   struct iw_param *wrq, char *extra)
+static int cfg80211_wext_siwpower(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_param *wrq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1160,11 +1152,10 @@ int cfg80211_wext_siwpower(struct net_device *dev,
 	return 0;

 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower);

-int cfg80211_wext_giwpower(struct net_device *dev,
-			   struct iw_request_info *info,
-			   struct iw_param *wrq, char *extra)
+static int cfg80211_wext_giwpower(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_param *wrq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;

@@ -1172,7 +1163,6 @@ int cfg80211_wext_giwpower(struct net_device *dev,

 	return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower);

 static int cfg80211_wds_wext_siwap(struct net_device *dev,
 				   struct iw_request_info *info,
@@ -1218,9 +1208,9 @@ static int cfg80211_wds_wext_giwap(struct net_device *dev,
 	return 0;
 }

-int cfg80211_wext_siwrate(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_param *rate, char *extra)
+static int cfg80211_wext_siwrate(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rate, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1268,11 +1258,10 @@ int cfg80211_wext_siwrate(struct net_device *dev,

 	return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);

-int cfg80211_wext_giwrate(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_param *rate, char *extra)
+static int cfg80211_wext_giwrate(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rate, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1308,10 +1297,9 @@ int cfg80211_wext_giwrate(struct net_device *dev,

 	return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate);

 /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
1314struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) 1302static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1315{ 1303{
1316 struct wireless_dev *wdev = dev->ieee80211_ptr; 1304 struct wireless_dev *wdev = dev->ieee80211_ptr;
1317 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1305 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1376,11 +1364,10 @@ struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1376 1364
1377 return &wstats; 1365 return &wstats;
1378} 1366}
1379EXPORT_SYMBOL_GPL(cfg80211_wireless_stats);
1380 1367
1381int cfg80211_wext_siwap(struct net_device *dev, 1368static int cfg80211_wext_siwap(struct net_device *dev,
1382 struct iw_request_info *info, 1369 struct iw_request_info *info,
1383 struct sockaddr *ap_addr, char *extra) 1370 struct sockaddr *ap_addr, char *extra)
1384{ 1371{
1385 struct wireless_dev *wdev = dev->ieee80211_ptr; 1372 struct wireless_dev *wdev = dev->ieee80211_ptr;
1386 1373
@@ -1395,11 +1382,10 @@ int cfg80211_wext_siwap(struct net_device *dev,
1395 return -EOPNOTSUPP; 1382 return -EOPNOTSUPP;
1396 } 1383 }
1397} 1384}
1398EXPORT_SYMBOL_GPL(cfg80211_wext_siwap);
1399 1385
1400int cfg80211_wext_giwap(struct net_device *dev, 1386static int cfg80211_wext_giwap(struct net_device *dev,
1401 struct iw_request_info *info, 1387 struct iw_request_info *info,
1402 struct sockaddr *ap_addr, char *extra) 1388 struct sockaddr *ap_addr, char *extra)
1403{ 1389{
1404 struct wireless_dev *wdev = dev->ieee80211_ptr; 1390 struct wireless_dev *wdev = dev->ieee80211_ptr;
1405 1391
@@ -1414,11 +1400,10 @@ int cfg80211_wext_giwap(struct net_device *dev,
1414 return -EOPNOTSUPP; 1400 return -EOPNOTSUPP;
1415 } 1401 }
1416} 1402}
1417EXPORT_SYMBOL_GPL(cfg80211_wext_giwap);
1418 1403
1419int cfg80211_wext_siwessid(struct net_device *dev, 1404static int cfg80211_wext_siwessid(struct net_device *dev,
1420 struct iw_request_info *info, 1405 struct iw_request_info *info,
1421 struct iw_point *data, char *ssid) 1406 struct iw_point *data, char *ssid)
1422{ 1407{
1423 struct wireless_dev *wdev = dev->ieee80211_ptr; 1408 struct wireless_dev *wdev = dev->ieee80211_ptr;
1424 1409
@@ -1431,11 +1416,10 @@ int cfg80211_wext_siwessid(struct net_device *dev,
1431 return -EOPNOTSUPP; 1416 return -EOPNOTSUPP;
1432 } 1417 }
1433} 1418}
1434EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid);
1435 1419
1436int cfg80211_wext_giwessid(struct net_device *dev, 1420static int cfg80211_wext_giwessid(struct net_device *dev,
1437 struct iw_request_info *info, 1421 struct iw_request_info *info,
1438 struct iw_point *data, char *ssid) 1422 struct iw_point *data, char *ssid)
1439{ 1423{
1440 struct wireless_dev *wdev = dev->ieee80211_ptr; 1424 struct wireless_dev *wdev = dev->ieee80211_ptr;
1441 1425
@@ -1451,11 +1435,10 @@ int cfg80211_wext_giwessid(struct net_device *dev,
1451 return -EOPNOTSUPP; 1435 return -EOPNOTSUPP;
1452 } 1436 }
1453} 1437}
1454EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
1455 1438
1456int cfg80211_wext_siwpmksa(struct net_device *dev, 1439static int cfg80211_wext_siwpmksa(struct net_device *dev,
1457 struct iw_request_info *info, 1440 struct iw_request_info *info,
1458 struct iw_point *data, char *extra) 1441 struct iw_point *data, char *extra)
1459{ 1442{
1460 struct wireless_dev *wdev = dev->ieee80211_ptr; 1443 struct wireless_dev *wdev = dev->ieee80211_ptr;
1461 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1444 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1493,7 +1476,6 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
1493 return -EOPNOTSUPP; 1476 return -EOPNOTSUPP;
1494 } 1477 }
1495} 1478}
1496EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa);
1497 1479
1498static const iw_handler cfg80211_handlers[] = { 1480static const iw_handler cfg80211_handlers[] = {
1499 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, 1481 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
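A note on the hunks above: every cfg80211 wext handler becomes static and loses its EXPORT_SYMBOL_GPL, so the cfg80211_handlers array is now the only way into them; the wext core dispatches ioctls through such a table rather than through exported symbols. A minimal sketch of that dispatch pattern follows, with hypothetical my_* names (the iw_handler typedef, IW_IOCTL_IDX and struct iw_handler_def are the real wext interfaces):

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/iw_handler.h>

static int my_giwname(struct net_device *dev, struct iw_request_info *info,
		      union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "IEEE 802.11");	/* fits in IFNAMSIZ */
	return 0;
}

/* Indexed by ioctl number relative to SIOCIWFIRST. */
static const iw_handler my_handlers[] = {
	[IW_IOCTL_IDX(SIOCGIWNAME)] = my_giwname,
};

static const struct iw_handler_def my_handler_def = {
	.standard	= my_handlers,
	.num_standard	= ARRAY_SIZE(my_handlers),
};

/* Hooked up at device setup: dev->wireless_handlers = &my_handler_def; */

Keeping the handlers static shrinks the exported ABI and lets the compiler see every caller of each function.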
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 20b3daef6964..5d766b0118e8 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -42,6 +42,14 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 			       struct iw_request_info *info,
 			       struct iw_point *data, char *ssid);
 
+int cfg80211_wext_siwmlme(struct net_device *dev,
+			  struct iw_request_info *info,
+			  struct iw_point *data, char *extra);
+int cfg80211_wext_siwgenie(struct net_device *dev,
+			   struct iw_request_info *info,
+			   struct iw_point *data, char *extra);
+
+
 int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
 
 
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index fdbc23c10d8c..0af7f54e4f61 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/wireless.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include <net/iw_handler.h>
 #include <net/netlink.h>
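The added #include <linux/export.h> lines in this and several of the following files track the module.h disentangling: the EXPORT_SYMBOL machinery lives in the lighter <linux/export.h>, and files that export symbols are expected to include it directly instead of inheriting it through <linux/module.h>. A one-file sketch of the convention (example_helper is a hypothetical symbol):

/* Any file using EXPORT_SYMBOL*() now pulls in <linux/export.h> itself. */
#include <linux/export.h>

int example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_helper);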
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 6fffe62d7c25..326750b99151 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -5,10 +5,12 @@
  * Copyright (C) 2009 Intel Corporation. All rights reserved.
  */
 
+#include <linux/export.h>
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
 #include "wext-compat.h"
 #include "nl80211.h"
 
@@ -365,7 +367,6 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
 	wdev_unlock(wdev);
 	return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie);
 
 int cfg80211_wext_siwmlme(struct net_device *dev,
 			  struct iw_request_info *info,
@@ -402,4 +403,3 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 6dcfe65a2d1a..5d643a548feb 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -10,6 +10,7 @@
 #include <linux/wireless.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/export.h>
 #include <net/iw_handler.h>
 #include <net/arp.h>
 #include <net/wext.h>
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5f03e4ea65bf..3e16c6abde4f 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1261,14 +1261,19 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct x25_sock *x25 = x25_sk(sk);
 	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
 	size_t copied;
-	int qbit, header_len = x25->neighbour->extended ?
-		X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
-
+	int qbit, header_len;
 	struct sk_buff *skb;
 	unsigned char *asmptr;
 	int rc = -ENOTCONN;
 
 	lock_sock(sk);
+
+	if (x25->neighbour == NULL)
+		goto out;
+
+	header_len = x25->neighbour->extended ?
+		X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
 	/*
 	 * This works for seqpacket too. The receiver has ordered the queue for
 	 * us! We do one quick check first though
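The recvmsg hunk above closes a NULL dereference: the old code computed header_len from x25->neighbour in the variable's initializer, before the socket lock was taken and before the connection state was checked, so recvmsg on an unconnected X.25 socket could dereference NULL. The new code defers the computation until after lock_sock() and bails out when neighbour is NULL. The same shape as a standalone sketch, with hypothetical types (the illustrative 4 and 3 stand in for X25_EXT_MIN_LEN and X25_STD_MIN_LEN):

#include <errno.h>
#include <stddef.h>

struct neigh { int extended; };
struct x25_conn { struct neigh *neighbour; };

static int header_len_of(const struct x25_conn *c)
{
	int header_len;

	if (c->neighbour == NULL)
		return -ENOTCONN;	/* validate the pointer first... */

	header_len = c->neighbour->extended ? 4 : 3;	/* ...then use it */
	return header_len;
}

The general rule the fix follows: never fold a pointer dereference into an initializer that runs before the pointer has been validated under the proper lock.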
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 7ff373792324..2ffde4631ae2 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/x25.h>
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index fc91ad7ee26e..e5246fbe36c4 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -70,26 +70,29 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 
 	while ((scratch += len, dlen -= len) > 0) {
 		skb_frag_t *frag;
+		struct page *page;
 
 		err = -EMSGSIZE;
 		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
 			goto out;
 
 		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
-		frag->page = alloc_page(GFP_ATOMIC);
+		page = alloc_page(GFP_ATOMIC);
 
 		err = -ENOMEM;
-		if (!frag->page)
+		if (!page)
 			goto out;
 
+		__skb_frag_set_page(frag, page);
+
 		len = PAGE_SIZE;
 		if (dlen < len)
 			len = dlen;
 
-		memcpy(page_address(frag->page), scratch, len);
-
 		frag->page_offset = 0;
-		frag->size = len;
+		skb_frag_size_set(frag, len);
+		memcpy(skb_frag_address(frag), scratch, len);
+
 		skb->truesize += len;
 		skb->data_len += len;
 		skb->len += len;
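The ipcomp hunk converts open-coded skb_frag_t field pokes (frag->page, frag->size) to the frag accessor API, which hides the layout of skb_frag_t from callers. Below is a hedged sketch of appending one page of data to an skb with those accessors, assuming the caller has already verified nr_frags < MAX_SKB_FRAGS and that len <= PAGE_SIZE; append_page_frag is a hypothetical helper, not a kernel API:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int append_page_frag(struct sk_buff *skb, const void *src,
			    unsigned int len)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	__skb_frag_set_page(frag, page);	/* frag takes the page reference */
	frag->page_offset = 0;
	skb_frag_size_set(frag, len);		/* instead of frag->size = len */
	memcpy(skb_frag_address(frag), src, len);

	skb_shinfo(skb)->nr_frags++;
	skb->truesize += len;			/* account the added memory */
	skb->data_len += len;			/* nonlinear byte count */
	skb->len += len;
	return 0;
}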
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 58d9ae005597..d0a1af8ed584 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -12,6 +12,7 @@
  */
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/export.h>
 #include <net/snmp.h>
 #include <net/xfrm.h>
 
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index b11ea692bd7d..39e02c54ed26 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -18,6 +18,7 @@
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
 
+#include <linux/export.h>
 #include <net/xfrm.h>
 
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
@@ -203,8 +204,6 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
 	if (!replay_esn->replay_window)
 		return 0;
 
-	pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
 	if (unlikely(seq == 0))
 		goto err;
 
@@ -216,19 +215,18 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
 			goto err;
 	}
 
-	if (pos >= diff) {
-		bitnr = (pos - diff) % replay_esn->replay_window;
-		nr = bitnr >> 5;
-		bitnr = bitnr & 0x1F;
-		if (replay_esn->bmp[nr] & (1U << bitnr))
-			goto err_replay;
-	} else {
-		bitnr = replay_esn->replay_window - (diff - pos);
-		nr = bitnr >> 5;
-		bitnr = bitnr & 0x1F;
-		if (replay_esn->bmp[nr] & (1U << bitnr))
-			goto err_replay;
-	}
+	pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+	if (pos >= diff)
+		bitnr = (pos - diff) % replay_esn->replay_window;
+	else
+		bitnr = replay_esn->replay_window - (diff - pos);
+
+	nr = bitnr >> 5;
+	bitnr = bitnr & 0x1F;
+	if (replay_esn->bmp[nr] & (1U << bitnr))
+		goto err_replay;
+
 	return 0;
 
 err_replay:
@@ -259,39 +257,27 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
 				bitnr = bitnr & 0x1F;
 				replay_esn->bmp[nr] &= ~(1U << bitnr);
 			}
-
-			bitnr = (pos + diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
 		} else {
 			nr = (replay_esn->replay_window - 1) >> 5;
 			for (i = 0; i <= nr; i++)
 				replay_esn->bmp[i] = 0;
-
-			bitnr = (pos + diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
 		}
 
+		bitnr = (pos + diff) % replay_esn->replay_window;
 		replay_esn->seq = seq;
 	} else {
 		diff = replay_esn->seq - seq;
 
-		if (pos >= diff) {
+		if (pos >= diff)
 			bitnr = (pos - diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
-		} else {
+		else
 			bitnr = replay_esn->replay_window - (diff - pos);
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
-		}
 	}
 
+	nr = bitnr >> 5;
+	bitnr = bitnr & 0x1F;
+	replay_esn->bmp[nr] |= (1U << bitnr);
+
 	if (xfrm_aevent_is_on(xs_net(x)))
 		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
 }
@@ -390,8 +376,6 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
 	if (!wsize)
 		return 0;
 
-	pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
 	if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
 		     (replay_esn->seq < replay_esn->replay_window - 1)))
 		goto err;
@@ -415,19 +399,18 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
 			goto err;
 	}
 
-	if (pos >= diff) {
-		bitnr = (pos - diff) % replay_esn->replay_window;
-		nr = bitnr >> 5;
-		bitnr = bitnr & 0x1F;
-		if (replay_esn->bmp[nr] & (1U << bitnr))
-			goto err_replay;
-	} else {
-		bitnr = replay_esn->replay_window - (diff - pos);
-		nr = bitnr >> 5;
-		bitnr = bitnr & 0x1F;
-		if (replay_esn->bmp[nr] & (1U << bitnr))
-			goto err_replay;
-	}
+	pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+	if (pos >= diff)
+		bitnr = (pos - diff) % replay_esn->replay_window;
+	else
+		bitnr = replay_esn->replay_window - (diff - pos);
+
+	nr = bitnr >> 5;
+	bitnr = bitnr & 0x1F;
+	if (replay_esn->bmp[nr] & (1U << bitnr))
+		goto err_replay;
+
 	return 0;
 
 err_replay:
@@ -465,22 +448,13 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 				bitnr = bitnr & 0x1F;
 				replay_esn->bmp[nr] &= ~(1U << bitnr);
 			}
-
-			bitnr = (pos + diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
 		} else {
 			nr = (replay_esn->replay_window - 1) >> 5;
 			for (i = 0; i <= nr; i++)
 				replay_esn->bmp[i] = 0;
-
-			bitnr = (pos + diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
 		}
 
+		bitnr = (pos + diff) % replay_esn->replay_window;
 		replay_esn->seq = seq;
 
 		if (unlikely(wrap > 0))
@@ -488,19 +462,16 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 	} else {
 		diff = replay_esn->seq - seq;
 
-		if (pos >= diff) {
+		if (pos >= diff)
 			bitnr = (pos - diff) % replay_esn->replay_window;
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
-		} else {
+		else
 			bitnr = replay_esn->replay_window - (diff - pos);
-			nr = bitnr >> 5;
-			bitnr = bitnr & 0x1F;
-			replay_esn->bmp[nr] |= (1U << bitnr);
-		}
 	}
 
+	nr = bitnr >> 5;
+	bitnr = bitnr & 0x1F;
+	replay_esn->bmp[nr] |= (1U << bitnr);
+
 	if (xfrm_aevent_is_on(xs_net(x)))
 		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
 }
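All four replay-window hunks make the same two moves: pos is computed only after the cheap rejection tests (seq == 0, window overflow), and the duplicated nr/bitnr arithmetic is hoisted below the if/else so the bit index is computed once. For a window of w bits stored in u32 words, bitnr selects a bit position modulo w, nr = bitnr >> 5 selects the word, and bitnr & 0x1F the bit inside that word. A self-contained userspace sketch of the consolidated test (WINDOW, bmp and seen_before are illustrative names, not kernel symbols):

#include <stdio.h>

#define WINDOW 64U

static unsigned int bmp[WINDOW / 32];	/* one bit per sequence slot */

static int seen_before(unsigned int pos, unsigned int diff)
{
	unsigned int bitnr, nr;

	if (pos >= diff)
		bitnr = (pos - diff) % WINDOW;
	else
		bitnr = WINDOW - (diff - pos);

	nr = bitnr >> 5;	/* which 32-bit word */
	bitnr &= 0x1F;		/* which bit in that word */
	return !!(bmp[nr] & (1U << bitnr));
}

int main(void)
{
	bmp[0] = 1U << 3;			/* mark slot 3 as seen */
	printf("%d\n", seen_before(4, 1));	/* maps to bit 3 -> prints 1 */
	return 0;
}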
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0256b8a0a7cf..d0a42df5160e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
 	if (nlsk == NULL)
 		return -ENOMEM;
 	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
-	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
+	RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
 	return 0;
 }
 
@@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
 {
 	struct net *net;
 	list_for_each_entry(net, net_exit_list, exit_list)
-		rcu_assign_pointer(net->xfrm.nlsk, NULL);
+		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
 	synchronize_net();
 	list_for_each_entry(net, net_exit_list, exit_list)
 		netlink_kernel_release(net->xfrm.nlsk_stash);
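The xfrm_user hunks swap rcu_assign_pointer() for RCU_INIT_POINTER(). rcu_assign_pointer() includes a write barrier so that a reader who sees the new pointer also sees the pointee's initialization; that barrier is unnecessary when no reader can observe the pointer yet (a net namespace still being set up) or when the value stored is NULL, since there is then nothing to order. A sketch of when each primitive applies (struct cfg, active_cfg and the two functions are hypothetical):

#include <linux/rcupdate.h>

struct cfg {
	int val;
};

static struct cfg __rcu *active_cfg;

/* Readers may already be running: order init before publication. */
static void publish(struct cfg *c)
{
	c->val = 42;
	rcu_assign_pointer(active_cfg, c);
}

/* Storing NULL publishes nothing to order against: no barrier needed. */
static void retire(void)
{
	RCU_INIT_POINTER(active_cfg, NULL);
	synchronize_rcu();	/* wait out readers of the old value */
}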