author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-08-13 15:39:10 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-08-13 15:39:10 -0400
commit    0980bd9cd32de2fef7eaa2858345c49d14498625 (patch)
tree      41f5f823d0569a81b22037e79c22d823933a63f1 /net
parent    78821b2c0299ab807d483802f09897728b93bce0 (diff)
parent    0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff)
Merge commit 'v3.6-rc1' into linux-next
* commit 'v3.6-rc1': (9532 commits)
  Linux 3.6-rc1
  mm: remove node_start_pfn checking in new WARN_ON for now
  ARM: mmp: add missing irqs.h
  arm: mvebu: fix typo in .dtsi comment for Armada XP SoCs
  ARM: PRIMA2: delete redundant codes to restore LATCHED when timer resumes
  libceph: fix crypto key null deref, memory leak
  ceph: simplify+fix atomic_open
  sh: explicitly include sh_dma.h in setup-sh7722.c
  um: Add arch/x86/um to MAINTAINERS
  um: pass siginfo to guest process
  um: fix ubd_file_size for read-only files
  md/dm-raid: DM_RAID should select MD_RAID10
  md/raid1: submit IO from originating thread instead of md thread.
  raid5: raid5d handle stripe in batch way
  raid5: make_request use batch stripe release
  um: pull interrupt_end() into userspace()
  um: split syscall_trace(), pass pt_regs to it
  um: switch UPT_SET_RETURN_VALUE and regs_return_value to pt_regs
  MIPS: Loongson 2: Sort out clock managment.
  locks: remove unused lm_release_private
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 5
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/9p/protocol.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 4
-rw-r--r--  net/appletalk/ddp.c | 8
-rw-r--r--  net/atm/lec.c | 8
-rw-r--r--  net/atm/pppoatm.c | 2
-rw-r--r--  net/ax25/af_ax25.c | 1
-rw-r--r--  net/ax25/ax25_addr.c | 6
-rw-r--r--  net/ax25/ax25_out.c | 2
-rw-r--r--  net/ax25/ax25_route.c | 2
-rw-r--r--  net/batman-adv/Makefile | 4
-rw-r--r--  net/batman-adv/bat_algo.h | 6
-rw-r--r--  net/batman-adv/bat_debugfs.c | 388
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 1050
-rw-r--r--  net/batman-adv/bat_sysfs.c | 735
-rw-r--r--  net/batman-adv/bitarray.c | 65
-rw-r--r--  net/batman-adv/bitarray.h | 24
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 810
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 75
-rw-r--r--  net/batman-adv/debugfs.c | 409
-rw-r--r--  net/batman-adv/debugfs.h (renamed from net/batman-adv/bat_debugfs.h) | 15
-rw-r--r--  net/batman-adv/gateway_client.c | 354
-rw-r--r--  net/batman-adv/gateway_client.h | 32
-rw-r--r--  net/batman-adv/gateway_common.c | 61
-rw-r--r--  net/batman-adv/gateway_common.h | 23
-rw-r--r--  net/batman-adv/hard-interface.c | 342
-rw-r--r--  net/batman-adv/hard-interface.h | 51
-rw-r--r--  net/batman-adv/hash.c | 25
-rw-r--r--  net/batman-adv/hash.h | 78
-rw-r--r--  net/batman-adv/icmp_socket.c | 180
-rw-r--r--  net/batman-adv/icmp_socket.h | 14
-rw-r--r--  net/batman-adv/main.c | 276
-rw-r--r--  net/batman-adv/main.h | 257
-rw-r--r--  net/batman-adv/originator.c | 337
-rw-r--r--  net/batman-adv/originator.h | 57
-rw-r--r--  net/batman-adv/packet.h | 181
-rw-r--r--  net/batman-adv/ring_buffer.c | 13
-rw-r--r--  net/batman-adv/ring_buffer.h | 9
-rw-r--r--  net/batman-adv/routing.c | 689
-rw-r--r--  net/batman-adv/routing.h | 64
-rw-r--r--  net/batman-adv/send.c | 237
-rw-r--r--  net/batman-adv/send.h | 23
-rw-r--r--  net/batman-adv/soft-interface.c | 304
-rw-r--r--  net/batman-adv/soft-interface.h | 17
-rw-r--r--  net/batman-adv/sysfs.c | 787
-rw-r--r--  net/batman-adv/sysfs.h (renamed from net/batman-adv/bat_sysfs.h) | 24
-rw-r--r--  net/batman-adv/translation-table.c | 1669
-rw-r--r--  net/batman-adv/translation-table.h | 75
-rw-r--r--  net/batman-adv/types.h | 183
-rw-r--r--  net/batman-adv/unicast.c | 179
-rw-r--r--  net/batman-adv/unicast.h | 34
-rw-r--r--  net/batman-adv/vis.c | 728
-rw-r--r--  net/batman-adv/vis.h | 26
-rw-r--r--  net/bluetooth/Makefile | 3
-rw-r--r--  net/bluetooth/a2mp.c | 568
-rw-r--r--  net/bluetooth/af_bluetooth.c | 14
-rw-r--r--  net/bluetooth/bnep/core.c | 21
-rw-r--r--  net/bluetooth/bnep/netdev.c | 16
-rw-r--r--  net/bluetooth/bnep/sock.c | 18
-rw-r--r--  net/bluetooth/hci_conn.c | 143
-rw-r--r--  net/bluetooth/hci_core.c | 265
-rw-r--r--  net/bluetooth/hci_event.c | 525
-rw-r--r--  net/bluetooth/hci_sock.c | 59
-rw-r--r--  net/bluetooth/hci_sysfs.c | 99
-rw-r--r--  net/bluetooth/hidp/Kconfig | 2
-rw-r--r--  net/bluetooth/hidp/core.c | 26
-rw-r--r--  net/bluetooth/hidp/sock.c | 16
-rw-r--r--  net/bluetooth/l2cap_core.c | 2245
-rw-r--r--  net/bluetooth/l2cap_sock.c | 130
-rw-r--r--  net/bluetooth/lib.c | 7
-rw-r--r--  net/bluetooth/mgmt.c | 151
-rw-r--r--  net/bluetooth/rfcomm/core.c | 32
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 21
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 9
-rw-r--r--  net/bluetooth/sco.c | 43
-rw-r--r--  net/bluetooth/smp.c | 18
-rw-r--r--  net/bridge/br_device.c | 9
-rw-r--r--  net/bridge/br_if.c | 1
-rw-r--r--  net/bridge/br_multicast.c | 11
-rw-r--r--  net/bridge/br_netfilter.c | 77
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/bridge/br_sysfs_if.c | 6
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 29
-rw-r--r--  net/caif/caif_dev.c | 13
-rw-r--r--  net/caif/caif_socket.c | 3
-rw-r--r--  net/caif/cfctrl.c | 17
-rw-r--r--  net/can/af_can.c | 126
-rw-r--r--  net/can/af_can.h | 3
-rw-r--r--  net/can/gw.c | 90
-rw-r--r--  net/can/proc.c | 3
-rw-r--r--  net/can/raw.c | 53
-rw-r--r--  net/ceph/ceph_common.c | 32
-rw-r--r--  net/ceph/crush/mapper.c | 13
-rw-r--r--  net/ceph/crypto.c | 1
-rw-r--r--  net/ceph/crypto.h | 3
-rw-r--r--  net/ceph/messenger.c | 941
-rw-r--r--  net/ceph/mon_client.c | 84
-rw-r--r--  net/ceph/msgpool.c | 7
-rw-r--r--  net/ceph/osd_client.c | 89
-rw-r--r--  net/ceph/osdmap.c | 59
-rw-r--r--  net/ceph/pagelist.c | 14
-rw-r--r--  net/compat.c | 4
-rw-r--r--  net/core/datagram.c | 1
-rw-r--r--  net/core/dev.c | 138
-rw-r--r--  net/core/dst.c | 25
-rw-r--r--  net/core/ethtool.c | 45
-rw-r--r--  net/core/fib_rules.c | 4
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/flow_dissector.c | 5
-rw-r--r--  net/core/neighbour.c | 31
-rw-r--r--  net/core/net-sysfs.c | 74
-rw-r--r--  net/core/net_namespace.c | 4
-rw-r--r--  net/core/netpoll.c | 10
-rw-r--r--  net/core/netprio_cgroup.c | 133
-rw-r--r--  net/core/rtnetlink.c | 74
-rw-r--r--  net/core/scm.c | 22
-rw-r--r--  net/core/skbuff.c | 198
-rw-r--r--  net/core/sock.c | 74
-rw-r--r--  net/core/sock_diag.c | 42
-rw-r--r--  net/dcb/dcbnl.c | 1168
-rw-r--r--  net/dccp/ackvec.h | 7
-rw-r--r--  net/dccp/ccid.c | 1
-rw-r--r--  net/dccp/ccids/ccid3.c | 8
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 1
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 3
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 2
-rw-r--r--  net/dccp/dccp.h | 1
-rw-r--r--  net/dccp/feat.c | 10
-rw-r--r--  net/dccp/input.c | 1
-rw-r--r--  net/dccp/ipv4.c | 24
-rw-r--r--  net/dccp/ipv6.c | 61
-rw-r--r--  net/dccp/options.c | 1
-rw-r--r--  net/dccp/output.c | 1
-rw-r--r--  net/decnet/dn_fib.c | 8
-rw-r--r--  net/decnet/dn_neigh.c | 8
-rw-r--r--  net/decnet/dn_nsp_out.c | 2
-rw-r--r--  net/decnet/dn_route.c | 144
-rw-r--r--  net/decnet/dn_table.c | 76
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 30
-rw-r--r--  net/ethernet/Makefile | 2
-rw-r--r--  net/ethernet/eth.c | 5
-rw-r--r--  net/ieee802154/6lowpan.c | 251
-rw-r--r--  net/ieee802154/dgram.c | 12
-rw-r--r--  net/ieee802154/netlink.c | 4
-rw-r--r--  net/ieee802154/nl-mac.c | 2
-rw-r--r--  net/ieee802154/nl-phy.c | 2
-rw-r--r--  net/ipv4/Kconfig | 11
-rw-r--r--  net/ipv4/Makefile | 5
-rw-r--r--  net/ipv4/af_inet.c | 75
-rw-r--r--  net/ipv4/ah4.c | 17
-rw-r--r--  net/ipv4/arp.c | 6
-rw-r--r--  net/ipv4/cipso_ipv4.c | 6
-rw-r--r--  net/ipv4/devinet.c | 5
-rw-r--r--  net/ipv4/esp4.c | 17
-rw-r--r--  net/ipv4/fib_frontend.c | 131
-rw-r--r--  net/ipv4/fib_rules.c | 39
-rw-r--r--  net/ipv4/fib_semantics.c | 80
-rw-r--r--  net/ipv4/fib_trie.c | 60
-rw-r--r--  net/ipv4/icmp.c | 191
-rw-r--r--  net/ipv4/inet_connection_sock.c | 53
-rw-r--r--  net/ipv4/inet_diag.c | 146
-rw-r--r--  net/ipv4/inet_fragment.c | 2
-rw-r--r--  net/ipv4/inetpeer.c | 99
-rw-r--r--  net/ipv4/ip_fragment.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 25
-rw-r--r--  net/ipv4/ip_input.c | 30
-rw-r--r--  net/ipv4/ip_options.c | 29
-rw-r--r--  net/ipv4/ip_output.c | 93
-rw-r--r--  net/ipv4/ip_sockglue.c | 12
-rw-r--r--  net/ipv4/ip_vti.c | 956
-rw-r--r--  net/ipv4/ipcomp.c | 17
-rw-r--r--  net/ipv4/ipip.c | 28
-rw-r--r--  net/ipv4/ipmr.c | 41
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 23
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 172
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 81
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_amanda.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 13
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_tftp.c | 4
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/proc.c | 7
-rw-r--r--  net/ipv4/protocol.c | 8
-rw-r--r--  net/ipv4/raw.c | 5
-rw-r--r--  net/ipv4/route.c | 2229
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 43
-rw-r--r--  net/ipv4/tcp.c | 77
-rw-r--r--  net/ipv4/tcp_cong.c | 5
-rw-r--r--  net/ipv4/tcp_fastopen.c | 11
-rw-r--r--  net/ipv4/tcp_input.c | 398
-rw-r--r--  net/ipv4/tcp_ipv4.c | 181
-rw-r--r--  net/ipv4/tcp_metrics.c | 745
-rw-r--r--  net/ipv4/tcp_minisocks.c | 61
-rw-r--r--  net/ipv4/tcp_output.c | 353
-rw-r--r--  net/ipv4/tcp_timer.c | 70
-rw-r--r--  net/ipv4/udp.c | 9
-rw-r--r--  net/ipv4/udp_diag.c | 10
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 68
-rw-r--r--  net/ipv4/xfrm4_policy.c | 34
-rw-r--r--  net/ipv6/addrconf.c | 21
-rw-r--r--  net/ipv6/ah6.c | 11
-rw-r--r--  net/ipv6/esp6.c | 11
-rw-r--r--  net/ipv6/exthdrs.c | 4
-rw-r--r--  net/ipv6/icmp.c | 23
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 103
-rw-r--r--  net/ipv6/ip6_fib.c | 9
-rw-r--r--  net/ipv6/ip6_input.c | 20
-rw-r--r--  net/ipv6/ip6_output.c | 40
-rw-r--r--  net/ipv6/ip6_tunnel.c | 96
-rw-r--r--  net/ipv6/ip6mr.c | 5
-rw-r--r--  net/ipv6/ipcomp6.c | 11
-rw-r--r--  net/ipv6/mcast.c | 3
-rw-r--r--  net/ipv6/ndisc.c | 129
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 131
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 51
-rw-r--r--  net/ipv6/protocol.c | 8
-rw-r--r--  net/ipv6/raw.c | 11
-rw-r--r--  net/ipv6/route.c | 585
-rw-r--r--  net/ipv6/sit.c | 25
-rw-r--r--  net/ipv6/syncookies.c | 5
-rw-r--r--  net/ipv6/tcp_ipv6.c | 239
-rw-r--r--  net/ipv6/udp.c | 13
-rw-r--r--  net/ipv6/xfrm6_policy.c | 26
-rw-r--r--  net/ipx/Makefile | 2
-rw-r--r--  net/ipx/pe2.c (renamed from net/ethernet/pe2.c) | 2
-rw-r--r--  net/irda/af_irda.c | 2
-rw-r--r--  net/irda/irlan/irlan_provider.c | 2
-rw-r--r--  net/irda/irqueue.c | 6
-rw-r--r--  net/iucv/af_iucv.c | 1
-rw-r--r--  net/l2tp/l2tp_core.c | 11
-rw-r--r--  net/l2tp/l2tp_eth.c | 54
-rw-r--r--  net/l2tp/l2tp_netlink.c | 6
-rw-r--r--  net/l2tp/l2tp_ppp.c | 8
-rw-r--r--  net/llc/af_llc.c | 2
-rw-r--r--  net/llc/llc_station.c | 16
-rw-r--r--  net/mac80211/Kconfig | 56
-rw-r--r--  net/mac80211/Makefile | 7
-rw-r--r--  net/mac80211/agg-rx.c | 38
-rw-r--r--  net/mac80211/agg-tx.c | 118
-rw-r--r--  net/mac80211/cfg.c | 737
-rw-r--r--  net/mac80211/chan.c | 4
-rw-r--r--  net/mac80211/debug.h | 170
-rw-r--r--  net/mac80211/debugfs.c | 2
-rw-r--r--  net/mac80211/debugfs_key.c | 16
-rw-r--r--  net/mac80211/debugfs_netdev.c | 49
-rw-r--r--  net/mac80211/driver-ops.h | 39
-rw-r--r--  net/mac80211/driver-trace.c | 9
-rw-r--r--  net/mac80211/ht.c | 10
-rw-r--r--  net/mac80211/ibss.c | 127
-rw-r--r--  net/mac80211/ieee80211_i.h | 139
-rw-r--r--  net/mac80211/iface.c | 325
-rw-r--r--  net/mac80211/key.c | 24
-rw-r--r--  net/mac80211/led.c | 2
-rw-r--r--  net/mac80211/main.c | 48
-rw-r--r--  net/mac80211/mesh.c | 19
-rw-r--r--  net/mac80211/mesh.h | 4
-rw-r--r--  net/mac80211/mesh_hwmp.c | 173
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 34
-rw-r--r--  net/mac80211/mesh_plink.c | 70
-rw-r--r--  net/mac80211/mesh_sync.c | 47
-rw-r--r--  net/mac80211/mlme.c | 401
-rw-r--r--  net/mac80211/offchannel.c | 291
-rw-r--r--  net/mac80211/pm.c | 11
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 10
-rw-r--r--  net/mac80211/rx.c | 135
-rw-r--r--  net/mac80211/scan.c | 123
-rw-r--r--  net/mac80211/sta_info.c | 45
-rw-r--r--  net/mac80211/sta_info.h | 5
-rw-r--r--  net/mac80211/status.c | 48
-rw-r--r--  net/mac80211/tkip.c | 46
-rw-r--r--  net/mac80211/trace.c | 75
-rw-r--r--  net/mac80211/trace.h (renamed from net/mac80211/driver-trace.h) | 80
-rw-r--r--  net/mac80211/tx.c | 95
-rw-r--r--  net/mac80211/util.c | 178
-rw-r--r--  net/mac80211/wme.c | 11
-rw-r--r--  net/mac80211/wme.h | 2
-rw-r--r--  net/mac80211/work.c | 370
-rw-r--r--  net/mac802154/Makefile | 2
-rw-r--r--  net/mac802154/ieee802154_dev.c | 4
-rw-r--r--  net/mac802154/mac802154.h | 9
-rw-r--r--  net/mac802154/mac_cmd.c | 33
-rw-r--r--  net/mac802154/mib.c | 108
-rw-r--r--  net/mac802154/rx.c | 1
-rw-r--r--  net/mac802154/tx.c | 5
-rw-r--r--  net/mac802154/wpan.c | 559
-rw-r--r--  net/netfilter/Kconfig | 21
-rw-r--r--  net/netfilter/Makefile | 3
-rw-r--r--  net/netfilter/core.c | 7
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 12
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 32
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 24
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 19
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 22
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 38
-rw-r--r--  net/netfilter/nf_conntrack_irc.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 407
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 300
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 143
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 81
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 79
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 175
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 163
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 111
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 127
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 32
-rw-r--r--  net/netfilter/nf_conntrack_tftp.c | 8
-rw-r--r--  net/netfilter/nfnetlink.c | 44
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 672
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 13
-rw-r--r--  net/netfilter/nfnetlink_log.c | 29
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c (renamed from net/netfilter/nfnetlink_queue.c) | 95
-rw-r--r--  net/netfilter/nfnetlink_queue_ct.c | 98
-rw-r--r--  net/netfilter/xt_CT.c | 44
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 28
-rw-r--r--  net/netfilter/xt_TPROXY.c | 4
-rw-r--r--  net/netfilter/xt_connlimit.c | 35
-rw-r--r--  net/netfilter/xt_recent.c | 62
-rw-r--r--  net/netfilter/xt_set.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 35
-rw-r--r--  net/netlink/genetlink.c | 14
-rw-r--r--  net/nfc/core.c | 157
-rw-r--r--  net/nfc/hci/command.c | 26
-rw-r--r--  net/nfc/hci/core.c | 137
-rw-r--r--  net/nfc/hci/hci.h | 12
-rw-r--r--  net/nfc/hci/hcp.c | 2
-rw-r--r--  net/nfc/hci/shdlc.c | 44
-rw-r--r--  net/nfc/llcp/commands.c | 54
-rw-r--r--  net/nfc/llcp/llcp.c | 627
-rw-r--r--  net/nfc/llcp/llcp.h | 31
-rw-r--r--  net/nfc/llcp/sock.c | 76
-rw-r--r--  net/nfc/nci/core.c | 23
-rw-r--r--  net/nfc/nci/ntf.c | 15
-rw-r--r--  net/nfc/netlink.c | 104
-rw-r--r--  net/nfc/nfc.h | 12
-rw-r--r--  net/nfc/rawsock.c | 5
-rw-r--r--  net/openvswitch/actions.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 13
-rw-r--r--  net/openvswitch/datapath.h | 2
-rw-r--r--  net/openvswitch/dp_notify.c | 2
-rw-r--r--  net/openvswitch/flow.c | 5
-rw-r--r--  net/openvswitch/flow.h | 2
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 10
-rw-r--r--  net/openvswitch/vport-internal_dev.h | 2
-rw-r--r--  net/openvswitch/vport-netdev.c | 2
-rw-r--r--  net/openvswitch/vport-netdev.h | 2
-rw-r--r--  net/openvswitch/vport.c | 2
-rw-r--r--  net/openvswitch/vport.h | 2
-rw-r--r--  net/packet/af_packet.c | 29
-rw-r--r--  net/phonet/af_phonet.c | 4
-rw-r--r--  net/phonet/datagram.c | 4
-rw-r--r--  net/phonet/pep-gprs.c | 2
-rw-r--r--  net/phonet/pep.c | 2
-rw-r--r--  net/phonet/pn_dev.c | 4
-rw-r--r--  net/phonet/pn_netlink.c | 4
-rw-r--r--  net/phonet/socket.c | 4
-rw-r--r--  net/phonet/sysctl.c | 2
-rw-r--r--  net/rds/page.c | 9
-rw-r--r--  net/rds/recv.c | 3
-rw-r--r--  net/rfkill/core.c | 2
-rw-r--r--  net/rxrpc/ar-error.c | 4
-rw-r--r--  net/rxrpc/ar-output.c | 2
-rw-r--r--  net/rxrpc/ar-peer.c | 2
-rw-r--r--  net/sched/Kconfig | 20
-rw-r--r--  net/sched/Makefile | 2
-rw-r--r--  net/sched/act_api.c | 59
-rw-r--r--  net/sched/cls_api.c | 12
-rw-r--r--  net/sched/cls_route.c | 2
-rw-r--r--  net/sched/em_canid.c | 240
-rw-r--r--  net/sched/em_ipset.c | 135
-rw-r--r--  net/sched/em_meta.c | 2
-rw-r--r--  net/sched/sch_api.c | 24
-rw-r--r--  net/sched/sch_netem.c | 51
-rw-r--r--  net/sched/sch_sfb.c | 2
-rw-r--r--  net/sched/sch_teql.c | 47
-rw-r--r--  net/sctp/associola.c | 42
-rw-r--r--  net/sctp/input.c | 27
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/output.c | 86
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/protocol.c | 4
-rw-r--r--  net/sctp/sm_make_chunk.c | 18
-rw-r--r--  net/sctp/sm_sideeffect.c | 35
-rw-r--r--  net/sctp/socket.c | 119
-rw-r--r--  net/sctp/sysctl.c | 9
-rw-r--r--  net/sctp/transport.c | 22
-rw-r--r--  net/sctp/tsnmap.c | 6
-rw-r--r--  net/sctp/ulpevent.c | 6
-rw-r--r--  net/sctp/ulpqueue.c | 2
-rw-r--r--  net/socket.c | 8
-rw-r--r--  net/sunrpc/Kconfig | 5
-rw-r--r--  net/sunrpc/auth.c | 54
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 1
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 20
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 9
-rw-r--r--  net/sunrpc/cache.c | 5
-rw-r--r--  net/sunrpc/clnt.c | 14
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 4
-rw-r--r--  net/sunrpc/sched.c | 14
-rw-r--r--  net/sunrpc/svcauth_unix.c | 22
-rw-r--r--  net/sunrpc/svcsock.c | 12
-rw-r--r--  net/sunrpc/xdr.c | 139
-rw-r--r--  net/sunrpc/xprt.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 3
-rw-r--r--  net/sunrpc/xprtsock.c | 56
-rw-r--r--  net/tipc/Kconfig | 25
-rw-r--r--  net/tipc/bcast.c | 75
-rw-r--r--  net/tipc/bearer.c | 69
-rw-r--r--  net/tipc/bearer.h | 4
-rw-r--r--  net/tipc/config.c | 41
-rw-r--r--  net/tipc/core.c | 18
-rw-r--r--  net/tipc/core.h | 65
-rw-r--r--  net/tipc/discover.c | 10
-rw-r--r--  net/tipc/handler.c | 4
-rw-r--r--  net/tipc/link.c | 326
-rw-r--r--  net/tipc/link.h | 63
-rw-r--r--  net/tipc/log.c | 302
-rw-r--r--  net/tipc/log.h | 66
-rw-r--r--  net/tipc/msg.c | 242
-rw-r--r--  net/tipc/name_distr.c | 25
-rw-r--r--  net/tipc/name_table.c | 142
-rw-r--r--  net/tipc/net.c | 8
-rw-r--r--  net/tipc/netlink.c | 2
-rw-r--r--  net/tipc/node.c | 22
-rw-r--r--  net/tipc/node_subscr.c | 3
-rw-r--r--  net/tipc/port.c | 77
-rw-r--r--  net/tipc/port.h | 1
-rw-r--r--  net/tipc/ref.c | 10
-rw-r--r--  net/tipc/socket.c | 17
-rw-r--r--  net/tipc/subscr.c | 14
-rw-r--r--  net/unix/af_unix.c | 203
-rw-r--r--  net/unix/diag.c | 115
-rw-r--r--  net/wanrouter/wanmain.c | 51
-rw-r--r--  net/wireless/Kconfig | 35
-rw-r--r--  net/wireless/Makefile | 2
-rw-r--r--  net/wireless/ap.c | 46
-rw-r--r--  net/wireless/chan.c | 107
-rw-r--r--  net/wireless/core.c | 134
-rw-r--r--  net/wireless/core.h | 106
-rw-r--r--  net/wireless/ibss.c | 11
-rw-r--r--  net/wireless/mesh.c | 121
-rw-r--r--  net/wireless/mlme.c | 64
-rw-r--r--  net/wireless/nl80211.c | 1009
-rw-r--r--  net/wireless/nl80211.h | 21
-rw-r--r--  net/wireless/reg.c | 139
-rw-r--r--  net/wireless/reg.h | 8
-rw-r--r--  net/wireless/scan.c | 24
-rw-r--r--  net/wireless/sme.c | 10
-rw-r--r--  net/wireless/util.c | 173
-rw-r--r--  net/wireless/wext-compat.c | 23
-rw-r--r--  net/wireless/wext-sme.c | 10
-rw-r--r--  net/x25/x25_route.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 37
-rw-r--r--  net/xfrm/xfrm_user.c | 401
468 files changed, 25772 insertions, 17811 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;

 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index da1bc9c3cf38..73a2a83ee2da 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -681,10 +681,7 @@ static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *n
 	if (!netpoll)
 		goto out;

-	netpoll->dev = real_dev;
-	strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
-
-	err = __netpoll_setup(netpoll);
+	err = __netpoll_setup(netpoll, real_dev);
 	if (err) {
 		kfree(netpoll);
 		goto out;
diff --git a/net/9p/client.c b/net/9p/client.c
index a170893d70e0..8260f132b32e 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		kernel_buf = 1;
 		indata = data;
 	} else
-		indata = (char *)udata;
+		indata = (__force char *)udata;
 	/*
 	 * response header len is 11
 	 * PDU Header(7) + IO Size (4)
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 9ee48cb30179..3d33ecf13327 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -368,7 +368,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 			const char *sptr = va_arg(ap, const char *);
 			uint16_t len = 0;
 			if (sptr)
-				len = min_t(uint16_t, strlen(sptr),
+				len = min_t(size_t, strlen(sptr),
 					    USHRT_MAX);

 			errcode = p9pdu_writef(pdu, proto_version,
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 5af18d11b518..35b8911b1c8e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
+		BUG_ON(index > limit);
 		sg_set_buf(&sg[index++], data, s);
 		count -= s;
 		data += s;
-		BUG_ON(index > limit);
 	}

 	return index-start;
@@ -212,7 +212,7 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
  * this takes a list of pages.
  * @sg: scatter/gather list to pack into
  * @start: which segment of the sg_list to start at
- * @**pdata: a list of pages to add into sg.
+ * @pdata: a list of pages to add into sg.
  * @nr_pages: number of pages to pack into the scatter/gather list
  * @data: data to pack into scatter/gather list
  * @count: amount of data to pack into the scatter/gather list
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 86852963b7f7..33475291c9c1 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -129,8 +129,8 @@ found:
 
 /**
  * atalk_find_or_insert_socket - Try to find a socket matching ADDR
- * @sk - socket to insert in the list if it is not there already
- * @sat - address to search for
+ * @sk: socket to insert in the list if it is not there already
+ * @sat: address to search for
  *
  * Try to find a socket matching ADDR in the socket list, if found then return
  * it. If not, insert SK into the socket list.
@@ -1066,8 +1066,8 @@ static int atalk_release(struct socket *sock)
 
 /**
  * atalk_pick_and_bind_port - Pick a source port when one is not given
- * @sk - socket to insert into the tables
- * @sat - address to search for
+ * @sk: socket to insert into the tables
+ * @sat: address to search for
  *
  * Pick a source port when one is not given. If we can find a suitable free
  * one, we insert the socket into the tables using it.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a7d172105c99..2e3d942e77f1 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -231,9 +231,11 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
 	if (skb_headroom(skb) < 2) {
 		pr_debug("reallocating skb\n");
 		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
-		kfree_skb(skb);
-		if (skb2 == NULL)
+		if (unlikely(!skb2)) {
+			kfree_skb(skb);
 			return NETDEV_TX_OK;
+		}
+		consume_skb(skb);
 		skb = skb2;
 	}
 	skb_push(skb, 2);
@@ -1602,7 +1604,7 @@ static void lec_arp_expire_vcc(unsigned long data)
 {
 	unsigned long flags;
 	struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
-	struct lec_priv *priv = (struct lec_priv *)to_remove->priv;
+	struct lec_priv *priv = to_remove->priv;

 	del_timer(&to_remove->timer);

diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index ce1e59fdae7b..226dca989448 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -283,7 +283,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
 			kfree_skb(n);
 			goto nospace;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = n;
 		if (skb == NULL)
 			return DROP_PACKET;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 051f7abae66d..779095ded689 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
 	case AX25_P_NETROM:
 		if (ax25_protocol_is_registered(AX25_P_NETROM))
 			return -ESOCKTNOSUPPORT;
+		break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
 	case AX25_P_ROSE:
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 9162409559cf..e7c9b0ea17a1 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -189,8 +189,10 @@ const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
 	digi->ndigi = 0;

 	while (!(buf[-1] & AX25_EBIT)) {
-		if (d >= AX25_MAX_DIGIS) return NULL;	/* Max of 6 digis */
-		if (len < 7) return NULL;	/* Short packet */
+		if (d >= AX25_MAX_DIGIS)
+			return NULL;
+		if (len < AX25_ADDR_LEN)
+			return NULL;

 		memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
 		digi->ndigi = d + 1;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index be8a25e0db65..be2acab9be9d 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -350,7 +350,7 @@ void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
 		if (skb->sk != NULL)
 			skb_set_owner_w(skbn, skb->sk);

-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = skbn;
 	}

diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a65588040b9e..d39097737e38 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -474,7 +474,7 @@ struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
 		if (skb->sk != NULL)
 			skb_set_owner_w(skbn, skb->sk);

-		kfree_skb(skb);
+		consume_skb(skb);

 		skb = skbn;
 	}
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 6d5c1940667d..8676d2b1d574 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,11 +19,10 @@
 #

 obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += bat_debugfs.o
 batman-adv-y += bat_iv_ogm.o
-batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
+batman-adv-y += debugfs.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
@@ -35,6 +34,7 @@ batman-adv-y += ring_buffer.o
 batman-adv-y += routing.o
 batman-adv-y += send.o
 batman-adv-y += soft-interface.o
+batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
 batman-adv-y += unicast.o
 batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 9852a688ba43..a0ba3bff9b36 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -16,12 +15,11 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */

 #ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
 #define _NET_BATMAN_ADV_BAT_ALGO_H_

-int bat_iv_init(void);
+int batadv_iv_init(void);

 #endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
deleted file mode 100644
index 3b588f86d770..000000000000
--- a/net/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-
-#include <linux/debugfs.h>
-
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "soft-interface.h"
-#include "vis.h"
-#include "icmp_socket.h"
-#include "bridge_loop_avoidance.h"
-
-static struct dentry *bat_debugfs;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-#define LOG_BUFF_MASK (log_buff_len-1)
-#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
-
-static int log_buff_len = LOG_BUF_LEN;
-
-static void emit_log_char(struct debug_log *debug_log, char c)
-{
-	LOG_BUFF(debug_log->log_end) = c;
-	debug_log->log_end++;
-
-	if (debug_log->log_end - debug_log->log_start > log_buff_len)
-		debug_log->log_start = debug_log->log_end - log_buff_len;
-}
-
-__printf(2, 3)
-static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
-{
-	va_list args;
-	static char debug_log_buf[256];
-	char *p;
-
-	if (!debug_log)
-		return 0;
-
-	spin_lock_bh(&debug_log->lock);
-	va_start(args, fmt);
-	vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
-	va_end(args);
-
-	for (p = debug_log_buf; *p != 0; p++)
-		emit_log_char(debug_log, *p);
-
-	spin_unlock_bh(&debug_log->lock);
-
-	wake_up(&debug_log->queue_wait);
-
-	return 0;
-}
-
-int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
-{
-	va_list args;
-	char tmp_log_buf[256];
-
-	va_start(args, fmt);
-	vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
-	fdebug_log(bat_priv->debug_log, "[%10u] %s",
-		   jiffies_to_msecs(jiffies), tmp_log_buf);
-	va_end(args);
-
-	return 0;
-}
-
-static int log_open(struct inode *inode, struct file *file)
-{
-	nonseekable_open(inode, file);
-	file->private_data = inode->i_private;
-	inc_module_count();
-	return 0;
-}
-
-static int log_release(struct inode *inode, struct file *file)
-{
-	dec_module_count();
-	return 0;
-}
-
-static ssize_t log_read(struct file *file, char __user *buf,
-			size_t count, loff_t *ppos)
-{
-	struct bat_priv *bat_priv = file->private_data;
-	struct debug_log *debug_log = bat_priv->debug_log;
-	int error, i = 0;
-	char c;
-
-	if ((file->f_flags & O_NONBLOCK) &&
-	    !(debug_log->log_end - debug_log->log_start))
-		return -EAGAIN;
-
-	if (!buf)
-		return -EINVAL;
-
-	if (count == 0)
-		return 0;
-
-	if (!access_ok(VERIFY_WRITE, buf, count))
-		return -EFAULT;
-
-	error = wait_event_interruptible(debug_log->queue_wait,
-				(debug_log->log_start - debug_log->log_end));
-
-	if (error)
-		return error;
-
-	spin_lock_bh(&debug_log->lock);
-
-	while ((!error) && (i < count) &&
-	       (debug_log->log_start != debug_log->log_end)) {
-		c = LOG_BUFF(debug_log->log_start);
-
-		debug_log->log_start++;
-
-		spin_unlock_bh(&debug_log->lock);
-
-		error = __put_user(c, buf);
-
-		spin_lock_bh(&debug_log->lock);
-
-		buf++;
-		i++;
-
-	}
-
-	spin_unlock_bh(&debug_log->lock);
-
-	if (!error)
-		return i;
-
-	return error;
-}
-
-static unsigned int log_poll(struct file *file, poll_table *wait)
-{
-	struct bat_priv *bat_priv = file->private_data;
-	struct debug_log *debug_log = bat_priv->debug_log;
-
-	poll_wait(file, &debug_log->queue_wait, wait);
-
-	if (debug_log->log_end - debug_log->log_start)
-		return POLLIN | POLLRDNORM;
-
-	return 0;
-}
-
-static const struct file_operations log_fops = {
-	.open = log_open,
-	.release = log_release,
-	.read = log_read,
-	.poll = log_poll,
-	.llseek = no_llseek,
-};
-
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
-	struct dentry *d;
-
-	if (!bat_priv->debug_dir)
-		goto err;
-
-	bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
-	if (!bat_priv->debug_log)
-		goto err;
-
-	spin_lock_init(&bat_priv->debug_log->lock);
-	init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
-	d = debugfs_create_file("log", S_IFREG | S_IRUSR,
-				bat_priv->debug_dir, bat_priv, &log_fops);
-	if (!d)
-		goto err;
-
-	return 0;
-
-err:
-	return 1;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
-	kfree(bat_priv->debug_log);
-	bat_priv->debug_log = NULL;
-}
-#else /* CONFIG_BATMAN_ADV_DEBUG */
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
-	bat_priv->debug_log = NULL;
-	return 0;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
-	return;
-}
-#endif
-
-static int bat_algorithms_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, bat_algo_seq_print_text, NULL);
-}
-
-static int originators_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, orig_seq_print_text, net_dev);
-}
-
-static int gateways_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, gw_client_seq_print_text, net_dev);
-}
-
-static int transtable_global_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, tt_global_seq_print_text, net_dev);
-}
-
-#ifdef CONFIG_BATMAN_ADV_BLA
-static int bla_claim_table_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, bla_claim_table_seq_print_text, net_dev);
-}
-#endif
-
-static int transtable_local_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, tt_local_seq_print_text, net_dev);
-}
-
-static int vis_data_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, vis_seq_print_text, net_dev);
-}
-
-struct bat_debuginfo {
-	struct attribute attr;
-	const struct file_operations fops;
-};
-
-#define BAT_DEBUGINFO(_name, _mode, _open)	\
-struct bat_debuginfo bat_debuginfo_##_name = {	\
-	.attr = { .name = __stringify(_name),	\
-		  .mode = _mode, },		\
-	.fops = { .owner = THIS_MODULE,		\
-		  .open = _open,		\
-		  .read	= seq_read,		\
-		  .llseek = seq_lseek,		\
-		  .release = single_release,	\
-		}				\
-};
-
-static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
-static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
-static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
-#ifdef CONFIG_BATMAN_ADV_BLA
-static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
-#endif
-static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
-static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
-
-static struct bat_debuginfo *mesh_debuginfos[] = {
-	&bat_debuginfo_originators,
-	&bat_debuginfo_gateways,
-	&bat_debuginfo_transtable_global,
-#ifdef CONFIG_BATMAN_ADV_BLA
-	&bat_debuginfo_bla_claim_table,
-#endif
-	&bat_debuginfo_transtable_local,
-	&bat_debuginfo_vis_data,
-	NULL,
-};
-
-void debugfs_init(void)
-{
-	struct bat_debuginfo *bat_debug;
-	struct dentry *file;
-
-	bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
-	if (bat_debugfs == ERR_PTR(-ENODEV))
-		bat_debugfs = NULL;
-
-	if (!bat_debugfs)
-		goto out;
-
-	bat_debug = &bat_debuginfo_routing_algos;
-	file = debugfs_create_file(bat_debug->attr.name,
-				   S_IFREG | bat_debug->attr.mode,
-				   bat_debugfs, NULL, &bat_debug->fops);
-	if (!file)
-		pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
-
-out:
-	return;
-}
-
-void debugfs_destroy(void)
-{
-	if (bat_debugfs) {
-		debugfs_remove_recursive(bat_debugfs);
-		bat_debugfs = NULL;
-	}
-}
-
-int debugfs_add_meshif(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct bat_debuginfo **bat_debug;
-	struct dentry *file;
-
-	if (!bat_debugfs)
-		goto out;
-
-	bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
-	if (!bat_priv->debug_dir)
-		goto out;
-
-	bat_socket_setup(bat_priv);
-	debug_log_setup(bat_priv);
-
-	for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
-		file = debugfs_create_file(((*bat_debug)->attr).name,
-					   S_IFREG | ((*bat_debug)->attr).mode,
-					   bat_priv->debug_dir,
-					   dev, &(*bat_debug)->fops);
-		if (!file) {
-			bat_err(dev, "Can't add debugfs file: %s/%s\n",
-				dev->name, ((*bat_debug)->attr).name);
-			goto rem_attr;
-		}
-	}
-
-	return 0;
-rem_attr:
-	debugfs_remove_recursive(bat_priv->debug_dir);
-	bat_priv->debug_dir = NULL;
-out:
-#ifdef CONFIG_DEBUG_FS
-	return -ENOMEM;
-#else
-	return 0;
-#endif /* CONFIG_DEBUG_FS */
-}
-
-void debugfs_del_meshif(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-
-	debug_log_cleanup(bat_priv);
-
-	if (bat_debugfs) {
-		debugfs_remove_recursive(bat_priv->debug_dir);
-		bat_priv->debug_dir = NULL;
-	}
-}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index dc53798ebb47..e877af8bdd1e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */

 #include "main.h"
@@ -30,15 +28,16 @@
 #include "send.h"
 #include "bat_algo.h"

-static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
-					       const uint8_t *neigh_addr,
-					       struct orig_node *orig_node,
-					       struct orig_node *orig_neigh,
-					       uint32_t seqno)
+static struct batadv_neigh_node *
+batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
+			const uint8_t *neigh_addr,
+			struct batadv_orig_node *orig_node,
+			struct batadv_orig_node *orig_neigh, __be32 seqno)
 {
-	struct neigh_node *neigh_node;
+	struct batadv_neigh_node *neigh_node;

-	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
+					   ntohl(seqno));
 	if (!neigh_node)
 		goto out;

@@ -55,30 +54,30 @@ out:
 	return neigh_node;
 }

-static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
+static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 {
-	struct batman_ogm_packet *batman_ogm_packet;
+	struct batadv_ogm_packet *batadv_ogm_packet;
 	uint32_t random_seqno;
-	int res = -1;
+	int res = -ENOMEM;

 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
 	atomic_set(&hard_iface->seqno, random_seqno);

-	hard_iface->packet_len = BATMAN_OGM_HLEN;
+	hard_iface->packet_len = BATADV_OGM_HLEN;
 	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

 	if (!hard_iface->packet_buff)
 		goto out;

-	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
-	batman_ogm_packet->header.version = COMPAT_VERSION;
-	batman_ogm_packet->header.ttl = 2;
-	batman_ogm_packet->flags = NO_FLAGS;
-	batman_ogm_packet->tq = TQ_MAX_VALUE;
-	batman_ogm_packet->tt_num_changes = 0;
-	batman_ogm_packet->ttvn = 0;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
+	batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
+	batadv_ogm_packet->header.ttl = 2;
+	batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+	batadv_ogm_packet->tt_num_changes = 0;
+	batadv_ogm_packet->ttvn = 0;

 	res = 0;

@@ -86,133 +85,152 @@ out:
 	return res;
 }

-static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
 	kfree(hard_iface->packet_buff);
 	hard_iface->packet_buff = NULL;
 }

-static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
-	struct batman_ogm_packet *batman_ogm_packet;
+	struct batadv_ogm_packet *batadv_ogm_packet;

-	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	memcpy(batman_ogm_packet->orig,
+	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	memcpy(batadv_ogm_packet->orig,
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
-	memcpy(batman_ogm_packet->prev_sender,
+	memcpy(batadv_ogm_packet->prev_sender,
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }

-static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+static void
+batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
-	struct batman_ogm_packet *batman_ogm_packet;
+	struct batadv_ogm_packet *batadv_ogm_packet;

-	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
-	batman_ogm_packet->header.ttl = TTL;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
+	batadv_ogm_packet->header.ttl = BATADV_TTL;
 }

 /* when do we schedule our own ogm to be sent */
-static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
+static unsigned long
+batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
 {
-	return jiffies + msecs_to_jiffies(
-		   atomic_read(&bat_priv->orig_interval) -
-		   JITTER + (random32() % 2*JITTER));
+	unsigned int msecs;
+
+	msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+	msecs += (random32() % 2 * BATADV_JITTER);
+
+	return jiffies + msecs_to_jiffies(msecs);
 }

 /* when do we schedule a ogm packet to be sent */
-static unsigned long bat_iv_ogm_fwd_send_time(void)
+static unsigned long batadv_iv_ogm_fwd_send_time(void)
 {
-	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+	return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
 }

 /* apply hop penalty for a normal link */
-static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
+static uint8_t batadv_hop_penalty(uint8_t tq,
+				  const struct batadv_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
-	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
+	int new_tq;
+
+	new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
+	new_tq /= BATADV_TQ_MAX_VALUE;
+
+	return new_tq;
 }

 /* is there another aggregated packet here? */
-static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-				  int tt_num_changes)
+static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+				     int tt_num_changes)
 {
-	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
+	int next_buff_pos = 0;
+
+	next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+	next_buff_pos += batadv_tt_len(tt_num_changes);

 	return (next_buff_pos <= packet_len) &&
-	       (next_buff_pos <= MAX_AGGREGATION_BYTES);
+	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
 }

 /* send a batman ogm to a given interface */
-static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
-				  struct hard_iface *hard_iface)
+static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+				     struct batadv_hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	char *fwd_str;
 	uint8_t packet_num;
 	int16_t buff_pos;
-	struct batman_ogm_packet *batman_ogm_packet;
+	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct sk_buff *skb;

-	if (hard_iface->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != BATADV_IF_ACTIVE)
 		return;

 	packet_num = 0;
 	buff_pos = 0;
-	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;

 	/* adjust all flags and log packets */
-	while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-				      batman_ogm_packet->tt_num_changes)) {
+	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+					 batadv_ogm_packet->tt_num_changes)) {

 		/* we might have aggregated direct link packets with an
-		 * ordinary base packet */
+		 * ordinary base packet
+		 */
 		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
 		    (forw_packet->if_incoming == hard_iface))
-			batman_ogm_packet->flags |= DIRECTLINK;
+			batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
 		else
-			batman_ogm_packet->flags &= ~DIRECTLINK;
+			batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;

 		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
 							    "Sending own" :
 							    "Forwarding"));
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
-			fwd_str, (packet_num > 0 ? "aggregated " : ""),
-			batman_ogm_packet->orig,
-			ntohl(batman_ogm_packet->seqno),
-			batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
-			(batman_ogm_packet->flags & DIRECTLINK ?
-			 "on" : "off"),
-			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
-			hard_iface->net_dev->dev_addr);
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+			   fwd_str, (packet_num > 0 ? "aggregated " : ""),
+			   batadv_ogm_packet->orig,
+			   ntohl(batadv_ogm_packet->seqno),
+			   batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+			   (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
+			    "on" : "off"),
+			   batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+			   hard_iface->net_dev->dev_addr);

-		buff_pos += BATMAN_OGM_HLEN +
-				tt_len(batman_ogm_packet->tt_num_changes);
+		buff_pos += BATADV_OGM_HLEN;
+		buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 		packet_num++;
-		batman_ogm_packet = (struct batman_ogm_packet *)
+		batadv_ogm_packet = (struct batadv_ogm_packet *)
 					(forw_packet->skb->data + buff_pos);
 	}

 	/* create clone because function is called more than once */
 	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
-	if (skb)
-		send_skb_packet(skb, hard_iface, broadcast_addr);
+	if (skb) {
+		batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+		batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+				   skb->len + ETH_HLEN);
+		batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+	}
 }

 /* send a batman ogm packet */
-static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
+static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 {
-	struct hard_iface *hard_iface;
+	struct batadv_hard_iface *hard_iface;
 	struct net_device *soft_iface;
-	struct bat_priv *bat_priv;
-	struct hard_iface *primary_if = NULL;
-	struct batman_ogm_packet *batman_ogm_packet;
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_ogm_packet *batadv_ogm_packet;
 	unsigned char directlink;

-	batman_ogm_packet = (struct batman_ogm_packet *)
+	batadv_ogm_packet = (struct batadv_ogm_packet *)
 					(forw_packet->skb->data);
-	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+	directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);

 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not specified\n");
@@ -222,31 +240,33 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
 	soft_iface = forw_packet->if_incoming->soft_iface;
 	bat_priv = netdev_priv(soft_iface);

-	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
 		goto out;

-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;

-	/* multihomed peer assumed */
-	/* non-primary OGMs are only broadcasted on their interface */
-	if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
+	/* multihomed peer assumed
+	 * non-primary OGMs are only broadcasted on their interface
+	 */
+	if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
 	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

 		/* FIXME: what about aggregated packets ? */
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
-			(forw_packet->own ? "Sending own" : "Forwarding"),
-			batman_ogm_packet->orig,
-			ntohl(batman_ogm_packet->seqno),
-			batman_ogm_packet->header.ttl,
-			forw_packet->if_incoming->net_dev->name,
-			forw_packet->if_incoming->net_dev->dev_addr);
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
+			   (forw_packet->own ? "Sending own" : "Forwarding"),
+			   batadv_ogm_packet->orig,
+			   ntohl(batadv_ogm_packet->seqno),
+			   batadv_ogm_packet->header.ttl,
+			   forw_packet->if_incoming->net_dev->name,
+			   forw_packet->if_incoming->net_dev->dev_addr);

 		/* skb is only used once and than forw_packet is free'd */
-		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
-				broadcast_addr);
+		batadv_send_skb_packet(forw_packet->skb,
+				       forw_packet->if_incoming,
+				       batadv_broadcast_addr);
 		forw_packet->skb = NULL;

 		goto out;
@@ -254,70 +274,70 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
254 274
255 /* broadcast on every interface */ 275 /* broadcast on every interface */
256 rcu_read_lock(); 276 rcu_read_lock();
257 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 277 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
258 if (hard_iface->soft_iface != soft_iface) 278 if (hard_iface->soft_iface != soft_iface)
259 continue; 279 continue;
260 280
261 bat_iv_ogm_send_to_if(forw_packet, hard_iface); 281 batadv_iv_ogm_send_to_if(forw_packet, hard_iface);
262 } 282 }
263 rcu_read_unlock(); 283 rcu_read_unlock();
264 284
265out: 285out:
266 if (primary_if) 286 if (primary_if)
267 hardif_free_ref(primary_if); 287 batadv_hardif_free_ref(primary_if);
268} 288}
269 289
270/* return true if new_packet can be aggregated with forw_packet */ 290/* return true if new_packet can be aggregated with forw_packet */
271static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet 291static bool
272 *new_batman_ogm_packet, 292batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
273 struct bat_priv *bat_priv, 293 struct batadv_priv *bat_priv,
274 int packet_len, unsigned long send_time, 294 int packet_len, unsigned long send_time,
275 bool directlink, 295 bool directlink,
276 const struct hard_iface *if_incoming, 296 const struct batadv_hard_iface *if_incoming,
277 const struct forw_packet *forw_packet) 297 const struct batadv_forw_packet *forw_packet)
278{ 298{
279 struct batman_ogm_packet *batman_ogm_packet; 299 struct batadv_ogm_packet *batadv_ogm_packet;
280 int aggregated_bytes = forw_packet->packet_len + packet_len; 300 int aggregated_bytes = forw_packet->packet_len + packet_len;
281 struct hard_iface *primary_if = NULL; 301 struct batadv_hard_iface *primary_if = NULL;
282 bool res = false; 302 bool res = false;
303 unsigned long aggregation_end_time;
283 304
284 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; 305 batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
306 aggregation_end_time = send_time;
307 aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
285 308
286 /** 309 /* we can aggregate the current packet to this aggregated packet
287 * we can aggregate the current packet to this aggregated packet
288 * if: 310 * if:
289 * 311 *
290 * - the send time is within our MAX_AGGREGATION_MS time 312 * - the send time is within our MAX_AGGREGATION_MS time
291 * - the resulting packet won't be bigger than 313 * - the resulting packet won't be bigger than
292 * MAX_AGGREGATION_BYTES 314 * MAX_AGGREGATION_BYTES
293 */ 315 */
294
295 if (time_before(send_time, forw_packet->send_time) && 316 if (time_before(send_time, forw_packet->send_time) &&
296 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS), 317 time_after_eq(aggregation_end_time, forw_packet->send_time) &&
297 forw_packet->send_time) && 318 (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
298 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
299 319
300 /** 320 /* check aggregation compatibility
301 * check aggregation compatibility
302 * -> direct link packets are broadcasted on 321 * -> direct link packets are broadcasted on
303 * their interface only 322 * their interface only
304 * -> aggregate packet if the current packet is 323 * -> aggregate packet if the current packet is
305 * a "global" packet as well as the base 324 * a "global" packet as well as the base
306 * packet 325 * packet
307 */ 326 */
308 327 primary_if = batadv_primary_if_get_selected(bat_priv);
309 primary_if = primary_if_get_selected(bat_priv);
310 if (!primary_if) 328 if (!primary_if)
311 goto out; 329 goto out;
312 330
313 /* packets without direct link flag and high TTL 331 /* packets without direct link flag and high TTL
314 * are flooded through the net */ 332 * are flooded through the net
333 */
315 if ((!directlink) && 334 if ((!directlink) &&
316 (!(batman_ogm_packet->flags & DIRECTLINK)) && 335 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
317 (batman_ogm_packet->header.ttl != 1) && 336 (batadv_ogm_packet->header.ttl != 1) &&
318 337
319 /* own packets originating non-primary 338 /* own packets originating non-primary
320 * interfaces leave only that interface */ 339 * interfaces leave only that interface
340 */
321 ((!forw_packet->own) || 341 ((!forw_packet->own) ||
322 (forw_packet->if_incoming == primary_if))) { 342 (forw_packet->if_incoming == primary_if))) {
323 res = true; 343 res = true;
@@ -325,15 +345,17 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
325 } 345 }
326 346
327 /* if the incoming packet is sent via this one 347 /* if the incoming packet is sent via this one
328 * interface only - we still can aggregate */ 348 * interface only - we still can aggregate
349 */
329 if ((directlink) && 350 if ((directlink) &&
330 (new_batman_ogm_packet->header.ttl == 1) && 351 (new_bat_ogm_packet->header.ttl == 1) &&
331 (forw_packet->if_incoming == if_incoming) && 352 (forw_packet->if_incoming == if_incoming) &&
332 353
333 /* packets from direct neighbors or 354 /* packets from direct neighbors or
334 * own secondary interface packets 355 * own secondary interface packets
335 * (= secondary interface packets in general) */ 356 * (= secondary interface packets in general)
336 (batman_ogm_packet->flags & DIRECTLINK || 357 */
358 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
337 (forw_packet->own && 359 (forw_packet->own &&
338 forw_packet->if_incoming != primary_if))) { 360 forw_packet->if_incoming != primary_if))) {
339 res = true; 361 res = true;
@@ -343,29 +365,30 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
343 365
344out: 366out:
345 if (primary_if) 367 if (primary_if)
346 hardif_free_ref(primary_if); 368 batadv_hardif_free_ref(primary_if);
347 return res; 369 return res;
348} 370}
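
The checks above gate aggregation on two things: the already queued packet must be scheduled to leave after the new packet but still within the new packet's aggregation window, and the merged payload must stay under the size cap. A self-contained sketch of just that time/size gate; the 100 ms window and 512 byte cap match the upstream defaults of this era but are assumptions here:

#include <linux/types.h>
#include <linux/jiffies.h>

/* sketch: the window/size test behind batadv_iv_ogm_can_aggregate() */
static bool fits_aggregation_window(unsigned long new_send_time,
				    unsigned long queued_send_time,
				    int aggregated_bytes)
{
	/* assumed: BATADV_MAX_AGGREGATION_MS == 100 */
	unsigned long window_end = new_send_time + msecs_to_jiffies(100);

	/* queued packet leaves after us, but still inside our window */
	return time_before(new_send_time, queued_send_time) &&
	       time_after_eq(window_end, queued_send_time) &&
	       aggregated_bytes <= 512; /* assumed: BATADV_MAX_AGGREGATION_BYTES */
}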
349 371
350/* create a new aggregated packet and add this packet to it */ 372/* create a new aggregated packet and add this packet to it */
351static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, 373static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
352 int packet_len, unsigned long send_time, 374 int packet_len, unsigned long send_time,
353 bool direct_link, 375 bool direct_link,
354 struct hard_iface *if_incoming, 376 struct batadv_hard_iface *if_incoming,
355 int own_packet) 377 int own_packet)
356{ 378{
357 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 379 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
358 struct forw_packet *forw_packet_aggr; 380 struct batadv_forw_packet *forw_packet_aggr;
359 unsigned char *skb_buff; 381 unsigned char *skb_buff;
382 unsigned int skb_size;
360 383
361 if (!atomic_inc_not_zero(&if_incoming->refcount)) 384 if (!atomic_inc_not_zero(&if_incoming->refcount))
362 return; 385 return;
363 386
364 /* own packet should always be scheduled */ 387 /* own packet should always be scheduled */
365 if (!own_packet) { 388 if (!own_packet) {
366 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { 389 if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
367 bat_dbg(DBG_BATMAN, bat_priv, 390 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
368 "batman packet queue full\n"); 391 "batman packet queue full\n");
369 goto out; 392 goto out;
370 } 393 }
371 } 394 }
@@ -378,12 +401,12 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
378 } 401 }
379 402
380 if ((atomic_read(&bat_priv->aggregated_ogms)) && 403 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
381 (packet_len < MAX_AGGREGATION_BYTES)) 404 (packet_len < BATADV_MAX_AGGREGATION_BYTES))
382 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + 405 skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
383 ETH_HLEN);
384 else 406 else
385 forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); 407 skb_size = packet_len + ETH_HLEN;
386 408
409 forw_packet_aggr->skb = dev_alloc_skb(skb_size);
387 if (!forw_packet_aggr->skb) { 410 if (!forw_packet_aggr->skb) {
388 if (!own_packet) 411 if (!own_packet)
389 atomic_inc(&bat_priv->batman_queue_left); 412 atomic_inc(&bat_priv->batman_queue_left);
@@ -401,7 +424,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
401 forw_packet_aggr->own = own_packet; 424 forw_packet_aggr->own = own_packet;
402 forw_packet_aggr->if_incoming = if_incoming; 425 forw_packet_aggr->if_incoming = if_incoming;
403 forw_packet_aggr->num_packets = 0; 426 forw_packet_aggr->num_packets = 0;
404 forw_packet_aggr->direct_link_flags = NO_FLAGS; 427 forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
405 forw_packet_aggr->send_time = send_time; 428 forw_packet_aggr->send_time = send_time;
406 429
407 /* save packet direct link flag status */ 430 /* save packet direct link flag status */
@@ -415,20 +438,20 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
415 438
416 /* start timer for this packet */ 439 /* start timer for this packet */
417 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, 440 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
418 send_outstanding_bat_ogm_packet); 441 batadv_send_outstanding_bat_ogm_packet);
419 queue_delayed_work(bat_event_workqueue, 442 queue_delayed_work(batadv_event_workqueue,
420 &forw_packet_aggr->delayed_work, 443 &forw_packet_aggr->delayed_work,
421 send_time - jiffies); 444 send_time - jiffies);
422 445
423 return; 446 return;
424out: 447out:
425 hardif_free_ref(if_incoming); 448 batadv_hardif_free_ref(if_incoming);
426} 449}
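
New here is batadv_atomic_dec_not_zero(), which reserves a slot in the bounded batman_queue_left counter only when one is still available, so a full queue is detected without ever pushing the counter below zero. It is effectively a thin wrapper over an existing atomic primitive; a sketch of the equivalent:

#include <linux/atomic.h>

/* sketch: decrement v unless it is already zero; nonzero return = success */
static inline int dec_not_zero(atomic_t *v)
{
	return atomic_add_unless(v, -1, 0);
}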
427 450
428/* aggregate a new packet into the existing ogm packet */ 451/* aggregate a new packet into the existing ogm packet */
429static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr, 452static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
430 const unsigned char *packet_buff, 453 const unsigned char *packet_buff,
431 int packet_len, bool direct_link) 454 int packet_len, bool direct_link)
432{ 455{
433 unsigned char *skb_buff; 456 unsigned char *skb_buff;
434 457
@@ -443,22 +466,25 @@ static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
443 (1 << forw_packet_aggr->num_packets); 466 (1 << forw_packet_aggr->num_packets);
444} 467}
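
Each packet merged into an aggregate records its direct-link flag as one bit of direct_link_flags, indexed by the packet's position within the aggregate, so the send path can later decide per sub-packet how it may be rebroadcast. A sketch of that bookkeeping; the field names match the diff, the helper itself is illustrative:

/* sketch: remember the direct-link bit for the sub-packet at this slot */
static void save_direct_link_flag(unsigned long *direct_link_flags,
				  int num_packets, int direct_link)
{
	if (direct_link)
		*direct_link_flags |= 1UL << num_packets;
}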
445 468
446static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, 469static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
447 unsigned char *packet_buff, 470 unsigned char *packet_buff,
448 int packet_len, struct hard_iface *if_incoming, 471 int packet_len,
449 int own_packet, unsigned long send_time) 472 struct batadv_hard_iface *if_incoming,
473 int own_packet, unsigned long send_time)
450{ 474{
451 /** 475 /* _aggr -> pointer to the packet we want to aggregate with
452 * _aggr -> pointer to the packet we want to aggregate with
453 * _pos -> pointer to the position in the queue 476 * _pos -> pointer to the position in the queue
454 */ 477 */
455 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL; 478 struct batadv_forw_packet *forw_packet_aggr = NULL;
479 struct batadv_forw_packet *forw_packet_pos = NULL;
456 struct hlist_node *tmp_node; 480 struct hlist_node *tmp_node;
457 struct batman_ogm_packet *batman_ogm_packet; 481 struct batadv_ogm_packet *batadv_ogm_packet;
458 bool direct_link; 482 bool direct_link;
483 unsigned long max_aggregation_jiffies;
459 484
460 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 485 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
461 direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0; 486 direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
487 max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
462 488
463 /* find position for the packet in the forward queue */ 489 /* find position for the packet in the forward queue */
464 spin_lock_bh(&bat_priv->forw_bat_list_lock); 490 spin_lock_bh(&bat_priv->forw_bat_list_lock);
@@ -466,11 +492,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
466 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 492 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
467 hlist_for_each_entry(forw_packet_pos, tmp_node, 493 hlist_for_each_entry(forw_packet_pos, tmp_node,
468 &bat_priv->forw_bat_list, list) { 494 &bat_priv->forw_bat_list, list) {
469 if (bat_iv_ogm_can_aggregate(batman_ogm_packet, 495 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
470 bat_priv, packet_len, 496 bat_priv, packet_len,
471 send_time, direct_link, 497 send_time, direct_link,
472 if_incoming, 498 if_incoming,
473 forw_packet_pos)) { 499 forw_packet_pos)) {
474 forw_packet_aggr = forw_packet_pos; 500 forw_packet_aggr = forw_packet_pos;
475 break; 501 break;
476 } 502 }
@@ -478,42 +504,41 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
478 } 504 }
479 505
480 /* nothing to aggregate with - either aggregation disabled or no 506 /* nothing to aggregate with - either aggregation disabled or no
481 * suitable aggregation packet found */ 507 * suitable aggregation packet found
508 */
482 if (!forw_packet_aggr) { 509 if (!forw_packet_aggr) {
483 /* the following section can run without the lock */ 510 /* the following section can run without the lock */
484 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 511 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
485 512
486 /** 513 /* if we could not aggregate this packet with one of the others
487 * if we could not aggregate this packet with one of the others
488 * we hold it back for a while, so that it might be aggregated 514 * we hold it back for a while, so that it might be aggregated
489 * later on 515 * later on
490 */ 516 */
491 if ((!own_packet) && 517 if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
492 (atomic_read(&bat_priv->aggregated_ogms))) 518 send_time += max_aggregation_jiffies;
493 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
494 519
495 bat_iv_ogm_aggregate_new(packet_buff, packet_len, 520 batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
496 send_time, direct_link, 521 send_time, direct_link,
497 if_incoming, own_packet); 522 if_incoming, own_packet);
498 } else { 523 } else {
499 bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff, 524 batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
500 packet_len, direct_link); 525 packet_len, direct_link);
501 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 526 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
502 } 527 }
503} 528}
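
Note the lock hand-off in the queueing logic above: the candidate search runs under forw_bat_list_lock, the lock is dropped before batadv_iv_ogm_aggregate_new() allocates a fresh aggregate, but it stays held across an in-place merge so the chosen candidate cannot be sent or freed underneath us. A reduced sketch of that shape, with illustrative names:

#include <linux/spinlock.h>
#include <linux/list.h>

struct fwq_item { struct hlist_node list; };

/* sketch: search locked, allocate unlocked, merge locked */
static void queue_add_pattern(spinlock_t *lock, struct hlist_head *queue,
			      struct fwq_item *(*find)(struct hlist_head *),
			      void (*start_new)(void),
			      void (*merge)(struct fwq_item *))
{
	struct fwq_item *candidate;

	spin_lock_bh(lock);
	candidate = find(queue);
	if (!candidate) {
		/* the following section can run without the lock */
		spin_unlock_bh(lock);
		start_new();
		return;
	}
	merge(candidate);	/* candidate stays valid under the lock */
	spin_unlock_bh(lock);
}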
504 529
505static void bat_iv_ogm_forward(struct orig_node *orig_node, 530static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
506 const struct ethhdr *ethhdr, 531 const struct ethhdr *ethhdr,
507 struct batman_ogm_packet *batman_ogm_packet, 532 struct batadv_ogm_packet *batadv_ogm_packet,
508 bool is_single_hop_neigh, 533 bool is_single_hop_neigh,
509 bool is_from_best_next_hop, 534 bool is_from_best_next_hop,
510 struct hard_iface *if_incoming) 535 struct batadv_hard_iface *if_incoming)
511{ 536{
512 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 537 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
513 uint8_t tt_num_changes; 538 uint8_t tt_num_changes;
514 539
515 if (batman_ogm_packet->header.ttl <= 1) { 540 if (batadv_ogm_packet->header.ttl <= 1) {
516 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); 541 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
517 return; 542 return;
518 } 543 }
519 544
@@ -525,110 +550,113 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
525 * simply drop the ogm. 550 * simply drop the ogm.
526 */ 551 */
527 if (is_single_hop_neigh) 552 if (is_single_hop_neigh)
528 batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; 553 batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
529 else 554 else
530 return; 555 return;
531 } 556 }
532 557
533 tt_num_changes = batman_ogm_packet->tt_num_changes; 558 tt_num_changes = batadv_ogm_packet->tt_num_changes;
534 559
535 batman_ogm_packet->header.ttl--; 560 batadv_ogm_packet->header.ttl--;
536 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 561 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
537 562
538 /* apply hop penalty */ 563 /* apply hop penalty */
539 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); 564 batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
565 bat_priv);
540 566
541 bat_dbg(DBG_BATMAN, bat_priv, 567 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
542 "Forwarding packet: tq: %i, ttl: %i\n", 568 "Forwarding packet: tq: %i, ttl: %i\n",
543 batman_ogm_packet->tq, batman_ogm_packet->header.ttl); 569 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
544
545 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
546 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
547 570
548 /* switch off the primaries' first hop flag when forwarding */ 571 /* switch off the primaries' first hop flag when forwarding */
549 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; 572 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
550 if (is_single_hop_neigh) 573 if (is_single_hop_neigh)
551 batman_ogm_packet->flags |= DIRECTLINK; 574 batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
552 else 575 else
553 batman_ogm_packet->flags &= ~DIRECTLINK; 576 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
554 577
555 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, 578 batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
556 BATMAN_OGM_HLEN + tt_len(tt_num_changes), 579 BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
557 if_incoming, 0, bat_iv_ogm_fwd_send_time()); 580 if_incoming, 0, batadv_iv_ogm_fwd_send_time());
558} 581}
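
Before re-queueing, the forward path scales the received TQ down by the configurable hop penalty, so every extra hop costs a fixed fraction of the remaining link quality. A standalone sketch mirroring that arithmetic; the TQ_MAX_VALUE of 255 matches the upstream default but is an assumption here:

#include <linux/types.h>

#define TQ_MAX 255	/* assumed BATADV_TQ_MAX_VALUE */

/* sketch: scale tq by (TQ_MAX - penalty)/TQ_MAX, as the forward path does */
static u8 apply_hop_penalty(u8 tq, u8 hop_penalty)
{
	return (tq * (TQ_MAX - hop_penalty)) / TQ_MAX;
}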
559 582
560static void bat_iv_ogm_schedule(struct hard_iface *hard_iface, 583static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
561 int tt_num_changes)
562{ 584{
563 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 585 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
564 struct batman_ogm_packet *batman_ogm_packet; 586 struct batadv_ogm_packet *batadv_ogm_packet;
565 struct hard_iface *primary_if; 587 struct batadv_hard_iface *primary_if;
566 int vis_server; 588 int vis_server, tt_num_changes = 0;
567 589
568 vis_server = atomic_read(&bat_priv->vis_mode); 590 vis_server = atomic_read(&bat_priv->vis_mode);
569 primary_if = primary_if_get_selected(bat_priv); 591 primary_if = batadv_primary_if_get_selected(bat_priv);
592
593 if (hard_iface == primary_if)
594 tt_num_changes = batadv_tt_append_diff(bat_priv,
595 &hard_iface->packet_buff,
596 &hard_iface->packet_len,
597 BATADV_OGM_HLEN);
570 598
571 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 599 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
572 600
573 /* change sequence number to network order */ 601 /* change sequence number to network order */
574 batman_ogm_packet->seqno = 602 batadv_ogm_packet->seqno =
575 htonl((uint32_t)atomic_read(&hard_iface->seqno)); 603 htonl((uint32_t)atomic_read(&hard_iface->seqno));
604 atomic_inc(&hard_iface->seqno);
576 605
577 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); 606 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
578 batman_ogm_packet->tt_crc = htons((uint16_t) 607 batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
579 atomic_read(&bat_priv->tt_crc));
580 if (tt_num_changes >= 0) 608 if (tt_num_changes >= 0)
581 batman_ogm_packet->tt_num_changes = tt_num_changes; 609 batadv_ogm_packet->tt_num_changes = tt_num_changes;
582 610
583 if (vis_server == VIS_TYPE_SERVER_SYNC) 611 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
584 batman_ogm_packet->flags |= VIS_SERVER; 612 batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
585 else 613 else
586 batman_ogm_packet->flags &= ~VIS_SERVER; 614 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
587 615
588 if ((hard_iface == primary_if) && 616 if ((hard_iface == primary_if) &&
589 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) 617 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
590 batman_ogm_packet->gw_flags = 618 batadv_ogm_packet->gw_flags =
591 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 619 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
592 else 620 else
593 batman_ogm_packet->gw_flags = NO_FLAGS; 621 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
594
595 atomic_inc(&hard_iface->seqno);
596 622
597 slide_own_bcast_window(hard_iface); 623 batadv_slide_own_bcast_window(hard_iface);
598 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, 624 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
599 hard_iface->packet_len, hard_iface, 1, 625 hard_iface->packet_len, hard_iface, 1,
600 bat_iv_ogm_emit_send_time(bat_priv)); 626 batadv_iv_ogm_emit_send_time(bat_priv));
601 627
602 if (primary_if) 628 if (primary_if)
603 hardif_free_ref(primary_if); 629 batadv_hardif_free_ref(primary_if);
604} 630}
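
The scheduler writes the multi-byte OGM fields in network byte order up front (seqno via htonl(), tt_crc via htons()), which is why the receive path further down converts with ntohl()/ntohs() at each use instead of rewriting the buffer in place as the old code did. A minimal userspace-style sketch of the packing; the struct is an illustrative subset, not the real header layout:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl/htons; kernel code gets them elsewhere */

struct ogm_hdr_sketch {
	uint32_t seqno;		/* big-endian on the wire */
	uint16_t tt_crc;	/* big-endian on the wire */
};

static void pack_ogm(struct ogm_hdr_sketch *ogm, uint32_t seqno, uint16_t crc)
{
	ogm->seqno = htonl(seqno);	/* host -> network order */
	ogm->tt_crc = htons(crc);
}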
605 631
606static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, 632static void
607 struct orig_node *orig_node, 633batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
608 const struct ethhdr *ethhdr, 634 struct batadv_orig_node *orig_node,
609 const struct batman_ogm_packet 635 const struct ethhdr *ethhdr,
610 *batman_ogm_packet, 636 const struct batadv_ogm_packet *batadv_ogm_packet,
611 struct hard_iface *if_incoming, 637 struct batadv_hard_iface *if_incoming,
612 const unsigned char *tt_buff, 638 const unsigned char *tt_buff,
613 int is_duplicate) 639 int is_duplicate)
614{ 640{
615 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 641 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
616 struct neigh_node *router = NULL; 642 struct batadv_neigh_node *router = NULL;
617 struct orig_node *orig_node_tmp; 643 struct batadv_orig_node *orig_node_tmp;
618 struct hlist_node *node; 644 struct hlist_node *node;
619 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 645 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
646 uint8_t *neigh_addr;
620 647
621 bat_dbg(DBG_BATMAN, bat_priv, 648 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
622 "update_originator(): Searching and updating originator entry of received packet\n"); 649 "update_originator(): Searching and updating originator entry of received packet\n");
623 650
624 rcu_read_lock(); 651 rcu_read_lock();
625 hlist_for_each_entry_rcu(tmp_neigh_node, node, 652 hlist_for_each_entry_rcu(tmp_neigh_node, node,
626 &orig_node->neigh_list, list) { 653 &orig_node->neigh_list, list) {
627 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 654 neigh_addr = tmp_neigh_node->addr;
628 (tmp_neigh_node->if_incoming == if_incoming) && 655 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
629 atomic_inc_not_zero(&tmp_neigh_node->refcount)) { 656 tmp_neigh_node->if_incoming == if_incoming &&
657 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
630 if (neigh_node) 658 if (neigh_node)
631 neigh_node_free_ref(neigh_node); 659 batadv_neigh_node_free_ref(neigh_node);
632 neigh_node = tmp_neigh_node; 660 neigh_node = tmp_neigh_node;
633 continue; 661 continue;
634 } 662 }
@@ -637,53 +665,55 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
637 continue; 665 continue;
638 666
639 spin_lock_bh(&tmp_neigh_node->lq_update_lock); 667 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
640 ring_buffer_set(tmp_neigh_node->tq_recv, 668 batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
641 &tmp_neigh_node->tq_index, 0); 669 &tmp_neigh_node->tq_index, 0);
642 tmp_neigh_node->tq_avg = 670 tmp_neigh_node->tq_avg =
643 ring_buffer_avg(tmp_neigh_node->tq_recv); 671 batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
644 spin_unlock_bh(&tmp_neigh_node->lq_update_lock); 672 spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
645 } 673 }
646 674
647 if (!neigh_node) { 675 if (!neigh_node) {
648 struct orig_node *orig_tmp; 676 struct batadv_orig_node *orig_tmp;
649 677
650 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); 678 orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
651 if (!orig_tmp) 679 if (!orig_tmp)
652 goto unlock; 680 goto unlock;
653 681
654 neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, 682 neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
655 orig_node, orig_tmp, 683 ethhdr->h_source,
656 batman_ogm_packet->seqno); 684 orig_node, orig_tmp,
685 batadv_ogm_packet->seqno);
657 686
658 orig_node_free_ref(orig_tmp); 687 batadv_orig_node_free_ref(orig_tmp);
659 if (!neigh_node) 688 if (!neigh_node)
660 goto unlock; 689 goto unlock;
661 } else 690 } else
662 bat_dbg(DBG_BATMAN, bat_priv, 691 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
663 "Updating existing last-hop neighbor of originator\n"); 692 "Updating existing last-hop neighbor of originator\n");
664 693
665 rcu_read_unlock(); 694 rcu_read_unlock();
666 695
667 orig_node->flags = batman_ogm_packet->flags; 696 orig_node->flags = batadv_ogm_packet->flags;
668 neigh_node->last_seen = jiffies; 697 neigh_node->last_seen = jiffies;
669 698
670 spin_lock_bh(&neigh_node->lq_update_lock); 699 spin_lock_bh(&neigh_node->lq_update_lock);
671 ring_buffer_set(neigh_node->tq_recv, 700 batadv_ring_buffer_set(neigh_node->tq_recv,
672 &neigh_node->tq_index, 701 &neigh_node->tq_index,
673 batman_ogm_packet->tq); 702 batadv_ogm_packet->tq);
674 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 703 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
675 spin_unlock_bh(&neigh_node->lq_update_lock); 704 spin_unlock_bh(&neigh_node->lq_update_lock);
676 705
677 if (!is_duplicate) { 706 if (!is_duplicate) {
678 orig_node->last_ttl = batman_ogm_packet->header.ttl; 707 orig_node->last_ttl = batadv_ogm_packet->header.ttl;
679 neigh_node->last_ttl = batman_ogm_packet->header.ttl; 708 neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
680 } 709 }
681 710
682 bonding_candidate_add(orig_node, neigh_node); 711 batadv_bonding_candidate_add(orig_node, neigh_node);
683 712
684 /* if this neighbor already is our next hop there is nothing 713 /* if this neighbor already is our next hop there is nothing
685 * to change */ 714 * to change
686 router = orig_node_get_router(orig_node); 715 */
716 router = batadv_orig_node_get_router(orig_node);
687 if (router == neigh_node) 717 if (router == neigh_node)
688 goto update_tt; 718 goto update_tt;
689 719
@@ -692,7 +722,8 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
692 goto update_tt; 722 goto update_tt;
693 723
694 /* if the TQ is the same and the link not more symmetric we 724 /* if the TQ is the same and the link not more symmetric we
695 * won't consider it either */ 725 * won't consider it either
726 */
696 if (router && (neigh_node->tq_avg == router->tq_avg)) { 727 if (router && (neigh_node->tq_avg == router->tq_avg)) {
697 orig_node_tmp = router->orig_node; 728 orig_node_tmp = router->orig_node;
698 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 729 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
@@ -710,30 +741,31 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
710 goto update_tt; 741 goto update_tt;
711 } 742 }
712 743
713 update_route(bat_priv, orig_node, neigh_node); 744 batadv_update_route(bat_priv, orig_node, neigh_node);
714 745
715update_tt: 746update_tt:
716 /* I have to check for transtable changes only if the OGM has been 747 /* I have to check for transtable changes only if the OGM has been
717 * sent through a primary interface */ 748 * sent through a primary interface
718 if (((batman_ogm_packet->orig != ethhdr->h_source) && 749 */
719 (batman_ogm_packet->header.ttl > 2)) || 750 if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
720 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 751 (batadv_ogm_packet->header.ttl > 2)) ||
721 tt_update_orig(bat_priv, orig_node, tt_buff, 752 (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
722 batman_ogm_packet->tt_num_changes, 753 batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
723 batman_ogm_packet->ttvn, 754 batadv_ogm_packet->tt_num_changes,
724 batman_ogm_packet->tt_crc); 755 batadv_ogm_packet->ttvn,
756 ntohs(batadv_ogm_packet->tt_crc));
725 757
726 if (orig_node->gw_flags != batman_ogm_packet->gw_flags) 758 if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
727 gw_node_update(bat_priv, orig_node, 759 batadv_gw_node_update(bat_priv, orig_node,
728 batman_ogm_packet->gw_flags); 760 batadv_ogm_packet->gw_flags);
729 761
730 orig_node->gw_flags = batman_ogm_packet->gw_flags; 762 orig_node->gw_flags = batadv_ogm_packet->gw_flags;
731 763
732 /* restart gateway selection if fast or late switching was enabled */ 764 /* restart gateway selection if fast or late switching was enabled */
733 if ((orig_node->gw_flags) && 765 if ((orig_node->gw_flags) &&
734 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && 766 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
735 (atomic_read(&bat_priv->gw_sel_class) > 2)) 767 (atomic_read(&bat_priv->gw_sel_class) > 2))
736 gw_check_election(bat_priv, orig_node); 768 batadv_gw_check_election(bat_priv, orig_node);
737 769
738 goto out; 770 goto out;
739 771
@@ -741,29 +773,32 @@ unlock:
741 rcu_read_unlock(); 773 rcu_read_unlock();
742out: 774out:
743 if (neigh_node) 775 if (neigh_node)
744 neigh_node_free_ref(neigh_node); 776 batadv_neigh_node_free_ref(neigh_node);
745 if (router) 777 if (router)
746 neigh_node_free_ref(router); 778 batadv_neigh_node_free_ref(router);
747} 779}
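
Per-neighbor link quality above is a ring buffer of recent TQ samples: every received OGM (or a zero for a missed one) is pushed with batadv_ring_buffer_set() and tq_avg is recomputed with batadv_ring_buffer_avg(). A sketch of a buffer with that shape; the window size and the skip-empty-slots averaging are assumptions, not a copy of the driver's helpers:

#include <stdint.h>

#define TQ_WINDOW 5	/* assumed global TQ window size */

static void ring_set(uint8_t buf[TQ_WINDOW], uint8_t *index, uint8_t value)
{
	buf[*index] = value;
	*index = (*index + 1) % TQ_WINDOW;	/* wrap around */
}

static uint8_t ring_avg(const uint8_t buf[TQ_WINDOW])
{
	unsigned int sum = 0, count = 0, i;

	for (i = 0; i < TQ_WINDOW; i++) {
		if (!buf[i])
			continue;	/* assumed: empty slots do not count */
		sum += buf[i];
		count++;
	}
	return count ? sum / count : 0;
}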
748 780
749static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, 781static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
750 struct orig_node *orig_neigh_node, 782 struct batadv_orig_node *orig_neigh_node,
751 struct batman_ogm_packet *batman_ogm_packet, 783 struct batadv_ogm_packet *batadv_ogm_packet,
752 struct hard_iface *if_incoming) 784 struct batadv_hard_iface *if_incoming)
753{ 785{
754 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 786 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
755 struct neigh_node *neigh_node = NULL, *tmp_neigh_node; 787 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
756 struct hlist_node *node; 788 struct hlist_node *node;
757 uint8_t total_count; 789 uint8_t total_count;
758 uint8_t orig_eq_count, neigh_rq_count, tq_own; 790 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
759 int tq_asym_penalty, ret = 0; 791 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
792 int tq_asym_penalty, inv_asym_penalty, ret = 0;
793 unsigned int combined_tq;
760 794
761 /* find corresponding one hop neighbor */ 795 /* find corresponding one hop neighbor */
762 rcu_read_lock(); 796 rcu_read_lock();
763 hlist_for_each_entry_rcu(tmp_neigh_node, node, 797 hlist_for_each_entry_rcu(tmp_neigh_node, node,
764 &orig_neigh_node->neigh_list, list) { 798 &orig_neigh_node->neigh_list, list) {
765 799
766 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) 800 if (!batadv_compare_eth(tmp_neigh_node->addr,
801 orig_neigh_node->orig))
767 continue; 802 continue;
768 803
769 if (tmp_neigh_node->if_incoming != if_incoming) 804 if (tmp_neigh_node->if_incoming != if_incoming)
@@ -778,11 +813,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
778 rcu_read_unlock(); 813 rcu_read_unlock();
779 814
780 if (!neigh_node) 815 if (!neigh_node)
781 neigh_node = bat_iv_ogm_neigh_new(if_incoming, 816 neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
782 orig_neigh_node->orig, 817 orig_neigh_node->orig,
783 orig_neigh_node, 818 orig_neigh_node,
784 orig_neigh_node, 819 orig_neigh_node,
785 batman_ogm_packet->seqno); 820 batadv_ogm_packet->seqno);
786 821
787 if (!neigh_node) 822 if (!neigh_node)
788 goto out; 823 goto out;
@@ -803,47 +838,52 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
803 total_count = (orig_eq_count > neigh_rq_count ? 838 total_count = (orig_eq_count > neigh_rq_count ?
804 neigh_rq_count : orig_eq_count); 839 neigh_rq_count : orig_eq_count);
805 840
806 /* if we have too few packets (too little data) we set tq_own to zero 841 /* if we have too few packets (too little data) we set tq_own to zero
807 /* if we receive too few packets it is not considered bidirectional */ 842 * if we receive too few packets it is not considered bidirectional
808 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || 843 */
809 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) 844 if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
845 neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
810 tq_own = 0; 846 tq_own = 0;
811 else 847 else
812 /* neigh_node->real_packet_count is never zero as we 848 /* neigh_node->real_packet_count is never zero as we
813 * only purge old information when getting new 849 * only purge old information when getting new
814 * information */ 850 * information
815 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; 851 */
852 tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
816 853
817 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does 854 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
818 * affect the nearly-symmetric links only a little, but 855 * affect the nearly-symmetric links only a little, but
819 * punishes asymmetric links more. This will give a value 856 * punishes asymmetric links more. This will give a value
820 * between 0 and TQ_MAX_VALUE 857 * between 0 and TQ_MAX_VALUE
821 */ 858 */
822 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE * 859 neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
823 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * 860 neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
824 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * 861 neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
825 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) / 862 BATADV_TQ_LOCAL_WINDOW_SIZE *
826 (TQ_LOCAL_WINDOW_SIZE * 863 BATADV_TQ_LOCAL_WINDOW_SIZE;
827 TQ_LOCAL_WINDOW_SIZE * 864 inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
828 TQ_LOCAL_WINDOW_SIZE); 865 inv_asym_penalty /= neigh_rq_max_cube;
829 866 tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
830 batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own 867
831 * tq_asym_penalty) / 868 combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty;
832 (TQ_MAX_VALUE * TQ_MAX_VALUE)); 869 combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE;
833 870 batadv_ogm_packet->tq = combined_tq;
834 bat_dbg(DBG_BATMAN, bat_priv, 871
835 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n", 872 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
836 orig_node->orig, orig_neigh_node->orig, total_count, 873 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
837 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); 874 orig_node->orig, orig_neigh_node->orig, total_count,
875 neigh_rq_count, tq_own,
876 tq_asym_penalty, batadv_ogm_packet->tq);
838 877
839 /* if link has the minimum required transmission quality 878 /* if link has the minimum required transmission quality
840 * consider it bidirectional */ 879 * consider it bidirectional
841 if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) 880 */
881 if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
842 ret = 1; 882 ret = 1;
843 883
844out: 884out:
845 if (neigh_node) 885 if (neigh_node)
846 neigh_node_free_ref(neigh_node); 886 batadv_neigh_node_free_ref(neigh_node);
847 return ret; 887 return ret;
848} 888}
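
The rewritten arithmetic above computes the same asymmetric-link penalty as before, 1 - ((1 - x) ** 3) with x = neigh_rq_count / TQ_LOCAL_WINDOW_SIZE, just split into named intermediates instead of one oversized expression. A standalone version of the full TQ combination; the window size of 64 and TQ_MAX_VALUE of 255 match the upstream defaults but are assumptions here:

#include <stdint.h>

#define WINDOW	64	/* assumed BATADV_TQ_LOCAL_WINDOW_SIZE */
#define TQ_MAX	255	/* assumed BATADV_TQ_MAX_VALUE */

/* sketch: combined TQ = tq_in * tq_own * (1 - (1 - rq/WINDOW)^3) / TQ_MAX^2 */
static uint8_t combine_tq(uint8_t tq_in, uint8_t tq_own, uint8_t neigh_rq_count)
{
	unsigned int inv = WINDOW - neigh_rq_count;
	unsigned int inv_cube = inv * inv * inv;
	unsigned int max_cube = WINDOW * WINDOW * WINDOW;
	unsigned int asym_penalty = TQ_MAX - (TQ_MAX * inv_cube) / max_cube;
	unsigned int combined = tq_in * tq_own * asym_penalty;

	return combined / (TQ_MAX * TQ_MAX);	/* fits: <= 255^3 */
}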
849 889
@@ -855,90 +895,94 @@ out:
855 * -1 the packet is old and has been received while the seqno window 895 * -1 the packet is old and has been received while the seqno window
856 * was protected. Caller should drop it. 896 * was protected. Caller should drop it.
857 */ 897 */
858static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, 898static int
859 const struct batman_ogm_packet 899batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
860 *batman_ogm_packet, 900 const struct batadv_ogm_packet *batadv_ogm_packet,
861 const struct hard_iface *if_incoming) 901 const struct batadv_hard_iface *if_incoming)
862{ 902{
863 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 903 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
864 struct orig_node *orig_node; 904 struct batadv_orig_node *orig_node;
865 struct neigh_node *tmp_neigh_node; 905 struct batadv_neigh_node *tmp_neigh_node;
866 struct hlist_node *node; 906 struct hlist_node *node;
867 int is_duplicate = 0; 907 int is_duplicate = 0;
868 int32_t seq_diff; 908 int32_t seq_diff;
869 int need_update = 0; 909 int need_update = 0;
870 int set_mark, ret = -1; 910 int set_mark, ret = -1;
911 uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
912 uint8_t *neigh_addr;
871 913
872 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); 914 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
873 if (!orig_node) 915 if (!orig_node)
874 return 0; 916 return 0;
875 917
876 spin_lock_bh(&orig_node->ogm_cnt_lock); 918 spin_lock_bh(&orig_node->ogm_cnt_lock);
877 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; 919 seq_diff = seqno - orig_node->last_real_seqno;
878 920
879 /* signalize caller that the packet is to be dropped. */ 921 /* signalize caller that the packet is to be dropped. */
880 if (!hlist_empty(&orig_node->neigh_list) && 922 if (!hlist_empty(&orig_node->neigh_list) &&
881 window_protected(bat_priv, seq_diff, 923 batadv_window_protected(bat_priv, seq_diff,
882 &orig_node->batman_seqno_reset)) 924 &orig_node->batman_seqno_reset))
883 goto out; 925 goto out;
884 926
885 rcu_read_lock(); 927 rcu_read_lock();
886 hlist_for_each_entry_rcu(tmp_neigh_node, node, 928 hlist_for_each_entry_rcu(tmp_neigh_node, node,
887 &orig_node->neigh_list, list) { 929 &orig_node->neigh_list, list) {
888 930
889 is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, 931 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
890 orig_node->last_real_seqno, 932 orig_node->last_real_seqno,
891 batman_ogm_packet->seqno); 933 seqno);
892 934
893 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 935 neigh_addr = tmp_neigh_node->addr;
894 (tmp_neigh_node->if_incoming == if_incoming)) 936 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
937 tmp_neigh_node->if_incoming == if_incoming)
895 set_mark = 1; 938 set_mark = 1;
896 else 939 else
897 set_mark = 0; 940 set_mark = 0;
898 941
899 /* if the window moved, set the update flag. */ 942 /* if the window moved, set the update flag. */
900 need_update |= bit_get_packet(bat_priv, 943 need_update |= batadv_bit_get_packet(bat_priv,
901 tmp_neigh_node->real_bits, 944 tmp_neigh_node->real_bits,
902 seq_diff, set_mark); 945 seq_diff, set_mark);
903 946
904 tmp_neigh_node->real_packet_count = 947 tmp_neigh_node->real_packet_count =
905 bitmap_weight(tmp_neigh_node->real_bits, 948 bitmap_weight(tmp_neigh_node->real_bits,
906 TQ_LOCAL_WINDOW_SIZE); 949 BATADV_TQ_LOCAL_WINDOW_SIZE);
907 } 950 }
908 rcu_read_unlock(); 951 rcu_read_unlock();
909 952
910 if (need_update) { 953 if (need_update) {
911 bat_dbg(DBG_BATMAN, bat_priv, 954 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
912 "updating last_seqno: old %u, new %u\n", 955 "updating last_seqno: old %u, new %u\n",
913 orig_node->last_real_seqno, batman_ogm_packet->seqno); 956 orig_node->last_real_seqno, seqno);
914 orig_node->last_real_seqno = batman_ogm_packet->seqno; 957 orig_node->last_real_seqno = seqno;
915 } 958 }
916 959
917 ret = is_duplicate; 960 ret = is_duplicate;
918 961
919out: 962out:
920 spin_unlock_bh(&orig_node->ogm_cnt_lock); 963 spin_unlock_bh(&orig_node->ogm_cnt_lock);
921 orig_node_free_ref(orig_node); 964 batadv_orig_node_free_ref(orig_node);
922 return ret; 965 return ret;
923} 966}
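
Duplicate detection above keeps, per neighbor, a bitmap covering the last TQ_LOCAL_WINDOW_SIZE sequence numbers relative to last_real_seqno: batadv_test_bit() answers whether a seqno was already seen, and batadv_bit_get_packet() shifts the whole window forward when seq_diff moves past it. A reduced sketch of the membership test; the window size is assumed, and the kernel uses its generic bitmap helpers rather than this open-coded indexing:

#include <stdbool.h>
#include <stdint.h>

#define WINDOW	64	/* assumed BATADV_TQ_LOCAL_WINDOW_SIZE */

/* sketch: was 'seqno' already recorded relative to 'last_seqno'? */
static bool seen_before(const uint64_t bits[WINDOW / 64],
			uint32_t last_seqno, uint32_t seqno)
{
	int32_t diff = last_seqno - seqno;

	if (diff < 0 || diff >= WINDOW)
		return false;	/* outside the tracked window */

	return (bits[diff / 64] >> (diff % 64)) & 1;
}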
924 967
925static void bat_iv_ogm_process(const struct ethhdr *ethhdr, 968static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
926 struct batman_ogm_packet *batman_ogm_packet, 969 struct batadv_ogm_packet *batadv_ogm_packet,
927 const unsigned char *tt_buff, 970 const unsigned char *tt_buff,
928 struct hard_iface *if_incoming) 971 struct batadv_hard_iface *if_incoming)
929{ 972{
930 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 973 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
931 struct hard_iface *hard_iface; 974 struct batadv_hard_iface *hard_iface;
932 struct orig_node *orig_neigh_node, *orig_node; 975 struct batadv_orig_node *orig_neigh_node, *orig_node;
933 struct neigh_node *router = NULL, *router_router = NULL; 976 struct batadv_neigh_node *router = NULL, *router_router = NULL;
934 struct neigh_node *orig_neigh_router = NULL; 977 struct batadv_neigh_node *orig_neigh_router = NULL;
935 int has_directlink_flag; 978 int has_directlink_flag;
936 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 979 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
937 int is_broadcast = 0, is_bidirectional; 980 int is_broadcast = 0, is_bidirect;
938 bool is_single_hop_neigh = false; 981 bool is_single_hop_neigh = false;
939 bool is_from_best_next_hop = false; 982 bool is_from_best_next_hop = false;
940 int is_duplicate; 983 int is_duplicate, sameseq, simlar_ttl;
941 uint32_t if_incoming_seqno; 984 uint32_t if_incoming_seqno;
985 uint8_t *prev_sender;
942 986
943 /* Silently drop when the batman packet is actually not a 987 /* Silently drop when the batman packet is actually not a
944 * correct packet. 988 * correct packet.
@@ -948,49 +992,53 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
948 * it as an additional length. 992 * it as an additional length.
949 * 993 *
950 * TODO: A more sane solution would be to have a bit in the 994 * TODO: A more sane solution would be to have a bit in the
951 * batman_ogm_packet to detect whether the packet is the last 995 * batadv_ogm_packet to detect whether the packet is the last
952 * packet in an aggregation. Here we expect that the padding 996 * packet in an aggregation. Here we expect that the padding
953 * is always zero (or not 0x01) 997 * is always zero (or not 0x01)
954 */ 998 */
955 if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) 999 if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
956 return; 1000 return;
957 1001
958 /* could be changed by schedule_own_packet() */ 1002 /* could be changed by schedule_own_packet() */
959 if_incoming_seqno = atomic_read(&if_incoming->seqno); 1003 if_incoming_seqno = atomic_read(&if_incoming->seqno);
960 1004
961 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 1005 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
1006 has_directlink_flag = 1;
1007 else
1008 has_directlink_flag = 0;
962 1009
963 if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) 1010 if (batadv_compare_eth(ethhdr->h_source, batadv_ogm_packet->orig))
964 is_single_hop_neigh = true; 1011 is_single_hop_neigh = true;
965 1012
966 bat_dbg(DBG_BATMAN, bat_priv, 1013 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
967 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", 1014 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
968 ethhdr->h_source, if_incoming->net_dev->name, 1015 ethhdr->h_source, if_incoming->net_dev->name,
969 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, 1016 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
970 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, 1017 batadv_ogm_packet->prev_sender,
971 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, 1018 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
972 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, 1019 ntohs(batadv_ogm_packet->tt_crc),
973 batman_ogm_packet->header.ttl, 1020 batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
974 batman_ogm_packet->header.version, has_directlink_flag); 1021 batadv_ogm_packet->header.ttl,
1022 batadv_ogm_packet->header.version, has_directlink_flag);
975 1023
976 rcu_read_lock(); 1024 rcu_read_lock();
977 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 1025 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
978 if (hard_iface->if_status != IF_ACTIVE) 1026 if (hard_iface->if_status != BATADV_IF_ACTIVE)
979 continue; 1027 continue;
980 1028
981 if (hard_iface->soft_iface != if_incoming->soft_iface) 1029 if (hard_iface->soft_iface != if_incoming->soft_iface)
982 continue; 1030 continue;
983 1031
984 if (compare_eth(ethhdr->h_source, 1032 if (batadv_compare_eth(ethhdr->h_source,
985 hard_iface->net_dev->dev_addr)) 1033 hard_iface->net_dev->dev_addr))
986 is_my_addr = 1; 1034 is_my_addr = 1;
987 1035
988 if (compare_eth(batman_ogm_packet->orig, 1036 if (batadv_compare_eth(batadv_ogm_packet->orig,
989 hard_iface->net_dev->dev_addr)) 1037 hard_iface->net_dev->dev_addr))
990 is_my_orig = 1; 1038 is_my_orig = 1;
991 1039
992 if (compare_eth(batman_ogm_packet->prev_sender, 1040 if (batadv_compare_eth(batadv_ogm_packet->prev_sender,
993 hard_iface->net_dev->dev_addr)) 1041 hard_iface->net_dev->dev_addr))
994 is_my_oldorig = 1; 1042 is_my_oldorig = 1;
995 1043
996 if (is_broadcast_ether_addr(ethhdr->h_source)) 1044 if (is_broadcast_ether_addr(ethhdr->h_source))
@@ -998,268 +1046,278 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
998 } 1046 }
999 rcu_read_unlock(); 1047 rcu_read_unlock();
1000 1048
1001 if (batman_ogm_packet->header.version != COMPAT_VERSION) { 1049 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
1002 bat_dbg(DBG_BATMAN, bat_priv, 1050 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1003 "Drop packet: incompatible batman version (%i)\n", 1051 "Drop packet: incompatible batman version (%i)\n",
1004 batman_ogm_packet->header.version); 1052 batadv_ogm_packet->header.version);
1005 return; 1053 return;
1006 } 1054 }
1007 1055
1008 if (is_my_addr) { 1056 if (is_my_addr) {
1009 bat_dbg(DBG_BATMAN, bat_priv, 1057 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1010 "Drop packet: received my own broadcast (sender: %pM)\n", 1058 "Drop packet: received my own broadcast (sender: %pM)\n",
1011 ethhdr->h_source); 1059 ethhdr->h_source);
1012 return; 1060 return;
1013 } 1061 }
1014 1062
1015 if (is_broadcast) { 1063 if (is_broadcast) {
1016 bat_dbg(DBG_BATMAN, bat_priv, 1064 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1017 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", 1065 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
1018 ethhdr->h_source); 1066 ethhdr->h_source);
1019 return; 1067 return;
1020 } 1068 }
1021 1069
1022 if (is_my_orig) { 1070 if (is_my_orig) {
1023 unsigned long *word; 1071 unsigned long *word;
1024 int offset; 1072 int offset;
1073 int32_t bit_pos;
1074 int16_t if_num;
1075 uint8_t *weight;
1025 1076
1026 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); 1077 orig_neigh_node = batadv_get_orig_node(bat_priv,
1078 ethhdr->h_source);
1027 if (!orig_neigh_node) 1079 if (!orig_neigh_node)
1028 return; 1080 return;
1029 1081
1030 /* neighbor has to indicate direct link and it has to 1082 /* neighbor has to indicate direct link and it has to
1031 * come via the corresponding interface */ 1083 * come via the corresponding interface
1032 /* save packet seqno for bidirectional check */ 1084 * save packet seqno for bidirectional check
1085 */
1033 if (has_directlink_flag && 1086 if (has_directlink_flag &&
1034 compare_eth(if_incoming->net_dev->dev_addr, 1087 batadv_compare_eth(if_incoming->net_dev->dev_addr,
1035 batman_ogm_packet->orig)) { 1088 batadv_ogm_packet->orig)) {
1036 offset = if_incoming->if_num * NUM_WORDS; 1089 if_num = if_incoming->if_num;
1090 offset = if_num * BATADV_NUM_WORDS;
1037 1091
1038 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); 1092 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1039 word = &(orig_neigh_node->bcast_own[offset]); 1093 word = &(orig_neigh_node->bcast_own[offset]);
1040 bat_set_bit(word, 1094 bit_pos = if_incoming_seqno - 2;
1041 if_incoming_seqno - 1095 bit_pos -= ntohl(batadv_ogm_packet->seqno);
1042 batman_ogm_packet->seqno - 2); 1096 batadv_set_bit(word, bit_pos);
1043 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 1097 weight = &orig_neigh_node->bcast_own_sum[if_num];
1044 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); 1098 *weight = bitmap_weight(word,
1099 BATADV_TQ_LOCAL_WINDOW_SIZE);
1045 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 1100 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1046 } 1101 }
1047 1102
1048 bat_dbg(DBG_BATMAN, bat_priv, 1103 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1049 "Drop packet: originator packet from myself (via neighbor)\n"); 1104 "Drop packet: originator packet from myself (via neighbor)\n");
1050 orig_node_free_ref(orig_neigh_node); 1105 batadv_orig_node_free_ref(orig_neigh_node);
1051 return; 1106 return;
1052 } 1107 }
1053 1108
1054 if (is_my_oldorig) { 1109 if (is_my_oldorig) {
1055 bat_dbg(DBG_BATMAN, bat_priv, 1110 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1056 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", 1111 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
1057 ethhdr->h_source); 1112 ethhdr->h_source);
1058 return; 1113 return;
1059 } 1114 }
1060 1115
1061 if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { 1116 if (batadv_ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
1062 bat_dbg(DBG_BATMAN, bat_priv, 1117 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1063 "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", 1118 "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
1064 ethhdr->h_source); 1119 ethhdr->h_source);
1065 return; 1120 return;
1066 } 1121 }
1067 1122
1068 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); 1123 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
1069 if (!orig_node) 1124 if (!orig_node)
1070 return; 1125 return;
1071 1126
1072 is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet, 1127 is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
1073 if_incoming); 1128 if_incoming);
1074 1129
1075 if (is_duplicate == -1) { 1130 if (is_duplicate == -1) {
1076 bat_dbg(DBG_BATMAN, bat_priv, 1131 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1077 "Drop packet: packet within seqno protection time (sender: %pM)\n", 1132 "Drop packet: packet within seqno protection time (sender: %pM)\n",
1078 ethhdr->h_source); 1133 ethhdr->h_source);
1079 goto out; 1134 goto out;
1080 } 1135 }
1081 1136
1082 if (batman_ogm_packet->tq == 0) { 1137 if (batadv_ogm_packet->tq == 0) {
1083 bat_dbg(DBG_BATMAN, bat_priv, 1138 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1084 "Drop packet: originator packet with tq equal 0\n"); 1139 "Drop packet: originator packet with tq equal 0\n");
1085 goto out; 1140 goto out;
1086 } 1141 }
1087 1142
1088 router = orig_node_get_router(orig_node); 1143 router = batadv_orig_node_get_router(orig_node);
1089 if (router) 1144 if (router)
1090 router_router = orig_node_get_router(router->orig_node); 1145 router_router = batadv_orig_node_get_router(router->orig_node);
1091 1146
1092 if ((router && router->tq_avg != 0) && 1147 if ((router && router->tq_avg != 0) &&
1093 (compare_eth(router->addr, ethhdr->h_source))) 1148 (batadv_compare_eth(router->addr, ethhdr->h_source)))
1094 is_from_best_next_hop = true; 1149 is_from_best_next_hop = true;
1095 1150
1151 prev_sender = batadv_ogm_packet->prev_sender;
1096 /* avoid temporary routing loops */ 1152 /* avoid temporary routing loops */
1097 if (router && router_router && 1153 if (router && router_router &&
1098 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && 1154 (batadv_compare_eth(router->addr, prev_sender)) &&
1099 !(compare_eth(batman_ogm_packet->orig, 1155 !(batadv_compare_eth(batadv_ogm_packet->orig, prev_sender)) &&
1100 batman_ogm_packet->prev_sender)) && 1156 (batadv_compare_eth(router->addr, router_router->addr))) {
1101 (compare_eth(router->addr, router_router->addr))) { 1157 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1102 bat_dbg(DBG_BATMAN, bat_priv, 1158 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
1103 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", 1159 ethhdr->h_source);
1104 ethhdr->h_source);
1105 goto out; 1160 goto out;
1106 } 1161 }
1107 1162
1108 /* if sender is a direct neighbor the sender mac equals 1163 /* if sender is a direct neighbor the sender mac equals
1109 * originator mac */ 1164 * originator mac
1165 */
1110 orig_neigh_node = (is_single_hop_neigh ? 1166 orig_neigh_node = (is_single_hop_neigh ?
1111 orig_node : 1167 orig_node :
1112 get_orig_node(bat_priv, ethhdr->h_source)); 1168 batadv_get_orig_node(bat_priv, ethhdr->h_source));
1113 if (!orig_neigh_node) 1169 if (!orig_neigh_node)
1114 goto out; 1170 goto out;
1115 1171
1116 orig_neigh_router = orig_node_get_router(orig_neigh_node); 1172 orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node);
1117 1173
1118 /* drop packet if sender is not a direct neighbor and if we 1174 /* drop packet if sender is not a direct neighbor and if we
1119 * don't route towards it */ 1175 * don't route towards it
1176 */
1120 if (!is_single_hop_neigh && (!orig_neigh_router)) { 1177 if (!is_single_hop_neigh && (!orig_neigh_router)) {
1121 bat_dbg(DBG_BATMAN, bat_priv, 1178 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1122 "Drop packet: OGM via unknown neighbor!\n"); 1179 "Drop packet: OGM via unknown neighbor!\n");
1123 goto out_neigh; 1180 goto out_neigh;
1124 } 1181 }
1125 1182
1126 is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node, 1183 is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
1127 batman_ogm_packet, if_incoming); 1184 batadv_ogm_packet, if_incoming);
1128 1185
1129 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); 1186 batadv_bonding_save_primary(orig_node, orig_neigh_node,
1187 batadv_ogm_packet);
1130 1188
1131 /* update ranking if it is not a duplicate or has the same 1189 /* update ranking if it is not a duplicate or has the same
1132 * seqno and similar ttl as the non-duplicate */ 1190 * seqno and similar ttl as the non-duplicate
1133 if (is_bidirectional && 1191 */
1134 (!is_duplicate || 1192 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
1135 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && 1193 simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
1136 (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl)))) 1194 if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
1137 bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1195 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
1138 batman_ogm_packet, if_incoming, 1196 batadv_ogm_packet, if_incoming,
1139 tt_buff, is_duplicate); 1197 tt_buff, is_duplicate);
1140 1198
1141 /* is single hop (direct) neighbor */ 1199 /* is single hop (direct) neighbor */
1142 if (is_single_hop_neigh) { 1200 if (is_single_hop_neigh) {
1143 1201
1144 /* mark direct link on incoming interface */ 1202 /* mark direct link on incoming interface */
1145 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1203 batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
1146 is_single_hop_neigh, is_from_best_next_hop, 1204 is_single_hop_neigh,
1147 if_incoming); 1205 is_from_best_next_hop, if_incoming);
1148 1206
1149 bat_dbg(DBG_BATMAN, bat_priv, 1207 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1150 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); 1208 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
1151 goto out_neigh; 1209 goto out_neigh;
1152 } 1210 }
1153 1211
1154 /* multihop originator */ 1212 /* multihop originator */
1155 if (!is_bidirectional) { 1213 if (!is_bidirect) {
1156 bat_dbg(DBG_BATMAN, bat_priv, 1214 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1157 "Drop packet: not received via bidirectional link\n"); 1215 "Drop packet: not received via bidirectional link\n");
1158 goto out_neigh; 1216 goto out_neigh;
1159 } 1217 }
1160 1218
1161 if (is_duplicate) { 1219 if (is_duplicate) {
1162 bat_dbg(DBG_BATMAN, bat_priv, 1220 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1163 "Drop packet: duplicate packet received\n"); 1221 "Drop packet: duplicate packet received\n");
1164 goto out_neigh; 1222 goto out_neigh;
1165 } 1223 }
1166 1224
1167 bat_dbg(DBG_BATMAN, bat_priv, 1225 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1168 "Forwarding packet: rebroadcast originator packet\n"); 1226 "Forwarding packet: rebroadcast originator packet\n");
1169 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1227 batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
1170 is_single_hop_neigh, is_from_best_next_hop, 1228 is_single_hop_neigh, is_from_best_next_hop,
1171 if_incoming); 1229 if_incoming);
1172 1230
1173out_neigh: 1231out_neigh:
1174 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1232 if ((orig_neigh_node) && (!is_single_hop_neigh))
1175 orig_node_free_ref(orig_neigh_node); 1233 batadv_orig_node_free_ref(orig_neigh_node);
1176out: 1234out:
1177 if (router) 1235 if (router)
1178 neigh_node_free_ref(router); 1236 batadv_neigh_node_free_ref(router);
1179 if (router_router) 1237 if (router_router)
1180 neigh_node_free_ref(router_router); 1238 batadv_neigh_node_free_ref(router_router);
1181 if (orig_neigh_router) 1239 if (orig_neigh_router)
1182 neigh_node_free_ref(orig_neigh_router); 1240 batadv_neigh_node_free_ref(orig_neigh_router);
1183 1241
1184 orig_node_free_ref(orig_node); 1242 batadv_orig_node_free_ref(orig_node);
1185} 1243}
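
One subtle drop rule in the processing above is the temporary-routing-loop check: a rebroadcast is discarded when the previous sender is simultaneously our best next hop toward the originator and its own next hop there, yet is not the originator itself. A standalone sketch of the predicate with the router pointers reduced to MAC addresses; the helper name is illustrative:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* sketch: would rebroadcasting this OGM risk a temporary routing loop? */
static bool would_loop(const uint8_t *router_addr,
		       const uint8_t *router_router_addr,
		       const uint8_t *prev_sender, const uint8_t *orig)
{
	if (!router_addr || !router_router_addr)
		return false;

	return memcmp(router_addr, prev_sender, ETH_ALEN) == 0 &&
	       memcmp(orig, prev_sender, ETH_ALEN) != 0 &&
	       memcmp(router_addr, router_router_addr, ETH_ALEN) == 0;
}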
1186 1244
1187static int bat_iv_ogm_receive(struct sk_buff *skb, 1245static int batadv_iv_ogm_receive(struct sk_buff *skb,
1188 struct hard_iface *if_incoming) 1246 struct batadv_hard_iface *if_incoming)
1189{ 1247{
1190 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 1248 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
1191 struct batman_ogm_packet *batman_ogm_packet; 1249 struct batadv_ogm_packet *batadv_ogm_packet;
1192 struct ethhdr *ethhdr; 1250 struct ethhdr *ethhdr;
1193 int buff_pos = 0, packet_len; 1251 int buff_pos = 0, packet_len;
1194 unsigned char *tt_buff, *packet_buff; 1252 unsigned char *tt_buff, *packet_buff;
1195 bool ret; 1253 bool ret;
1196 1254
1197 ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); 1255 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
1198 if (!ret) 1256 if (!ret)
1199 return NET_RX_DROP; 1257 return NET_RX_DROP;
1200 1258
1201 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface 1259 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
1202 * that does not have B.A.T.M.A.N. IV enabled ? 1260 * that does not have B.A.T.M.A.N. IV enabled ?
1203 */ 1261 */
1204 if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) 1262 if (bat_priv->bat_algo_ops->bat_ogm_emit != batadv_iv_ogm_emit)
1205 return NET_RX_DROP; 1263 return NET_RX_DROP;
1206 1264
1265 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
1266 batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
1267 skb->len + ETH_HLEN);
1268
1207 packet_len = skb_headlen(skb); 1269 packet_len = skb_headlen(skb);
1208 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1270 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1209 packet_buff = skb->data; 1271 packet_buff = skb->data;
1210 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 1272 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
1211 1273
1212 /* unpack the aggregated packets and process them one by one */ 1274 /* unpack the aggregated packets and process them one by one */
1213 do { 1275 do {
1214 /* network to host order for our 32bit seqno and the 1276 tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
1215 orig_interval */
1216 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1217 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1218
1219 tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
1220 1277
1221 bat_iv_ogm_process(ethhdr, batman_ogm_packet, 1278 batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
1222 tt_buff, if_incoming); 1279 if_incoming);
1223 1280
1224 buff_pos += BATMAN_OGM_HLEN + 1281 buff_pos += BATADV_OGM_HLEN;
1225 tt_len(batman_ogm_packet->tt_num_changes); 1282 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
1226 1283
1227 batman_ogm_packet = (struct batman_ogm_packet *) 1284 batadv_ogm_packet = (struct batadv_ogm_packet *)
1228 (packet_buff + buff_pos); 1285 (packet_buff + buff_pos);
1229 } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, 1286 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
1230 batman_ogm_packet->tt_num_changes)); 1287 batadv_ogm_packet->tt_num_changes));
1231 1288
1232 kfree_skb(skb); 1289 kfree_skb(skb);
1233 return NET_RX_SUCCESS; 1290 return NET_RX_SUCCESS;
1234} 1291}
1235 1292
1236static struct bat_algo_ops batman_iv __read_mostly = { 1293static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
1237 .name = "BATMAN IV", 1294 .name = "BATMAN_IV",
1238 .bat_iface_enable = bat_iv_ogm_iface_enable, 1295 .bat_iface_enable = batadv_iv_ogm_iface_enable,
1239 .bat_iface_disable = bat_iv_ogm_iface_disable, 1296 .bat_iface_disable = batadv_iv_ogm_iface_disable,
1240 .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, 1297 .bat_iface_update_mac = batadv_iv_ogm_iface_update_mac,
1241 .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, 1298 .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
1242 .bat_ogm_schedule = bat_iv_ogm_schedule, 1299 .bat_ogm_schedule = batadv_iv_ogm_schedule,
1243 .bat_ogm_emit = bat_iv_ogm_emit, 1300 .bat_ogm_emit = batadv_iv_ogm_emit,
1244}; 1301};
1245 1302
1246int __init bat_iv_init(void) 1303int __init batadv_iv_init(void)
1247{ 1304{
1248 int ret; 1305 int ret;
1249 1306
1250 /* batman originator packet */ 1307 /* batman originator packet */
1251 ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); 1308 ret = batadv_recv_handler_register(BATADV_IV_OGM,
1309 batadv_iv_ogm_receive);
1252 if (ret < 0) 1310 if (ret < 0)
1253 goto out; 1311 goto out;
1254 1312
1255 ret = bat_algo_register(&batman_iv); 1313 ret = batadv_algo_register(&batadv_batman_iv);
1256 if (ret < 0) 1314 if (ret < 0)
1257 goto handler_unregister; 1315 goto handler_unregister;
1258 1316
1259 goto out; 1317 goto out;
1260 1318
1261handler_unregister: 1319handler_unregister:
1262 recv_handler_unregister(BAT_IV_OGM); 1320 batadv_recv_handler_unregister(BATADV_IV_OGM);
1263out: 1321out:
1264 return ret; 1322 return ret;
1265} 1323}
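A note on the receive loop above: batadv_iv_ogm_receive() walks a buffer of aggregated OGMs by plain offset arithmetic, where each OGM contributes BATADV_OGM_HLEN header bytes plus the translation-table changes appended behind it. The following user-space sketch models only that walk; the header layout, the record sizes, and the extra bounds check before reading the next header are simplifications for the model, not the real batman-adv wire format.

/* Standalone model of the aggregated-OGM walk; sizes and the struct
 * layout are stand-ins, not the real batman-adv wire format.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OGM_HLEN	8	/* stand-in for BATADV_OGM_HLEN */
#define TT_CHANGE_LEN	4	/* stand-in for one tt change record */

struct ogm_hdr {		/* stand-in for struct batadv_ogm_packet */
	uint8_t tt_num_changes;
	uint8_t pad[OGM_HLEN - 1];
};

static size_t tt_len(uint8_t tt_num_changes)
{
	return (size_t)tt_num_changes * TT_CHANGE_LEN;
}

/* mirrors the batadv_iv_ogm_aggr_packet() check: does one more OGM,
 * header plus its announced tt changes, still fit in the buffer?
 */
static bool aggr_packet(size_t buff_pos, size_t packet_len,
			uint8_t tt_num_changes)
{
	return buff_pos + OGM_HLEN + tt_len(tt_num_changes) <= packet_len;
}

int main(void)
{
	/* two aggregated OGMs: one tt change, then none */
	uint8_t buff[2 * OGM_HLEN + TT_CHANGE_LEN] = { 0 };
	size_t buff_pos = 0, packet_len = sizeof(buff);
	struct ogm_hdr *ogm = (struct ogm_hdr *)buff;

	ogm->tt_num_changes = 1;

	do {
		printf("OGM at offset %zu, %u tt change(s)\n",
		       buff_pos, ogm->tt_num_changes);

		buff_pos += OGM_HLEN;
		buff_pos += tt_len(ogm->tt_num_changes);

		/* stop before reading past the end of the buffer */
		if (buff_pos + OGM_HLEN > packet_len)
			break;

		ogm = (struct ogm_hdr *)(buff + buff_pos);
	} while (aggr_packet(buff_pos, packet_len, ogm->tt_num_changes));

	return 0;
}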
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
deleted file mode 100644
index 5bc7b66d32dc..000000000000
--- a/net/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,735 +0,0 @@
1/*
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "bat_sysfs.h"
24#include "translation-table.h"
25#include "originator.h"
26#include "hard-interface.h"
27#include "gateway_common.h"
28#include "gateway_client.h"
29#include "vis.h"
30
31static struct net_device *kobj_to_netdev(struct kobject *obj)
32{
33 struct device *dev = container_of(obj->parent, struct device, kobj);
34 return to_net_dev(dev);
35}
36
37static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
38{
39 struct net_device *net_dev = kobj_to_netdev(obj);
40 return netdev_priv(net_dev);
41}
42
43#define UEV_TYPE_VAR "BATTYPE="
44#define UEV_ACTION_VAR "BATACTION="
45#define UEV_DATA_VAR "BATDATA="
46
47static char *uev_action_str[] = {
48 "add",
49 "del",
50 "change"
51};
52
53static char *uev_type_str[] = {
54 "gw"
55};
56
57/* Use this, if you have customized show and store functions */
58#define BAT_ATTR(_name, _mode, _show, _store) \
59struct bat_attribute bat_attr_##_name = { \
60 .attr = {.name = __stringify(_name), \
61 .mode = _mode }, \
62 .show = _show, \
63 .store = _store, \
64};
65
66#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
67ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
68 char *buff, size_t count) \
69{ \
70 struct net_device *net_dev = kobj_to_netdev(kobj); \
71 struct bat_priv *bat_priv = netdev_priv(net_dev); \
72 return __store_bool_attr(buff, count, _post_func, attr, \
73 &bat_priv->_name, net_dev); \
74}
75
76#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
77ssize_t show_##_name(struct kobject *kobj, \
78 struct attribute *attr, char *buff) \
79{ \
80 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
81 return sprintf(buff, "%s\n", \
82 atomic_read(&bat_priv->_name) == 0 ? \
83 "disabled" : "enabled"); \
84} \
85
86/* Use this, if you are going to turn a [name] in the soft-interface
87 * (bat_priv) on or off */
88#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
89 static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
90 static BAT_ATTR_SIF_SHOW_BOOL(_name) \
91 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
92
93
94#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
95ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
96 char *buff, size_t count) \
97{ \
98 struct net_device *net_dev = kobj_to_netdev(kobj); \
99 struct bat_priv *bat_priv = netdev_priv(net_dev); \
100 return __store_uint_attr(buff, count, _min, _max, _post_func, \
101 attr, &bat_priv->_name, net_dev); \
102}
103
104#define BAT_ATTR_SIF_SHOW_UINT(_name) \
105ssize_t show_##_name(struct kobject *kobj, \
106 struct attribute *attr, char *buff) \
107{ \
108 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
109 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
110} \
111
112/* Use this, if you are going to set [name] in the soft-interface
113 * (bat_priv) to an unsigned integer value */
114#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
115 static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
116 static BAT_ATTR_SIF_SHOW_UINT(_name) \
117 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
118
119
120#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
121ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
122 char *buff, size_t count) \
123{ \
124 struct net_device *net_dev = kobj_to_netdev(kobj); \
125 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
126 ssize_t length; \
127 \
128 if (!hard_iface) \
129 return 0; \
130 \
131 length = __store_uint_attr(buff, count, _min, _max, _post_func, \
132 attr, &hard_iface->_name, net_dev); \
133 \
134 hardif_free_ref(hard_iface); \
135 return length; \
136}
137
138#define BAT_ATTR_HIF_SHOW_UINT(_name) \
139ssize_t show_##_name(struct kobject *kobj, \
140 struct attribute *attr, char *buff) \
141{ \
142 struct net_device *net_dev = kobj_to_netdev(kobj); \
143 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
144 ssize_t length; \
145 \
146 if (!hard_iface) \
147 return 0; \
148 \
149 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
150 \
151 hardif_free_ref(hard_iface); \
152 return length; \
153}
154
155/* Use this, if you are going to set [name] in hard_iface to an
156 * unsigned integer value*/
157#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
158 static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
159 static BAT_ATTR_HIF_SHOW_UINT(_name) \
160 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
161
162
163static int store_bool_attr(char *buff, size_t count,
164 struct net_device *net_dev,
165 const char *attr_name, atomic_t *attr)
166{
167 int enabled = -1;
168
169 if (buff[count - 1] == '\n')
170 buff[count - 1] = '\0';
171
172 if ((strncmp(buff, "1", 2) == 0) ||
173 (strncmp(buff, "enable", 7) == 0) ||
174 (strncmp(buff, "enabled", 8) == 0))
175 enabled = 1;
176
177 if ((strncmp(buff, "0", 2) == 0) ||
178 (strncmp(buff, "disable", 8) == 0) ||
179 (strncmp(buff, "disabled", 9) == 0))
180 enabled = 0;
181
182 if (enabled < 0) {
183 bat_info(net_dev,
184 "%s: Invalid parameter received: %s\n",
185 attr_name, buff);
186 return -EINVAL;
187 }
188
189 if (atomic_read(attr) == enabled)
190 return count;
191
192 bat_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
193 atomic_read(attr) == 1 ? "enabled" : "disabled",
194 enabled == 1 ? "enabled" : "disabled");
195
196 atomic_set(attr, (unsigned int)enabled);
197 return count;
198}
199
200static inline ssize_t __store_bool_attr(char *buff, size_t count,
201 void (*post_func)(struct net_device *),
202 struct attribute *attr,
203 atomic_t *attr_store, struct net_device *net_dev)
204{
205 int ret;
206
207 ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
208 if (post_func && ret)
209 post_func(net_dev);
210
211 return ret;
212}
213
214static int store_uint_attr(const char *buff, size_t count,
215 struct net_device *net_dev, const char *attr_name,
216 unsigned int min, unsigned int max, atomic_t *attr)
217{
218 unsigned long uint_val;
219 int ret;
220
221 ret = kstrtoul(buff, 10, &uint_val);
222 if (ret) {
223 bat_info(net_dev,
224 "%s: Invalid parameter received: %s\n",
225 attr_name, buff);
226 return -EINVAL;
227 }
228
229 if (uint_val < min) {
230 bat_info(net_dev, "%s: Value is too small: %lu min: %u\n",
231 attr_name, uint_val, min);
232 return -EINVAL;
233 }
234
235 if (uint_val > max) {
236 bat_info(net_dev, "%s: Value is too big: %lu max: %u\n",
237 attr_name, uint_val, max);
238 return -EINVAL;
239 }
240
241 if (atomic_read(attr) == uint_val)
242 return count;
243
244 bat_info(net_dev, "%s: Changing from: %i to: %lu\n",
245 attr_name, atomic_read(attr), uint_val);
246
247 atomic_set(attr, uint_val);
248 return count;
249}
250
251static inline ssize_t __store_uint_attr(const char *buff, size_t count,
252 int min, int max,
253 void (*post_func)(struct net_device *),
254 const struct attribute *attr,
255 atomic_t *attr_store, struct net_device *net_dev)
256{
257 int ret;
258
259 ret = store_uint_attr(buff, count, net_dev, attr->name,
260 min, max, attr_store);
261 if (post_func && ret)
262 post_func(net_dev);
263
264 return ret;
265}
266
267static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
268 char *buff)
269{
270 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
271 int vis_mode = atomic_read(&bat_priv->vis_mode);
272
273 return sprintf(buff, "%s\n",
274 vis_mode == VIS_TYPE_CLIENT_UPDATE ?
275 "client" : "server");
276}
277
278static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
279 char *buff, size_t count)
280{
281 struct net_device *net_dev = kobj_to_netdev(kobj);
282 struct bat_priv *bat_priv = netdev_priv(net_dev);
283 unsigned long val;
284 int ret, vis_mode_tmp = -1;
285
286 ret = kstrtoul(buff, 10, &val);
287
288 if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
289 (strncmp(buff, "client", 6) == 0) ||
290 (strncmp(buff, "off", 3) == 0))
291 vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
292
293 if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
294 (strncmp(buff, "server", 6) == 0))
295 vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
296
297 if (vis_mode_tmp < 0) {
298 if (buff[count - 1] == '\n')
299 buff[count - 1] = '\0';
300
301 bat_info(net_dev,
302 "Invalid parameter for 'vis mode' setting received: %s\n",
303 buff);
304 return -EINVAL;
305 }
306
307 if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
308 return count;
309
310 bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
311 atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
312 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
313 "client" : "server");
314
315 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
316 return count;
317}
318
319static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
320 char *buff)
321{
322 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
323 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
324}
325
326static void post_gw_deselect(struct net_device *net_dev)
327{
328 struct bat_priv *bat_priv = netdev_priv(net_dev);
329 gw_deselect(bat_priv);
330}
331
332static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr,
333 char *buff)
334{
335 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
336 int bytes_written;
337
338 switch (atomic_read(&bat_priv->gw_mode)) {
339 case GW_MODE_CLIENT:
340 bytes_written = sprintf(buff, "%s\n", GW_MODE_CLIENT_NAME);
341 break;
342 case GW_MODE_SERVER:
343 bytes_written = sprintf(buff, "%s\n", GW_MODE_SERVER_NAME);
344 break;
345 default:
346 bytes_written = sprintf(buff, "%s\n", GW_MODE_OFF_NAME);
347 break;
348 }
349
350 return bytes_written;
351}
352
353static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
354 char *buff, size_t count)
355{
356 struct net_device *net_dev = kobj_to_netdev(kobj);
357 struct bat_priv *bat_priv = netdev_priv(net_dev);
358 char *curr_gw_mode_str;
359 int gw_mode_tmp = -1;
360
361 if (buff[count - 1] == '\n')
362 buff[count - 1] = '\0';
363
364 if (strncmp(buff, GW_MODE_OFF_NAME, strlen(GW_MODE_OFF_NAME)) == 0)
365 gw_mode_tmp = GW_MODE_OFF;
366
367 if (strncmp(buff, GW_MODE_CLIENT_NAME,
368 strlen(GW_MODE_CLIENT_NAME)) == 0)
369 gw_mode_tmp = GW_MODE_CLIENT;
370
371 if (strncmp(buff, GW_MODE_SERVER_NAME,
372 strlen(GW_MODE_SERVER_NAME)) == 0)
373 gw_mode_tmp = GW_MODE_SERVER;
374
375 if (gw_mode_tmp < 0) {
376 bat_info(net_dev,
377 "Invalid parameter for 'gw mode' setting received: %s\n",
378 buff);
379 return -EINVAL;
380 }
381
382 if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
383 return count;
384
385 switch (atomic_read(&bat_priv->gw_mode)) {
386 case GW_MODE_CLIENT:
387 curr_gw_mode_str = GW_MODE_CLIENT_NAME;
388 break;
389 case GW_MODE_SERVER:
390 curr_gw_mode_str = GW_MODE_SERVER_NAME;
391 break;
392 default:
393 curr_gw_mode_str = GW_MODE_OFF_NAME;
394 break;
395 }
396
397 bat_info(net_dev, "Changing gw mode from: %s to: %s\n",
398 curr_gw_mode_str, buff);
399
400 gw_deselect(bat_priv);
401 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
402 return count;
403}
404
405static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr,
406 char *buff)
407{
408 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
409 int down, up;
410 int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
411
412 gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
413 return sprintf(buff, "%i%s/%i%s\n",
414 (down > 2048 ? down / 1024 : down),
415 (down > 2048 ? "MBit" : "KBit"),
416 (up > 2048 ? up / 1024 : up),
417 (up > 2048 ? "MBit" : "KBit"));
418}
419
420static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
421 char *buff, size_t count)
422{
423 struct net_device *net_dev = kobj_to_netdev(kobj);
424
425 if (buff[count - 1] == '\n')
426 buff[count - 1] = '\0';
427
428 return gw_bandwidth_set(net_dev, buff, count);
429}
430
431BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
432BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
433#ifdef CONFIG_BATMAN_ADV_BLA
434BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
435#endif
436BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
437BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
438static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
439static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
440static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
441BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
442BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
443BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
444 post_gw_deselect);
445static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
446 store_gw_bwidth);
447#ifdef CONFIG_BATMAN_ADV_DEBUG
448BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
449#endif
450
451static struct bat_attribute *mesh_attrs[] = {
452 &bat_attr_aggregated_ogms,
453 &bat_attr_bonding,
454#ifdef CONFIG_BATMAN_ADV_BLA
455 &bat_attr_bridge_loop_avoidance,
456#endif
457 &bat_attr_fragmentation,
458 &bat_attr_ap_isolation,
459 &bat_attr_vis_mode,
460 &bat_attr_routing_algo,
461 &bat_attr_gw_mode,
462 &bat_attr_orig_interval,
463 &bat_attr_hop_penalty,
464 &bat_attr_gw_sel_class,
465 &bat_attr_gw_bandwidth,
466#ifdef CONFIG_BATMAN_ADV_DEBUG
467 &bat_attr_log_level,
468#endif
469 NULL,
470};
471
472int sysfs_add_meshif(struct net_device *dev)
473{
474 struct kobject *batif_kobject = &dev->dev.kobj;
475 struct bat_priv *bat_priv = netdev_priv(dev);
476 struct bat_attribute **bat_attr;
477 int err;
478
479 bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
480 batif_kobject);
481 if (!bat_priv->mesh_obj) {
482 bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
483 SYSFS_IF_MESH_SUBDIR);
484 goto out;
485 }
486
487 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
488 err = sysfs_create_file(bat_priv->mesh_obj,
489 &((*bat_attr)->attr));
490 if (err) {
491 bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
492 dev->name, SYSFS_IF_MESH_SUBDIR,
493 ((*bat_attr)->attr).name);
494 goto rem_attr;
495 }
496 }
497
498 return 0;
499
500rem_attr:
501 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
502 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
503
504 kobject_put(bat_priv->mesh_obj);
505 bat_priv->mesh_obj = NULL;
506out:
507 return -ENOMEM;
508}
509
510void sysfs_del_meshif(struct net_device *dev)
511{
512 struct bat_priv *bat_priv = netdev_priv(dev);
513 struct bat_attribute **bat_attr;
514
515 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
516 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
517
518 kobject_put(bat_priv->mesh_obj);
519 bat_priv->mesh_obj = NULL;
520}
521
522static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
523 char *buff)
524{
525 struct net_device *net_dev = kobj_to_netdev(kobj);
526 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
527 ssize_t length;
528
529 if (!hard_iface)
530 return 0;
531
532 length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
533 "none" : hard_iface->soft_iface->name);
534
535 hardif_free_ref(hard_iface);
536
537 return length;
538}
539
540static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
541 char *buff, size_t count)
542{
543 struct net_device *net_dev = kobj_to_netdev(kobj);
544 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
545 int status_tmp = -1;
546 int ret = count;
547
548 if (!hard_iface)
549 return count;
550
551 if (buff[count - 1] == '\n')
552 buff[count - 1] = '\0';
553
554 if (strlen(buff) >= IFNAMSIZ) {
555 pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
556 buff);
557 hardif_free_ref(hard_iface);
558 return -EINVAL;
559 }
560
561 if (strncmp(buff, "none", 4) == 0)
562 status_tmp = IF_NOT_IN_USE;
563 else
564 status_tmp = IF_I_WANT_YOU;
565
566 if (hard_iface->if_status == status_tmp)
567 goto out;
568
569 if ((hard_iface->soft_iface) &&
570 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
571 goto out;
572
573 if (!rtnl_trylock()) {
574 ret = -ERESTARTSYS;
575 goto out;
576 }
577
578 if (status_tmp == IF_NOT_IN_USE) {
579 hardif_disable_interface(hard_iface);
580 goto unlock;
581 }
582
583 /* if the interface already is in use */
584 if (hard_iface->if_status != IF_NOT_IN_USE)
585 hardif_disable_interface(hard_iface);
586
587 ret = hardif_enable_interface(hard_iface, buff);
588
589unlock:
590 rtnl_unlock();
591out:
592 hardif_free_ref(hard_iface);
593 return ret;
594}
595
596static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
597 char *buff)
598{
599 struct net_device *net_dev = kobj_to_netdev(kobj);
600 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
601 ssize_t length;
602
603 if (!hard_iface)
604 return 0;
605
606 switch (hard_iface->if_status) {
607 case IF_TO_BE_REMOVED:
608 length = sprintf(buff, "disabling\n");
609 break;
610 case IF_INACTIVE:
611 length = sprintf(buff, "inactive\n");
612 break;
613 case IF_ACTIVE:
614 length = sprintf(buff, "active\n");
615 break;
616 case IF_TO_BE_ACTIVATED:
617 length = sprintf(buff, "enabling\n");
618 break;
619 case IF_NOT_IN_USE:
620 default:
621 length = sprintf(buff, "not in use\n");
622 break;
623 }
624
625 hardif_free_ref(hard_iface);
626
627 return length;
628}
629
630static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
631 show_mesh_iface, store_mesh_iface);
632static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
633
634static struct bat_attribute *batman_attrs[] = {
635 &bat_attr_mesh_iface,
636 &bat_attr_iface_status,
637 NULL,
638};
639
640int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
641{
642 struct kobject *hardif_kobject = &dev->dev.kobj;
643 struct bat_attribute **bat_attr;
644 int err;
645
646 *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
647 hardif_kobject);
648
649 if (!*hardif_obj) {
650 bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
651 SYSFS_IF_BAT_SUBDIR);
652 goto out;
653 }
654
655 for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
656 err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
657 if (err) {
658 bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
659 dev->name, SYSFS_IF_BAT_SUBDIR,
660 ((*bat_attr)->attr).name);
661 goto rem_attr;
662 }
663 }
664
665 return 0;
666
667rem_attr:
668 for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
669 sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
670out:
671 return -ENOMEM;
672}
673
674void sysfs_del_hardif(struct kobject **hardif_obj)
675{
676 kobject_put(*hardif_obj);
677 *hardif_obj = NULL;
678}
679
680int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
681 enum uev_action action, const char *data)
682{
683 int ret = -1;
684 struct hard_iface *primary_if = NULL;
685 struct kobject *bat_kobj;
686 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
687
688 primary_if = primary_if_get_selected(bat_priv);
689 if (!primary_if)
690 goto out;
691
692 bat_kobj = &primary_if->soft_iface->dev.kobj;
693
694 uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
695 strlen(uev_type_str[type]) + 1,
696 GFP_ATOMIC);
697 if (!uevent_env[0])
698 goto out;
699
700 sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
701
702 uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
703 strlen(uev_action_str[action]) + 1,
704 GFP_ATOMIC);
705 if (!uevent_env[1])
706 goto out;
707
708 sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
709
710 /* If the event is DEL, ignore the data field */
711 if (action != UEV_DEL) {
712 uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
713 strlen(data) + 1, GFP_ATOMIC);
714 if (!uevent_env[2])
715 goto out;
716
717 sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
718 }
719
720 ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
721out:
722 kfree(uevent_env[0]);
723 kfree(uevent_env[1]);
724 kfree(uevent_env[2]);
725
726 if (primary_if)
727 hardif_free_ref(primary_if);
728
729 if (ret)
730 bat_dbg(DBG_BATMAN, bat_priv,
731 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
732 uev_type_str[type], uev_action_str[action],
733 (action == UEV_DEL ? "NULL" : data), ret);
734 return ret;
735}
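The file removed above generated its sysfs handlers with token-pasting macros: BAT_ATTR_SIF_BOOL expands into a store function, a show function and a bat_attribute struct per setting, and mesh_attrs[] then registers them in a loop. Here is a minimal user-space model of that pattern; the names and structs are illustrative, and none of the kernel's kobject plumbing is included.

/* Minimal model of the BAT_ATTR_SIF_BOOL pattern: one macro expands to a
 * show function, a store function and an attribute struct per setting.
 * Everything here is illustrative; it is not the kernel sysfs API.
 */
#include <stdio.h>
#include <string.h>

struct mesh_priv {		/* stand-in for struct bat_priv */
	int bonding;
	int fragmentation;
};

struct mesh_attr {		/* stand-in for struct bat_attribute */
	const char *name;
	int (*show)(struct mesh_priv *priv, char *buff);
	int (*store)(struct mesh_priv *priv, const char *buff);
};

#define MESH_ATTR_BOOL(_name)						\
static int show_##_name(struct mesh_priv *priv, char *buff)		\
{									\
	return sprintf(buff, "%s\n",					\
		       priv->_name ? "enabled" : "disabled");		\
}									\
static int store_##_name(struct mesh_priv *priv, const char *buff)	\
{									\
	if (strncmp(buff, "1", 1) == 0 ||				\
	    strncmp(buff, "enable", 6) == 0)				\
		priv->_name = 1;					\
	else if (strncmp(buff, "0", 1) == 0 ||				\
		 strncmp(buff, "disable", 7) == 0)			\
		priv->_name = 0;					\
	else								\
		return -1;						\
	return 0;							\
}									\
static struct mesh_attr mesh_attr_##_name = {				\
	.name = #_name, .show = show_##_name, .store = store_##_name,	\
}

MESH_ATTR_BOOL(bonding);
MESH_ATTR_BOOL(fragmentation);

static struct mesh_attr *mesh_attrs[] = {
	&mesh_attr_bonding,
	&mesh_attr_fragmentation,
	NULL,
};

int main(void)
{
	struct mesh_priv priv = { 0 };
	char buff[32];
	struct mesh_attr **attr;

	/* flip one attribute, then dump them all as a sysfs read would */
	mesh_attr_bonding.store(&priv, "enable");
	for (attr = mesh_attrs; *attr; attr++) {
		(*attr)->show(&priv, buff);
		printf("%s: %s", (*attr)->name, buff);
	}
	return 0;
}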
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 07ae6e1b8aca..aea174cdbfbd 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -25,12 +23,12 @@
 #include <linux/bitops.h>
 
 /* shift the packet array by n places. */
-static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
 {
-	if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+	if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
 		return;
 
-	bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
+	bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
 }
 
 
@@ -40,58 +38,57 @@ static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
  * 1 if the window was moved (either new or very old)
  * 0 if the window was not moved/shifted.
  */
-int bit_get_packet(void *priv, unsigned long *seq_bits,
-		   int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+			  int32_t seq_num_diff, int set_mark)
 {
-	struct bat_priv *bat_priv = priv;
+	struct batadv_priv *bat_priv = priv;
 
 	/* sequence number is slightly older. We already got a sequence number
-	 * higher than this one, so we just mark it. */
-
-	if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
+	 * higher than this one, so we just mark it.
+	 */
+	if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
 		if (set_mark)
-			bat_set_bit(seq_bits, -seq_num_diff);
+			batadv_set_bit(seq_bits, -seq_num_diff);
 		return 0;
 	}
 
 	/* sequence number is slightly newer, so we shift the window and
-	 * set the mark if required */
-
-	if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
-		bat_bitmap_shift_left(seq_bits, seq_num_diff);
+	 * set the mark if required
+	 */
+	if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
+		batadv_bitmap_shift_left(seq_bits, seq_num_diff);
 
 		if (set_mark)
-			bat_set_bit(seq_bits, 0);
+			batadv_set_bit(seq_bits, 0);
 		return 1;
 	}
 
 	/* sequence number is much newer, probably missed a lot of packets */
-
-	if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) &&
-	    (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"We missed a lot of packets (%i) !\n",
-			seq_num_diff - 1);
-		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+	if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
+	    seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "We missed a lot of packets (%i) !\n",
+			   seq_num_diff - 1);
+		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bat_set_bit(seq_bits, 0);
+			batadv_set_bit(seq_bits, 0);
 		return 1;
 	}
 
 	/* received a much older packet. The other host either restarted
 	 * or the old packet got delayed somewhere in the network. The
 	 * packet should be dropped without calling this function if the
-	 * seqno window is protected. */
-
-	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
-	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
+	 * seqno window is protected.
+	 */
+	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
+	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
 
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Other host probably restarted!\n");
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Other host probably restarted!\n");
 
-		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bat_set_bit(seq_bits, 0);
+			batadv_set_bit(seq_bits, 0);
 
 		return 1;
 	}
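batadv_bit_get_packet() above implements a sliding window over recently seen sequence numbers: a small positive difference shifts the bitmap forward and marks the newest slot, a small negative one only marks an already-covered slot, and anything far outside the window resets the bitmap. Below is a user-space model of those cases, with a single 64-bit word standing in for the kernel bitmap and the two far-out-of-window branches collapsed into one.

/* User-space model of the seqno window in batadv_bit_get_packet(); a
 * uint64_t stands in for the kernel bitmap, the window size is fixed
 * at 64, and the "much newer"/"much older" branches are merged.
 */
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 64		/* stand-in for BATADV_TQ_LOCAL_WINDOW_SIZE */

static void set_bit64(uint64_t *seq_bits, int32_t n)
{
	if (n < 0 || n >= WINDOW_SIZE)
		return;			/* too old, just drop it */
	*seq_bits |= (uint64_t)1 << n;
}

/* returns 1 if the window was moved (packet is new or a reset), 0 if not */
static int bit_get_packet(uint64_t *seq_bits, int32_t seq_num_diff)
{
	/* slightly older: we already saw a higher seqno, only mark it */
	if (seq_num_diff <= 0 && seq_num_diff > -WINDOW_SIZE) {
		set_bit64(seq_bits, -seq_num_diff);
		return 0;
	}

	/* slightly newer: shift the window forward and mark bit 0 */
	if (seq_num_diff > 0 && seq_num_diff < WINDOW_SIZE) {
		*seq_bits <<= seq_num_diff;
		set_bit64(seq_bits, 0);
		return 1;
	}

	/* far outside the window: restart with a fresh bitmap */
	*seq_bits = 0;
	set_bit64(seq_bits, 0);
	return 1;
}

int main(void)
{
	uint64_t window = 0;

	bit_get_packet(&window, 1);	/* first packet, window moves */
	bit_get_packet(&window, 3);	/* three seqnos ahead, shift by 3 */
	bit_get_packet(&window, -2);	/* late duplicate, only marked */
	printf("window bitmap: %#llx\n", (unsigned long long)window);
	return 0;
}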
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 1835c15cda41..a081ce1c0514 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -16,39 +15,40 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
 /* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-static inline int bat_test_bit(const unsigned long *seq_bits,
-			       uint32_t last_seqno, uint32_t curr_seqno)
+ * and curr_seqno is within range of last_seqno
+ */
+static inline int batadv_test_bit(const unsigned long *seq_bits,
+				  uint32_t last_seqno, uint32_t curr_seqno)
 {
 	int32_t diff;
 
 	diff = last_seqno - curr_seqno;
-	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+	if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
 		return 0;
 	else
 		return test_bit(diff, seq_bits);
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
 {
 	/* if too old, just drop it */
-	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+	if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
 		return;
 
 	set_bit(n, seq_bits); /* turn the position on */
 }
 
 /* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old */
-int bit_get_packet(void *priv, unsigned long *seq_bits,
-		   int32_t seq_num_diff, int set_mark);
+ * new, 0 if old
+ */
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+			  int32_t seq_num_diff, int set_mark);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
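In the bridge_loop_avoidance.c diff that follows, every claim and backbone-gateway lookup follows the same pattern: hash the key into a bucket, walk the bucket under rcu_read_lock(), and take a reference with atomic_inc_not_zero() so that an entry already being torn down is skipped instead of resurrected. Here is a simplified single-bucket model of that lookup; plain C with stdatomic stands in for the kernel's RCU list and atomic_t, and the types are illustrative.

/* Simplified model of the claim lookup pattern in the diff below: walk a
 * hash bucket and take a reference only while the refcount is nonzero.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct claim {			/* stand-in for struct batadv_claim */
	uint8_t addr[ETH_ALEN];
	short vid;
	atomic_int refcount;
	struct claim *next;	/* stand-in for the hlist linkage */
};

/* mirrors atomic_inc_not_zero(): refuse to revive a dying entry */
static bool claim_get(struct claim *claim)
{
	int old = atomic_load(&claim->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&claim->refcount, &old,
						 old + 1))
			return true;
	}
	return false;
}

static struct claim *claim_hash_find(struct claim *bucket,
				     const uint8_t *addr, short vid)
{
	struct claim *claim;

	/* the kernel walks this list under rcu_read_lock() */
	for (claim = bucket; claim; claim = claim->next) {
		if (memcmp(claim->addr, addr, ETH_ALEN) != 0 ||
		    claim->vid != vid)
			continue;
		if (!claim_get(claim))
			continue;	/* being freed, skip it */
		return claim;
	}
	return NULL;
}

int main(void)
{
	struct claim a = { .addr = {1, 2, 3, 4, 5, 6}, .vid = 7,
			   .refcount = 1, .next = NULL };
	struct claim *found = claim_hash_find(&a, a.addr, 7);

	printf("found: %s\n", found ? "yes" : "no");
	return 0;
}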
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8bf97515a77d..6705d35b17ce 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -33,14 +31,14 @@
 #include <net/arp.h>
 #include <linux/if_vlan.h>
 
-static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 
-static void bla_periodic_work(struct work_struct *work);
-static void bla_send_announce(struct bat_priv *bat_priv,
-			      struct backbone_gw *backbone_gw);
+static void batadv_bla_periodic_work(struct work_struct *work);
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+				     struct batadv_backbone_gw *backbone_gw);
 
 /* return the index of the claim */
-static inline uint32_t choose_claim(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 {
 	const unsigned char *key = data;
 	uint32_t hash = 0;
@@ -60,7 +58,8 @@ static inline uint32_t choose_claim(const void *data, uint32_t size)
 }
 
 /* return the index of the backbone gateway */
-static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_backbone_gw(const void *data,
+						 uint32_t size)
 {
 	const unsigned char *key = data;
 	uint32_t hash = 0;
@@ -81,74 +80,75 @@ static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
 
 
 /* compares address and vid of two backbone gws */
-static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+static int batadv_compare_backbone_gw(const struct hlist_node *node,
+				      const void *data2)
 {
-	const void *data1 = container_of(node, struct backbone_gw,
+	const void *data1 = container_of(node, struct batadv_backbone_gw,
 					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
 }
 
 /* compares address and vid of two claims */
-static int compare_claim(const struct hlist_node *node, const void *data2)
+static int batadv_compare_claim(const struct hlist_node *node,
+				const void *data2)
 {
-	const void *data1 = container_of(node, struct claim,
+	const void *data1 = container_of(node, struct batadv_claim,
 					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
 }
 
 /* free a backbone gw */
-static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
 {
 	if (atomic_dec_and_test(&backbone_gw->refcount))
 		kfree_rcu(backbone_gw, rcu);
 }
 
 /* finally deinitialize the claim */
-static void claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_free_rcu(struct rcu_head *rcu)
 {
-	struct claim *claim;
+	struct batadv_claim *claim;
 
-	claim = container_of(rcu, struct claim, rcu);
+	claim = container_of(rcu, struct batadv_claim, rcu);
 
-	backbone_gw_free_ref(claim->backbone_gw);
+	batadv_backbone_gw_free_ref(claim->backbone_gw);
 	kfree(claim);
 }
 
 /* free a claim, call claim_free_rcu if its the last reference */
-static void claim_free_ref(struct claim *claim)
+static void batadv_claim_free_ref(struct batadv_claim *claim)
 {
 	if (atomic_dec_and_test(&claim->refcount))
-		call_rcu(&claim->rcu, claim_free_rcu);
+		call_rcu(&claim->rcu, batadv_claim_free_rcu);
 }
 
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
  * looks for a claim in the hash, and returns it if found
  * or NULL otherwise.
  */
-static struct claim *claim_hash_find(struct bat_priv *bat_priv,
-				     struct claim *data)
+static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
+						   struct batadv_claim *data)
 {
-	struct hashtable_t *hash = bat_priv->claim_hash;
+	struct batadv_hashtable *hash = bat_priv->claim_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct claim *claim;
-	struct claim *claim_tmp = NULL;
+	struct batadv_claim *claim;
+	struct batadv_claim *claim_tmp = NULL;
 	int index;
 
 	if (!hash)
 		return NULL;
 
-	index = choose_claim(data, hash->size);
+	index = batadv_choose_claim(data, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
-		if (!compare_claim(&claim->hash_entry, data))
+		if (!batadv_compare_claim(&claim->hash_entry, data))
 			continue;
 
 		if (!atomic_inc_not_zero(&claim->refcount))
@@ -163,21 +163,22 @@ static struct claim *claim_hash_find(struct bat_priv *bat_priv,
 }
 
 /**
+ * batadv_backbone_hash_find - looks for a claim in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
  *
- * looks for a claim in the hash, and returns it if found
- * or NULL otherwise.
+ * Returns claim if found or NULL otherwise.
  */
-static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
-					      uint8_t *addr, short vid)
+static struct batadv_backbone_gw *
+batadv_backbone_hash_find(struct batadv_priv *bat_priv,
+			  uint8_t *addr, short vid)
 {
-	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct batadv_hashtable *hash = bat_priv->backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct backbone_gw search_entry, *backbone_gw;
-	struct backbone_gw *backbone_gw_tmp = NULL;
+	struct batadv_backbone_gw search_entry, *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw_tmp = NULL;
 	int index;
 
 	if (!hash)
@@ -186,13 +187,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
 	memcpy(search_entry.orig, addr, ETH_ALEN);
 	search_entry.vid = vid;
 
-	index = choose_backbone_gw(&search_entry, hash->size);
+	index = batadv_choose_backbone_gw(&search_entry, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
-		if (!compare_backbone_gw(&backbone_gw->hash_entry,
-					 &search_entry))
+		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
+						&search_entry))
 			continue;
 
 		if (!atomic_inc_not_zero(&backbone_gw->refcount))
@@ -207,12 +208,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
 }
 
 /* delete all claims for a backbone */
-static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+static void
+batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
 {
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct claim *claim;
+	struct batadv_claim *claim;
 	int i;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 
@@ -231,36 +233,35 @@ static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
 			if (claim->backbone_gw != backbone_gw)
 				continue;
 
-			claim_free_ref(claim);
+			batadv_claim_free_ref(claim);
 			hlist_del_rcu(node);
 		}
 		spin_unlock_bh(list_lock);
 	}
 
 	/* all claims gone, intialize CRC */
-	backbone_gw->crc = BLA_CRC_INIT;
+	backbone_gw->crc = BATADV_BLA_CRC_INIT;
 }
 
 /**
+ * batadv_bla_send_claim - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address to be announced within the claim
  * @vid: the VLAN ID
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
- *
- * sends a claim frame according to the provided info.
  */
-static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
-			   short vid, int claimtype)
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+				  short vid, int claimtype)
 {
 	struct sk_buff *skb;
 	struct ethhdr *ethhdr;
-	struct hard_iface *primary_if;
+	struct batadv_hard_iface *primary_if;
 	struct net_device *soft_iface;
 	uint8_t *hw_src;
-	struct bla_claim_dst local_claim_dest;
-	uint32_t zeroip = 0;
+	struct batadv_bla_claim_dst local_claim_dest;
+	__be32 zeroip = 0;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		return;
 
@@ -294,40 +295,41 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
 
 	/* now we pretend that the client would have sent this ... */
 	switch (claimtype) {
-	case CLAIM_TYPE_ADD:
+	case BATADV_CLAIM_TYPE_ADD:
 		/* normal claim frame
 		 * set Ethernet SRC to the clients mac
 		 */
 		memcpy(ethhdr->h_source, mac, ETH_ALEN);
-		bat_dbg(DBG_BLA, bat_priv,
-			"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
+			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
 		break;
-	case CLAIM_TYPE_DEL:
+	case BATADV_CLAIM_TYPE_DEL:
 		/* unclaim frame
 		 * set HW SRC to the clients mac
 		 */
 		memcpy(hw_src, mac, ETH_ALEN);
-		bat_dbg(DBG_BLA, bat_priv,
-			"bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
+			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
+			   vid);
 		break;
-	case CLAIM_TYPE_ANNOUNCE:
+	case BATADV_CLAIM_TYPE_ANNOUNCE:
 		/* announcement frame
 		 * set HW SRC to the special mac containg the crc
 		 */
 		memcpy(hw_src, mac, ETH_ALEN);
-		bat_dbg(DBG_BLA, bat_priv,
-			"bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
-			ethhdr->h_source, vid);
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
+			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+			   ethhdr->h_source, vid);
 		break;
-	case CLAIM_TYPE_REQUEST:
+	case BATADV_CLAIM_TYPE_REQUEST:
 		/* request frame
 		 * set HW SRC to the special mac containg the crc
 		 */
 		memcpy(hw_src, mac, ETH_ALEN);
 		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
-		bat_dbg(DBG_BLA, bat_priv,
-			"bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
-			ethhdr->h_source, ethhdr->h_dest, vid);
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
+			   "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
+			   ethhdr->h_source, ethhdr->h_dest, vid);
 		break;
 
 	}
@@ -344,10 +346,11 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
 	netif_rx(skb);
 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 }
 
 /**
+ * batadv_bla_get_backbone_gw
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
@@ -355,21 +358,22 @@ out:
  * searches for the backbone gw or creates a new one if it could not
  * be found.
  */
-static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
-					       uint8_t *orig, short vid)
+static struct batadv_backbone_gw *
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+			   short vid)
 {
-	struct backbone_gw *entry;
-	struct orig_node *orig_node;
+	struct batadv_backbone_gw *entry;
+	struct batadv_orig_node *orig_node;
 	int hash_added;
 
-	entry = backbone_hash_find(bat_priv, orig, vid);
+	entry = batadv_backbone_hash_find(bat_priv, orig, vid);
 
 	if (entry)
 		return entry;
 
-	bat_dbg(DBG_BLA, bat_priv,
-		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
-		orig, vid);
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
+		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+		   orig, vid);
 
 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
@@ -377,7 +381,7 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
 
 	entry->vid = vid;
 	entry->lasttime = jiffies;
-	entry->crc = BLA_CRC_INIT;
+	entry->crc = BATADV_BLA_CRC_INIT;
 	entry->bat_priv = bat_priv;
 	atomic_set(&entry->request_sent, 0);
 	memcpy(entry->orig, orig, ETH_ALEN);
@@ -385,8 +389,10 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
 	/* one for the hash, one for returning */
 	atomic_set(&entry->refcount, 2);
 
-	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
-			      choose_backbone_gw, entry, &entry->hash_entry);
+	hash_added = batadv_hash_add(bat_priv->backbone_hash,
+				     batadv_compare_backbone_gw,
+				     batadv_choose_backbone_gw, entry,
+				     &entry->hash_entry);
 
 	if (unlikely(hash_added != 0)) {
 		/* hash failed, free the structure */
@@ -395,11 +401,11 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
 	}
 
 	/* this is a gateway now, remove any tt entries */
-	orig_node = orig_hash_find(bat_priv, orig);
+	orig_node = batadv_orig_hash_find(bat_priv, orig);
 	if (orig_node) {
-		tt_global_del_orig(bat_priv, orig_node,
-				   "became a backbone gateway");
-		orig_node_free_ref(orig_node);
+		batadv_tt_global_del_orig(bat_priv, orig_node,
+					  "became a backbone gateway");
+		batadv_orig_node_free_ref(orig_node);
 	}
 	return entry;
 }
@@ -407,43 +413,46 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
 /* update or add the own backbone gw to make sure we announce
  * where we receive other backbone gws
  */
-static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
-				       struct hard_iface *primary_if,
-				       short vid)
+static void
+batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
+				  struct batadv_hard_iface *primary_if,
+				  short vid)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 
-	backbone_gw = bla_get_backbone_gw(bat_priv,
-					  primary_if->net_dev->dev_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+						 primary_if->net_dev->dev_addr,
+						 vid);
 	if (unlikely(!backbone_gw))
 		return;
 
 	backbone_gw->lasttime = jiffies;
-	backbone_gw_free_ref(backbone_gw);
+	batadv_backbone_gw_free_ref(backbone_gw);
 }
 
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
  * @vid: the vid where the request came on
  *
  * Repeat all of our own claims, and finally send an ANNOUNCE frame
  * to allow the requester another check if the CRC is correct now.
  */
-static void bla_answer_request(struct bat_priv *bat_priv,
-			       struct hard_iface *primary_if, short vid)
+static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
+				      struct batadv_hard_iface *primary_if,
+				      short vid)
 {
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct hashtable_t *hash;
-	struct claim *claim;
-	struct backbone_gw *backbone_gw;
+	struct batadv_hashtable *hash;
+	struct batadv_claim *claim;
+	struct batadv_backbone_gw *backbone_gw;
 	int i;
 
-	bat_dbg(DBG_BLA, bat_priv,
-		"bla_answer_request(): received a claim request, send all of our own claims again\n");
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
+		   "bla_answer_request(): received a claim request, send all of our own claims again\n");
 
-	backbone_gw = backbone_hash_find(bat_priv,
-					 primary_if->net_dev->dev_addr, vid);
+	backbone_gw = batadv_backbone_hash_find(bat_priv,
+						primary_if->net_dev->dev_addr,
+						vid);
 	if (!backbone_gw)
 		return;
 
@@ -457,36 +466,34 @@ static void bla_answer_request(struct bat_priv *bat_priv,
 			if (claim->backbone_gw != backbone_gw)
 				continue;
 
-			bla_send_claim(bat_priv, claim->addr, claim->vid,
-				       CLAIM_TYPE_ADD);
+			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
+					      BATADV_CLAIM_TYPE_ADD);
 		}
 		rcu_read_unlock();
 	}
 
 	/* finally, send an announcement frame */
-	bla_send_announce(bat_priv, backbone_gw);
-	backbone_gw_free_ref(backbone_gw);
+	batadv_bla_send_announce(bat_priv, backbone_gw);
+	batadv_backbone_gw_free_ref(backbone_gw);
 }
 
-/**
- * @backbone_gw: the backbone gateway from whom we are out of sync
+/* @backbone_gw: the backbone gateway from whom we are out of sync
  *
  * When the crc is wrong, ask the backbone gateway for a full table update.
  * After the request, it will repeat all of his own claims and finally
  * send an announcement claim with which we can check again.
  */
-static void bla_send_request(struct backbone_gw *backbone_gw)
+static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
 {
 	/* first, remove all old entries */
-	bla_del_backbone_claims(backbone_gw);
+	batadv_bla_del_backbone_claims(backbone_gw);
 
-	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
-		"Sending REQUEST to %pM\n",
-		backbone_gw->orig);
+	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+		   "Sending REQUEST to %pM\n", backbone_gw->orig);
 
 	/* send request */
-	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
-		       backbone_gw->vid, CLAIM_TYPE_REQUEST);
+	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
 
 	/* no local broadcasts should be sent or received, for now. */
 	if (!atomic_read(&backbone_gw->request_sent)) {
@@ -495,45 +502,45 @@ static void bla_send_request(struct backbone_gw *backbone_gw)
 	}
 }
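
The request_sent flag set at the end of the previous hunk works together with the mesh-wide bla_num_requests counter that gates broadcast traffic in bla_rx() further down: while any table request is outstanding, broadcasts are refused, and a matching ANNOUNCE (or a purge) releases the gate. A minimal userspace sketch of that mechanism, with plain ints standing in for the kernel's atomic_t and all names chosen for illustration only:

#include <stdbool.h>
#include <stdio.h>

static int bla_num_requests;	/* mesh-wide count of outstanding requests */

struct gw { int request_sent; };

static void send_request(struct gw *gw)
{
	if (!gw->request_sent) {	/* account each gateway only once */
		bla_num_requests++;
		gw->request_sent = 1;
	}
}

static void announce_matched(struct gw *gw)
{
	if (gw->request_sent) {		/* table is in sync again */
		bla_num_requests--;
		gw->request_sent = 0;
	}
}

static bool broadcasts_blocked(void) { return bla_num_requests > 0; }

int main(void)
{
	struct gw gw = { 0 };

	send_request(&gw);
	printf("blocked: %d\n", broadcasts_blocked());	/* 1 */
	announce_matched(&gw);
	printf("blocked: %d\n", broadcasts_blocked());	/* 0 */
	return 0;
}
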
497 504
498/** 505/* @bat_priv: the bat priv with all the soft interface information
499 * @bat_priv: the bat priv with all the soft interface information
500 * @backbone_gw: our backbone gateway which should be announced 506 * @backbone_gw: our backbone gateway which should be announced
501 * 507 *
502 * This function sends an announcement. It is called from multiple 508 * This function sends an announcement. It is called from multiple
503 * places. 509 * places.
504 */ 510 */
505static void bla_send_announce(struct bat_priv *bat_priv, 511static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
506 struct backbone_gw *backbone_gw) 512 struct batadv_backbone_gw *backbone_gw)
507{ 513{
508 uint8_t mac[ETH_ALEN]; 514 uint8_t mac[ETH_ALEN];
509 uint16_t crc; 515 __be16 crc;
510 516
511 memcpy(mac, announce_mac, 4); 517 memcpy(mac, batadv_announce_mac, 4);
512 crc = htons(backbone_gw->crc); 518 crc = htons(backbone_gw->crc);
513 memcpy(&mac[4], (uint8_t *)&crc, 2); 519 memcpy(&mac[4], &crc, 2);
514 520
515 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE); 521 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
522 BATADV_CLAIM_TYPE_ANNOUNCE);
516 523
517} 524}
518 525
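bla_send_announce() above encodes the claim-table checksum directly into the destination MAC of the announcement frame: four magic bytes followed by the CRC16 in network byte order. A small sketch of that packing; the magic value below is a placeholder, since the real batadv_announce_mac is defined elsewhere in this file and is not visible in this diff:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* illustrative stand-in for batadv_announce_mac, NOT the real value */
static const uint8_t announce_magic[4] = { 0x43, 0x05, 0x43, 0x05 };

static void build_announce_mac(uint8_t mac[ETH_ALEN], uint16_t crc_host)
{
	uint16_t crc_net = htons(crc_host);	/* wire format, like the htons() above */

	memcpy(mac, announce_magic, 4);		/* bytes 0..3: magic prefix */
	memcpy(&mac[4], &crc_net, 2);		/* bytes 4..5: CRC16 of the claim table */
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	build_announce_mac(mac, 0xbeef);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
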
 /**
+ * batadv_bla_add_claim - Adds a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address of the claim
  * @vid: the VLAN ID of the frame
  * @backbone_gw: the backbone gateway which claims it
- *
- * Adds a claim in the claim hash.
  */
-static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
-			  const short vid, struct backbone_gw *backbone_gw)
+static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+				 const uint8_t *mac, const short vid,
+				 struct batadv_backbone_gw *backbone_gw)
 {
-	struct claim *claim;
-	struct claim search_claim;
+	struct batadv_claim *claim;
+	struct batadv_claim search_claim;
 	int hash_added;

 	memcpy(search_claim.addr, mac, ETH_ALEN);
 	search_claim.vid = vid;
-	claim = claim_hash_find(bat_priv, &search_claim);
+	claim = batadv_claim_hash_find(bat_priv, &search_claim);

 	/* create a new claim entry if it does not exist yet. */
 	if (!claim) {
@@ -547,11 +554,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
 		claim->backbone_gw = backbone_gw;

 		atomic_set(&claim->refcount, 2);
-		bat_dbg(DBG_BLA, bat_priv,
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
 			   mac, vid);
-		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
-				      choose_claim, claim, &claim->hash_entry);
+		hash_added = batadv_hash_add(bat_priv->claim_hash,
+					     batadv_compare_claim,
+					     batadv_choose_claim, claim,
+					     &claim->hash_entry);

 		if (unlikely(hash_added != 0)) {
 			/* only local changes happened. */
@@ -564,13 +573,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
 			/* no need to register a new backbone */
 			goto claim_free_ref;

-		bat_dbg(DBG_BLA, bat_priv,
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
 			   mac, vid);

 		claim->backbone_gw->crc ^=
 			crc16(0, claim->addr, ETH_ALEN);
-		backbone_gw_free_ref(claim->backbone_gw);
+		batadv_backbone_gw_free_ref(claim->backbone_gw);

 	}
 	/* set (new) backbone gw */
@@ -581,45 +590,48 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
 	backbone_gw->lasttime = jiffies;

 claim_free_ref:
-	claim_free_ref(claim);
+	batadv_claim_free_ref(claim);
 }

 /* Delete a claim from the claim hash which has the
  * given mac address and vid.
  */
-static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
-			  const short vid)
+static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+				 const uint8_t *mac, const short vid)
 {
-	struct claim search_claim, *claim;
+	struct batadv_claim search_claim, *claim;

 	memcpy(search_claim.addr, mac, ETH_ALEN);
 	search_claim.vid = vid;
-	claim = claim_hash_find(bat_priv, &search_claim);
+	claim = batadv_claim_hash_find(bat_priv, &search_claim);
 	if (!claim)
 		return;

-	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
+		   mac, vid);

-	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
-	claim_free_ref(claim); /* reference from the hash is gone */
+	batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+			   batadv_choose_claim, claim);
+	batadv_claim_free_ref(claim); /* reference from the hash is gone */

 	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

 	/* don't need the reference from hash_find() anymore */
-	claim_free_ref(claim);
+	batadv_claim_free_ref(claim);
 }

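Both bla_add_claim() and bla_del_claim() maintain the per-gateway checksum by XORing in the CRC16 of the claimed address. Because XOR is its own inverse and order-independent, the running value always equals the XOR over the currently claimed addresses, no matter in which order claims come and go. A self-contained demonstration, using a bitwise CRC16 that is intended to match the kernel's lib/crc16 (treat that equivalence as an assumption):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* bitwise CRC16 (reflected polynomial 0x8005), same family as lib/crc16 */
static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xa001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t a[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	const uint8_t b[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x02 };
	uint16_t crc = 0;

	crc ^= crc16(0, a, ETH_ALEN);	/* claim a */
	crc ^= crc16(0, b, ETH_ALEN);	/* claim b */
	crc ^= crc16(0, a, ETH_ALEN);	/* unclaim a: the XOR cancels out */

	/* only b remains claimed, so the checksum equals b's CRC alone */
	printf("match: %d\n", crc == crc16(0, b, ETH_ALEN));
	return 0;
}
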
 /* check for ANNOUNCE frame, return 1 if handled */
-static int handle_announce(struct bat_priv *bat_priv,
-			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv,
+				  uint8_t *an_addr, uint8_t *backbone_addr,
+				  short vid)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 	uint16_t crc;

-	if (memcmp(an_addr, announce_mac, 4) != 0)
+	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 		return 0;

-	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);

 	if (unlikely(!backbone_gw))
 		return 1;
@@ -627,19 +639,19 @@ static int handle_announce(struct bat_priv *bat_priv,

 	/* handle as ANNOUNCE frame */
 	backbone_gw->lasttime = jiffies;
-	crc = ntohs(*((uint16_t *)(&an_addr[4])));
+	crc = ntohs(*((__be16 *)(&an_addr[4])));

-	bat_dbg(DBG_BLA, bat_priv,
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
 		   vid, backbone_gw->orig, crc);

 	if (backbone_gw->crc != crc) {
-		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 			   "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
-			   backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
-			   crc);
+			   backbone_gw->orig, backbone_gw->vid,
+			   backbone_gw->crc, crc);

-		bla_send_request(backbone_gw);
+		batadv_bla_send_request(backbone_gw);
 	} else {
 		/* if we have sent a request and the crc was OK,
 		 * we can allow traffic again.
@@ -650,88 +662,92 @@ static int handle_announce(struct bat_priv *bat_priv,
 		}
 	}

-	backbone_gw_free_ref(backbone_gw);
+	batadv_backbone_gw_free_ref(backbone_gw);
 	return 1;
 }

 /* check for REQUEST frame, return 1 if handled */
-static int handle_request(struct bat_priv *bat_priv,
-			  struct hard_iface *primary_if,
+static int batadv_handle_request(struct batadv_priv *bat_priv,
+				 struct batadv_hard_iface *primary_if,
 			  uint8_t *backbone_addr,
 			  struct ethhdr *ethhdr, short vid)
 {
 	/* check for REQUEST frame */
-	if (!compare_eth(backbone_addr, ethhdr->h_dest))
+	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
 		return 0;

 	/* sanity check, this should not happen on a normal switch,
 	 * we ignore it in this case.
 	 */
-	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
 		return 1;

-	bat_dbg(DBG_BLA, bat_priv,
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
 		   vid, ethhdr->h_source);

-	bla_answer_request(bat_priv, primary_if, vid);
+	batadv_bla_answer_request(bat_priv, primary_if, vid);
 	return 1;
 }

 /* check for UNCLAIM frame, return 1 if handled */
-static int handle_unclaim(struct bat_priv *bat_priv,
-			  struct hard_iface *primary_if,
+static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
+				 struct batadv_hard_iface *primary_if,
 			  uint8_t *backbone_addr,
 			  uint8_t *claim_addr, short vid)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;

 	/* unclaim in any case if it is our own */
-	if (primary_if && compare_eth(backbone_addr,
+	if (primary_if && batadv_compare_eth(backbone_addr,
 				      primary_if->net_dev->dev_addr))
-		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+		batadv_bla_send_claim(bat_priv, claim_addr, vid,
+				      BATADV_CLAIM_TYPE_DEL);

-	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

 	if (!backbone_gw)
 		return 1;

 	/* this must be an UNCLAIM frame */
-	bat_dbg(DBG_BLA, bat_priv,
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
 		   claim_addr, vid, backbone_gw->orig);

-	bla_del_claim(bat_priv, claim_addr, vid);
-	backbone_gw_free_ref(backbone_gw);
+	batadv_bla_del_claim(bat_priv, claim_addr, vid);
+	batadv_backbone_gw_free_ref(backbone_gw);
 	return 1;
 }

 /* check for CLAIM frame, return 1 if handled */
-static int handle_claim(struct bat_priv *bat_priv,
-			struct hard_iface *primary_if, uint8_t *backbone_addr,
-			uint8_t *claim_addr, short vid)
+static int batadv_handle_claim(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary_if,
+			       uint8_t *backbone_addr, uint8_t *claim_addr,
+			       short vid)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;

 	/* register the gateway if not yet available, and add the claim. */

-	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);

 	if (unlikely(!backbone_gw))
 		return 1;

 	/* this must be a CLAIM frame */
-	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
-	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
-		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+		batadv_bla_send_claim(bat_priv, claim_addr, vid,
+				      BATADV_CLAIM_TYPE_ADD);

 	/* TODO: we could call something like tt_local_del() here. */

-	backbone_gw_free_ref(backbone_gw);
+	batadv_backbone_gw_free_ref(backbone_gw);
 	return 1;
 }

 /**
+ * batadv_check_claim_group
  * @bat_priv: the bat priv with all the soft interface information
  * @hw_src: the Hardware source in the ARP Header
  * @hw_dst: the Hardware destination in the ARP Header
@@ -746,16 +762,16 @@ static int handle_claim(struct bat_priv *bat_priv,
  * 1 - if is a claim packet from another group
  * 0 - if it is not a claim packet
  */
-static int check_claim_group(struct bat_priv *bat_priv,
-			     struct hard_iface *primary_if,
+static int batadv_check_claim_group(struct batadv_priv *bat_priv,
+				    struct batadv_hard_iface *primary_if,
 			     uint8_t *hw_src, uint8_t *hw_dst,
 			     struct ethhdr *ethhdr)
 {
 	uint8_t *backbone_addr;
-	struct orig_node *orig_node;
-	struct bla_claim_dst *bla_dst, *bla_dst_own;
+	struct batadv_orig_node *orig_node;
+	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

-	bla_dst = (struct bla_claim_dst *)hw_dst;
+	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
 	bla_dst_own = &bat_priv->claim_dest;

 	/* check if it is a claim packet in general */
@@ -767,12 +783,12 @@ static int check_claim_group(struct bat_priv *bat_priv,
 	 * otherwise assume it is in the hw_src
 	 */
 	switch (bla_dst->type) {
-	case CLAIM_TYPE_ADD:
+	case BATADV_CLAIM_TYPE_ADD:
 		backbone_addr = hw_src;
 		break;
-	case CLAIM_TYPE_REQUEST:
-	case CLAIM_TYPE_ANNOUNCE:
-	case CLAIM_TYPE_DEL:
+	case BATADV_CLAIM_TYPE_REQUEST:
+	case BATADV_CLAIM_TYPE_ANNOUNCE:
+	case BATADV_CLAIM_TYPE_DEL:
 		backbone_addr = ethhdr->h_source;
 		break;
 	default:
@@ -780,7 +796,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
 	}

 	/* don't accept claim frames from ourselves */
-	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 		return 0;

 	/* if its already the same group, it is fine. */
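
The switch in check_claim_group() above locates the backbone gateway address depending on the claim type: ADD frames carry it in the ARP hw_src field, while REQUEST, ANNOUNCE and DEL frames are sent by the backbone itself, so its address is the Ethernet source. A compilable sketch of that dispatch, with illustrative enum values rather than the on-wire encoding:

#include <stddef.h>
#include <stdint.h>

enum claim_type {	/* illustrative values only */
	CLAIM_ADD,
	CLAIM_DEL,
	CLAIM_ANNOUNCE,
	CLAIM_REQUEST,
};

static const uint8_t *backbone_addr_of(enum claim_type type,
				       const uint8_t *hw_src,
				       const uint8_t *eth_source)
{
	switch (type) {
	case CLAIM_ADD:
		return hw_src;		/* backbone announces itself in hw_src */
	case CLAIM_DEL:
	case CLAIM_ANNOUNCE:
	case CLAIM_REQUEST:
		return eth_source;	/* sender is the backbone itself */
	default:
		return NULL;		/* not a claim frame */
	}
}

int main(void)
{
	uint8_t hw_src[6] = { 0 }, eth_src[6] = { 0 };

	/* ADD frames resolve to hw_src, everything else to the eth source */
	return backbone_addr_of(CLAIM_ADD, hw_src, eth_src) == hw_src ? 0 : 1;
}
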
@@ -788,7 +804,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
 		return 2;

 	/* lets see if this originator is in our mesh */
-	orig_node = orig_hash_find(bat_priv, backbone_addr);
+	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

 	/* dont accept claims from gateways which are not in
 	 * the same mesh or group.
@@ -798,20 +814,19 @@ static int check_claim_group(struct bat_priv *bat_priv,

 	/* if our mesh friends mac is bigger, use it for ourselves. */
 	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
-		bat_dbg(DBG_BLA, bat_priv,
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "taking other backbones claim group: %04x\n",
 			   ntohs(bla_dst->group));
 		bla_dst_own->group = bla_dst->group;
 	}

-	orig_node_free_ref(orig_node);
+	batadv_orig_node_free_ref(orig_node);

 	return 2;
 }

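The group-id handling above makes all backbone gateways converge: whoever sees a numerically larger group id (compared in host byte order) adopts it, so after enough exchanges every gateway in the mesh carries the maximum. A minimal sketch of that merge rule:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* ids travel big-endian; compare in host order like the kernel does */
static uint16_t merge_group(uint16_t own_be, uint16_t other_be)
{
	if (ntohs(other_be) > ntohs(own_be))
		return other_be;	/* adopt the larger id */
	return own_be;
}

int main(void)
{
	uint16_t own = htons(0x1234), other = htons(0xabcd);

	printf("group: %04x\n", ntohs(merge_group(own, other)));	/* abcd */
	return 0;
}
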
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  *
  * Check if this is a claim frame, and process it accordingly.
@@ -819,15 +834,15 @@ static int check_claim_group(struct bat_priv *bat_priv,
  * returns 1 if it was a claim frame, otherwise return 0 to
  * tell the callee that it can use the frame on its own.
  */
-static int bla_process_claim(struct bat_priv *bat_priv,
-			     struct hard_iface *primary_if,
+static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
+				    struct batadv_hard_iface *primary_if,
 			     struct sk_buff *skb)
 {
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
 	struct arphdr *arphdr;
 	uint8_t *hw_src, *hw_dst;
-	struct bla_claim_dst *bla_dst;
+	struct batadv_bla_claim_dst *bla_dst;
 	uint16_t proto;
 	int headlen;
 	short vid = -1;
@@ -860,7 +875,6 @@ static int bla_process_claim(struct bat_priv *bat_priv,
 	/* Check whether the ARP frame carries a valid
 	 * IP information
 	 */
-
 	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
 		return 0;
 	if (arphdr->ar_pro != htons(ETH_P_IP))
@@ -872,59 +886,62 @@ static int bla_process_claim(struct bat_priv *bat_priv,

 	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
 	hw_dst = hw_src + ETH_ALEN + 4;
-	bla_dst = (struct bla_claim_dst *)hw_dst;
+	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;

 	/* check if it is a claim frame. */
-	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
+				       ethhdr);
 	if (ret == 1)
-		bat_dbg(DBG_BLA, bat_priv,
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 			   ethhdr->h_source, vid, hw_src, hw_dst);

 	if (ret < 2)
 		return ret;

 	/* become a backbone gw ourselves on this vlan if not happened yet */
-	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

 	/* check for the different types of claim frames ... */
 	switch (bla_dst->type) {
-	case CLAIM_TYPE_ADD:
-		if (handle_claim(bat_priv, primary_if, hw_src,
+	case BATADV_CLAIM_TYPE_ADD:
+		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
 				 ethhdr->h_source, vid))
 			return 1;
 		break;
-	case CLAIM_TYPE_DEL:
-		if (handle_unclaim(bat_priv, primary_if,
+	case BATADV_CLAIM_TYPE_DEL:
+		if (batadv_handle_unclaim(bat_priv, primary_if,
 				   ethhdr->h_source, hw_src, vid))
 			return 1;
 		break;

-	case CLAIM_TYPE_ANNOUNCE:
-		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+	case BATADV_CLAIM_TYPE_ANNOUNCE:
+		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
+					   vid))
 			return 1;
 		break;
-	case CLAIM_TYPE_REQUEST:
-		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+	case BATADV_CLAIM_TYPE_REQUEST:
+		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
+					  vid))
 			return 1;
 		break;
 	}

-	bat_dbg(DBG_BLA, bat_priv,
+	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 		   ethhdr->h_source, vid, hw_src, hw_dst);
 	return 1;
 }

 /* Check when we last heard from other nodes, and remove them in case of
  * a time out, or clean all backbone gws if now is set.
  */
-static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 	int i;

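The hw_src/hw_dst arithmetic in bla_process_claim() above relies on the fixed layout of an Ethernet/IPv4 ARP payload: the sender hardware address directly follows the 8-byte fixed ARP header, and the target hardware address sits another ETH_ALEN + 4 bytes further, past the sender MAC and sender IP. A sketch that just computes those offsets (the struct here mirrors the fixed part of the kernel's struct arphdr):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

struct arphdr_fixed {		/* fixed part of an ARP header, 8 bytes */
	uint16_t ar_hrd, ar_pro;
	uint8_t  ar_hln, ar_pln;
	uint16_t ar_op;
};

int main(void)
{
	uint8_t arp[28] = { 0 };	/* fixed header + 2x (MAC + IPv4) */
	uint8_t *hw_src = arp + sizeof(struct arphdr_fixed);
	uint8_t *hw_dst = hw_src + ETH_ALEN + 4;	/* skip sender MAC + IP */

	/* prints: hw_src at 8, hw_dst at 18 */
	printf("hw_src at %td, hw_dst at %td\n", hw_src - arp, hw_dst - arp);
	return 0;
}
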
@@ -941,29 +958,30 @@ static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
 					  head, hash_entry) {
 			if (now)
 				goto purge_now;
-			if (!has_timed_out(backbone_gw->lasttime,
-					   BLA_BACKBONE_TIMEOUT))
+			if (!batadv_has_timed_out(backbone_gw->lasttime,
+						  BATADV_BLA_BACKBONE_TIMEOUT))
 				continue;

-			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
 				   backbone_gw->orig);

 purge_now:
 			/* don't wait for the pending request anymore */
 			if (atomic_read(&backbone_gw->request_sent))
 				atomic_dec(&bat_priv->bla_num_requests);

-			bla_del_backbone_claims(backbone_gw);
+			batadv_bla_del_backbone_claims(backbone_gw);

 			hlist_del_rcu(node);
-			backbone_gw_free_ref(backbone_gw);
+			batadv_backbone_gw_free_ref(backbone_gw);
 		}
 		spin_unlock_bh(list_lock);
 	}
 }

 /**
+ * batadv_bla_purge_claims
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -971,13 +989,14 @@ purge_now:
  * Check when we heard last time from our own claims, and remove them in case of
  * a time out, or clean all claims if now is set
  */
-static void bla_purge_claims(struct bat_priv *bat_priv,
-			     struct hard_iface *primary_if, int now)
+static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+				    struct batadv_hard_iface *primary_if,
+				    int now)
 {
-	struct claim *claim;
+	struct batadv_claim *claim;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	int i;

 	hash = bat_priv->claim_hash;
@@ -991,42 +1010,42 @@ static void bla_purge_claims(struct bat_priv *bat_priv,
 		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
 			if (now)
 				goto purge_now;
-			if (!compare_eth(claim->backbone_gw->orig,
+			if (!batadv_compare_eth(claim->backbone_gw->orig,
 					 primary_if->net_dev->dev_addr))
 				continue;
-			if (!has_timed_out(claim->lasttime,
-					   BLA_CLAIM_TIMEOUT))
+			if (!batadv_has_timed_out(claim->lasttime,
+						  BATADV_BLA_CLAIM_TIMEOUT))
 				continue;

-			bat_dbg(DBG_BLA, bat_priv,
+			batadv_dbg(BATADV_DBG_BLA, bat_priv,
 				   "bla_purge_claims(): %pM, vid %d, time out\n",
 				   claim->addr, claim->vid);

 purge_now:
-			handle_unclaim(bat_priv, primary_if,
+			batadv_handle_unclaim(bat_priv, primary_if,
 				       claim->backbone_gw->orig,
 				       claim->addr, claim->vid);
 		}
 		rcu_read_unlock();
 	}
 }

 /**
+ * batadv_bla_update_orig_address
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
  * @oldif: the old primary interface, may be NULL
  *
  * Update the backbone gateways when the own orig address changes.
- *
  */
-void bla_update_orig_address(struct bat_priv *bat_priv,
-			     struct hard_iface *primary_if,
-			     struct hard_iface *oldif)
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+				    struct batadv_hard_iface *primary_if,
+				    struct batadv_hard_iface *oldif)
 {
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	int i;

 	/* reset bridge loop avoidance group id */
@@ -1034,8 +1053,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
 		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));

 	if (!oldif) {
-		bla_purge_claims(bat_priv, NULL, 1);
-		bla_purge_backbone_gw(bat_priv, 1);
+		batadv_bla_purge_claims(bat_priv, NULL, 1);
+		batadv_bla_purge_backbone_gw(bat_priv, 1);
 		return;
 	}

@@ -1049,8 +1068,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
 			/* own orig still holds the old value. */
-			if (!compare_eth(backbone_gw->orig,
+			if (!batadv_compare_eth(backbone_gw->orig,
 					 oldif->net_dev->dev_addr))
 				continue;

 			memcpy(backbone_gw->orig,
@@ -1058,7 +1077,7 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
 			/* send an announce frame so others will ask for our
 			 * claims and update their tables.
 			 */
-			bla_send_announce(bat_priv, backbone_gw);
+			batadv_bla_send_announce(bat_priv, backbone_gw);
 		}
 		rcu_read_unlock();
 	}
@@ -1067,36 +1086,36 @@ void bla_update_orig_address(struct bat_priv *bat_priv,


 /* (re)start the timer */
-static void bla_start_timer(struct bat_priv *bat_priv)
+static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
-			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
+	INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }

 /* periodic work to do:
  * * purge structures when they are too old
  * * send announcements
  */
-static void bla_periodic_work(struct work_struct *work)
+static void batadv_bla_periodic_work(struct work_struct *work)
 {
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, bla_work);
+	struct batadv_priv *bat_priv;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct backbone_gw *backbone_gw;
-	struct hashtable_t *hash;
-	struct hard_iface *primary_if;
+	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_hashtable *hash;
+	struct batadv_hard_iface *primary_if;
 	int i;

-	primary_if = primary_if_get_selected(bat_priv);
+	bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;

-	bla_purge_claims(bat_priv, primary_if, 0);
-	bla_purge_backbone_gw(bat_priv, 0);
+	batadv_bla_purge_claims(bat_priv, primary_if, 0);
+	batadv_bla_purge_backbone_gw(bat_priv, 0);

 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 		goto out;
@@ -1110,67 +1129,81 @@ static void bla_periodic_work(struct work_struct *work)

 		rcu_read_lock();
 		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
-			if (!compare_eth(backbone_gw->orig,
+			if (!batadv_compare_eth(backbone_gw->orig,
 					 primary_if->net_dev->dev_addr))
 				continue;

 			backbone_gw->lasttime = jiffies;

-			bla_send_announce(bat_priv, backbone_gw);
+			batadv_bla_send_announce(bat_priv, backbone_gw);
 		}
 		rcu_read_unlock();
 	}
 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);

-	bla_start_timer(bat_priv);
+	batadv_bla_start_timer(bat_priv);
 }

+/* The hash for claim and backbone hash receive the same key because they
+ * are getting initialized by hash_new with the same key. Reinitializing
+ * them with to different keys to allow nested locking without generating
+ * lockdep warnings
+ */
+static struct lock_class_key batadv_claim_hash_lock_class_key;
+static struct lock_class_key batadv_backbone_hash_lock_class_key;
+
 /* initialize all bla structures */
-int bla_init(struct bat_priv *bat_priv)
+int batadv_bla_init(struct batadv_priv *bat_priv)
 {
 	int i;
 	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
-	struct hard_iface *primary_if;
+	struct batadv_hard_iface *primary_if;

-	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

 	/* setting claim destination address */
 	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
 	bat_priv->claim_dest.type = 0;
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (primary_if) {
 		bat_priv->claim_dest.group =
 			htons(crc16(0, primary_if->net_dev->dev_addr,
 				    ETH_ALEN));
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	} else {
 		bat_priv->claim_dest.group = 0; /* will be set later */
 	}

 	/* initialize the duplicate list */
-	for (i = 0; i < DUPLIST_SIZE; i++)
+	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
 		bat_priv->bcast_duplist[i].entrytime =
-			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+			jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
 	bat_priv->bcast_duplist_curr = 0;

 	if (bat_priv->claim_hash)
-		return 1;
+		return 0;

-	bat_priv->claim_hash = hash_new(128);
-	bat_priv->backbone_hash = hash_new(32);
+	bat_priv->claim_hash = batadv_hash_new(128);
+	bat_priv->backbone_hash = batadv_hash_new(32);

 	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
-		return -1;
+		return -ENOMEM;

-	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+	batadv_hash_set_lock_class(bat_priv->claim_hash,
+				   &batadv_claim_hash_lock_class_key);
+	batadv_hash_set_lock_class(bat_priv->backbone_hash,
+				   &batadv_backbone_hash_lock_class_key);

-	bla_start_timer(bat_priv);
-	return 1;
+	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+	batadv_bla_start_timer(bat_priv);
+	return 0;
 }

 /**
+ * batadv_bla_check_bcast_duplist
  * @bat_priv: the bat priv with all the soft interface information
  * @bcast_packet: originator mac address
  * @hdr_size: maximum length of the frame
@@ -1183,17 +1216,15 @@ int bla_init(struct bat_priv *bat_priv)
  * with a good chance that it is the same packet. If it is furthermore
  * sent by another host, drop it. We allow equal packets from
  * the same host however as this might be intended.
- *
- **/
-
-int bla_check_bcast_duplist(struct bat_priv *bat_priv,
-			    struct bcast_packet *bcast_packet,
-			    int hdr_size)
+ */
+int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+				   struct batadv_bcast_packet *bcast_packet,
+				   int hdr_size)
 {
 	int i, length, curr;
 	uint8_t *content;
 	uint16_t crc;
-	struct bcast_duplist_entry *entry;
+	struct batadv_bcast_duplist_entry *entry;

 	length = hdr_size - sizeof(*bcast_packet);
 	content = (uint8_t *)bcast_packet;
@@ -1202,20 +1233,21 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 	/* calculate the crc ... */
 	crc = crc16(0, content, length);

-	for (i = 0 ; i < DUPLIST_SIZE; i++) {
-		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
+		curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
 		entry = &bat_priv->bcast_duplist[curr];

 		/* we can stop searching if the entry is too old ;
 		 * later entries will be even older
 		 */
-		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+		if (batadv_has_timed_out(entry->entrytime,
+					 BATADV_DUPLIST_TIMEOUT))
 			break;

 		if (entry->crc != crc)
 			continue;

-		if (compare_eth(entry->orig, bcast_packet->orig))
+		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
 			continue;

 		/* this entry seems to match: same crc, not too old,
@@ -1224,7 +1256,8 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 		return 1;
 	}
 	/* not found, add a new entry (overwrite the oldest entry) */
-	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+	curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+	curr %= BATADV_DUPLIST_SIZE;
 	entry = &bat_priv->bcast_duplist[curr];
 	entry->crc = crc;
 	entry->entrytime = jiffies;
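
The broadcast duplicate list above is a small ring scanned from the newest entry backwards in time; the search may stop at the first expired slot because older slots are older still, and a miss overwrites the oldest slot, which then becomes the new head. A userspace model of the same logic; sizes, timeout and the entry layout are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DUP_SIZE 16
#define ETH_ALEN 6

struct dup_entry {
	uint16_t crc;
	long entrytime;
	uint8_t orig[ETH_ALEN];
};

static struct dup_entry duplist[DUP_SIZE];
static int duplist_curr;

static bool expired(long entrytime, long now) { return now - entrytime > 500; }

/* returns true if this looks like a rebroadcast from another host */
static bool is_duplicate(uint16_t crc, const uint8_t *orig, long now)
{
	int i, curr;

	for (i = 0; i < DUP_SIZE; i++) {
		curr = (duplist_curr + i) % DUP_SIZE;
		if (expired(duplist[curr].entrytime, now))
			break;			/* everything behind is older still */
		if (duplist[curr].crc != crc)
			continue;
		if (memcmp(duplist[curr].orig, orig, ETH_ALEN) == 0)
			continue;		/* same sender: allowed */
		return true;			/* same payload, different sender */
	}

	/* not found: overwrite the oldest slot and make it the new head */
	curr = (duplist_curr + DUP_SIZE - 1) % DUP_SIZE;
	duplist[curr].crc = crc;
	duplist[curr].entrytime = now;
	memcpy(duplist[curr].orig, orig, ETH_ALEN);
	duplist_curr = curr;
	return false;
}

int main(void)
{
	uint8_t a[ETH_ALEN] = { 1 }, b[ETH_ALEN] = { 2 };

	printf("%d\n", is_duplicate(0x1234, a, 600));	/* 0: first sighting */
	printf("%d\n", is_duplicate(0x1234, b, 610));	/* 1: same crc, other host */
	return 0;
}
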
@@ -1237,22 +1270,19 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,



-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
  *
  * check if the originator is a gateway for any VLAN ID.
  *
  * returns 1 if it is found, 0 otherwise
- *
  */
-
-int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
-	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct batadv_hashtable *hash = bat_priv->backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 	int i;

 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@ -1266,7 +1296,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)

 		rcu_read_lock();
 		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
-			if (compare_eth(backbone_gw->orig, orig)) {
+			if (batadv_compare_eth(backbone_gw->orig, orig)) {
 				rcu_read_unlock();
 				return 1;
 			}
@@ -1279,6 +1309,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)


 /**
+ * batadv_bla_is_backbone_gw
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
@@ -1286,14 +1317,13 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
  * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
  * if the orig_node is also a gateway on the soft interface, otherwise it
  * returns 0.
- *
  */
-int bla_is_backbone_gw(struct sk_buff *skb,
-		       struct orig_node *orig_node, int hdr_size)
+int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+			      struct batadv_orig_node *orig_node, int hdr_size)
 {
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
-	struct backbone_gw *backbone_gw;
+	struct batadv_backbone_gw *backbone_gw;
 	short vid = -1;

 	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@ -1315,42 +1345,43 @@ int bla_is_backbone_gw(struct sk_buff *skb,
 	}

 	/* see if this originator is a backbone gw for this VLAN */
-
-	backbone_gw = backbone_hash_find(orig_node->bat_priv,
-					 orig_node->orig, vid);
+	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
+						orig_node->orig, vid);
 	if (!backbone_gw)
 		return 0;

-	backbone_gw_free_ref(backbone_gw);
+	batadv_backbone_gw_free_ref(backbone_gw);
 	return 1;
 }

 /* free all bla structures (for softinterface free or module unload) */
-void bla_free(struct bat_priv *bat_priv)
+void batadv_bla_free(struct batadv_priv *bat_priv)
 {
-	struct hard_iface *primary_if;
+	struct batadv_hard_iface *primary_if;

 	cancel_delayed_work_sync(&bat_priv->bla_work);
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);

 	if (bat_priv->claim_hash) {
-		bla_purge_claims(bat_priv, primary_if, 1);
-		hash_destroy(bat_priv->claim_hash);
+		batadv_bla_purge_claims(bat_priv, primary_if, 1);
+		batadv_hash_destroy(bat_priv->claim_hash);
 		bat_priv->claim_hash = NULL;
 	}
 	if (bat_priv->backbone_hash) {
-		bla_purge_backbone_gw(bat_priv, 1);
-		hash_destroy(bat_priv->backbone_hash);
+		batadv_bla_purge_backbone_gw(bat_priv, 1);
+		batadv_hash_destroy(bat_priv->backbone_hash);
 		bat_priv->backbone_hash = NULL;
 	}
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 }

 /**
+ * batadv_bla_rx
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  * * we have to race for a claim
@@ -1359,18 +1390,18 @@ void bla_free(struct bat_priv *bat_priv)
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
- *
  */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast)
 {
 	struct ethhdr *ethhdr;
-	struct claim search_claim, *claim = NULL;
-	struct hard_iface *primary_if;
+	struct batadv_claim search_claim, *claim = NULL;
+	struct batadv_hard_iface *primary_if;
 	int ret;

 	ethhdr = (struct ethhdr *)skb_mac_header(skb);

-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto handled;

@@ -1380,47 +1411,52 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)

 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;

 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
 	search_claim.vid = vid;
-	claim = claim_hash_find(bat_priv, &search_claim);
+	claim = batadv_claim_hash_find(bat_priv, &search_claim);

 	if (!claim) {
 		/* possible optimization: race for a claim */
 		/* No claim exists yet, claim it for us!
 		 */
-		handle_claim(bat_priv, primary_if,
+		batadv_handle_claim(bat_priv, primary_if,
 			     primary_if->net_dev->dev_addr,
 			     ethhdr->h_source, vid);
 		goto allow;
 	}

 	/* if it is our own claim ... */
-	if (compare_eth(claim->backbone_gw->orig,
+	if (batadv_compare_eth(claim->backbone_gw->orig,
 			primary_if->net_dev->dev_addr)) {
 		/* ... allow it in any case */
 		claim->lasttime = jiffies;
 		goto allow;
 	}

 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
 		 * send a claim and update the claim table
 		 * immediately.
 		 */
-		handle_claim(bat_priv, primary_if,
+		batadv_handle_claim(bat_priv, primary_if,
 			     primary_if->net_dev->dev_addr,
 			     ethhdr->h_source, vid);
 		goto allow;
 	}
 allow:
-	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 	ret = 0;
 	goto out;

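The rx path above boils down to a small decision table: unclaimed clients are claimed on the spot, own claims are always allowed, and foreign-claimed clients have their broadcasts dropped while their unicast traffic triggers a takeover claim. The new is_bcast flag matters because the gateway feature can carry broadcasts such as DHCP requests in unicast packets. A sketch of the verdict logic under those assumptions:

#include <stdbool.h>
#include <stdio.h>

enum rx_verdict { RX_ALLOW, RX_DROP, RX_CLAIM_AND_ALLOW };

static enum rx_verdict bla_rx_verdict(bool claimed, bool claimed_by_us,
				      bool is_multicast, bool is_bcast)
{
	if (!claimed)
		return RX_CLAIM_AND_ALLOW;	/* race for the claim */
	if (claimed_by_us)
		return RX_ALLOW;		/* our own client, always fine */
	if (is_multicast && is_bcast)
		return RX_DROP;			/* the claiming gateway forwards it */
	return RX_CLAIM_AND_ALLOW;		/* client roamed to us */
}

int main(void)
{
	/* foreign-claimed client, true broadcast: drop (prints 1) */
	printf("%d\n", bla_rx_verdict(true, false, true, true));
	/* foreign-claimed client, unicast: take over the claim (prints 2) */
	printf("%d\n", bla_rx_verdict(true, false, false, false));
	return 0;
}
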
@@ -1430,13 +1466,14 @@ handled:

 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	if (claim)
-		claim_free_ref(claim);
+		batadv_claim_free_ref(claim);
 	return ret;
 }

 /**
+ * batadv_bla_tx
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -1448,16 +1485,15 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
- *
  */
-int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 {
 	struct ethhdr *ethhdr;
-	struct claim search_claim, *claim = NULL;
-	struct hard_iface *primary_if;
+	struct batadv_claim search_claim, *claim = NULL;
+	struct batadv_hard_iface *primary_if;
 	int ret = 0;

-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;

@@ -1467,7 +1503,7 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 	/* in VLAN case, the mac header might not be set. */
 	skb_reset_mac_header(skb);

-	if (bla_process_claim(bat_priv, primary_if, skb))
+	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
 		goto handled;

 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1480,21 +1516,21 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
 	search_claim.vid = vid;

-	claim = claim_hash_find(bat_priv, &search_claim);
+	claim = batadv_claim_hash_find(bat_priv, &search_claim);

 	/* if no claim exists, allow it. */
 	if (!claim)
 		goto allow;

 	/* check if we are responsible. */
-	if (compare_eth(claim->backbone_gw->orig,
+	if (batadv_compare_eth(claim->backbone_gw->orig,
 			primary_if->net_dev->dev_addr)) {
 		/* if yes, the client has roamed and we have
 		 * to unclaim it.
 		 */
-		handle_unclaim(bat_priv, primary_if,
+		batadv_handle_unclaim(bat_priv, primary_if,
 			       primary_if->net_dev->dev_addr,
 			       ethhdr->h_source, vid);
 		goto allow;
 	}

@@ -1511,33 +1547,34 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1511 goto allow; 1547 goto allow;
1512 } 1548 }
1513allow: 1549allow:
1514 bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1550 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1515 ret = 0; 1551 ret = 0;
1516 goto out; 1552 goto out;
1517handled: 1553handled:
1518 ret = 1; 1554 ret = 1;
1519out: 1555out:
1520 if (primary_if) 1556 if (primary_if)
1521 hardif_free_ref(primary_if); 1557 batadv_hardif_free_ref(primary_if);
1522 if (claim) 1558 if (claim)
1523 claim_free_ref(claim); 1559 batadv_claim_free_ref(claim);
1524 return ret; 1560 return ret;
1525} 1561}
1526 1562
1527int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) 1563int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1528{ 1564{
1529 struct net_device *net_dev = (struct net_device *)seq->private; 1565 struct net_device *net_dev = (struct net_device *)seq->private;
1530 struct bat_priv *bat_priv = netdev_priv(net_dev); 1566 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1531 struct hashtable_t *hash = bat_priv->claim_hash; 1567 struct batadv_hashtable *hash = bat_priv->claim_hash;
1532 struct claim *claim; 1568 struct batadv_claim *claim;
1533 struct hard_iface *primary_if; 1569 struct batadv_hard_iface *primary_if;
1534 struct hlist_node *node; 1570 struct hlist_node *node;
1535 struct hlist_head *head; 1571 struct hlist_head *head;
1536 uint32_t i; 1572 uint32_t i;
1537 bool is_own; 1573 bool is_own;
1538 int ret = 0; 1574 int ret = 0;
1575 uint8_t *primary_addr;
1539 1576
1540 primary_if = primary_if_get_selected(bat_priv); 1577 primary_if = batadv_primary_if_get_selected(bat_priv);
1541 if (!primary_if) { 1578 if (!primary_if) {
1542 ret = seq_printf(seq, 1579 ret = seq_printf(seq,
1543 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 1580 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -1545,16 +1582,17 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1545 goto out; 1582 goto out;
1546 } 1583 }
1547 1584
1548 if (primary_if->if_status != IF_ACTIVE) { 1585 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1549 ret = seq_printf(seq, 1586 ret = seq_printf(seq,
1550 "BATMAN mesh %s disabled - primary interface not active\n", 1587 "BATMAN mesh %s disabled - primary interface not active\n",
1551 net_dev->name); 1588 net_dev->name);
1552 goto out; 1589 goto out;
1553 } 1590 }
1554 1591
1592 primary_addr = primary_if->net_dev->dev_addr;
1555 seq_printf(seq, 1593 seq_printf(seq,
1556 "Claims announced for the mesh %s (orig %pM, group id %04x)\n", 1594 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1557 net_dev->name, primary_if->net_dev->dev_addr, 1595 net_dev->name, primary_addr,
1558 ntohs(bat_priv->claim_dest.group)); 1596 ntohs(bat_priv->claim_dest.group));
1559 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", 1597 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1560 "Client", "VID", "Originator", "CRC"); 1598 "Client", "VID", "Originator", "CRC");
@@ -1563,8 +1601,8 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1563 1601
1564 rcu_read_lock(); 1602 rcu_read_lock();
1565 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1603 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1566 is_own = compare_eth(claim->backbone_gw->orig, 1604 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1567 primary_if->net_dev->dev_addr); 1605 primary_addr);
1568 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", 1606 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
1569 claim->addr, claim->vid, 1607 claim->addr, claim->vid,
1570 claim->backbone_gw->orig, 1608 claim->backbone_gw->orig,
@@ -1575,6 +1613,6 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1575 } 1613 }
1576out: 1614out:
1577 if (primary_if) 1615 if (primary_if)
1578 hardif_free_ref(primary_if); 1616 batadv_hardif_free_ref(primary_if);
1579 return ret; 1617 return ret;
1580} 1618}
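Two things happen in the seq_print hunk above: the primary MAC is hoisted into a local primary_addr so the RCU loop no longer chases primary_if->net_dev->dev_addr on every claim, and ownership of a claim is decided by batadv_compare_eth(). The helper's body lies outside this diff; apart from the rename it is a plain byte-wise MAC comparison, which the user-space sketch below reproduces (compare_eth_sketch is a made-up name):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* bytes in a MAC address */

/* mirrors what batadv_compare_eth() amounts to: byte-wise equality */
static bool compare_eth_sketch(const void *data1, const void *data2)
{
	return memcmp(data1, data2, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char gw_orig[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char primary[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* "is_own" in the claim table: does this claim point back at us? */
	printf("is_own=%d\n", compare_eth_sketch(gw_orig, primary));
	return 0;
}

Caching the pointer is safe here because primary_if holds a reference for the whole function, so the underlying net_device cannot go away under the loop.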
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index e39f93acc28f..563cfbf94a7f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich 3 * Simon Wunderlich
5 * 4 *
@@ -16,80 +15,84 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_BLA_H_ 20#ifndef _NET_BATMAN_ADV_BLA_H_
23#define _NET_BATMAN_ADV_BLA_H_ 21#define _NET_BATMAN_ADV_BLA_H_
24 22
25#ifdef CONFIG_BATMAN_ADV_BLA 23#ifdef CONFIG_BATMAN_ADV_BLA
26int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); 24int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
27int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); 25 bool is_bcast);
28int bla_is_backbone_gw(struct sk_buff *skb, 26int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
29 struct orig_node *orig_node, int hdr_size); 27int batadv_bla_is_backbone_gw(struct sk_buff *skb,
30int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); 28 struct batadv_orig_node *orig_node, int hdr_size);
31int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); 29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
32int bla_check_bcast_duplist(struct bat_priv *bat_priv, 30int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
33 struct bcast_packet *bcast_packet, int hdr_size); 31int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
34void bla_update_orig_address(struct bat_priv *bat_priv, 32 struct batadv_bcast_packet *bcast_packet,
35 struct hard_iface *primary_if, 33 int hdr_size);
36 struct hard_iface *oldif); 34void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
37int bla_init(struct bat_priv *bat_priv); 35 struct batadv_hard_iface *primary_if,
38void bla_free(struct bat_priv *bat_priv); 36 struct batadv_hard_iface *oldif);
37int batadv_bla_init(struct batadv_priv *bat_priv);
38void batadv_bla_free(struct batadv_priv *bat_priv);
39 39
40#define BLA_CRC_INIT 0 40#define BATADV_BLA_CRC_INIT 0
41#else /* ifdef CONFIG_BATMAN_ADV_BLA */ 41#else /* ifdef CONFIG_BATMAN_ADV_BLA */
42 42
43static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, 43static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
44 short vid) 44 struct sk_buff *skb, short vid,
45 bool is_bcast)
45{ 46{
46 return 0; 47 return 0;
47} 48}
48 49
49static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, 50static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
50 short vid) 51 struct sk_buff *skb, short vid)
51{ 52{
52 return 0; 53 return 0;
53} 54}
54 55
55static inline int bla_is_backbone_gw(struct sk_buff *skb, 56static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
56 struct orig_node *orig_node, 57 struct batadv_orig_node *orig_node,
57 int hdr_size) 58 int hdr_size)
58{ 59{
59 return 0; 60 return 0;
60} 61}
61 62
62static inline int bla_claim_table_seq_print_text(struct seq_file *seq, 63static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
63 void *offset) 64 void *offset)
64{ 65{
65 return 0; 66 return 0;
66} 67}
67 68
68static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, 69static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
69 uint8_t *orig) 70 uint8_t *orig)
70{ 71{
71 return 0; 72 return 0;
72} 73}
73 74
74static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, 75static inline int
75 struct bcast_packet *bcast_packet, 76batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
76 int hdr_size) 77 struct batadv_bcast_packet *bcast_packet,
78 int hdr_size)
77{ 79{
78 return 0; 80 return 0;
79} 81}
80 82
81static inline void bla_update_orig_address(struct bat_priv *bat_priv, 83static inline void
82 struct hard_iface *primary_if, 84batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
83 struct hard_iface *oldif) 85 struct batadv_hard_iface *primary_if,
86 struct batadv_hard_iface *oldif)
84{ 87{
85} 88}
86 89
87static inline int bla_init(struct bat_priv *bat_priv) 90static inline int batadv_bla_init(struct batadv_priv *bat_priv)
88{ 91{
89 return 1; 92 return 1;
90} 93}
91 94
92static inline void bla_free(struct bat_priv *bat_priv) 95static inline void batadv_bla_free(struct batadv_priv *bat_priv)
93{ 96{
94} 97}
95 98
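The #else branch above is the usual compile-out idiom: every exported batadv_bla_*() entry point gets a static inline stub, so call sites elsewhere in the module build unchanged whether CONFIG_BATMAN_ADV_BLA is set or not. A generic, self-contained sketch of the pattern (CONFIG_FOO and foo_rx are placeholders, not real symbols):

/* struct sk_buff is only forward-declared; the stub never touches it */
struct sk_buff;

#ifdef CONFIG_FOO
int foo_rx(struct sk_buff *skb);	/* real implementation lives in foo.c */
#else
static inline int foo_rx(struct sk_buff *skb)
{
	return 0;			/* feature compiled out: "not handled" */
}
#endif

int main(void)
{
	return foo_rx((struct sk_buff *)0);	/* uses the stub unless CONFIG_FOO is set */
}

Because the stubs are static inline and return the "not handled" value, the optimizer removes the calls entirely in a feature-less build; note that the BLA init stub returns 1 rather than 0, matching what its callers treat as success.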
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
new file mode 100644
index 000000000000..34fbb1667bcd
--- /dev/null
+++ b/net/batman-adv/debugfs.c
@@ -0,0 +1,409 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include "main.h"
21
22#include <linux/debugfs.h>
23
24#include "debugfs.h"
25#include "translation-table.h"
26#include "originator.h"
27#include "hard-interface.h"
28#include "gateway_common.h"
29#include "gateway_client.h"
30#include "soft-interface.h"
31#include "vis.h"
32#include "icmp_socket.h"
33#include "bridge_loop_avoidance.h"
34
35static struct dentry *batadv_debugfs;
36
37#ifdef CONFIG_BATMAN_ADV_DEBUG
38#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
39
40static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
41
42static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
43 size_t idx)
44{
45 return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
46}
47
48static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
49{
50 char *char_addr;
51
52 char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
53 *char_addr = c;
54 debug_log->log_end++;
55
56 if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
57 debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
58}
59
60__printf(2, 3)
61static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
62 const char *fmt, ...)
63{
64 va_list args;
65 static char debug_log_buf[256];
66 char *p;
67
68 if (!debug_log)
69 return 0;
70
71 spin_lock_bh(&debug_log->lock);
72 va_start(args, fmt);
73 vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
74 va_end(args);
75
76 for (p = debug_log_buf; *p != 0; p++)
77 batadv_emit_log_char(debug_log, *p);
78
79 spin_unlock_bh(&debug_log->lock);
80
81 wake_up(&debug_log->queue_wait);
82
83 return 0;
84}
85
86int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
87{
88 va_list args;
89 char tmp_log_buf[256];
90
91 va_start(args, fmt);
92 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
93 batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
94 jiffies_to_msecs(jiffies), tmp_log_buf);
95 va_end(args);
96
97 return 0;
98}
99
100static int batadv_log_open(struct inode *inode, struct file *file)
101{
102 nonseekable_open(inode, file);
103 file->private_data = inode->i_private;
104 batadv_inc_module_count();
105 return 0;
106}
107
108static int batadv_log_release(struct inode *inode, struct file *file)
109{
110 batadv_dec_module_count();
111 return 0;
112}
113
114static int batadv_log_empty(struct batadv_debug_log *debug_log)
115{
116 return !(debug_log->log_start - debug_log->log_end);
117}
118
119static ssize_t batadv_log_read(struct file *file, char __user *buf,
120 size_t count, loff_t *ppos)
121{
122 struct batadv_priv *bat_priv = file->private_data;
123 struct batadv_debug_log *debug_log = bat_priv->debug_log;
124 int error, i = 0;
125 char *char_addr;
126 char c;
127
128 if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
129 return -EAGAIN;
130
131 if (!buf)
132 return -EINVAL;
133
134 if (count == 0)
135 return 0;
136
137 if (!access_ok(VERIFY_WRITE, buf, count))
138 return -EFAULT;
139
140 error = wait_event_interruptible(debug_log->queue_wait,
141 (!batadv_log_empty(debug_log)));
142
143 if (error)
144 return error;
145
146 spin_lock_bh(&debug_log->lock);
147
148 while ((!error) && (i < count) &&
149 (debug_log->log_start != debug_log->log_end)) {
150 char_addr = batadv_log_char_addr(debug_log,
151 debug_log->log_start);
152 c = *char_addr;
153
154 debug_log->log_start++;
155
156 spin_unlock_bh(&debug_log->lock);
157
158 error = __put_user(c, buf);
159
160 spin_lock_bh(&debug_log->lock);
161
162 buf++;
163 i++;
164
165 }
166
167 spin_unlock_bh(&debug_log->lock);
168
169 if (!error)
170 return i;
171
172 return error;
173}
174
175static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
176{
177 struct batadv_priv *bat_priv = file->private_data;
178 struct batadv_debug_log *debug_log = bat_priv->debug_log;
179
180 poll_wait(file, &debug_log->queue_wait, wait);
181
182 if (!batadv_log_empty(debug_log))
183 return POLLIN | POLLRDNORM;
184
185 return 0;
186}
187
188static const struct file_operations batadv_log_fops = {
189 .open = batadv_log_open,
190 .release = batadv_log_release,
191 .read = batadv_log_read,
192 .poll = batadv_log_poll,
193 .llseek = no_llseek,
194};
195
196static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
197{
198 struct dentry *d;
199
200 if (!bat_priv->debug_dir)
201 goto err;
202
203 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
204 if (!bat_priv->debug_log)
205 goto err;
206
207 spin_lock_init(&bat_priv->debug_log->lock);
208 init_waitqueue_head(&bat_priv->debug_log->queue_wait);
209
210 d = debugfs_create_file("log", S_IFREG | S_IRUSR,
211 bat_priv->debug_dir, bat_priv,
212 &batadv_log_fops);
213 if (!d)
214 goto err;
215
216 return 0;
217
218err:
219 return -ENOMEM;
220}
221
222static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
223{
224 kfree(bat_priv->debug_log);
225 bat_priv->debug_log = NULL;
226}
227#else /* CONFIG_BATMAN_ADV_DEBUG */
228static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
229{
230 bat_priv->debug_log = NULL;
231 return 0;
232}
233
234static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
235{
236 return;
237}
238#endif
239
240static int batadv_algorithms_open(struct inode *inode, struct file *file)
241{
242 return single_open(file, batadv_algo_seq_print_text, NULL);
243}
244
245static int batadv_originators_open(struct inode *inode, struct file *file)
246{
247 struct net_device *net_dev = (struct net_device *)inode->i_private;
248 return single_open(file, batadv_orig_seq_print_text, net_dev);
249}
250
251static int batadv_gateways_open(struct inode *inode, struct file *file)
252{
253 struct net_device *net_dev = (struct net_device *)inode->i_private;
254 return single_open(file, batadv_gw_client_seq_print_text, net_dev);
255}
256
257static int batadv_transtable_global_open(struct inode *inode, struct file *file)
258{
259 struct net_device *net_dev = (struct net_device *)inode->i_private;
260 return single_open(file, batadv_tt_global_seq_print_text, net_dev);
261}
262
263#ifdef CONFIG_BATMAN_ADV_BLA
264static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
265{
266 struct net_device *net_dev = (struct net_device *)inode->i_private;
267 return single_open(file, batadv_bla_claim_table_seq_print_text,
268 net_dev);
269}
270#endif
271
272static int batadv_transtable_local_open(struct inode *inode, struct file *file)
273{
274 struct net_device *net_dev = (struct net_device *)inode->i_private;
275 return single_open(file, batadv_tt_local_seq_print_text, net_dev);
276}
277
278static int batadv_vis_data_open(struct inode *inode, struct file *file)
279{
280 struct net_device *net_dev = (struct net_device *)inode->i_private;
281 return single_open(file, batadv_vis_seq_print_text, net_dev);
282}
283
284struct batadv_debuginfo {
285 struct attribute attr;
286 const struct file_operations fops;
287};
288
289#define BATADV_DEBUGINFO(_name, _mode, _open) \
290struct batadv_debuginfo batadv_debuginfo_##_name = { \
291 .attr = { .name = __stringify(_name), \
292 .mode = _mode, }, \
293 .fops = { .owner = THIS_MODULE, \
294 .open = _open, \
295 .read = seq_read, \
296 .llseek = seq_lseek, \
297 .release = single_release, \
298 } \
299};
300
301static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
302static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
303static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
304static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
305 batadv_transtable_global_open);
306#ifdef CONFIG_BATMAN_ADV_BLA
307static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
308#endif
309static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
310 batadv_transtable_local_open);
311static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
312
313static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
314 &batadv_debuginfo_originators,
315 &batadv_debuginfo_gateways,
316 &batadv_debuginfo_transtable_global,
317#ifdef CONFIG_BATMAN_ADV_BLA
318 &batadv_debuginfo_bla_claim_table,
319#endif
320 &batadv_debuginfo_transtable_local,
321 &batadv_debuginfo_vis_data,
322 NULL,
323};
324
325void batadv_debugfs_init(void)
326{
327 struct batadv_debuginfo *bat_debug;
328 struct dentry *file;
329
330 batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
331 if (batadv_debugfs == ERR_PTR(-ENODEV))
332 batadv_debugfs = NULL;
333
334 if (!batadv_debugfs)
335 goto out;
336
337 bat_debug = &batadv_debuginfo_routing_algos;
338 file = debugfs_create_file(bat_debug->attr.name,
339 S_IFREG | bat_debug->attr.mode,
340 batadv_debugfs, NULL, &bat_debug->fops);
341 if (!file)
342 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
343
344out:
345 return;
346}
347
348void batadv_debugfs_destroy(void)
349{
350 if (batadv_debugfs) {
351 debugfs_remove_recursive(batadv_debugfs);
352 batadv_debugfs = NULL;
353 }
354}
355
356int batadv_debugfs_add_meshif(struct net_device *dev)
357{
358 struct batadv_priv *bat_priv = netdev_priv(dev);
359 struct batadv_debuginfo **bat_debug;
360 struct dentry *file;
361
362 if (!batadv_debugfs)
363 goto out;
364
365 bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
366 if (!bat_priv->debug_dir)
367 goto out;
368
369 if (batadv_socket_setup(bat_priv) < 0)
370 goto rem_attr;
371
372 if (batadv_debug_log_setup(bat_priv) < 0)
373 goto rem_attr;
374
375 for (bat_debug = batadv_mesh_debuginfos; *bat_debug; ++bat_debug) {
376 file = debugfs_create_file(((*bat_debug)->attr).name,
377 S_IFREG | ((*bat_debug)->attr).mode,
378 bat_priv->debug_dir,
379 dev, &(*bat_debug)->fops);
380 if (!file) {
381 batadv_err(dev, "Can't add debugfs file: %s/%s\n",
382 dev->name, ((*bat_debug)->attr).name);
383 goto rem_attr;
384 }
385 }
386
387 return 0;
388rem_attr:
389 debugfs_remove_recursive(bat_priv->debug_dir);
390 bat_priv->debug_dir = NULL;
391out:
392#ifdef CONFIG_DEBUG_FS
393 return -ENOMEM;
394#else
395 return 0;
396#endif /* CONFIG_DEBUG_FS */
397}
398
399void batadv_debugfs_del_meshif(struct net_device *dev)
400{
401 struct batadv_priv *bat_priv = netdev_priv(dev);
402
403 batadv_debug_log_cleanup(bat_priv);
404
405 if (batadv_debugfs) {
406 debugfs_remove_recursive(bat_priv->debug_dir);
407 bat_priv->debug_dir = NULL;
408 }
409}
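The debug log above is a classic power-of-two ring buffer: log_start and log_end are free-running counters, batadv_log_char_addr() masks them with BATADV_LOG_BUFF_MASK instead of taking a modulo, and the writer advances log_start once the distance exceeds the buffer length so the oldest characters are silently overwritten. A stand-alone sketch of the same technique (BUF_LEN 16 is an illustrative size; the mask trick only works because the real BATADV_LOG_BUF_LEN is assumed to be a power of two):

#include <stdio.h>

#define BUF_LEN 16			/* power of two, stand-in value */
#define BUF_MASK (BUF_LEN - 1)

static char log_buff[BUF_LEN];
static unsigned long log_start, log_end;

static void emit_char(char c)
{
	log_buff[log_end & BUF_MASK] = c;
	log_end++;

	/* overwrite the oldest data once the buffer is full */
	if (log_end - log_start > BUF_LEN)
		log_start = log_end - BUF_LEN;
}

int main(void)
{
	const char *msg = "hello ring buffer, this overflows sixteen bytes";

	while (*msg)
		emit_char(*msg++);

	/* drain the way batadv_log_read() does: chase log_end with log_start */
	while (log_start != log_end)
		putchar(log_buff[log_start++ & BUF_MASK]);
	putchar('\n');
	return 0;
}

Only the last BUF_LEN characters survive, which is exactly the behaviour a kernel debug log wants: never block the writer, drop the oldest data first.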
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/debugfs.h
index d605c6746428..3319e1f21f55 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,18 +15,16 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22
23#ifndef _NET_BATMAN_ADV_DEBUGFS_H_ 20#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
24#define _NET_BATMAN_ADV_DEBUGFS_H_ 21#define _NET_BATMAN_ADV_DEBUGFS_H_
25 22
26#define DEBUGFS_BAT_SUBDIR "batman_adv" 23#define BATADV_DEBUGFS_SUBDIR "batman_adv"
27 24
28void debugfs_init(void); 25void batadv_debugfs_init(void);
29void debugfs_destroy(void); 26void batadv_debugfs_destroy(void);
30int debugfs_add_meshif(struct net_device *dev); 27int batadv_debugfs_add_meshif(struct net_device *dev);
31void debugfs_del_meshif(struct net_device *dev); 28void batadv_debugfs_del_meshif(struct net_device *dev);
32 29
33#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ 30#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
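These four hooks are the whole debugfs surface the rest of the module sees. A hypothetical wiring sketch follows (batadv_sketch_init/exit are invented names, and that main.c wires the hooks this way is an assumption, not something shown in this diff):

#include <linux/init.h>
#include <linux/module.h>
#include "debugfs.h"

static int __init batadv_sketch_init(void)
{
	batadv_debugfs_init();		/* creates /sys/kernel/debug/batman_adv */
	return 0;
}

static void __exit batadv_sketch_exit(void)
{
	batadv_debugfs_destroy();	/* tears the whole subtree down again */
}

module_init(batadv_sketch_init);
module_exit(batadv_sketch_exit);
MODULE_LICENSE("GPL");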
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 47f7186dcefc..b421cc49d2cd 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,11 +15,10 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "bat_sysfs.h" 21#include "sysfs.h"
24#include "gateway_client.h" 22#include "gateway_client.h"
25#include "gateway_common.h" 23#include "gateway_common.h"
26#include "hard-interface.h" 24#include "hard-interface.h"
@@ -33,19 +31,21 @@
33#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
34 32
35/* This is the offset of the options field in a dhcp packet starting at 33/* This is the offset of the options field in a dhcp packet starting at
36 * the beginning of the dhcp header */ 34 * the beginning of the dhcp header
37#define DHCP_OPTIONS_OFFSET 240 35 */
38#define DHCP_REQUEST 3 36#define BATADV_DHCP_OPTIONS_OFFSET 240
37#define BATADV_DHCP_REQUEST 3
39 38
40static void gw_node_free_ref(struct gw_node *gw_node) 39static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
41{ 40{
42 if (atomic_dec_and_test(&gw_node->refcount)) 41 if (atomic_dec_and_test(&gw_node->refcount))
43 kfree_rcu(gw_node, rcu); 42 kfree_rcu(gw_node, rcu);
44} 43}
45 44
46static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv) 45static struct batadv_gw_node *
46batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
47{ 47{
48 struct gw_node *gw_node; 48 struct batadv_gw_node *gw_node;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 gw_node = rcu_dereference(bat_priv->curr_gw); 51 gw_node = rcu_dereference(bat_priv->curr_gw);
@@ -60,12 +60,13 @@ out:
60 return gw_node; 60 return gw_node;
61} 61}
62 62
63struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv) 63struct batadv_orig_node *
64batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
64{ 65{
65 struct gw_node *gw_node; 66 struct batadv_gw_node *gw_node;
66 struct orig_node *orig_node = NULL; 67 struct batadv_orig_node *orig_node = NULL;
67 68
68 gw_node = gw_get_selected_gw_node(bat_priv); 69 gw_node = batadv_gw_get_selected_gw_node(bat_priv);
69 if (!gw_node) 70 if (!gw_node)
70 goto out; 71 goto out;
71 72
@@ -81,13 +82,14 @@ unlock:
81 rcu_read_unlock(); 82 rcu_read_unlock();
82out: 83out:
83 if (gw_node) 84 if (gw_node)
84 gw_node_free_ref(gw_node); 85 batadv_gw_node_free_ref(gw_node);
85 return orig_node; 86 return orig_node;
86} 87}
87 88
88static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) 89static void batadv_gw_select(struct batadv_priv *bat_priv,
90 struct batadv_gw_node *new_gw_node)
89{ 91{
90 struct gw_node *curr_gw_node; 92 struct batadv_gw_node *curr_gw_node;
91 93
92 spin_lock_bh(&bat_priv->gw_list_lock); 94 spin_lock_bh(&bat_priv->gw_list_lock);
93 95
@@ -98,31 +100,34 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
98 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 100 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
99 101
100 if (curr_gw_node) 102 if (curr_gw_node)
101 gw_node_free_ref(curr_gw_node); 103 batadv_gw_node_free_ref(curr_gw_node);
102 104
103 spin_unlock_bh(&bat_priv->gw_list_lock); 105 spin_unlock_bh(&bat_priv->gw_list_lock);
104} 106}
105 107
106void gw_deselect(struct bat_priv *bat_priv) 108void batadv_gw_deselect(struct batadv_priv *bat_priv)
107{ 109{
108 atomic_set(&bat_priv->gw_reselect, 1); 110 atomic_set(&bat_priv->gw_reselect, 1);
109} 111}
110 112
111static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) 113static struct batadv_gw_node *
114batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
112{ 115{
113 struct neigh_node *router; 116 struct batadv_neigh_node *router;
114 struct hlist_node *node; 117 struct hlist_node *node;
115 struct gw_node *gw_node, *curr_gw = NULL; 118 struct batadv_gw_node *gw_node, *curr_gw = NULL;
116 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
117 uint8_t max_tq = 0; 120 uint8_t max_tq = 0;
118 int down, up; 121 int down, up;
122 struct batadv_orig_node *orig_node;
119 123
120 rcu_read_lock(); 124 rcu_read_lock();
121 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 125 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
122 if (gw_node->deleted) 126 if (gw_node->deleted)
123 continue; 127 continue;
124 128
125 router = orig_node_get_router(gw_node->orig_node); 129 orig_node = gw_node->orig_node;
130 router = batadv_orig_node_get_router(orig_node);
126 if (!router) 131 if (!router)
127 continue; 132 continue;
128 133
@@ -131,35 +136,34 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
131 136
132 switch (atomic_read(&bat_priv->gw_sel_class)) { 137 switch (atomic_read(&bat_priv->gw_sel_class)) {
133 case 1: /* fast connection */ 138 case 1: /* fast connection */
134 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, 139 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
135 &down, &up); 140 &down, &up);
136 141
137 tmp_gw_factor = (router->tq_avg * router->tq_avg * 142 tmp_gw_factor = (router->tq_avg * router->tq_avg *
138 down * 100 * 100) / 143 down * 100 * 100) /
139 (TQ_LOCAL_WINDOW_SIZE * 144 (BATADV_TQ_LOCAL_WINDOW_SIZE *
140 TQ_LOCAL_WINDOW_SIZE * 64); 145 BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
141 146
142 if ((tmp_gw_factor > max_gw_factor) || 147 if ((tmp_gw_factor > max_gw_factor) ||
143 ((tmp_gw_factor == max_gw_factor) && 148 ((tmp_gw_factor == max_gw_factor) &&
144 (router->tq_avg > max_tq))) { 149 (router->tq_avg > max_tq))) {
145 if (curr_gw) 150 if (curr_gw)
146 gw_node_free_ref(curr_gw); 151 batadv_gw_node_free_ref(curr_gw);
147 curr_gw = gw_node; 152 curr_gw = gw_node;
148 atomic_inc(&curr_gw->refcount); 153 atomic_inc(&curr_gw->refcount);
149 } 154 }
150 break; 155 break;
151 156
152 default: /** 157 default: /* 2: stable connection (use best statistic)
153 * 2: stable connection (use best statistic)
154 * 3: fast-switch (use best statistic but change as 158 * 3: fast-switch (use best statistic but change as
155 * soon as a better gateway appears) 159 * soon as a better gateway appears)
156 * XX: late-switch (use best statistic but change as 160 * XX: late-switch (use best statistic but change as
157 * soon as a better gateway appears which has 161 * soon as a better gateway appears which has
158 * $routing_class more tq points) 162 * $routing_class more tq points)
159 **/ 163 */
160 if (router->tq_avg > max_tq) { 164 if (router->tq_avg > max_tq) {
161 if (curr_gw) 165 if (curr_gw)
162 gw_node_free_ref(curr_gw); 166 batadv_gw_node_free_ref(curr_gw);
163 curr_gw = gw_node; 167 curr_gw = gw_node;
164 atomic_inc(&curr_gw->refcount); 168 atomic_inc(&curr_gw->refcount);
165 } 169 }
@@ -172,37 +176,36 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
172 if (tmp_gw_factor > max_gw_factor) 176 if (tmp_gw_factor > max_gw_factor)
173 max_gw_factor = tmp_gw_factor; 177 max_gw_factor = tmp_gw_factor;
174 178
175 gw_node_free_ref(gw_node); 179 batadv_gw_node_free_ref(gw_node);
176 180
177next: 181next:
178 neigh_node_free_ref(router); 182 batadv_neigh_node_free_ref(router);
179 } 183 }
180 rcu_read_unlock(); 184 rcu_read_unlock();
181 185
182 return curr_gw; 186 return curr_gw;
183} 187}
184 188
185void gw_election(struct bat_priv *bat_priv) 189void batadv_gw_election(struct batadv_priv *bat_priv)
186{ 190{
187 struct gw_node *curr_gw = NULL, *next_gw = NULL; 191 struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
188 struct neigh_node *router = NULL; 192 struct batadv_neigh_node *router = NULL;
189 char gw_addr[18] = { '\0' }; 193 char gw_addr[18] = { '\0' };
190 194
191 /** 195 /* The batman daemon checks here if we already passed a full originator
192 * The batman daemon checks here if we already passed a full originator
193 * cycle in order to make sure we don't choose the first gateway we 196 * cycle in order to make sure we don't choose the first gateway we
194 * hear about. This check is based on the daemon's uptime which we 197 * hear about. This check is based on the daemon's uptime which we
195 * don't have. 198 * don't have.
196 **/ 199 */
197 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) 200 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
198 goto out; 201 goto out;
199 202
200 if (!atomic_dec_not_zero(&bat_priv->gw_reselect)) 203 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
201 goto out; 204 goto out;
202 205
203 curr_gw = gw_get_selected_gw_node(bat_priv); 206 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
204 207
205 next_gw = gw_get_best_gw_node(bat_priv); 208 next_gw = batadv_gw_get_best_gw_node(bat_priv);
206 209
207 if (curr_gw == next_gw) 210 if (curr_gw == next_gw)
208 goto out; 211 goto out;
@@ -210,53 +213,57 @@ void gw_election(struct bat_priv *bat_priv)
210 if (next_gw) { 213 if (next_gw) {
211 sprintf(gw_addr, "%pM", next_gw->orig_node->orig); 214 sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
212 215
213 router = orig_node_get_router(next_gw->orig_node); 216 router = batadv_orig_node_get_router(next_gw->orig_node);
214 if (!router) { 217 if (!router) {
215 gw_deselect(bat_priv); 218 batadv_gw_deselect(bat_priv);
216 goto out; 219 goto out;
217 } 220 }
218 } 221 }
219 222
220 if ((curr_gw) && (!next_gw)) { 223 if ((curr_gw) && (!next_gw)) {
221 bat_dbg(DBG_BATMAN, bat_priv, 224 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
222 "Removing selected gateway - no gateway in range\n"); 225 "Removing selected gateway - no gateway in range\n");
223 throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); 226 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
227 NULL);
224 } else if ((!curr_gw) && (next_gw)) { 228 } else if ((!curr_gw) && (next_gw)) {
225 bat_dbg(DBG_BATMAN, bat_priv, 229 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", 230 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
227 next_gw->orig_node->orig, next_gw->orig_node->gw_flags, 231 next_gw->orig_node->orig,
228 router->tq_avg); 232 next_gw->orig_node->gw_flags, router->tq_avg);
229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); 233 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
234 gw_addr);
230 } else { 235 } else {
231 bat_dbg(DBG_BATMAN, bat_priv, 236 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
232 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n", 237 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
233 next_gw->orig_node->orig, next_gw->orig_node->gw_flags, 238 next_gw->orig_node->orig,
234 router->tq_avg); 239 next_gw->orig_node->gw_flags, router->tq_avg);
235 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); 240 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
241 gw_addr);
236 } 242 }
237 243
238 gw_select(bat_priv, next_gw); 244 batadv_gw_select(bat_priv, next_gw);
239 245
240out: 246out:
241 if (curr_gw) 247 if (curr_gw)
242 gw_node_free_ref(curr_gw); 248 batadv_gw_node_free_ref(curr_gw);
243 if (next_gw) 249 if (next_gw)
244 gw_node_free_ref(next_gw); 250 batadv_gw_node_free_ref(next_gw);
245 if (router) 251 if (router)
246 neigh_node_free_ref(router); 252 batadv_neigh_node_free_ref(router);
247} 253}
248 254
249void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 255void batadv_gw_check_election(struct batadv_priv *bat_priv,
256 struct batadv_orig_node *orig_node)
250{ 257{
251 struct orig_node *curr_gw_orig; 258 struct batadv_orig_node *curr_gw_orig;
252 struct neigh_node *router_gw = NULL, *router_orig = NULL; 259 struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
253 uint8_t gw_tq_avg, orig_tq_avg; 260 uint8_t gw_tq_avg, orig_tq_avg;
254 261
255 curr_gw_orig = gw_get_selected_orig(bat_priv); 262 curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
256 if (!curr_gw_orig) 263 if (!curr_gw_orig)
257 goto deselect; 264 goto deselect;
258 265
259 router_gw = orig_node_get_router(curr_gw_orig); 266 router_gw = batadv_orig_node_get_router(curr_gw_orig);
260 if (!router_gw) 267 if (!router_gw)
261 goto deselect; 268 goto deselect;
262 269
@@ -264,7 +271,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
264 if (curr_gw_orig == orig_node) 271 if (curr_gw_orig == orig_node)
265 goto out; 272 goto out;
266 273
267 router_orig = orig_node_get_router(orig_node); 274 router_orig = batadv_orig_node_get_router(orig_node);
268 if (!router_orig) 275 if (!router_orig)
269 goto out; 276 goto out;
270 277
@@ -275,35 +282,35 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
275 if (orig_tq_avg < gw_tq_avg) 282 if (orig_tq_avg < gw_tq_avg)
276 goto out; 283 goto out;
277 284
278 /** 285 /* if the routing class is greater than 3 the value tells us how much
279 * if the routing class is greater than 3 the value tells us how much
280 * greater the TQ value of the new gateway must be 286 * greater the TQ value of the new gateway must be
281 **/ 287 */
282 if ((atomic_read(&bat_priv->gw_sel_class) > 3) && 288 if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
283 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) 289 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
284 goto out; 290 goto out;
285 291
286 bat_dbg(DBG_BATMAN, bat_priv, 292 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
287 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", 293 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
288 gw_tq_avg, orig_tq_avg); 294 gw_tq_avg, orig_tq_avg);
289 295
290deselect: 296deselect:
291 gw_deselect(bat_priv); 297 batadv_gw_deselect(bat_priv);
292out: 298out:
293 if (curr_gw_orig) 299 if (curr_gw_orig)
294 orig_node_free_ref(curr_gw_orig); 300 batadv_orig_node_free_ref(curr_gw_orig);
295 if (router_gw) 301 if (router_gw)
296 neigh_node_free_ref(router_gw); 302 batadv_neigh_node_free_ref(router_gw);
297 if (router_orig) 303 if (router_orig)
298 neigh_node_free_ref(router_orig); 304 batadv_neigh_node_free_ref(router_orig);
299 305
300 return; 306 return;
301} 307}
302 308
303static void gw_node_add(struct bat_priv *bat_priv, 309static void batadv_gw_node_add(struct batadv_priv *bat_priv,
304 struct orig_node *orig_node, uint8_t new_gwflags) 310 struct batadv_orig_node *orig_node,
311 uint8_t new_gwflags)
305{ 312{
306 struct gw_node *gw_node; 313 struct batadv_gw_node *gw_node;
307 int down, up; 314 int down, up;
308 315
309 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); 316 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
@@ -318,47 +325,47 @@ static void gw_node_add(struct bat_priv *bat_priv,
318 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); 325 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
319 spin_unlock_bh(&bat_priv->gw_list_lock); 326 spin_unlock_bh(&bat_priv->gw_list_lock);
320 327
321 gw_bandwidth_to_kbit(new_gwflags, &down, &up); 328 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
322 bat_dbg(DBG_BATMAN, bat_priv, 329 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
323 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", 330 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
324 orig_node->orig, new_gwflags, 331 orig_node->orig, new_gwflags,
325 (down > 2048 ? down / 1024 : down), 332 (down > 2048 ? down / 1024 : down),
326 (down > 2048 ? "MBit" : "KBit"), 333 (down > 2048 ? "MBit" : "KBit"),
327 (up > 2048 ? up / 1024 : up), 334 (up > 2048 ? up / 1024 : up),
328 (up > 2048 ? "MBit" : "KBit")); 335 (up > 2048 ? "MBit" : "KBit"));
329} 336}
330 337
331void gw_node_update(struct bat_priv *bat_priv, 338void batadv_gw_node_update(struct batadv_priv *bat_priv,
332 struct orig_node *orig_node, uint8_t new_gwflags) 339 struct batadv_orig_node *orig_node,
340 uint8_t new_gwflags)
333{ 341{
334 struct hlist_node *node; 342 struct hlist_node *node;
335 struct gw_node *gw_node, *curr_gw; 343 struct batadv_gw_node *gw_node, *curr_gw;
336 344
337 /** 345 /* Note: We don't need a NULL check here, since curr_gw never gets
338 * Note: We don't need a NULL check here, since curr_gw never gets
339 * dereferenced. If curr_gw is NULL we also should not exit as we may 346 * dereferenced. If curr_gw is NULL we also should not exit as we may
340 * have this gateway in our list (duplication check!) even though we 347 * have this gateway in our list (duplication check!) even though we
341 * have no currently selected gateway. 348 * have no currently selected gateway.
342 */ 349 */
343 curr_gw = gw_get_selected_gw_node(bat_priv); 350 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
344 351
345 rcu_read_lock(); 352 rcu_read_lock();
346 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 353 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
347 if (gw_node->orig_node != orig_node) 354 if (gw_node->orig_node != orig_node)
348 continue; 355 continue;
349 356
350 bat_dbg(DBG_BATMAN, bat_priv, 357 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
351 "Gateway class of originator %pM changed from %i to %i\n", 358 "Gateway class of originator %pM changed from %i to %i\n",
352 orig_node->orig, gw_node->orig_node->gw_flags, 359 orig_node->orig, gw_node->orig_node->gw_flags,
353 new_gwflags); 360 new_gwflags);
354 361
355 gw_node->deleted = 0; 362 gw_node->deleted = 0;
356 363
357 if (new_gwflags == NO_FLAGS) { 364 if (new_gwflags == BATADV_NO_FLAGS) {
358 gw_node->deleted = jiffies; 365 gw_node->deleted = jiffies;
359 bat_dbg(DBG_BATMAN, bat_priv, 366 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
360 "Gateway %pM removed from gateway list\n", 367 "Gateway %pM removed from gateway list\n",
361 orig_node->orig); 368 orig_node->orig);
362 369
363 if (gw_node == curr_gw) 370 if (gw_node == curr_gw)
364 goto deselect; 371 goto deselect;
@@ -367,34 +374,35 @@ void gw_node_update(struct bat_priv *bat_priv,
367 goto unlock; 374 goto unlock;
368 } 375 }
369 376
370 if (new_gwflags == NO_FLAGS) 377 if (new_gwflags == BATADV_NO_FLAGS)
371 goto unlock; 378 goto unlock;
372 379
373 gw_node_add(bat_priv, orig_node, new_gwflags); 380 batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
374 goto unlock; 381 goto unlock;
375 382
376deselect: 383deselect:
377 gw_deselect(bat_priv); 384 batadv_gw_deselect(bat_priv);
378unlock: 385unlock:
379 rcu_read_unlock(); 386 rcu_read_unlock();
380 387
381 if (curr_gw) 388 if (curr_gw)
382 gw_node_free_ref(curr_gw); 389 batadv_gw_node_free_ref(curr_gw);
383} 390}
384 391
385void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 392void batadv_gw_node_delete(struct batadv_priv *bat_priv,
393 struct batadv_orig_node *orig_node)
386{ 394{
387 gw_node_update(bat_priv, orig_node, 0); 395 batadv_gw_node_update(bat_priv, orig_node, 0);
388} 396}
389 397
390void gw_node_purge(struct bat_priv *bat_priv) 398void batadv_gw_node_purge(struct batadv_priv *bat_priv)
391{ 399{
392 struct gw_node *gw_node, *curr_gw; 400 struct batadv_gw_node *gw_node, *curr_gw;
393 struct hlist_node *node, *node_tmp; 401 struct hlist_node *node, *node_tmp;
394 unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT); 402 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
395 int do_deselect = 0; 403 int do_deselect = 0;
396 404
397 curr_gw = gw_get_selected_gw_node(bat_priv); 405 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
398 406
399 spin_lock_bh(&bat_priv->gw_list_lock); 407 spin_lock_bh(&bat_priv->gw_list_lock);
400 408
@@ -402,43 +410,42 @@ void gw_node_purge(struct bat_priv *bat_priv)
402 &bat_priv->gw_list, list) { 410 &bat_priv->gw_list, list) {
403 if (((!gw_node->deleted) || 411 if (((!gw_node->deleted) ||
404 (time_before(jiffies, gw_node->deleted + timeout))) && 412 (time_before(jiffies, gw_node->deleted + timeout))) &&
405 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) 413 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
406 continue; 414 continue;
407 415
408 if (curr_gw == gw_node) 416 if (curr_gw == gw_node)
409 do_deselect = 1; 417 do_deselect = 1;
410 418
411 hlist_del_rcu(&gw_node->list); 419 hlist_del_rcu(&gw_node->list);
412 gw_node_free_ref(gw_node); 420 batadv_gw_node_free_ref(gw_node);
413 } 421 }
414 422
415 spin_unlock_bh(&bat_priv->gw_list_lock); 423 spin_unlock_bh(&bat_priv->gw_list_lock);
416 424
417 /* gw_deselect() needs to acquire the gw_list_lock */ 425 /* gw_deselect() needs to acquire the gw_list_lock */
418 if (do_deselect) 426 if (do_deselect)
419 gw_deselect(bat_priv); 427 batadv_gw_deselect(bat_priv);
420 428
421 if (curr_gw) 429 if (curr_gw)
422 gw_node_free_ref(curr_gw); 430 batadv_gw_node_free_ref(curr_gw);
423} 431}
424 432
425/** 433/* fails if orig_node has no router */
426 * fails if orig_node has no router 434static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
427 */ 435 struct seq_file *seq,
428static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, 436 const struct batadv_gw_node *gw_node)
429 const struct gw_node *gw_node)
430{ 437{
431 struct gw_node *curr_gw; 438 struct batadv_gw_node *curr_gw;
432 struct neigh_node *router; 439 struct batadv_neigh_node *router;
433 int down, up, ret = -1; 440 int down, up, ret = -1;
434 441
435 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 442 batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
436 443
437 router = orig_node_get_router(gw_node->orig_node); 444 router = batadv_orig_node_get_router(gw_node->orig_node);
438 if (!router) 445 if (!router)
439 goto out; 446 goto out;
440 447
441 curr_gw = gw_get_selected_gw_node(bat_priv); 448 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
442 449
443 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 450 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
444 (curr_gw == gw_node ? "=>" : " "), 451 (curr_gw == gw_node ? "=>" : " "),
@@ -451,23 +458,23 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
451 (up > 2048 ? up / 1024 : up), 458 (up > 2048 ? up / 1024 : up),
452 (up > 2048 ? "MBit" : "KBit")); 459 (up > 2048 ? "MBit" : "KBit"));
453 460
454 neigh_node_free_ref(router); 461 batadv_neigh_node_free_ref(router);
455 if (curr_gw) 462 if (curr_gw)
456 gw_node_free_ref(curr_gw); 463 batadv_gw_node_free_ref(curr_gw);
457out: 464out:
458 return ret; 465 return ret;
459} 466}
460 467
461int gw_client_seq_print_text(struct seq_file *seq, void *offset) 468int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
462{ 469{
463 struct net_device *net_dev = (struct net_device *)seq->private; 470 struct net_device *net_dev = (struct net_device *)seq->private;
464 struct bat_priv *bat_priv = netdev_priv(net_dev); 471 struct batadv_priv *bat_priv = netdev_priv(net_dev);
465 struct hard_iface *primary_if; 472 struct batadv_hard_iface *primary_if;
466 struct gw_node *gw_node; 473 struct batadv_gw_node *gw_node;
467 struct hlist_node *node; 474 struct hlist_node *node;
468 int gw_count = 0, ret = 0; 475 int gw_count = 0, ret = 0;
469 476
470 primary_if = primary_if_get_selected(bat_priv); 477 primary_if = batadv_primary_if_get_selected(bat_priv);
471 if (!primary_if) { 478 if (!primary_if) {
472 ret = seq_printf(seq, 479 ret = seq_printf(seq,
473 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 480 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -475,7 +482,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
475 goto out; 482 goto out;
476 } 483 }
477 484
478 if (primary_if->if_status != IF_ACTIVE) { 485 if (primary_if->if_status != BATADV_IF_ACTIVE) {
479 ret = seq_printf(seq, 486 ret = seq_printf(seq,
480 "BATMAN mesh %s disabled - primary interface not active\n", 487 "BATMAN mesh %s disabled - primary interface not active\n",
481 net_dev->name); 488 net_dev->name);
@@ -484,8 +491,8 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
484 491
485 seq_printf(seq, 492 seq_printf(seq,
486 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 493 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
487 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF", 494 "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
488 SOURCE_VERSION, primary_if->net_dev->name, 495 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
489 primary_if->net_dev->dev_addr, net_dev->name); 496 primary_if->net_dev->dev_addr, net_dev->name);
490 497
491 rcu_read_lock(); 498 rcu_read_lock();
@@ -494,7 +501,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
494 continue; 501 continue;
495 502
496 /* fails if orig_node has no router */ 503 /* fails if orig_node has no router */
497 if (_write_buffer_text(bat_priv, seq, gw_node) < 0) 504 if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
498 continue; 505 continue;
499 506
500 gw_count++; 507 gw_count++;
@@ -506,11 +513,11 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
506 513
507out: 514out:
508 if (primary_if) 515 if (primary_if)
509 hardif_free_ref(primary_if); 516 batadv_hardif_free_ref(primary_if);
510 return ret; 517 return ret;
511} 518}
512 519
513static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) 520static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
514{ 521{
515 int ret = false; 522 int ret = false;
516 unsigned char *p; 523 unsigned char *p;
@@ -521,27 +528,29 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
521 528
522 pkt_len = skb_headlen(skb); 529 pkt_len = skb_headlen(skb);
523 530
524 if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1) 531 if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
525 goto out; 532 goto out;
526 533
527 p = skb->data + header_len + DHCP_OPTIONS_OFFSET; 534 p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
528 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; 535 pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;
529 536
530 /* Access the dhcp option lists. Each entry is made up by: 537 /* Access the dhcp option lists. Each entry is made up by:
531 * - octet 1: option type 538 * - octet 1: option type
532 * - octet 2: option data len (only if type != 255 and 0) 539 * - octet 2: option data len (only if type != 255 and 0)
533 * - octet 3: option data */ 540 * - octet 3: option data
541 */
534 while (*p != 255 && !ret) { 542 while (*p != 255 && !ret) {
535 /* p now points to the first octet: option type */ 543 /* p now points to the first octet: option type */
536 if (*p == 53) { 544 if (*p == 53) {
537 /* type 53 is the message type option. 545 /* type 53 is the message type option.
538 * Jump the len octet and go to the data octet */ 546 * Jump the len octet and go to the data octet
547 */
539 if (pkt_len < 2) 548 if (pkt_len < 2)
540 goto out; 549 goto out;
541 p += 2; 550 p += 2;
542 551
543 /* check if the message type is what we need */ 552 /* check if the message type is what we need */
544 if (*p == DHCP_REQUEST) 553 if (*p == BATADV_DHCP_REQUEST)
545 ret = true; 554 ret = true;
546 break; 555 break;
547 } else if (*p == 0) { 556 } else if (*p == 0) {
@@ -568,7 +577,7 @@ out:
568 return ret; 577 return ret;
569} 578}
570 579
571bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 580bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
572{ 581{
573 struct ethhdr *ethhdr; 582 struct ethhdr *ethhdr;
574 struct iphdr *iphdr; 583 struct iphdr *iphdr;
@@ -634,40 +643,41 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
634 return true; 643 return true;
635} 644}
636 645
637bool gw_out_of_range(struct bat_priv *bat_priv, 646bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
638 struct sk_buff *skb, struct ethhdr *ethhdr) 647 struct sk_buff *skb, struct ethhdr *ethhdr)
639{ 648{
640 struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
641 struct orig_node *orig_dst_node = NULL; 650 struct batadv_orig_node *orig_dst_node = NULL;
642 struct gw_node *curr_gw = NULL; 651 struct batadv_gw_node *curr_gw = NULL;
643 bool ret, out_of_range = false; 652 bool ret, out_of_range = false;
644 unsigned int header_len = 0; 653 unsigned int header_len = 0;
645 uint8_t curr_tq_avg; 654 uint8_t curr_tq_avg;
646 655
647 ret = gw_is_dhcp_target(skb, &header_len); 656 ret = batadv_gw_is_dhcp_target(skb, &header_len);
648 if (!ret) 657 if (!ret)
649 goto out; 658 goto out;
650 659
651 orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, 660 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
652 ethhdr->h_dest); 661 ethhdr->h_dest);
653 if (!orig_dst_node) 662 if (!orig_dst_node)
654 goto out; 663 goto out;
655 664
656 if (!orig_dst_node->gw_flags) 665 if (!orig_dst_node->gw_flags)
657 goto out; 666 goto out;
658 667
659 ret = is_type_dhcprequest(skb, header_len); 668 ret = batadv_is_type_dhcprequest(skb, header_len);
660 if (!ret) 669 if (!ret)
661 goto out; 670 goto out;
662 671
663 switch (atomic_read(&bat_priv->gw_mode)) { 672 switch (atomic_read(&bat_priv->gw_mode)) {
664 case GW_MODE_SERVER: 673 case BATADV_GW_MODE_SERVER:
665 /* If we are a GW then we are our best GW. We can artificially 674 /* If we are a GW then we are our best GW. We can artificially
666 * set the tq towards ourself as the maximum value */ 675 * set the tq towards ourself as the maximum value
667 curr_tq_avg = TQ_MAX_VALUE; 676 */
677 curr_tq_avg = BATADV_TQ_MAX_VALUE;
668 break; 678 break;
669 case GW_MODE_CLIENT: 679 case BATADV_GW_MODE_CLIENT:
670 curr_gw = gw_get_selected_gw_node(bat_priv); 680 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
671 if (!curr_gw) 681 if (!curr_gw)
672 goto out; 682 goto out;
673 683
@@ -677,33 +687,35 @@ bool gw_out_of_range(struct bat_priv *bat_priv,
677 687
678 /* If the dhcp packet has been sent to a different gw, 688 /* If the dhcp packet has been sent to a different gw,
679 * we have to evaluate whether the old gw is still 689 * we have to evaluate whether the old gw is still
680 * reliable enough */ 690 * reliable enough
681 neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); 691 */
692 neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
693 NULL);
682 if (!neigh_curr) 694 if (!neigh_curr)
683 goto out; 695 goto out;
684 696
685 curr_tq_avg = neigh_curr->tq_avg; 697 curr_tq_avg = neigh_curr->tq_avg;
686 break; 698 break;
687 case GW_MODE_OFF: 699 case BATADV_GW_MODE_OFF:
688 default: 700 default:
689 goto out; 701 goto out;
690 } 702 }
691 703
692 neigh_old = find_router(bat_priv, orig_dst_node, NULL); 704 neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
693 if (!neigh_old) 705 if (!neigh_old)
694 goto out; 706 goto out;
695 707
696 if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD) 708 if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
697 out_of_range = true; 709 out_of_range = true;
698 710
699out: 711out:
700 if (orig_dst_node) 712 if (orig_dst_node)
701 orig_node_free_ref(orig_dst_node); 713 batadv_orig_node_free_ref(orig_dst_node);
702 if (curr_gw) 714 if (curr_gw)
703 gw_node_free_ref(curr_gw); 715 batadv_gw_node_free_ref(curr_gw);
704 if (neigh_old) 716 if (neigh_old)
705 neigh_node_free_ref(neigh_old); 717 batadv_neigh_node_free_ref(neigh_old);
706 if (neigh_curr) 718 if (neigh_curr)
707 neigh_node_free_ref(neigh_curr); 719 batadv_neigh_node_free_ref(neigh_curr);
708 return out_of_range; 720 return out_of_range;
709} 721}
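The "fast connection" selection class in batadv_gw_get_best_gw_node() above boils down to one formula: gw_factor = tq_avg^2 * down * 10000 / (TQ_LOCAL_WINDOW_SIZE^2 * 64). Squaring the TQ average penalises lossy links hard, while the advertised downlink rewards bandwidth. The sketch below evaluates it for two candidate gateways; it widens to 64 bits before multiplying, whereas the kernel code quoted here multiplies in 32-bit arithmetic, so treat it as an illustration of the ranking rather than of the exact kernel integer behaviour:

#include <stdint.h>
#include <stdio.h>

#define TQ_LOCAL_WINDOW_SIZE 64	/* BATADV_TQ_LOCAL_WINDOW_SIZE in this tree */

static uint32_t gw_factor(uint8_t tq_avg, int down_kbit)
{
	uint64_t tmp;

	/* widen first: the products exceed 32 bits for fast gateways */
	tmp = (uint64_t)tq_avg * tq_avg * down_kbit * 100 * 100;
	return (uint32_t)(tmp / (TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE * 64));
}

int main(void)
{
	/* a perfect link to a slow gateway vs. a weaker link to a fast one */
	printf("tq=255 @  2048 kbit -> %u\n", gw_factor(255, 2048));
	printf("tq=180 @ 20480 kbit -> %u\n", gw_factor(180, 20480));
	return 0;
}

With these inputs the weaker-TQ but faster gateway scores about five times higher, which is precisely the trade-off class 1 ("fast connection") is meant to make; classes 2 and up fall back to pure TQ comparison.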
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index bf56a5aea10b..f0d129e323c8 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,23 +15,26 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ 20#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
23#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ 21#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
24 22
25void gw_deselect(struct bat_priv *bat_priv); 23void batadv_gw_deselect(struct batadv_priv *bat_priv);
26void gw_election(struct bat_priv *bat_priv); 24void batadv_gw_election(struct batadv_priv *bat_priv);
27struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv); 25struct batadv_orig_node *
28void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node); 26batadv_gw_get_selected_orig(struct batadv_priv *bat_priv);
29void gw_node_update(struct bat_priv *bat_priv, 27void batadv_gw_check_election(struct batadv_priv *bat_priv,
30 struct orig_node *orig_node, uint8_t new_gwflags); 28 struct batadv_orig_node *orig_node);
31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); 29void batadv_gw_node_update(struct batadv_priv *bat_priv,
32void gw_node_purge(struct bat_priv *bat_priv); 30 struct batadv_orig_node *orig_node,
33int gw_client_seq_print_text(struct seq_file *seq, void *offset); 31 uint8_t new_gwflags);
34bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 32void batadv_gw_node_delete(struct batadv_priv *bat_priv,
35bool gw_out_of_range(struct bat_priv *bat_priv, 33 struct batadv_orig_node *orig_node);
36 struct sk_buff *skb, struct ethhdr *ethhdr); 34void batadv_gw_node_purge(struct batadv_priv *bat_priv);
35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
38 struct sk_buff *skb, struct ethhdr *ethhdr);
37 39
38#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 40#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
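batadv_gw_is_dhcp_target() and batadv_is_type_dhcprequest(), declared and used above, together implement the filter behind batadv_gw_out_of_range(): locate the DHCP options area and walk the (type, length, data) list until option 53 (message type) or the 255 end marker turns up. A simplified user-space walk over the options area only, with bounds handling condensed relative to the kernel version:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DHCP_MSG_TYPE	53
#define DHCP_PAD	0
#define DHCP_END	255
#define DHCP_REQUEST	3

static bool is_dhcp_request(const unsigned char *opt, size_t len)
{
	size_t i = 0;

	while (i < len && opt[i] != DHCP_END) {
		if (opt[i] == DHCP_MSG_TYPE)
			/* skip the length octet, read the first data octet */
			return i + 2 < len && opt[i + 2] == DHCP_REQUEST;
		if (opt[i] == DHCP_PAD)
			i++;			/* pad has no length octet */
		else if (i + 1 < len)
			i += 2 + opt[i + 1];	/* skip type, len, data */
		else
			break;
	}
	return false;
}

int main(void)
{
	/* option 53, len 1, value DHCPREQUEST, then end marker */
	unsigned char opts[] = { 53, 1, DHCP_REQUEST, 255 };

	printf("%s\n", is_dhcp_request(opts, sizeof(opts)) ? "request" : "other");
	return 0;
}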
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index ca57ac7d73b2..9001208d1752 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -24,7 +22,7 @@
24#include "gateway_client.h" 22#include "gateway_client.h"
25 23
26/* calculates the gateway class from kbit */ 24/* calculates the gateway class from kbit */
27static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class) 25static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
28{ 26{
29 int mdown = 0, tdown, tup, difference; 27 int mdown = 0, tdown, tup, difference;
30 uint8_t sbit, part; 28 uint8_t sbit, part;
@@ -59,7 +57,7 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
59} 57}
60 58
61/* returns the up and downspeeds in kbit, calculated from the class */ 59/* returns the up and downspeeds in kbit, calculated from the class */
62void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) 60void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
63{ 61{
64 int sbit = (gw_srv_class & 0x80) >> 7; 62 int sbit = (gw_srv_class & 0x80) >> 7;
65 int dpart = (gw_srv_class & 0x78) >> 3; 63 int dpart = (gw_srv_class & 0x78) >> 3;
@@ -75,8 +73,8 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
75 *up = ((upart + 1) * (*down)) / 8; 73 *up = ((upart + 1) * (*down)) / 8;
76} 74}
77 75
78static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, 76static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
79 int *up, int *down) 77 int *up, int *down)
80{ 78{
81 int ret, multi = 1; 79 int ret, multi = 1;
82 char *slash_ptr, *tmp_ptr; 80 char *slash_ptr, *tmp_ptr;
@@ -99,9 +97,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
99 97
100 ret = kstrtol(buff, 10, &ldown); 98 ret = kstrtol(buff, 10, &ldown);
101 if (ret) { 99 if (ret) {
102 bat_err(net_dev, 100 batadv_err(net_dev,
103 "Download speed of gateway mode invalid: %s\n", 101 "Download speed of gateway mode invalid: %s\n",
104 buff); 102 buff);
105 return false; 103 return false;
106 } 104 }
107 105
@@ -124,9 +122,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
124 122
125 ret = kstrtol(slash_ptr + 1, 10, &lup); 123 ret = kstrtol(slash_ptr + 1, 10, &lup);
126 if (ret) { 124 if (ret) {
127 bat_err(net_dev, 125 batadv_err(net_dev,
128 "Upload speed of gateway mode invalid: %s\n", 126 "Upload speed of gateway mode invalid: %s\n",
129 slash_ptr + 1); 127 slash_ptr + 1);
130 return false; 128 return false;
131 } 129 }
132 130
@@ -136,14 +134,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
136 return true; 134 return true;
137} 135}
138 136
139ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count) 137ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
138 size_t count)
140{ 139{
141 struct bat_priv *bat_priv = netdev_priv(net_dev); 140 struct batadv_priv *bat_priv = netdev_priv(net_dev);
142 long gw_bandwidth_tmp = 0; 141 long gw_bandwidth_tmp = 0;
143 int up = 0, down = 0; 142 int up = 0, down = 0;
144 bool ret; 143 bool ret;
145 144
146 ret = parse_gw_bandwidth(net_dev, buff, &up, &down); 145 ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
147 if (!ret) 146 if (!ret)
148 goto end; 147 goto end;
149 148
@@ -153,23 +152,25 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
153 if (!up) 152 if (!up)
154 up = down / 5; 153 up = down / 5;
155 154
156 kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp); 155 batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
157 156
158 /** 157 /* the gw bandwidth we guessed above might not match the given
159 * the gw bandwidth we guessed above might not match the given
160 * speeds, hence we need to calculate it back to show the number 158 * speeds, hence we need to calculate it back to show the number
161 * that is going to be propagated 159 * that is going to be propagated
162 **/ 160 */
163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); 161 batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
164 162
165 gw_deselect(bat_priv); 163 if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
166 bat_info(net_dev, 164 return count;
167 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n", 165
168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, 166 batadv_gw_deselect(bat_priv);
169 (down > 2048 ? down / 1024 : down), 167 batadv_info(net_dev,
170 (down > 2048 ? "MBit" : "KBit"), 168 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
171 (up > 2048 ? up / 1024 : up), 169 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
172 (up > 2048 ? "MBit" : "KBit")); 170 (down > 2048 ? down / 1024 : down),
171 (down > 2048 ? "MBit" : "KBit"),
172 (up > 2048 ? up / 1024 : up),
173 (up > 2048 ? "MBit" : "KBit"));
173 174
174 atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp); 175 atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
175 176
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index b8fb11c4f927..13697f6e7113 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,23 +15,23 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_ 20#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
23#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_ 21#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
24 22
25enum gw_modes { 23enum batadv_gw_modes {
26 GW_MODE_OFF, 24 BATADV_GW_MODE_OFF,
27 GW_MODE_CLIENT, 25 BATADV_GW_MODE_CLIENT,
28 GW_MODE_SERVER, 26 BATADV_GW_MODE_SERVER,
29}; 27};
30 28
31#define GW_MODE_OFF_NAME "off" 29#define BATADV_GW_MODE_OFF_NAME "off"
32#define GW_MODE_CLIENT_NAME "client" 30#define BATADV_GW_MODE_CLIENT_NAME "client"
33#define GW_MODE_SERVER_NAME "server" 31#define BATADV_GW_MODE_SERVER_NAME "server"
34 32
35void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up); 33void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
36ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count); 34ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
35 size_t count);
37 36
38#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */ 37#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
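
Beyond the blanket batadv_/BATADV_ namespacing, nothing functional changes in this header: the mode enum and its *_NAME string macros still pair up one-to-one for the sysfs interface. A hypothetical helper (illustration only, not an in-tree function; the real string mapping lives in the sysfs code) shows how the renamed identifiers line up:

/* illustration only -- not an in-tree function */
static const char *batadv_gw_mode_name(int gw_mode)
{
	switch (gw_mode) {
	case BATADV_GW_MODE_OFF:
		return BATADV_GW_MODE_OFF_NAME;		/* "off" */
	case BATADV_GW_MODE_CLIENT:
		return BATADV_GW_MODE_CLIENT_NAME;	/* "client" */
	case BATADV_GW_MODE_SERVER:
		return BATADV_GW_MODE_SERVER_NAME;	/* "server" */
	default:
		return "unknown";
	}
}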
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dc334fa89847..282bf6e9353e 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -25,28 +23,29 @@
25#include "send.h" 23#include "send.h"
26#include "translation-table.h" 24#include "translation-table.h"
27#include "routing.h" 25#include "routing.h"
28#include "bat_sysfs.h" 26#include "sysfs.h"
29#include "originator.h" 27#include "originator.h"
30#include "hash.h" 28#include "hash.h"
31#include "bridge_loop_avoidance.h" 29#include "bridge_loop_avoidance.h"
32 30
33#include <linux/if_arp.h> 31#include <linux/if_arp.h>
34 32
35void hardif_free_rcu(struct rcu_head *rcu) 33void batadv_hardif_free_rcu(struct rcu_head *rcu)
36{ 34{
37 struct hard_iface *hard_iface; 35 struct batadv_hard_iface *hard_iface;
38 36
39 hard_iface = container_of(rcu, struct hard_iface, rcu); 37 hard_iface = container_of(rcu, struct batadv_hard_iface, rcu);
40 dev_put(hard_iface->net_dev); 38 dev_put(hard_iface->net_dev);
41 kfree(hard_iface); 39 kfree(hard_iface);
42} 40}
43 41
44struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) 42struct batadv_hard_iface *
43batadv_hardif_get_by_netdev(const struct net_device *net_dev)
45{ 44{
46 struct hard_iface *hard_iface; 45 struct batadv_hard_iface *hard_iface;
47 46
48 rcu_read_lock(); 47 rcu_read_lock();
49 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 48 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
50 if (hard_iface->net_dev == net_dev && 49 if (hard_iface->net_dev == net_dev &&
51 atomic_inc_not_zero(&hard_iface->refcount)) 50 atomic_inc_not_zero(&hard_iface->refcount))
52 goto out; 51 goto out;
@@ -59,7 +58,7 @@ out:
59 return hard_iface; 58 return hard_iface;
60} 59}
61 60
62static int is_valid_iface(const struct net_device *net_dev) 61static int batadv_is_valid_iface(const struct net_device *net_dev)
63{ 62{
64 if (net_dev->flags & IFF_LOOPBACK) 63 if (net_dev->flags & IFF_LOOPBACK)
65 return 0; 64 return 0;
@@ -71,26 +70,23 @@ static int is_valid_iface(const struct net_device *net_dev)
71 return 0; 70 return 0;
72 71
73 /* no batman over batman */ 72 /* no batman over batman */
74 if (softif_is_valid(net_dev)) 73 if (batadv_softif_is_valid(net_dev))
75 return 0; 74 return 0;
76 75
77 /* Device is being bridged */
78 /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
79 return 0; */
80
81 return 1; 76 return 1;
82} 77}
83 78
84static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) 79static struct batadv_hard_iface *
80batadv_hardif_get_active(const struct net_device *soft_iface)
85{ 81{
86 struct hard_iface *hard_iface; 82 struct batadv_hard_iface *hard_iface;
87 83
88 rcu_read_lock(); 84 rcu_read_lock();
89 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 85 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
90 if (hard_iface->soft_iface != soft_iface) 86 if (hard_iface->soft_iface != soft_iface)
91 continue; 87 continue;
92 88
93 if (hard_iface->if_status == IF_ACTIVE && 89 if (hard_iface->if_status == BATADV_IF_ACTIVE &&
94 atomic_inc_not_zero(&hard_iface->refcount)) 90 atomic_inc_not_zero(&hard_iface->refcount))
95 goto out; 91 goto out;
96 } 92 }
@@ -102,32 +98,32 @@ out:
102 return hard_iface; 98 return hard_iface;
103} 99}
104 100
105static void primary_if_update_addr(struct bat_priv *bat_priv, 101static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
106 struct hard_iface *oldif) 102 struct batadv_hard_iface *oldif)
107{ 103{
108 struct vis_packet *vis_packet; 104 struct batadv_vis_packet *vis_packet;
109 struct hard_iface *primary_if; 105 struct batadv_hard_iface *primary_if;
110 106
111 primary_if = primary_if_get_selected(bat_priv); 107 primary_if = batadv_primary_if_get_selected(bat_priv);
112 if (!primary_if) 108 if (!primary_if)
113 goto out; 109 goto out;
114 110
115 vis_packet = (struct vis_packet *) 111 vis_packet = (struct batadv_vis_packet *)
116 bat_priv->my_vis_info->skb_packet->data; 112 bat_priv->my_vis_info->skb_packet->data;
117 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 113 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
118 memcpy(vis_packet->sender_orig, 114 memcpy(vis_packet->sender_orig,
119 primary_if->net_dev->dev_addr, ETH_ALEN); 115 primary_if->net_dev->dev_addr, ETH_ALEN);
120 116
121 bla_update_orig_address(bat_priv, primary_if, oldif); 117 batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
122out: 118out:
123 if (primary_if) 119 if (primary_if)
124 hardif_free_ref(primary_if); 120 batadv_hardif_free_ref(primary_if);
125} 121}
126 122
127static void primary_if_select(struct bat_priv *bat_priv, 123static void batadv_primary_if_select(struct batadv_priv *bat_priv,
128 struct hard_iface *new_hard_iface) 124 struct batadv_hard_iface *new_hard_iface)
129{ 125{
130 struct hard_iface *curr_hard_iface; 126 struct batadv_hard_iface *curr_hard_iface;
131 127
132 ASSERT_RTNL(); 128 ASSERT_RTNL();
133 129
@@ -141,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
141 goto out; 137 goto out;
142 138
143 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); 139 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
144 primary_if_update_addr(bat_priv, curr_hard_iface); 140 batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
145 141
146out: 142out:
147 if (curr_hard_iface) 143 if (curr_hard_iface)
148 hardif_free_ref(curr_hard_iface); 144 batadv_hardif_free_ref(curr_hard_iface);
149} 145}
150 146
151static bool hardif_is_iface_up(const struct hard_iface *hard_iface) 147static bool
148batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
152{ 149{
153 if (hard_iface->net_dev->flags & IFF_UP) 150 if (hard_iface->net_dev->flags & IFF_UP)
154 return true; 151 return true;
@@ -156,21 +153,21 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
156 return false; 153 return false;
157} 154}
158 155
159static void check_known_mac_addr(const struct net_device *net_dev) 156static void batadv_check_known_mac_addr(const struct net_device *net_dev)
160{ 157{
161 const struct hard_iface *hard_iface; 158 const struct batadv_hard_iface *hard_iface;
162 159
163 rcu_read_lock(); 160 rcu_read_lock();
164 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 161 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
165 if ((hard_iface->if_status != IF_ACTIVE) && 162 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
166 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 163 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
167 continue; 164 continue;
168 165
169 if (hard_iface->net_dev == net_dev) 166 if (hard_iface->net_dev == net_dev)
170 continue; 167 continue;
171 168
172 if (!compare_eth(hard_iface->net_dev->dev_addr, 169 if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
173 net_dev->dev_addr)) 170 net_dev->dev_addr))
174 continue; 171 continue;
175 172
176 pr_warn("The newly added mac address (%pM) already exists on: %s\n", 173 pr_warn("The newly added mac address (%pM) already exists on: %s\n",
@@ -180,27 +177,29 @@ static void check_known_mac_addr(const struct net_device *net_dev)
180 rcu_read_unlock(); 177 rcu_read_unlock();
181} 178}
182 179
183int hardif_min_mtu(struct net_device *soft_iface) 180int batadv_hardif_min_mtu(struct net_device *soft_iface)
184{ 181{
185 const struct bat_priv *bat_priv = netdev_priv(soft_iface); 182 const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
186 const struct hard_iface *hard_iface; 183 const struct batadv_hard_iface *hard_iface;
187 /* allow big frames if all devices are capable to do so 184 /* allow big frames if all devices are capable to do so
188 * (have MTU > 1500 + BAT_HEADER_LEN) */ 185 * (have MTU > 1500 + BAT_HEADER_LEN)
186 */
189 int min_mtu = ETH_DATA_LEN; 187 int min_mtu = ETH_DATA_LEN;
190 188
191 if (atomic_read(&bat_priv->fragmentation)) 189 if (atomic_read(&bat_priv->fragmentation))
192 goto out; 190 goto out;
193 191
194 rcu_read_lock(); 192 rcu_read_lock();
195 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 193 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
196 if ((hard_iface->if_status != IF_ACTIVE) && 194 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
197 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 195 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
198 continue; 196 continue;
199 197
200 if (hard_iface->soft_iface != soft_iface) 198 if (hard_iface->soft_iface != soft_iface)
201 continue; 199 continue;
202 200
203 min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN, 201 min_mtu = min_t(int,
202 hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
204 min_mtu); 203 min_mtu);
205 } 204 }
206 rcu_read_unlock(); 205 rcu_read_unlock();
@@ -209,68 +208,70 @@ out:
209} 208}
210 209
211/* adjusts the MTU if a new interface with a smaller MTU appeared. */ 210/* adjusts the MTU if a new interface with a smaller MTU appeared. */
212void update_min_mtu(struct net_device *soft_iface) 211void batadv_update_min_mtu(struct net_device *soft_iface)
213{ 212{
214 int min_mtu; 213 int min_mtu;
215 214
216 min_mtu = hardif_min_mtu(soft_iface); 215 min_mtu = batadv_hardif_min_mtu(soft_iface);
217 if (soft_iface->mtu != min_mtu) 216 if (soft_iface->mtu != min_mtu)
218 soft_iface->mtu = min_mtu; 217 soft_iface->mtu = min_mtu;
219} 218}
220 219
221static void hardif_activate_interface(struct hard_iface *hard_iface) 220static void
221batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
222{ 222{
223 struct bat_priv *bat_priv; 223 struct batadv_priv *bat_priv;
224 struct hard_iface *primary_if = NULL; 224 struct batadv_hard_iface *primary_if = NULL;
225 225
226 if (hard_iface->if_status != IF_INACTIVE) 226 if (hard_iface->if_status != BATADV_IF_INACTIVE)
227 goto out; 227 goto out;
228 228
229 bat_priv = netdev_priv(hard_iface->soft_iface); 229 bat_priv = netdev_priv(hard_iface->soft_iface);
230 230
231 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); 231 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
232 hard_iface->if_status = IF_TO_BE_ACTIVATED; 232 hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
233 233
234 /** 234 /* the first active interface becomes our primary interface or
235 * the first active interface becomes our primary interface or
236 * the next active interface after the old primary interface was removed 235 * the next active interface after the old primary interface was removed
237 */ 236 */
238 primary_if = primary_if_get_selected(bat_priv); 237 primary_if = batadv_primary_if_get_selected(bat_priv);
239 if (!primary_if) 238 if (!primary_if)
240 primary_if_select(bat_priv, hard_iface); 239 batadv_primary_if_select(bat_priv, hard_iface);
241 240
242 bat_info(hard_iface->soft_iface, "Interface activated: %s\n", 241 batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
243 hard_iface->net_dev->name); 242 hard_iface->net_dev->name);
244 243
245 update_min_mtu(hard_iface->soft_iface); 244 batadv_update_min_mtu(hard_iface->soft_iface);
246 245
247out: 246out:
248 if (primary_if) 247 if (primary_if)
249 hardif_free_ref(primary_if); 248 batadv_hardif_free_ref(primary_if);
250} 249}
251 250
252static void hardif_deactivate_interface(struct hard_iface *hard_iface) 251static void
252batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
253{ 253{
254 if ((hard_iface->if_status != IF_ACTIVE) && 254 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
255 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 255 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
256 return; 256 return;
257 257
258 hard_iface->if_status = IF_INACTIVE; 258 hard_iface->if_status = BATADV_IF_INACTIVE;
259 259
260 bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", 260 batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
261 hard_iface->net_dev->name); 261 hard_iface->net_dev->name);
262 262
263 update_min_mtu(hard_iface->soft_iface); 263 batadv_update_min_mtu(hard_iface->soft_iface);
264} 264}
265 265
266int hardif_enable_interface(struct hard_iface *hard_iface, 266int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
267 const char *iface_name) 267 const char *iface_name)
268{ 268{
269 struct bat_priv *bat_priv; 269 struct batadv_priv *bat_priv;
270 struct net_device *soft_iface; 270 struct net_device *soft_iface;
271 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
271 int ret; 272 int ret;
272 273
273 if (hard_iface->if_status != IF_NOT_IN_USE) 274 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
274 goto out; 275 goto out;
275 276
276 if (!atomic_inc_not_zero(&hard_iface->refcount)) 277 if (!atomic_inc_not_zero(&hard_iface->refcount))
@@ -284,7 +285,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
284 soft_iface = dev_get_by_name(&init_net, iface_name); 285 soft_iface = dev_get_by_name(&init_net, iface_name);
285 286
286 if (!soft_iface) { 287 if (!soft_iface) {
287 soft_iface = softif_create(iface_name); 288 soft_iface = batadv_softif_create(iface_name);
288 289
289 if (!soft_iface) { 290 if (!soft_iface) {
290 ret = -ENOMEM; 291 ret = -ENOMEM;
@@ -295,7 +296,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
295 dev_hold(soft_iface); 296 dev_hold(soft_iface);
296 } 297 }
297 298
298 if (!softif_is_valid(soft_iface)) { 299 if (!batadv_softif_is_valid(soft_iface)) {
299 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", 300 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
300 soft_iface->name); 301 soft_iface->name);
301 ret = -EINVAL; 302 ret = -EINVAL;
@@ -306,48 +307,46 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
306 bat_priv = netdev_priv(hard_iface->soft_iface); 307 bat_priv = netdev_priv(hard_iface->soft_iface);
307 308
308 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); 309 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
309 if (ret < 0) { 310 if (ret < 0)
310 ret = -ENOMEM;
311 goto err_dev; 311 goto err_dev;
312 }
313 312
314 hard_iface->if_num = bat_priv->num_ifaces; 313 hard_iface->if_num = bat_priv->num_ifaces;
315 bat_priv->num_ifaces++; 314 bat_priv->num_ifaces++;
316 hard_iface->if_status = IF_INACTIVE; 315 hard_iface->if_status = BATADV_IF_INACTIVE;
317 orig_hash_add_if(hard_iface, bat_priv->num_ifaces); 316 batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
318 317
319 hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); 318 hard_iface->batman_adv_ptype.type = ethertype;
320 hard_iface->batman_adv_ptype.func = batman_skb_recv; 319 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
321 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; 320 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
322 dev_add_pack(&hard_iface->batman_adv_ptype); 321 dev_add_pack(&hard_iface->batman_adv_ptype);
323 322
324 atomic_set(&hard_iface->frag_seqno, 1); 323 atomic_set(&hard_iface->frag_seqno, 1);
325 bat_info(hard_iface->soft_iface, "Adding interface: %s\n", 324 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
326 hard_iface->net_dev->name); 325 hard_iface->net_dev->name);
327 326
328 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 327 if (atomic_read(&bat_priv->fragmentation) &&
329 ETH_DATA_LEN + BAT_HEADER_LEN) 328 hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
330 bat_info(hard_iface->soft_iface, 329 batadv_info(hard_iface->soft_iface,
331 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n", 330 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
332 hard_iface->net_dev->name, hard_iface->net_dev->mtu, 331 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
333 ETH_DATA_LEN + BAT_HEADER_LEN); 332 ETH_DATA_LEN + BATADV_HEADER_LEN);
334 333
335 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 334 if (!atomic_read(&bat_priv->fragmentation) &&
336 ETH_DATA_LEN + BAT_HEADER_LEN) 335 hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
337 bat_info(hard_iface->soft_iface, 336 batadv_info(hard_iface->soft_iface,
338 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n", 337 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
339 hard_iface->net_dev->name, hard_iface->net_dev->mtu, 338 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
340 ETH_DATA_LEN + BAT_HEADER_LEN); 339 ETH_DATA_LEN + BATADV_HEADER_LEN);
341 340
342 if (hardif_is_iface_up(hard_iface)) 341 if (batadv_hardif_is_iface_up(hard_iface))
343 hardif_activate_interface(hard_iface); 342 batadv_hardif_activate_interface(hard_iface);
344 else 343 else
345 bat_err(hard_iface->soft_iface, 344 batadv_err(hard_iface->soft_iface,
346 "Not using interface %s (retrying later): interface not active\n", 345 "Not using interface %s (retrying later): interface not active\n",
347 hard_iface->net_dev->name); 346 hard_iface->net_dev->name);
348 347
349 /* begin scheduling originator messages on that interface */ 348 /* begin scheduling originator messages on that interface */
350 schedule_bat_ogm(hard_iface); 349 batadv_schedule_bat_ogm(hard_iface);
351 350
352out: 351out:
353 return 0; 352 return 0;
@@ -355,67 +354,68 @@ out:
355err_dev: 354err_dev:
356 dev_put(soft_iface); 355 dev_put(soft_iface);
357err: 356err:
358 hardif_free_ref(hard_iface); 357 batadv_hardif_free_ref(hard_iface);
359 return ret; 358 return ret;
360} 359}
361 360
362void hardif_disable_interface(struct hard_iface *hard_iface) 361void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
363{ 362{
364 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 363 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
365 struct hard_iface *primary_if = NULL; 364 struct batadv_hard_iface *primary_if = NULL;
366 365
367 if (hard_iface->if_status == IF_ACTIVE) 366 if (hard_iface->if_status == BATADV_IF_ACTIVE)
368 hardif_deactivate_interface(hard_iface); 367 batadv_hardif_deactivate_interface(hard_iface);
369 368
370 if (hard_iface->if_status != IF_INACTIVE) 369 if (hard_iface->if_status != BATADV_IF_INACTIVE)
371 goto out; 370 goto out;
372 371
373 bat_info(hard_iface->soft_iface, "Removing interface: %s\n", 372 batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
374 hard_iface->net_dev->name); 373 hard_iface->net_dev->name);
375 dev_remove_pack(&hard_iface->batman_adv_ptype); 374 dev_remove_pack(&hard_iface->batman_adv_ptype);
376 375
377 bat_priv->num_ifaces--; 376 bat_priv->num_ifaces--;
378 orig_hash_del_if(hard_iface, bat_priv->num_ifaces); 377 batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
379 378
380 primary_if = primary_if_get_selected(bat_priv); 379 primary_if = batadv_primary_if_get_selected(bat_priv);
381 if (hard_iface == primary_if) { 380 if (hard_iface == primary_if) {
382 struct hard_iface *new_if; 381 struct batadv_hard_iface *new_if;
383 382
384 new_if = hardif_get_active(hard_iface->soft_iface); 383 new_if = batadv_hardif_get_active(hard_iface->soft_iface);
385 primary_if_select(bat_priv, new_if); 384 batadv_primary_if_select(bat_priv, new_if);
386 385
387 if (new_if) 386 if (new_if)
388 hardif_free_ref(new_if); 387 batadv_hardif_free_ref(new_if);
389 } 388 }
390 389
391 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); 390 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
392 hard_iface->if_status = IF_NOT_IN_USE; 391 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
393 392
394 /* delete all references to this hard_iface */ 393 /* delete all references to this hard_iface */
395 purge_orig_ref(bat_priv); 394 batadv_purge_orig_ref(bat_priv);
396 purge_outstanding_packets(bat_priv, hard_iface); 395 batadv_purge_outstanding_packets(bat_priv, hard_iface);
397 dev_put(hard_iface->soft_iface); 396 dev_put(hard_iface->soft_iface);
398 397
399 /* nobody uses this interface anymore */ 398 /* nobody uses this interface anymore */
400 if (!bat_priv->num_ifaces) 399 if (!bat_priv->num_ifaces)
401 softif_destroy(hard_iface->soft_iface); 400 batadv_softif_destroy(hard_iface->soft_iface);
402 401
403 hard_iface->soft_iface = NULL; 402 hard_iface->soft_iface = NULL;
404 hardif_free_ref(hard_iface); 403 batadv_hardif_free_ref(hard_iface);
405 404
406out: 405out:
407 if (primary_if) 406 if (primary_if)
408 hardif_free_ref(primary_if); 407 batadv_hardif_free_ref(primary_if);
409} 408}
410 409
411static struct hard_iface *hardif_add_interface(struct net_device *net_dev) 410static struct batadv_hard_iface *
411batadv_hardif_add_interface(struct net_device *net_dev)
412{ 412{
413 struct hard_iface *hard_iface; 413 struct batadv_hard_iface *hard_iface;
414 int ret; 414 int ret;
415 415
416 ASSERT_RTNL(); 416 ASSERT_RTNL();
417 417
418 ret = is_valid_iface(net_dev); 418 ret = batadv_is_valid_iface(net_dev);
419 if (ret != 1) 419 if (ret != 1)
420 goto out; 420 goto out;
421 421
@@ -425,23 +425,22 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
425 if (!hard_iface) 425 if (!hard_iface)
426 goto release_dev; 426 goto release_dev;
427 427
428 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); 428 ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
429 if (ret) 429 if (ret)
430 goto free_if; 430 goto free_if;
431 431
432 hard_iface->if_num = -1; 432 hard_iface->if_num = -1;
433 hard_iface->net_dev = net_dev; 433 hard_iface->net_dev = net_dev;
434 hard_iface->soft_iface = NULL; 434 hard_iface->soft_iface = NULL;
435 hard_iface->if_status = IF_NOT_IN_USE; 435 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
436 INIT_LIST_HEAD(&hard_iface->list); 436 INIT_LIST_HEAD(&hard_iface->list);
437 /* extra reference for return */ 437 /* extra reference for return */
438 atomic_set(&hard_iface->refcount, 2); 438 atomic_set(&hard_iface->refcount, 2);
439 439
440 check_known_mac_addr(hard_iface->net_dev); 440 batadv_check_known_mac_addr(hard_iface->net_dev);
441 list_add_tail_rcu(&hard_iface->list, &hardif_list); 441 list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
442 442
443 /** 443 /* This can't be called via a bat_priv callback because
444 * This can't be called via a bat_priv callback because
445 * we have no bat_priv yet. 444 * we have no bat_priv yet.
446 */ 445 */
447 atomic_set(&hard_iface->seqno, 1); 446 atomic_set(&hard_iface->seqno, 1);
@@ -457,102 +456,104 @@ out:
457 return NULL; 456 return NULL;
458} 457}
459 458
460static void hardif_remove_interface(struct hard_iface *hard_iface) 459static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
461{ 460{
462 ASSERT_RTNL(); 461 ASSERT_RTNL();
463 462
464 /* first deactivate interface */ 463 /* first deactivate interface */
465 if (hard_iface->if_status != IF_NOT_IN_USE) 464 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
466 hardif_disable_interface(hard_iface); 465 batadv_hardif_disable_interface(hard_iface);
467 466
468 if (hard_iface->if_status != IF_NOT_IN_USE) 467 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
469 return; 468 return;
470 469
471 hard_iface->if_status = IF_TO_BE_REMOVED; 470 hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
472 sysfs_del_hardif(&hard_iface->hardif_obj); 471 batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
473 hardif_free_ref(hard_iface); 472 batadv_hardif_free_ref(hard_iface);
474} 473}
475 474
476void hardif_remove_interfaces(void) 475void batadv_hardif_remove_interfaces(void)
477{ 476{
478 struct hard_iface *hard_iface, *hard_iface_tmp; 477 struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
479 478
480 rtnl_lock(); 479 rtnl_lock();
481 list_for_each_entry_safe(hard_iface, hard_iface_tmp, 480 list_for_each_entry_safe(hard_iface, hard_iface_tmp,
482 &hardif_list, list) { 481 &batadv_hardif_list, list) {
483 list_del_rcu(&hard_iface->list); 482 list_del_rcu(&hard_iface->list);
484 hardif_remove_interface(hard_iface); 483 batadv_hardif_remove_interface(hard_iface);
485 } 484 }
486 rtnl_unlock(); 485 rtnl_unlock();
487} 486}
488 487
489static int hard_if_event(struct notifier_block *this, 488static int batadv_hard_if_event(struct notifier_block *this,
490 unsigned long event, void *ptr) 489 unsigned long event, void *ptr)
491{ 490{
492 struct net_device *net_dev = ptr; 491 struct net_device *net_dev = ptr;
493 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); 492 struct batadv_hard_iface *hard_iface;
494 struct hard_iface *primary_if = NULL; 493 struct batadv_hard_iface *primary_if = NULL;
495 struct bat_priv *bat_priv; 494 struct batadv_priv *bat_priv;
496 495
496 hard_iface = batadv_hardif_get_by_netdev(net_dev);
497 if (!hard_iface && event == NETDEV_REGISTER) 497 if (!hard_iface && event == NETDEV_REGISTER)
498 hard_iface = hardif_add_interface(net_dev); 498 hard_iface = batadv_hardif_add_interface(net_dev);
499 499
500 if (!hard_iface) 500 if (!hard_iface)
501 goto out; 501 goto out;
502 502
503 switch (event) { 503 switch (event) {
504 case NETDEV_UP: 504 case NETDEV_UP:
505 hardif_activate_interface(hard_iface); 505 batadv_hardif_activate_interface(hard_iface);
506 break; 506 break;
507 case NETDEV_GOING_DOWN: 507 case NETDEV_GOING_DOWN:
508 case NETDEV_DOWN: 508 case NETDEV_DOWN:
509 hardif_deactivate_interface(hard_iface); 509 batadv_hardif_deactivate_interface(hard_iface);
510 break; 510 break;
511 case NETDEV_UNREGISTER: 511 case NETDEV_UNREGISTER:
512 list_del_rcu(&hard_iface->list); 512 list_del_rcu(&hard_iface->list);
513 513
514 hardif_remove_interface(hard_iface); 514 batadv_hardif_remove_interface(hard_iface);
515 break; 515 break;
516 case NETDEV_CHANGEMTU: 516 case NETDEV_CHANGEMTU:
517 if (hard_iface->soft_iface) 517 if (hard_iface->soft_iface)
518 update_min_mtu(hard_iface->soft_iface); 518 batadv_update_min_mtu(hard_iface->soft_iface);
519 break; 519 break;
520 case NETDEV_CHANGEADDR: 520 case NETDEV_CHANGEADDR:
521 if (hard_iface->if_status == IF_NOT_IN_USE) 521 if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
522 goto hardif_put; 522 goto hardif_put;
523 523
524 check_known_mac_addr(hard_iface->net_dev); 524 batadv_check_known_mac_addr(hard_iface->net_dev);
525 525
526 bat_priv = netdev_priv(hard_iface->soft_iface); 526 bat_priv = netdev_priv(hard_iface->soft_iface);
527 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); 527 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
528 528
529 primary_if = primary_if_get_selected(bat_priv); 529 primary_if = batadv_primary_if_get_selected(bat_priv);
530 if (!primary_if) 530 if (!primary_if)
531 goto hardif_put; 531 goto hardif_put;
532 532
533 if (hard_iface == primary_if) 533 if (hard_iface == primary_if)
534 primary_if_update_addr(bat_priv, NULL); 534 batadv_primary_if_update_addr(bat_priv, NULL);
535 break; 535 break;
536 default: 536 default:
537 break; 537 break;
538 } 538 }
539 539
540hardif_put: 540hardif_put:
541 hardif_free_ref(hard_iface); 541 batadv_hardif_free_ref(hard_iface);
542out: 542out:
543 if (primary_if) 543 if (primary_if)
544 hardif_free_ref(primary_if); 544 batadv_hardif_free_ref(primary_if);
545 return NOTIFY_DONE; 545 return NOTIFY_DONE;
546} 546}
547 547
548/* This function returns true if the interface represented by ifindex is a 548/* This function returns true if the interface represented by ifindex is a
549 * 802.11 wireless device */ 549 * 802.11 wireless device
550bool is_wifi_iface(int ifindex) 550 */
551bool batadv_is_wifi_iface(int ifindex)
551{ 552{
552 struct net_device *net_device = NULL; 553 struct net_device *net_device = NULL;
553 bool ret = false; 554 bool ret = false;
554 555
555 if (ifindex == NULL_IFINDEX) 556 if (ifindex == BATADV_NULL_IFINDEX)
556 goto out; 557 goto out;
557 558
558 net_device = dev_get_by_index(&init_net, ifindex); 559 net_device = dev_get_by_index(&init_net, ifindex);
@@ -561,7 +562,8 @@ bool is_wifi_iface(int ifindex)
561 562
562#ifdef CONFIG_WIRELESS_EXT 563#ifdef CONFIG_WIRELESS_EXT
563 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to 564 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
564 * check for wireless_handlers != NULL */ 565 * check for wireless_handlers != NULL
566 */
565 if (net_device->wireless_handlers) 567 if (net_device->wireless_handlers)
566 ret = true; 568 ret = true;
567 else 569 else
@@ -575,6 +577,6 @@ out:
575 return ret; 577 return ret;
576} 578}
577 579
578struct notifier_block hard_if_notifier = { 580struct notifier_block batadv_hard_if_notifier = {
579 .notifier_call = hard_if_event, 581 .notifier_call = batadv_hard_if_event,
580}; 582};
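
The notifier block at the bottom is how batman-adv learns about interface lifecycle events in the first place: batadv_hard_if_event() is hooked into the networking core with register_netdevice_notifier() (done from main.c), and from then on NETDEV_REGISTER, NETDEV_UP, NETDEV_CHANGEMTU and friends drive the state machine above. A self-contained sketch of the same pattern, assuming the 3.x-era convention visible in this hunk where the callback's ptr is the struct net_device itself (later kernels pass a struct netdev_notifier_info instead):

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;	/* pre-3.11 convention, as above */

	switch (event) {
	case NETDEV_UP:
		pr_info("demo: %s is up\n", net_dev->name);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("demo: %s changed MTU to %u\n",
			net_dev->name, net_dev->mtu);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");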
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index e68c5655e616..3732366e7445 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,44 +15,44 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ 20#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
23#define _NET_BATMAN_ADV_HARD_INTERFACE_H_ 21#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
24 22
25enum hard_if_state { 23enum batadv_hard_if_state {
26 IF_NOT_IN_USE, 24 BATADV_IF_NOT_IN_USE,
27 IF_TO_BE_REMOVED, 25 BATADV_IF_TO_BE_REMOVED,
28 IF_INACTIVE, 26 BATADV_IF_INACTIVE,
29 IF_ACTIVE, 27 BATADV_IF_ACTIVE,
30 IF_TO_BE_ACTIVATED, 28 BATADV_IF_TO_BE_ACTIVATED,
31 IF_I_WANT_YOU 29 BATADV_IF_I_WANT_YOU,
32}; 30};
33 31
34extern struct notifier_block hard_if_notifier; 32extern struct notifier_block batadv_hard_if_notifier;
35 33
36struct hard_iface* 34struct batadv_hard_iface*
37hardif_get_by_netdev(const struct net_device *net_dev); 35batadv_hardif_get_by_netdev(const struct net_device *net_dev);
38int hardif_enable_interface(struct hard_iface *hard_iface, 36int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
39 const char *iface_name); 37 const char *iface_name);
40void hardif_disable_interface(struct hard_iface *hard_iface); 38void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
41void hardif_remove_interfaces(void); 39void batadv_hardif_remove_interfaces(void);
42int hardif_min_mtu(struct net_device *soft_iface); 40int batadv_hardif_min_mtu(struct net_device *soft_iface);
43void update_min_mtu(struct net_device *soft_iface); 41void batadv_update_min_mtu(struct net_device *soft_iface);
44void hardif_free_rcu(struct rcu_head *rcu); 42void batadv_hardif_free_rcu(struct rcu_head *rcu);
45bool is_wifi_iface(int ifindex); 43bool batadv_is_wifi_iface(int ifindex);
46 44
47static inline void hardif_free_ref(struct hard_iface *hard_iface) 45static inline void
46batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
48{ 47{
49 if (atomic_dec_and_test(&hard_iface->refcount)) 48 if (atomic_dec_and_test(&hard_iface->refcount))
50 call_rcu(&hard_iface->rcu, hardif_free_rcu); 49 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
51} 50}
52 51
53static inline struct hard_iface *primary_if_get_selected( 52static inline struct batadv_hard_iface *
54 struct bat_priv *bat_priv) 53batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
55{ 54{
56 struct hard_iface *hard_iface; 55 struct batadv_hard_iface *hard_iface;
57 56
58 rcu_read_lock(); 57 rcu_read_lock();
59 hard_iface = rcu_dereference(bat_priv->primary_if); 58 hard_iface = rcu_dereference(bat_priv->primary_if);
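
batadv_hardif_free_ref() and batadv_primary_if_get_selected() show the reference pattern this whole diff leans on: lookups run under rcu_read_lock() and only succeed via atomic_inc_not_zero(), so an object whose refcount has already hit zero can no longer be acquired, and the final put defers the kfree() past a grace period with call_rcu(). Stripped of the batman-adv specifics, the skeleton looks like this (a sketch with made-up names, not in-tree code):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_obj {
	struct list_head list;
	atomic_t refcount;
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *rcu)
{
	/* runs after a grace period: no RCU reader can still see the object */
	kfree(container_of(rcu, struct demo_obj, rcu));
}

static void demo_put(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, demo_free_rcu);
}

static struct demo_obj *demo_get(struct list_head *head)
{
	struct demo_obj *obj, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, head, list) {
		/* a real lookup would also match a key here; taking the
		 * reference only succeeds while the object is still live */
		if (atomic_inc_not_zero(&obj->refcount)) {
			found = obj;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}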
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 117687bedf25..15a849c2d414 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,25 +15,24 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "hash.h" 21#include "hash.h"
24 22
25/* clears the hash */ 23/* clears the hash */
26static void hash_init(struct hashtable_t *hash) 24static void batadv_hash_init(struct batadv_hashtable *hash)
27{ 25{
28 uint32_t i; 26 uint32_t i;
29 27
30 for (i = 0 ; i < hash->size; i++) { 28 for (i = 0; i < hash->size; i++) {
31 INIT_HLIST_HEAD(&hash->table[i]); 29 INIT_HLIST_HEAD(&hash->table[i]);
32 spin_lock_init(&hash->list_locks[i]); 30 spin_lock_init(&hash->list_locks[i]);
33 } 31 }
34} 32}
35 33
36/* free only the hashtable and the hash itself. */ 34/* free only the hashtable and the hash itself. */
37void hash_destroy(struct hashtable_t *hash) 35void batadv_hash_destroy(struct batadv_hashtable *hash)
38{ 36{
39 kfree(hash->list_locks); 37 kfree(hash->list_locks);
40 kfree(hash->table); 38 kfree(hash->table);
@@ -42,9 +40,9 @@ void hash_destroy(struct hashtable_t *hash)
42} 40}
43 41
44/* allocates and clears the hash */ 42/* allocates and clears the hash */
45struct hashtable_t *hash_new(uint32_t size) 43struct batadv_hashtable *batadv_hash_new(uint32_t size)
46{ 44{
47 struct hashtable_t *hash; 45 struct batadv_hashtable *hash;
48 46
49 hash = kmalloc(sizeof(*hash), GFP_ATOMIC); 47 hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
50 if (!hash) 48 if (!hash)
@@ -60,7 +58,7 @@ struct hashtable_t *hash_new(uint32_t size)
60 goto free_table; 58 goto free_table;
61 59
62 hash->size = size; 60 hash->size = size;
63 hash_init(hash); 61 batadv_hash_init(hash);
64 return hash; 62 return hash;
65 63
66free_table: 64free_table:
@@ -69,3 +67,12 @@ free_hash:
69 kfree(hash); 67 kfree(hash);
70 return NULL; 68 return NULL;
71} 69}
70
71void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
72 struct lock_class_key *key)
73{
74 uint32_t i;
75
76 for (i = 0; i < hash->size; i++)
77 lockdep_set_class(&hash->list_locks[i], key);
78}
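
The new batadv_hash_set_lock_class() exists for lockdep's benefit. Every batadv_hashtable initializes its bucket spinlocks from the same spin_lock_init() call site, so by default all tables share one lock class, and lockdep will emit false-positive inversion warnings as soon as one table is used while a bucket lock of another is held. Giving each table its own key separates the classes. A usage sketch (the names are illustrative; batman-adv applies the equivalent to its translation-table hashes):

/* illustrative caller -- names are not from the tree */
static struct lock_class_key demo_hash_lock_class_key;
static struct batadv_hashtable *demo_hash;

static int demo_hash_setup(void)
{
	demo_hash = batadv_hash_new(1024);
	if (!demo_hash)
		return -ENOMEM;

	/* all bucket locks of this table now form their own lockdep class */
	batadv_hash_set_lock_class(demo_hash, &demo_hash_lock_class_key);
	return 0;
}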
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d4bd7862719b..977de9c75fc2 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_HASH_H_ 20#ifndef _NET_BATMAN_ADV_HASH_H_
@@ -24,35 +22,42 @@
24 22
25#include <linux/list.h> 23#include <linux/list.h>
26 24
27/* callback to a compare function. should 25/* callback to a compare function. should compare 2 element datas for their
28 * compare 2 element datas for their keys, 26 * keys, return 0 if same and not 0 if not same
29 * return 0 if same and not 0 if not 27 */
30 * same */ 28typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
31typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); 29 const void *);
32 30
33/* the hashfunction, should return an index 31/* the hashfunction, should return an index
34 * based on the key in the data of the first 32 * based on the key in the data of the first
35 * argument and the size the second */ 33 * argument and the size the second
36typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); 34 */
37typedef void (*hashdata_free_cb)(struct hlist_node *, void *); 35typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
36typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
38 37
39struct hashtable_t { 38struct batadv_hashtable {
40 struct hlist_head *table; /* the hashtable itself with the buckets */ 39 struct hlist_head *table; /* the hashtable itself with the buckets */
41 spinlock_t *list_locks; /* spinlock for each hash list entry */ 40 spinlock_t *list_locks; /* spinlock for each hash list entry */
42 uint32_t size; /* size of hashtable */ 41 uint32_t size; /* size of hashtable */
43}; 42};
44 43
45/* allocates and clears the hash */ 44/* allocates and clears the hash */
46struct hashtable_t *hash_new(uint32_t size); 45struct batadv_hashtable *batadv_hash_new(uint32_t size);
46
47/* set class key for all locks */
48void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
49 struct lock_class_key *key);
47 50
48/* free only the hashtable and the hash itself. */ 51/* free only the hashtable and the hash itself. */
49void hash_destroy(struct hashtable_t *hash); 52void batadv_hash_destroy(struct batadv_hashtable *hash);
50 53
51/* remove the hash structure. if hashdata_free_cb != NULL, this function will be 54/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
52 * called to remove the elements inside of the hash. if you don't remove the 55 * called to remove the elements inside of the hash. if you don't remove the
53 * elements, memory might be leaked. */ 56 * elements, memory might be leaked.
54static inline void hash_delete(struct hashtable_t *hash, 57 */
55 hashdata_free_cb free_cb, void *arg) 58static inline void batadv_hash_delete(struct batadv_hashtable *hash,
59 batadv_hashdata_free_cb free_cb,
60 void *arg)
56{ 61{
57 struct hlist_head *head; 62 struct hlist_head *head;
58 struct hlist_node *node, *node_tmp; 63 struct hlist_node *node, *node_tmp;
@@ -73,11 +78,11 @@ static inline void hash_delete(struct hashtable_t *hash,
73 spin_unlock_bh(list_lock); 78 spin_unlock_bh(list_lock);
74 } 79 }
75 80
76 hash_destroy(hash); 81 batadv_hash_destroy(hash);
77} 82}
78 83
79/** 84/**
80 * hash_add - adds data to the hashtable 85 * batadv_hash_add - adds data to the hashtable
81 * @hash: storage hash table 86 * @hash: storage hash table
82 * @compare: callback to determine if 2 hash elements are identical 87 * @compare: callback to determine if 2 hash elements are identical
83 * @choose: callback calculating the hash index 88 * @choose: callback calculating the hash index
@@ -87,11 +92,11 @@ static inline void hash_delete(struct hashtable_t *hash,
87 * Returns 0 on success, 1 if the element already is in the hash 92 * Returns 0 on success, 1 if the element already is in the hash
88 * and -1 on error. 93 * and -1 on error.
89 */ 94 */
90 95static inline int batadv_hash_add(struct batadv_hashtable *hash,
91static inline int hash_add(struct hashtable_t *hash, 96 batadv_hashdata_compare_cb compare,
92 hashdata_compare_cb compare, 97 batadv_hashdata_choose_cb choose,
93 hashdata_choose_cb choose, 98 const void *data,
94 const void *data, struct hlist_node *data_node) 99 struct hlist_node *data_node)
95{ 100{
96 uint32_t index; 101 uint32_t index;
97 int ret = -1; 102 int ret = -1;
@@ -106,26 +111,23 @@ static inline int hash_add(struct hashtable_t *hash,
106 head = &hash->table[index]; 111 head = &hash->table[index];
107 list_lock = &hash->list_locks[index]; 112 list_lock = &hash->list_locks[index];
108 113
109 rcu_read_lock(); 114 spin_lock_bh(list_lock);
110 __hlist_for_each_rcu(node, head) { 115
116 hlist_for_each(node, head) {
111 if (!compare(node, data)) 117 if (!compare(node, data))
112 continue; 118 continue;
113 119
114 ret = 1; 120 ret = 1;
115 goto err_unlock; 121 goto unlock;
116 } 122 }
117 rcu_read_unlock();
118 123
119 /* no duplicate found in list, add new element */ 124 /* no duplicate found in list, add new element */
120 spin_lock_bh(list_lock);
121 hlist_add_head_rcu(data_node, head); 125 hlist_add_head_rcu(data_node, head);
122 spin_unlock_bh(list_lock);
123 126
124 ret = 0; 127 ret = 0;
125 goto out;
126 128
127err_unlock: 129unlock:
128 rcu_read_unlock(); 130 spin_unlock_bh(list_lock);
129out: 131out:
130 return ret; 132 return ret;
131} 133}
@@ -133,10 +135,12 @@ out:
133/* removes data from hash, if found. returns pointer do data on success, so you 135/* removes data from hash, if found. returns pointer do data on success, so you
134 * can remove the used structure yourself, or NULL on error . data could be the 136 * can remove the used structure yourself, or NULL on error . data could be the
135 * structure you use with just the key filled, we just need the key for 137 * structure you use with just the key filled, we just need the key for
136 * comparing. */ 138 * comparing.
137static inline void *hash_remove(struct hashtable_t *hash, 139 */
138 hashdata_compare_cb compare, 140static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
139 hashdata_choose_cb choose, void *data) 141 batadv_hashdata_compare_cb compare,
142 batadv_hashdata_choose_cb choose,
143 void *data)
140{ 144{
141 uint32_t index; 145 uint32_t index;
142 struct hlist_node *node; 146 struct hlist_node *node;
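
The batadv_hash_add() rework above is the one behavioral fix in this file. The old version scanned for duplicates under rcu_read_lock() alone and only took the bucket spinlock for the insert, so two concurrent writers could both miss an existing entry and then both insert the same key. The new version holds the bucket lock across the check and the insert, making the operation atomic against other writers while readers keep traversing lock-free under RCU. (Note that despite the callback comment saying "return 0 if same", the "if (!compare(node, data)) continue" logic means the in-tree compare callbacks actually return nonzero on a match.) Reduced to its skeleton under those semantics, as a sketch with illustrative names:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

/* same_key() returns true on a key match, mirroring the in-tree callbacks */
static bool demo_hash_add(spinlock_t *bucket_lock, struct hlist_head *bucket,
			  struct hlist_node *new_node,
			  bool (*same_key)(const struct hlist_node *,
					   const void *),
			  const void *key)
{
	struct hlist_node *node;
	bool added = false;

	spin_lock_bh(bucket_lock);
	hlist_for_each(node, bucket) {	/* writers are serialized: plain walk */
		if (same_key(node, key))
			goto unlock;	/* duplicate: leave the list untouched */
	}
	hlist_add_head_rcu(new_node, bucket);	/* publish to RCU readers */
	added = true;
unlock:
	spin_unlock_bh(bucket_lock);
	return added;
}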
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 2e98a57f3407..bde3cf747507 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -28,21 +26,21 @@
28#include "originator.h" 26#include "originator.h"
29#include "hard-interface.h" 27#include "hard-interface.h"
30 28
31static struct socket_client *socket_client_hash[256]; 29static struct batadv_socket_client *batadv_socket_client_hash[256];
32 30
33static void bat_socket_add_packet(struct socket_client *socket_client, 31static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
34 struct icmp_packet_rr *icmp_packet, 32 struct batadv_icmp_packet_rr *icmp_packet,
35 size_t icmp_len); 33 size_t icmp_len);
36 34
37void bat_socket_init(void) 35void batadv_socket_init(void)
38{ 36{
39 memset(socket_client_hash, 0, sizeof(socket_client_hash)); 37 memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
40} 38}
41 39
42static int bat_socket_open(struct inode *inode, struct file *file) 40static int batadv_socket_open(struct inode *inode, struct file *file)
43{ 41{
44 unsigned int i; 42 unsigned int i;
45 struct socket_client *socket_client; 43 struct batadv_socket_client *socket_client;
46 44
47 nonseekable_open(inode, file); 45 nonseekable_open(inode, file);
48 46
@@ -51,14 +49,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
51 if (!socket_client) 49 if (!socket_client)
52 return -ENOMEM; 50 return -ENOMEM;
53 51
54 for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) { 52 for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
55 if (!socket_client_hash[i]) { 53 if (!batadv_socket_client_hash[i]) {
56 socket_client_hash[i] = socket_client; 54 batadv_socket_client_hash[i] = socket_client;
57 break; 55 break;
58 } 56 }
59 } 57 }
60 58
61 if (i == ARRAY_SIZE(socket_client_hash)) { 59 if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
62 pr_err("Error - can't add another packet client: maximum number of clients reached\n"); 60 pr_err("Error - can't add another packet client: maximum number of clients reached\n");
63 kfree(socket_client); 61 kfree(socket_client);
64 return -EXFULL; 62 return -EXFULL;
@@ -73,14 +71,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
73 71
74 file->private_data = socket_client; 72 file->private_data = socket_client;
75 73
76 inc_module_count(); 74 batadv_inc_module_count();
77 return 0; 75 return 0;
78} 76}
79 77
80static int bat_socket_release(struct inode *inode, struct file *file) 78static int batadv_socket_release(struct inode *inode, struct file *file)
81{ 79{
82 struct socket_client *socket_client = file->private_data; 80 struct batadv_socket_client *socket_client = file->private_data;
83 struct socket_packet *socket_packet; 81 struct batadv_socket_packet *socket_packet;
84 struct list_head *list_pos, *list_pos_tmp; 82 struct list_head *list_pos, *list_pos_tmp;
85 83
86 spin_lock_bh(&socket_client->lock); 84 spin_lock_bh(&socket_client->lock);
@@ -88,33 +86,33 @@ static int bat_socket_release(struct inode *inode, struct file *file)
88 /* for all packets in the queue ... */ 86 /* for all packets in the queue ... */
89 list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { 87 list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
90 socket_packet = list_entry(list_pos, 88 socket_packet = list_entry(list_pos,
91 struct socket_packet, list); 89 struct batadv_socket_packet, list);
92 90
93 list_del(list_pos); 91 list_del(list_pos);
94 kfree(socket_packet); 92 kfree(socket_packet);
95 } 93 }
96 94
97 socket_client_hash[socket_client->index] = NULL; 95 batadv_socket_client_hash[socket_client->index] = NULL;
98 spin_unlock_bh(&socket_client->lock); 96 spin_unlock_bh(&socket_client->lock);
99 97
100 kfree(socket_client); 98 kfree(socket_client);
101 dec_module_count(); 99 batadv_dec_module_count();
102 100
103 return 0; 101 return 0;
104} 102}
105 103
106static ssize_t bat_socket_read(struct file *file, char __user *buf, 104static ssize_t batadv_socket_read(struct file *file, char __user *buf,
107 size_t count, loff_t *ppos) 105 size_t count, loff_t *ppos)
108{ 106{
109 struct socket_client *socket_client = file->private_data; 107 struct batadv_socket_client *socket_client = file->private_data;
110 struct socket_packet *socket_packet; 108 struct batadv_socket_packet *socket_packet;
111 size_t packet_len; 109 size_t packet_len;
112 int error; 110 int error;
113 111
114 if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0)) 112 if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
115 return -EAGAIN; 113 return -EAGAIN;
116 114
117 if ((!buf) || (count < sizeof(struct icmp_packet))) 115 if ((!buf) || (count < sizeof(struct batadv_icmp_packet)))
118 return -EINVAL; 116 return -EINVAL;
119 117
120 if (!access_ok(VERIFY_WRITE, buf, count)) 118 if (!access_ok(VERIFY_WRITE, buf, count))
@@ -129,7 +127,7 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
129 spin_lock_bh(&socket_client->lock); 127 spin_lock_bh(&socket_client->lock);
130 128
131 socket_packet = list_first_entry(&socket_client->queue_list, 129 socket_packet = list_first_entry(&socket_client->queue_list,
132 struct socket_packet, list); 130 struct batadv_socket_packet, list);
133 list_del(&socket_packet->list); 131 list_del(&socket_packet->list);
134 socket_client->queue_len--; 132 socket_client->queue_len--;
135 133
@@ -146,34 +144,34 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
146 return packet_len; 144 return packet_len;
147} 145}
148 146
149static ssize_t bat_socket_write(struct file *file, const char __user *buff, 147static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
150 size_t len, loff_t *off) 148 size_t len, loff_t *off)
151{ 149{
152 struct socket_client *socket_client = file->private_data; 150 struct batadv_socket_client *socket_client = file->private_data;
153 struct bat_priv *bat_priv = socket_client->bat_priv; 151 struct batadv_priv *bat_priv = socket_client->bat_priv;
154 struct hard_iface *primary_if = NULL; 152 struct batadv_hard_iface *primary_if = NULL;
155 struct sk_buff *skb; 153 struct sk_buff *skb;
156 struct icmp_packet_rr *icmp_packet; 154 struct batadv_icmp_packet_rr *icmp_packet;
157 155
158 struct orig_node *orig_node = NULL; 156 struct batadv_orig_node *orig_node = NULL;
159 struct neigh_node *neigh_node = NULL; 157 struct batadv_neigh_node *neigh_node = NULL;
160 size_t packet_len = sizeof(struct icmp_packet); 158 size_t packet_len = sizeof(struct batadv_icmp_packet);
161 159
162 if (len < sizeof(struct icmp_packet)) { 160 if (len < sizeof(struct batadv_icmp_packet)) {
163 bat_dbg(DBG_BATMAN, bat_priv, 161 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
164 "Error - can't send packet from char device: invalid packet size\n"); 162 "Error - can't send packet from char device: invalid packet size\n");
165 return -EINVAL; 163 return -EINVAL;
166 } 164 }
167 165
168 primary_if = primary_if_get_selected(bat_priv); 166 primary_if = batadv_primary_if_get_selected(bat_priv);
169 167
170 if (!primary_if) { 168 if (!primary_if) {
171 len = -EFAULT; 169 len = -EFAULT;
172 goto out; 170 goto out;
173 } 171 }
174 172
175 if (len >= sizeof(struct icmp_packet_rr)) 173 if (len >= sizeof(struct batadv_icmp_packet_rr))
176 packet_len = sizeof(struct icmp_packet_rr); 174 packet_len = sizeof(struct batadv_icmp_packet_rr);
177 175
178 skb = dev_alloc_skb(packet_len + ETH_HLEN); 176 skb = dev_alloc_skb(packet_len + ETH_HLEN);
179 if (!skb) { 177 if (!skb) {
@@ -182,81 +180,82 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
182 } 180 }
183 181
184 skb_reserve(skb, ETH_HLEN); 182 skb_reserve(skb, ETH_HLEN);
185 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); 183 icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
186 184
187 if (copy_from_user(icmp_packet, buff, packet_len)) { 185 if (copy_from_user(icmp_packet, buff, packet_len)) {
188 len = -EFAULT; 186 len = -EFAULT;
189 goto free_skb; 187 goto free_skb;
190 } 188 }
191 189
192 if (icmp_packet->header.packet_type != BAT_ICMP) { 190 if (icmp_packet->header.packet_type != BATADV_ICMP) {
193 bat_dbg(DBG_BATMAN, bat_priv, 191 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
194 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); 192 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
195 len = -EINVAL; 193 len = -EINVAL;
196 goto free_skb; 194 goto free_skb;
197 } 195 }
198 196
199 if (icmp_packet->msg_type != ECHO_REQUEST) { 197 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
200 bat_dbg(DBG_BATMAN, bat_priv, 198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
201 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n"); 199 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
202 len = -EINVAL; 200 len = -EINVAL;
203 goto free_skb; 201 goto free_skb;
204 } 202 }
205 203
206 icmp_packet->uid = socket_client->index; 204 icmp_packet->uid = socket_client->index;
207 205
208 if (icmp_packet->header.version != COMPAT_VERSION) { 206 if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
209 icmp_packet->msg_type = PARAMETER_PROBLEM; 207 icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
210 icmp_packet->header.version = COMPAT_VERSION; 208 icmp_packet->header.version = BATADV_COMPAT_VERSION;
211 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 209 batadv_socket_add_packet(socket_client, icmp_packet,
210 packet_len);
212 goto free_skb; 211 goto free_skb;
213 } 212 }
214 213
215 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 214 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
216 goto dst_unreach; 215 goto dst_unreach;
217 216
218 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 217 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
219 if (!orig_node) 218 if (!orig_node)
220 goto dst_unreach; 219 goto dst_unreach;
221 220
222 neigh_node = orig_node_get_router(orig_node); 221 neigh_node = batadv_orig_node_get_router(orig_node);
223 if (!neigh_node) 222 if (!neigh_node)
224 goto dst_unreach; 223 goto dst_unreach;
225 224
226 if (!neigh_node->if_incoming) 225 if (!neigh_node->if_incoming)
227 goto dst_unreach; 226 goto dst_unreach;
228 227
229 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 228 if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
230 goto dst_unreach; 229 goto dst_unreach;
231 230
232 memcpy(icmp_packet->orig, 231 memcpy(icmp_packet->orig,
233 primary_if->net_dev->dev_addr, ETH_ALEN); 232 primary_if->net_dev->dev_addr, ETH_ALEN);
234 233
235 if (packet_len == sizeof(struct icmp_packet_rr)) 234 if (packet_len == sizeof(struct batadv_icmp_packet_rr))
236 memcpy(icmp_packet->rr, 235 memcpy(icmp_packet->rr,
237 neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN); 236 neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
238 237
239 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 238 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
240 goto out; 239 goto out;
241 240
242dst_unreach: 241dst_unreach:
243 icmp_packet->msg_type = DESTINATION_UNREACHABLE; 242 icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
244 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 243 batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
245free_skb: 244free_skb:
246 kfree_skb(skb); 245 kfree_skb(skb);
247out: 246out:
248 if (primary_if) 247 if (primary_if)
249 hardif_free_ref(primary_if); 248 batadv_hardif_free_ref(primary_if);
250 if (neigh_node) 249 if (neigh_node)
251 neigh_node_free_ref(neigh_node); 250 batadv_neigh_node_free_ref(neigh_node);
252 if (orig_node) 251 if (orig_node)
253 orig_node_free_ref(orig_node); 252 batadv_orig_node_free_ref(orig_node);
254 return len; 253 return len;
255} 254}
256 255
257static unsigned int bat_socket_poll(struct file *file, poll_table *wait) 256static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
258{ 257{
259 struct socket_client *socket_client = file->private_data; 258 struct batadv_socket_client *socket_client = file->private_data;
260 259
261 poll_wait(file, &socket_client->queue_wait, wait); 260 poll_wait(file, &socket_client->queue_wait, wait);
262 261
@@ -266,39 +265,39 @@ static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
266 return 0; 265 return 0;
267} 266}
268 267
269static const struct file_operations fops = { 268static const struct file_operations batadv_fops = {
270 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
271 .open = bat_socket_open, 270 .open = batadv_socket_open,
272 .release = bat_socket_release, 271 .release = batadv_socket_release,
273 .read = bat_socket_read, 272 .read = batadv_socket_read,
274 .write = bat_socket_write, 273 .write = batadv_socket_write,
275 .poll = bat_socket_poll, 274 .poll = batadv_socket_poll,
276 .llseek = no_llseek, 275 .llseek = no_llseek,
277}; 276};
278 277
279int bat_socket_setup(struct bat_priv *bat_priv) 278int batadv_socket_setup(struct batadv_priv *bat_priv)
280{ 279{
281 struct dentry *d; 280 struct dentry *d;
282 281
283 if (!bat_priv->debug_dir) 282 if (!bat_priv->debug_dir)
284 goto err; 283 goto err;
285 284
286 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, 285 d = debugfs_create_file(BATADV_ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
287 bat_priv->debug_dir, bat_priv, &fops); 286 bat_priv->debug_dir, bat_priv, &batadv_fops);
288 if (d) 287 if (!d)
289 goto err; 288 goto err;
290 289
291 return 0; 290 return 0;
292 291
293err: 292err:
294 return 1; 293 return -ENOMEM;
295} 294}
296 295
297static void bat_socket_add_packet(struct socket_client *socket_client, 296static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
298 struct icmp_packet_rr *icmp_packet, 297 struct batadv_icmp_packet_rr *icmp_packet,
299 size_t icmp_len) 298 size_t icmp_len)
300{ 299{
301 struct socket_packet *socket_packet; 300 struct batadv_socket_packet *socket_packet;
302 301
303 socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC); 302 socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
304 303
@@ -312,8 +311,9 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
312 spin_lock_bh(&socket_client->lock); 311 spin_lock_bh(&socket_client->lock);
313 312
314 /* while waiting for the lock the socket_client could have been 313 /* while waiting for the lock the socket_client could have been
315 * deleted */ 314 * deleted
316 if (!socket_client_hash[icmp_packet->uid]) { 315 */
316 if (!batadv_socket_client_hash[icmp_packet->uid]) {
317 spin_unlock_bh(&socket_client->lock); 317 spin_unlock_bh(&socket_client->lock);
318 kfree(socket_packet); 318 kfree(socket_packet);
319 return; 319 return;
@@ -324,7 +324,8 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
324 324
325 if (socket_client->queue_len > 100) { 325 if (socket_client->queue_len > 100) {
326 socket_packet = list_first_entry(&socket_client->queue_list, 326 socket_packet = list_first_entry(&socket_client->queue_list,
327 struct socket_packet, list); 327 struct batadv_socket_packet,
328 list);
328 329
329 list_del(&socket_packet->list); 330 list_del(&socket_packet->list);
330 kfree(socket_packet); 331 kfree(socket_packet);
@@ -336,11 +337,12 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
336 wake_up(&socket_client->queue_wait); 337 wake_up(&socket_client->queue_wait);
337} 338}
338 339
339void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, 340void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
340 size_t icmp_len) 341 size_t icmp_len)
341{ 342{
342 struct socket_client *hash = socket_client_hash[icmp_packet->uid]; 343 struct batadv_socket_client *hash;
343 344
345 hash = batadv_socket_client_hash[icmp_packet->uid];
344 if (hash) 346 if (hash)
345 bat_socket_add_packet(hash, icmp_packet, icmp_len); 347 batadv_socket_add_packet(hash, icmp_packet, icmp_len);
346} 348}
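
The function above caps each client's backlog: batadv_socket_add_packet() appends the new ICMP packet under the client lock and, once queue_len exceeds 100, unlinks and frees the oldest entry before anything else. A minimal userspace sketch of that drop-oldest bounded queue follows; the names (struct pkt, packet_queue, pq_push) are illustrative only, and the kernel's spinlock and wake_up() on the reader are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_QUEUE 100	/* same cap as the queue_len > 100 check above */

struct pkt {
	struct pkt *next;
	size_t len;
	unsigned char data[64];
};

struct packet_queue {
	struct pkt *head;	/* oldest entry */
	struct pkt *tail;	/* newest entry */
	unsigned int count;
};

static void pq_push(struct packet_queue *q, const void *buf, size_t len)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return;
	p->len = len < sizeof(p->data) ? len : sizeof(p->data);
	memcpy(p->data, buf, p->len);

	if (q->count >= MAX_QUEUE) {
		/* queue full: discard the oldest packet first */
		struct pkt *old = q->head;

		q->head = old->next;
		if (!q->head)
			q->tail = NULL;
		free(old);
		q->count--;
	}

	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->count++;
}

int main(void)
{
	struct packet_queue q = { 0 };
	int i;

	for (i = 0; i < 150; i++)
		pq_push(&q, &i, sizeof(i));
	printf("%u packets queued, 50 oldest dropped\n", q.count);
	return 0;
}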
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 380ed4c2443a..29443a1dbb5c 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,17 +15,16 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_ 20#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
23#define _NET_BATMAN_ADV_ICMP_SOCKET_H_ 21#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
24 22
25#define ICMP_SOCKET "socket" 23#define BATADV_ICMP_SOCKET "socket"
26 24
27void bat_socket_init(void); 25void batadv_socket_init(void);
28int bat_socket_setup(struct bat_priv *bat_priv); 26int batadv_socket_setup(struct batadv_priv *bat_priv);
29void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, 27void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
30 size_t icmp_len); 28 size_t icmp_len);
31 29
32#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */ 30#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
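
This header exports the debugfs "socket" file through which userspace (e.g. batctl) injects echo requests and reads the replies queued by the code above. Below is a hypothetical sketch of one round trip; the struct mirrors the assumed layout of batadv_icmp_packet from packet.h at this commit, and the literal constants (packet type 0x02, compat version 14, ECHO_REQUEST 8, TTL 50) should be re-checked against the running kernel before relying on any of this.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct icmp_packet {			/* assumed on-wire layout, 20 bytes */
	uint8_t  packet_type;		/* BATADV_ICMP */
	uint8_t  version;		/* BATADV_COMPAT_VERSION */
	uint8_t  ttl;
	uint8_t  msg_type;		/* BATADV_ECHO_REQUEST */
	uint8_t  dst[6];
	uint8_t  orig[6];
	uint16_t seqno;
	uint8_t  uid;
	uint8_t  reserved;
} __attribute__((packed));

int main(void)
{
	const char *path = "/sys/kernel/debug/batman_adv/bat0/socket";
	struct icmp_packet pkt = { 0 }, reply;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	pkt.packet_type = 0x02;	/* BATADV_ICMP, per packet.h */
	pkt.version = 14;	/* compat version, per packet.h */
	pkt.ttl = 50;
	pkt.msg_type = 8;	/* BATADV_ECHO_REQUEST */
	/* placeholder destination; orig and uid are filled in by the
	 * kernel's write handler, as seen in bat_socket_write() above */
	memcpy(pkt.dst, "\x02\x00\x00\x00\x00\x01", 6);

	if (write(fd, &pkt, sizeof(pkt)) != (ssize_t)sizeof(pkt))
		perror("write");
	else if (read(fd, &reply, sizeof(reply)) > 0)
		printf("reply msg_type=%u (0 = echo reply)\n", reply.msg_type);

	close(fd);
	return 0;
}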
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 083a2993efe4..13c88b25ab31 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,12 +15,11 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "bat_sysfs.h" 21#include "sysfs.h"
24#include "bat_debugfs.h" 22#include "debugfs.h"
25#include "routing.h" 23#include "routing.h"
26#include "send.h" 24#include "send.h"
27#include "originator.h" 25#include "originator.h"
@@ -37,61 +35,65 @@
37 35
38 36
39/* List manipulations on hardif_list have to be rtnl_lock()'ed, 37/* List manipulations on hardif_list have to be rtnl_lock()'ed,
40 * list traversals just rcu-locked */ 38 * list traversals just rcu-locked
41struct list_head hardif_list; 39 */
42static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); 40struct list_head batadv_hardif_list;
43char bat_routing_algo[20] = "BATMAN IV"; 41static int (*batadv_rx_handler[256])(struct sk_buff *,
44static struct hlist_head bat_algo_list; 42 struct batadv_hard_iface *);
43char batadv_routing_algo[20] = "BATMAN_IV";
44static struct hlist_head batadv_algo_list;
45 45
46unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 46unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
47 47
48struct workqueue_struct *bat_event_workqueue; 48struct workqueue_struct *batadv_event_workqueue;
49 49
50static void recv_handler_init(void); 50static void batadv_recv_handler_init(void);
51 51
52static int __init batman_init(void) 52static int __init batadv_init(void)
53{ 53{
54 INIT_LIST_HEAD(&hardif_list); 54 INIT_LIST_HEAD(&batadv_hardif_list);
55 INIT_HLIST_HEAD(&bat_algo_list); 55 INIT_HLIST_HEAD(&batadv_algo_list);
56 56
57 recv_handler_init(); 57 batadv_recv_handler_init();
58 58
59 bat_iv_init(); 59 batadv_iv_init();
60 60
61 /* the name should not be longer than 10 chars - see 61 /* the name should not be longer than 10 chars - see
62 * http://lwn.net/Articles/23634/ */ 62 * http://lwn.net/Articles/23634/
63 bat_event_workqueue = create_singlethread_workqueue("bat_events"); 63 */
64 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
64 65
65 if (!bat_event_workqueue) 66 if (!batadv_event_workqueue)
66 return -ENOMEM; 67 return -ENOMEM;
67 68
68 bat_socket_init(); 69 batadv_socket_init();
69 debugfs_init(); 70 batadv_debugfs_init();
70 71
71 register_netdevice_notifier(&hard_if_notifier); 72 register_netdevice_notifier(&batadv_hard_if_notifier);
72 73
73 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n", 74 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
74 SOURCE_VERSION, COMPAT_VERSION); 75 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
75 76
76 return 0; 77 return 0;
77} 78}
78 79
79static void __exit batman_exit(void) 80static void __exit batadv_exit(void)
80{ 81{
81 debugfs_destroy(); 82 batadv_debugfs_destroy();
82 unregister_netdevice_notifier(&hard_if_notifier); 83 unregister_netdevice_notifier(&batadv_hard_if_notifier);
83 hardif_remove_interfaces(); 84 batadv_hardif_remove_interfaces();
84 85
85 flush_workqueue(bat_event_workqueue); 86 flush_workqueue(batadv_event_workqueue);
86 destroy_workqueue(bat_event_workqueue); 87 destroy_workqueue(batadv_event_workqueue);
87 bat_event_workqueue = NULL; 88 batadv_event_workqueue = NULL;
88 89
89 rcu_barrier(); 90 rcu_barrier();
90} 91}
91 92
92int mesh_init(struct net_device *soft_iface) 93int batadv_mesh_init(struct net_device *soft_iface)
93{ 94{
94 struct bat_priv *bat_priv = netdev_priv(soft_iface); 95 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
96 int ret;
95 97
96 spin_lock_init(&bat_priv->forw_bat_list_lock); 98 spin_lock_init(&bat_priv->forw_bat_list_lock);
97 spin_lock_init(&bat_priv->forw_bcast_list_lock); 99 spin_lock_init(&bat_priv->forw_bcast_list_lock);
@@ -110,72 +112,77 @@ int mesh_init(struct net_device *soft_iface)
110 INIT_LIST_HEAD(&bat_priv->tt_req_list); 112 INIT_LIST_HEAD(&bat_priv->tt_req_list);
111 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 113 INIT_LIST_HEAD(&bat_priv->tt_roam_list);
112 114
113 if (originator_init(bat_priv) < 1) 115 ret = batadv_originator_init(bat_priv);
116 if (ret < 0)
114 goto err; 117 goto err;
115 118
116 if (tt_init(bat_priv) < 1) 119 ret = batadv_tt_init(bat_priv);
120 if (ret < 0)
117 goto err; 121 goto err;
118 122
119 tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX); 123 batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
124 BATADV_NULL_IFINDEX);
120 125
121 if (vis_init(bat_priv) < 1) 126 ret = batadv_vis_init(bat_priv);
127 if (ret < 0)
122 goto err; 128 goto err;
123 129
124 if (bla_init(bat_priv) < 1) 130 ret = batadv_bla_init(bat_priv);
131 if (ret < 0)
125 goto err; 132 goto err;
126 133
127 atomic_set(&bat_priv->gw_reselect, 0); 134 atomic_set(&bat_priv->gw_reselect, 0);
128 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); 135 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
129 goto end;
130
131err:
132 mesh_free(soft_iface);
133 return -1;
134 136
135end:
136 return 0; 137 return 0;
138
139err:
140 batadv_mesh_free(soft_iface);
141 return ret;
137} 142}
138 143
139void mesh_free(struct net_device *soft_iface) 144void batadv_mesh_free(struct net_device *soft_iface)
140{ 145{
141 struct bat_priv *bat_priv = netdev_priv(soft_iface); 146 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
142 147
143 atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING); 148 atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
144 149
145 purge_outstanding_packets(bat_priv, NULL); 150 batadv_purge_outstanding_packets(bat_priv, NULL);
146 151
147 vis_quit(bat_priv); 152 batadv_vis_quit(bat_priv);
148 153
149 gw_node_purge(bat_priv); 154 batadv_gw_node_purge(bat_priv);
150 originator_free(bat_priv); 155 batadv_originator_free(bat_priv);
151 156
152 tt_free(bat_priv); 157 batadv_tt_free(bat_priv);
153 158
154 bla_free(bat_priv); 159 batadv_bla_free(bat_priv);
155 160
156 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 161 free_percpu(bat_priv->bat_counters);
162
163 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
157} 164}
158 165
159void inc_module_count(void) 166void batadv_inc_module_count(void)
160{ 167{
161 try_module_get(THIS_MODULE); 168 try_module_get(THIS_MODULE);
162} 169}
163 170
164void dec_module_count(void) 171void batadv_dec_module_count(void)
165{ 172{
166 module_put(THIS_MODULE); 173 module_put(THIS_MODULE);
167} 174}
168 175
169int is_my_mac(const uint8_t *addr) 176int batadv_is_my_mac(const uint8_t *addr)
170{ 177{
171 const struct hard_iface *hard_iface; 178 const struct batadv_hard_iface *hard_iface;
172 179
173 rcu_read_lock(); 180 rcu_read_lock();
174 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 181 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
175 if (hard_iface->if_status != IF_ACTIVE) 182 if (hard_iface->if_status != BATADV_IF_ACTIVE)
176 continue; 183 continue;
177 184
178 if (compare_eth(hard_iface->net_dev->dev_addr, addr)) { 185 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
179 rcu_read_unlock(); 186 rcu_read_unlock();
180 return 1; 187 return 1;
181 } 188 }
@@ -184,8 +191,8 @@ int is_my_mac(const uint8_t *addr)
184 return 0; 191 return 0;
185} 192}
186 193
187static int recv_unhandled_packet(struct sk_buff *skb, 194static int batadv_recv_unhandled_packet(struct sk_buff *skb,
188 struct hard_iface *recv_if) 195 struct batadv_hard_iface *recv_if)
189{ 196{
190 return NET_RX_DROP; 197 return NET_RX_DROP;
191} 198}
@@ -193,16 +200,18 @@ static int recv_unhandled_packet(struct sk_buff *skb,
193/* incoming packets with the batman ethertype received on any active hard 200/* incoming packets with the batman ethertype received on any active hard
194 * interface 201 * interface
195 */ 202 */
196int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 203int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
197 struct packet_type *ptype, struct net_device *orig_dev) 204 struct packet_type *ptype,
205 struct net_device *orig_dev)
198{ 206{
199 struct bat_priv *bat_priv; 207 struct batadv_priv *bat_priv;
200 struct batman_ogm_packet *batman_ogm_packet; 208 struct batadv_ogm_packet *batadv_ogm_packet;
201 struct hard_iface *hard_iface; 209 struct batadv_hard_iface *hard_iface;
202 uint8_t idx; 210 uint8_t idx;
203 int ret; 211 int ret;
204 212
205 hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); 213 hard_iface = container_of(ptype, struct batadv_hard_iface,
214 batman_adv_ptype);
206 skb = skb_share_check(skb, GFP_ATOMIC); 215 skb = skb_share_check(skb, GFP_ATOMIC);
207 216
208 /* skb was released by skb_share_check() */ 217 /* skb was released by skb_share_check() */
@@ -222,27 +231,27 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
222 231
223 bat_priv = netdev_priv(hard_iface->soft_iface); 232 bat_priv = netdev_priv(hard_iface->soft_iface);
224 233
225 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 234 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
226 goto err_free; 235 goto err_free;
227 236
228 /* discard frames on not active interfaces */ 237 /* discard frames on not active interfaces */
229 if (hard_iface->if_status != IF_ACTIVE) 238 if (hard_iface->if_status != BATADV_IF_ACTIVE)
230 goto err_free; 239 goto err_free;
231 240
232 batman_ogm_packet = (struct batman_ogm_packet *)skb->data; 241 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
233 242
234 if (batman_ogm_packet->header.version != COMPAT_VERSION) { 243 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
235 bat_dbg(DBG_BATMAN, bat_priv, 244 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
236 "Drop packet: incompatible batman version (%i)\n", 245 "Drop packet: incompatible batman version (%i)\n",
237 batman_ogm_packet->header.version); 246 batadv_ogm_packet->header.version);
238 goto err_free; 247 goto err_free;
239 } 248 }
240 249
241 /* all receive handlers return whether they received or reused 250 /* all receive handlers return whether they received or reused
242 * the supplied skb. if not, we have to free the skb. 251 * the supplied skb. if not, we have to free the skb.
243 */ 252 */
244 idx = batman_ogm_packet->header.packet_type; 253 idx = batadv_ogm_packet->header.packet_type;
245 ret = (*recv_packet_handler[idx])(skb, hard_iface); 254 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
246 255
247 if (ret == NET_RX_DROP) 256 if (ret == NET_RX_DROP)
248 kfree_skb(skb); 257 kfree_skb(skb);
@@ -259,51 +268,52 @@ err_out:
259 return NET_RX_DROP; 268 return NET_RX_DROP;
260} 269}
261 270
262static void recv_handler_init(void) 271static void batadv_recv_handler_init(void)
263{ 272{
264 int i; 273 int i;
265 274
266 for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) 275 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
267 recv_packet_handler[i] = recv_unhandled_packet; 276 batadv_rx_handler[i] = batadv_recv_unhandled_packet;
268 277
269 /* batman icmp packet */ 278 /* batman icmp packet */
270 recv_packet_handler[BAT_ICMP] = recv_icmp_packet; 279 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
271 /* unicast packet */ 280 /* unicast packet */
272 recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; 281 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
273 /* fragmented unicast packet */ 282 /* fragmented unicast packet */
274 recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; 283 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
275 /* broadcast packet */ 284 /* broadcast packet */
276 recv_packet_handler[BAT_BCAST] = recv_bcast_packet; 285 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
277 /* vis packet */ 286 /* vis packet */
278 recv_packet_handler[BAT_VIS] = recv_vis_packet; 287 batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
279 /* Translation table query (request or response) */ 288 /* Translation table query (request or response) */
280 recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; 289 batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
281 /* Roaming advertisement */ 290 /* Roaming advertisement */
282 recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; 291 batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
283} 292}
284 293
285int recv_handler_register(uint8_t packet_type, 294int
286 int (*recv_handler)(struct sk_buff *, 295batadv_recv_handler_register(uint8_t packet_type,
287 struct hard_iface *)) 296 int (*recv_handler)(struct sk_buff *,
297 struct batadv_hard_iface *))
288{ 298{
289 if (recv_packet_handler[packet_type] != &recv_unhandled_packet) 299 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
290 return -EBUSY; 300 return -EBUSY;
291 301
292 recv_packet_handler[packet_type] = recv_handler; 302 batadv_rx_handler[packet_type] = recv_handler;
293 return 0; 303 return 0;
294} 304}
295 305
296void recv_handler_unregister(uint8_t packet_type) 306void batadv_recv_handler_unregister(uint8_t packet_type)
297{ 307{
298 recv_packet_handler[packet_type] = recv_unhandled_packet; 308 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
299} 309}
300 310
301static struct bat_algo_ops *bat_algo_get(char *name) 311static struct batadv_algo_ops *batadv_algo_get(char *name)
302{ 312{
303 struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; 313 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
304 struct hlist_node *node; 314 struct hlist_node *node;
305 315
306 hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) { 316 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
307 if (strcmp(bat_algo_ops_tmp->name, name) != 0) 317 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
308 continue; 318 continue;
309 319
@@ -314,15 +324,16 @@ static struct bat_algo_ops *bat_algo_get(char *name)
314 return bat_algo_ops; 324 return bat_algo_ops;
315} 325}
316 326
317int bat_algo_register(struct bat_algo_ops *bat_algo_ops) 327int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
318{ 328{
319 struct bat_algo_ops *bat_algo_ops_tmp; 329 struct batadv_algo_ops *bat_algo_ops_tmp;
320 int ret = -1; 330 int ret;
321 331
322 bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name); 332 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
323 if (bat_algo_ops_tmp) { 333 if (bat_algo_ops_tmp) {
324 pr_info("Trying to register already registered routing algorithm: %s\n", 334 pr_info("Trying to register already registered routing algorithm: %s\n",
325 bat_algo_ops->name); 335 bat_algo_ops->name);
336 ret = -EEXIST;
326 goto out; 337 goto out;
327 } 338 }
328 339
@@ -335,23 +346,24 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
335 !bat_algo_ops->bat_ogm_emit) { 346 !bat_algo_ops->bat_ogm_emit) {
336 pr_info("Routing algo '%s' does not implement required ops\n", 347 pr_info("Routing algo '%s' does not implement required ops\n",
337 bat_algo_ops->name); 348 bat_algo_ops->name);
349 ret = -EINVAL;
338 goto out; 350 goto out;
339 } 351 }
340 352
341 INIT_HLIST_NODE(&bat_algo_ops->list); 353 INIT_HLIST_NODE(&bat_algo_ops->list);
342 hlist_add_head(&bat_algo_ops->list, &bat_algo_list); 354 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
343 ret = 0; 355 ret = 0;
344 356
345out: 357out:
346 return ret; 358 return ret;
347} 359}
348 360
349int bat_algo_select(struct bat_priv *bat_priv, char *name) 361int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
350{ 362{
351 struct bat_algo_ops *bat_algo_ops; 363 struct batadv_algo_ops *bat_algo_ops;
352 int ret = -1; 364 int ret = -EINVAL;
353 365
354 bat_algo_ops = bat_algo_get(name); 366 bat_algo_ops = batadv_algo_get(name);
355 if (!bat_algo_ops) 367 if (!bat_algo_ops)
356 goto out; 368 goto out;
357 369
@@ -362,50 +374,56 @@ out:
362 return ret; 374 return ret;
363} 375}
364 376
365int bat_algo_seq_print_text(struct seq_file *seq, void *offset) 377int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
366{ 378{
367 struct bat_algo_ops *bat_algo_ops; 379 struct batadv_algo_ops *bat_algo_ops;
368 struct hlist_node *node; 380 struct hlist_node *node;
369 381
370 seq_printf(seq, "Available routing algorithms:\n"); 382 seq_printf(seq, "Available routing algorithms:\n");
371 383
372 hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) { 384 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
373 seq_printf(seq, "%s\n", bat_algo_ops->name); 385 seq_printf(seq, "%s\n", bat_algo_ops->name);
374 } 386 }
375 387
376 return 0; 388 return 0;
377} 389}
378 390
379static int param_set_ra(const char *val, const struct kernel_param *kp) 391static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
380{ 392{
381 struct bat_algo_ops *bat_algo_ops; 393 struct batadv_algo_ops *bat_algo_ops;
394 char *algo_name = (char *)val;
395 size_t name_len = strlen(algo_name);
396
397 if (algo_name[name_len - 1] == '\n')
398 algo_name[name_len - 1] = '\0';
382 399
383 bat_algo_ops = bat_algo_get((char *)val); 400 bat_algo_ops = batadv_algo_get(algo_name);
384 if (!bat_algo_ops) { 401 if (!bat_algo_ops) {
385 pr_err("Routing algorithm '%s' is not supported\n", val); 402 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
386 return -EINVAL; 403 return -EINVAL;
387 } 404 }
388 405
389 return param_set_copystring(val, kp); 406 return param_set_copystring(algo_name, kp);
390} 407}
391 408
392static const struct kernel_param_ops param_ops_ra = { 409static const struct kernel_param_ops batadv_param_ops_ra = {
393 .set = param_set_ra, 410 .set = batadv_param_set_ra,
394 .get = param_get_string, 411 .get = param_get_string,
395}; 412};
396 413
397static struct kparam_string __param_string_ra = { 414static struct kparam_string batadv_param_string_ra = {
398 .maxlen = sizeof(bat_routing_algo), 415 .maxlen = sizeof(batadv_routing_algo),
399 .string = bat_routing_algo, 416 .string = batadv_routing_algo,
400}; 417};
401 418
402module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644); 419module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
403module_init(batman_init); 420 0644);
404module_exit(batman_exit); 421module_init(batadv_init);
422module_exit(batadv_exit);
405 423
406MODULE_LICENSE("GPL"); 424MODULE_LICENSE("GPL");
407 425
408MODULE_AUTHOR(DRIVER_AUTHOR); 426MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
409MODULE_DESCRIPTION(DRIVER_DESC); 427MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
410MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE); 428MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
411MODULE_VERSION(SOURCE_VERSION); 429MODULE_VERSION(BATADV_SOURCE_VERSION);
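
The rewritten main.c dispatches every incoming frame through batadv_rx_handler[], a 256-entry function-pointer table indexed by the packet-type byte: each slot starts out pointing at a reject handler, so unknown types are dropped and batadv_recv_handler_register() can refuse an occupied slot with -EBUSY. A self-contained sketch of the same pattern, with struct sk_buff left opaque and simplified return codes:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NET_RX_SUCCESS 0
#define NET_RX_DROP    1

struct sk_buff;			/* opaque stand-in for the kernel type */

typedef int (*rx_handler_t)(struct sk_buff *skb);

static int recv_unhandled(struct sk_buff *skb)
{
	(void)skb;
	return NET_RX_DROP;	/* unknown packet types are dropped */
}

static rx_handler_t rx_handler[256];

static void rx_handler_init(void)
{
	for (size_t i = 0; i < 256; i++)
		rx_handler[i] = recv_unhandled;
}

static int rx_handler_register(uint8_t type, rx_handler_t h)
{
	if (rx_handler[type] != recv_unhandled)
		return -EBUSY;	/* slot already owned by another handler */
	rx_handler[type] = h;
	return 0;
}

static int recv_icmp(struct sk_buff *skb)
{
	(void)skb;
	return NET_RX_SUCCESS;
}

int main(void)
{
	rx_handler_init();
	rx_handler_register(0x02, recv_icmp);	/* BATADV_ICMP */
	printf("0x02 -> %d, 0xff -> %d\n",
	       rx_handler[0x02](NULL), rx_handler[0xff](NULL));
	return 0;
}

The table costs one indirect call per packet and makes registration O(1); the alternative, a switch over packet types, would mean touching this file for every new packet type a routing algorithm introduces.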
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index f4a3ec003479..5d8fa0757947 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,100 +15,106 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_MAIN_H_ 20#ifndef _NET_BATMAN_ADV_MAIN_H_
23#define _NET_BATMAN_ADV_MAIN_H_ 21#define _NET_BATMAN_ADV_MAIN_H_
24 22
25#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \ 23#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
26 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>" 24 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
27#define DRIVER_DESC "B.A.T.M.A.N. advanced" 25#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
28#define DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
29 27
30#ifndef SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
31#define SOURCE_VERSION "2012.2.0" 29#define BATADV_SOURCE_VERSION "2012.3.0"
32#endif 30#endif
33 31
34/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
35 33
36#define TQ_MAX_VALUE 255 34#define BATADV_TQ_MAX_VALUE 255
37#define JITTER 20 35#define BATADV_JITTER 20
38 36
39 /* Time To Live of broadcast messages */ 37/* Time To Live of broadcast messages */
40#define TTL 50 38#define BATADV_TTL 50
41 39
42/* purge originators after time in seconds if no valid packet comes in 40/* purge originators after time in seconds if no valid packet comes in
43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ 41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
44#define PURGE_TIMEOUT 200000 /* 200 seconds */ 42 */
45#define TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ 43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ 44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
47/* sliding packet range of received originator messages in sequence numbers 46/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 47 * (should be a multiple of our word size)
49#define TQ_LOCAL_WINDOW_SIZE 64 48 */
50#define TT_REQUEST_TIMEOUT 3000 /* milliseconds we have to keep 49#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
51 * pending tt_req */ 50/* milliseconds we have to keep pending tt_req */
51#define BATADV_TT_REQUEST_TIMEOUT 3000
52 52
53#define TQ_GLOBAL_WINDOW_SIZE 5 53#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
54#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 54#define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
55#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 55#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
56#define TQ_TOTAL_BIDRECT_LIMIT 1 56#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
57 57
58#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ 58/* number of OGMs sent with the last tt diff */
59#define BATADV_TT_OGM_APPEND_MAX 3
59 60
60#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most 61/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
61 * ROAMING_MAX_COUNT times in milliseconds */ 62 * milliseconds
62#define ROAMING_MAX_COUNT 5 63 */
64#define BATADV_ROAMING_MAX_TIME 20000
65#define BATADV_ROAMING_MAX_COUNT 5
63 66
64#define NO_FLAGS 0 67#define BATADV_NO_FLAGS 0
65 68
66#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ 69#define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
67 70
68#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) 71#define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
69 72
70#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 73#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
71 74
72#define VIS_INTERVAL 5000 /* 5 seconds */ 75#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
73 76
74/* how much worse secondary interfaces may be to be considered as bonding 77/* how much worse secondary interfaces may be to be considered as bonding
75 * candidates */ 78 * candidates
76#define BONDING_TQ_THRESHOLD 50 79 */
80#define BATADV_BONDING_TQ_THRESHOLD 50
77 81
78/* should not be bigger than 512 bytes or change the size of 82/* should not be bigger than 512 bytes or change the size of
79 * forw_packet->direct_link_flags */ 83 * forw_packet->direct_link_flags
80#define MAX_AGGREGATION_BYTES 512 84 */
81#define MAX_AGGREGATION_MS 100 85#define BATADV_MAX_AGGREGATION_BYTES 512
86#define BATADV_MAX_AGGREGATION_MS 100
82 87
83#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ 88#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
84#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) 89#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
85#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) 90#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
86 91
87#define DUPLIST_SIZE 16 92#define BATADV_DUPLIST_SIZE 16
88#define DUPLIST_TIMEOUT 500 /* 500 ms */ 93#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
89/* don't reset again within 30 seconds */ 94/* don't reset again within 30 seconds */
90#define RESET_PROTECTION_MS 30000 95#define BATADV_RESET_PROTECTION_MS 30000
91#define EXPECTED_SEQNO_RANGE 65536 96#define BATADV_EXPECTED_SEQNO_RANGE 65536
92 97
93enum mesh_state { 98enum batadv_mesh_state {
94 MESH_INACTIVE, 99 BATADV_MESH_INACTIVE,
95 MESH_ACTIVE, 100 BATADV_MESH_ACTIVE,
96 MESH_DEACTIVATING 101 BATADV_MESH_DEACTIVATING,
97}; 102};
98 103
99#define BCAST_QUEUE_LEN 256 104#define BATADV_BCAST_QUEUE_LEN 256
100#define BATMAN_QUEUE_LEN 256 105#define BATADV_BATMAN_QUEUE_LEN 256
101 106
102enum uev_action { 107enum batadv_uev_action {
103 UEV_ADD = 0, 108 BATADV_UEV_ADD = 0,
104 UEV_DEL, 109 BATADV_UEV_DEL,
105 UEV_CHANGE 110 BATADV_UEV_CHANGE,
106}; 111};
107 112
108enum uev_type { 113enum batadv_uev_type {
109 UEV_GW = 0 114 BATADV_UEV_GW = 0,
110}; 115};
111 116
112#define GW_THRESHOLD 50 117#define BATADV_GW_THRESHOLD 50
113 118
114/* Debug Messages */ 119/* Debug Messages */
115#ifdef pr_fmt 120#ifdef pr_fmt
@@ -119,12 +124,12 @@ enum uev_type {
119#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 124#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
120 125
121/* all messages related to routing / flooding / broadcasting / etc */ 126/* all messages related to routing / flooding / broadcasting / etc */
122enum dbg_level { 127enum batadv_dbg_level {
123 DBG_BATMAN = 1 << 0, 128 BATADV_DBG_BATMAN = 1 << 0,
124 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ 129 BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
125 DBG_TT = 1 << 2, /* translation table operations */ 130 BATADV_DBG_TT = 1 << 2, /* translation table operations */
126 DBG_BLA = 1 << 3, /* bridge loop avoidance */ 131 BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
127 DBG_ALL = 15 132 BATADV_DBG_ALL = 15,
128}; 133};
129 134
130/* Kernel headers */ 135/* Kernel headers */
@@ -138,73 +143,75 @@ enum dbg_level {
138#include <linux/kthread.h> /* kernel threads */ 143#include <linux/kthread.h> /* kernel threads */
139#include <linux/pkt_sched.h> /* schedule types */ 144#include <linux/pkt_sched.h> /* schedule types */
140#include <linux/workqueue.h> /* workqueue */ 145#include <linux/workqueue.h> /* workqueue */
146#include <linux/percpu.h>
141#include <linux/slab.h> 147#include <linux/slab.h>
142#include <net/sock.h> /* struct sock */ 148#include <net/sock.h> /* struct sock */
143#include <linux/jiffies.h> 149#include <linux/jiffies.h>
144#include <linux/seq_file.h> 150#include <linux/seq_file.h>
145#include "types.h" 151#include "types.h"
146 152
147extern char bat_routing_algo[]; 153extern char batadv_routing_algo[];
148extern struct list_head hardif_list; 154extern struct list_head batadv_hardif_list;
149 155
150extern unsigned char broadcast_addr[]; 156extern unsigned char batadv_broadcast_addr[];
151extern struct workqueue_struct *bat_event_workqueue; 157extern struct workqueue_struct *batadv_event_workqueue;
152 158
153int mesh_init(struct net_device *soft_iface); 159int batadv_mesh_init(struct net_device *soft_iface);
154void mesh_free(struct net_device *soft_iface); 160void batadv_mesh_free(struct net_device *soft_iface);
155void inc_module_count(void); 161void batadv_inc_module_count(void);
156void dec_module_count(void); 162void batadv_dec_module_count(void);
157int is_my_mac(const uint8_t *addr); 163int batadv_is_my_mac(const uint8_t *addr);
158int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 164int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
159 struct packet_type *ptype, struct net_device *orig_dev); 165 struct packet_type *ptype,
160int recv_handler_register(uint8_t packet_type, 166 struct net_device *orig_dev);
161 int (*recv_handler)(struct sk_buff *, 167int
162 struct hard_iface *)); 168batadv_recv_handler_register(uint8_t packet_type,
163void recv_handler_unregister(uint8_t packet_type); 169 int (*recv_handler)(struct sk_buff *,
164int bat_algo_register(struct bat_algo_ops *bat_algo_ops); 170 struct batadv_hard_iface *));
165int bat_algo_select(struct bat_priv *bat_priv, char *name); 171void batadv_recv_handler_unregister(uint8_t packet_type);
166int bat_algo_seq_print_text(struct seq_file *seq, void *offset); 172int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
173int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
174int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
167 175
168#ifdef CONFIG_BATMAN_ADV_DEBUG 176#ifdef CONFIG_BATMAN_ADV_DEBUG
169int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); 177int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
178__printf(2, 3);
170 179
171#define bat_dbg(type, bat_priv, fmt, arg...) \ 180#define batadv_dbg(type, bat_priv, fmt, arg...) \
172 do { \ 181 do { \
173 if (atomic_read(&bat_priv->log_level) & type) \ 182 if (atomic_read(&bat_priv->log_level) & type) \
174 debug_log(bat_priv, fmt, ## arg); \ 183 batadv_debug_log(bat_priv, fmt, ## arg);\
175 } \ 184 } \
176 while (0) 185 while (0)
177#else /* !CONFIG_BATMAN_ADV_DEBUG */ 186#else /* !CONFIG_BATMAN_ADV_DEBUG */
178__printf(3, 4) 187__printf(3, 4)
179static inline void bat_dbg(int type __always_unused, 188static inline void batadv_dbg(int type __always_unused,
180 struct bat_priv *bat_priv __always_unused, 189 struct batadv_priv *bat_priv __always_unused,
181 const char *fmt __always_unused, ...) 190 const char *fmt __always_unused, ...)
182{ 191{
183} 192}
184#endif 193#endif
185 194
186#define bat_info(net_dev, fmt, arg...) \ 195#define batadv_info(net_dev, fmt, arg...) \
187 do { \ 196 do { \
188 struct net_device *_netdev = (net_dev); \ 197 struct net_device *_netdev = (net_dev); \
189 struct bat_priv *_batpriv = netdev_priv(_netdev); \ 198 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
190 bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ 199 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
191 pr_info("%s: " fmt, _netdev->name, ## arg); \ 200 pr_info("%s: " fmt, _netdev->name, ## arg); \
192 } while (0) 201 } while (0)
193#define bat_err(net_dev, fmt, arg...) \ 202#define batadv_err(net_dev, fmt, arg...) \
194 do { \ 203 do { \
195 struct net_device *_netdev = (net_dev); \ 204 struct net_device *_netdev = (net_dev); \
196 struct bat_priv *_batpriv = netdev_priv(_netdev); \ 205 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
197 bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ 206 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
198 pr_err("%s: " fmt, _netdev->name, ## arg); \ 207 pr_err("%s: " fmt, _netdev->name, ## arg); \
199 } while (0) 208 } while (0)
200 209
201/** 210/* returns 1 if they are the same ethernet addr
202 * returns 1 if they are the same ethernet addr
203 * 211 *
204 * note: can't use compare_ether_addr() as it requires aligned memory 212 * note: can't use compare_ether_addr() as it requires aligned memory
205 */ 213 */
206 214static inline int batadv_compare_eth(const void *data1, const void *data2)
207static inline int compare_eth(const void *data1, const void *data2)
208{ 215{
209 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 216 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
210} 217}
@@ -216,15 +223,16 @@ static inline int compare_eth(const void *data1, const void *data2)
216 * 223 *
217 * Returns true if current time is after timestamp + timeout 224 * Returns true if current time is after timestamp + timeout
218 */ 225 */
219static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout) 226static inline bool batadv_has_timed_out(unsigned long timestamp,
227 unsigned int timeout)
220{ 228{
221 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout)); 229 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
222} 230}
223 231
224#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 232#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
225 233
226/* Returns the smallest signed integer in two's complement with the sizeof x */ 234/* Returns the smallest signed integer in two's complement with the sizeof x */
227#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u))) 235#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
228 236
229/* Checks if a sequence number x is a predecessor/successor of y. 237/* Checks if a sequence number x is a predecessor/successor of y.
230 * they handle overflows/underflows and can correctly check for a 238 * they handle overflows/underflows and can correctly check for a
@@ -234,12 +242,39 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
234 * - when adding nothing - it is neither a predecessor nor a successor 242 * - when adding nothing - it is neither a predecessor nor a successor
235 * - before adding more than 127 to the starting value - it is a predecessor, 243 * - before adding more than 127 to the starting value - it is a predecessor,
236 * - when adding 128 - it is neither a predecessor nor a successor, 244 * - when adding 128 - it is neither a predecessor nor a successor,
237 * - after adding more than 127 to the starting value - it is a successor */ 245 * - after adding more than 127 to the starting value - it is a successor
238#define seq_before(x, y) ({typeof(x) _d1 = (x); \ 246 */
239 typeof(y) _d2 = (y); \ 247#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
240 typeof(x) _dummy = (_d1 - _d2); \ 248 typeof(y) _d2 = (y); \
241 (void) (&_d1 == &_d2); \ 249 typeof(x) _dummy = (_d1 - _d2); \
242 _dummy > smallest_signed_int(_dummy); }) 250 (void) (&_d1 == &_d2); \
243#define seq_after(x, y) seq_before(y, x) 251 _dummy > batadv_smallest_signed_int(_dummy); })
252#define batadv_seq_after(x, y) batadv_seq_before(y, x)
253
254/* Stop preemption on local cpu while incrementing the counter */
255static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
256 size_t count)
257{
258 int cpu = get_cpu();
259 per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
260 put_cpu();
261}
262
263#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
264
265/* Sum and return the cpu-local counters for index 'idx' */
266static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
267 size_t idx)
268{
269 uint64_t *counters, sum = 0;
270 int cpu;
271
272 for_each_possible_cpu(cpu) {
273 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
274 sum += counters[idx];
275 }
276
277 return sum;
278}
244 279
245#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 280#endif /* _NET_BATMAN_ADV_MAIN_H_ */
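
batadv_seq_before()/batadv_seq_after() above compare sequence numbers in modular arithmetic: the unsigned difference x - y is tested against the smallest signed value representable in the operand's width, so the ordering stays correct across wraparound. A standalone sketch of the idiom for 8-bit sequence numbers (it uses the same GCC typeof/statement-expression extensions as the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))

#define seq_before(x, y) ({ typeof(x) _d1 = (x); \
			    typeof(y) _d2 = (y); \
			    typeof(x) _dummy = (_d1 - _d2); \
			    (void)(&_d1 == &_d2); /* enforce same type */ \
			    _dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)

int main(void)
{
	uint8_t a = 250, b = 5;	/* b follows a across the wrap at 256 */
	uint8_t c = a + 128;	/* exactly half the range away */

	printf("seq_before(250, 5) = %d\n", seq_before(a, b));	/* 1 */
	printf("seq_after(5, 250)  = %d\n", seq_after(b, a));	/* 1 */
	printf("seq_before(5, 250) = %d\n", seq_before(b, a));	/* 0 */
	/* at a distance of exactly 128, neither relation holds */
	printf("seq_before(250, 122) = %d\n", seq_before(a, c));	/* 0 */
	printf("seq_after(250, 122)  = %d\n", seq_after(a, c));	/* 0 */
	return 0;
}

The new per-cpu counter helpers in the same hunk follow the usual kernel pattern: batadv_add_counter() pins the writer to its local slot with get_cpu()/put_cpu(), while batadv_sum_counter() totals all slots with for_each_possible_cpu(), trading exact instantaneous reads for uncontended updates.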
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 41147942ba53..ac9bdf8f80a6 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -30,50 +28,52 @@
30#include "soft-interface.h" 28#include "soft-interface.h"
31#include "bridge_loop_avoidance.h" 29#include "bridge_loop_avoidance.h"
32 30
33static void purge_orig(struct work_struct *work); 31static void batadv_purge_orig(struct work_struct *work);
34 32
35static void start_purge_timer(struct bat_priv *bat_priv) 33static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
36{ 34{
37 INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); 35 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
38 queue_delayed_work(bat_event_workqueue, 36 queue_delayed_work(batadv_event_workqueue,
39 &bat_priv->orig_work, msecs_to_jiffies(1000)); 37 &bat_priv->orig_work, msecs_to_jiffies(1000));
40} 38}
41 39
42/* returns 1 if they are the same originator */ 40/* returns 1 if they are the same originator */
43static int compare_orig(const struct hlist_node *node, const void *data2) 41static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
44{ 42{
45 const void *data1 = container_of(node, struct orig_node, hash_entry); 43 const void *data1 = container_of(node, struct batadv_orig_node,
44 hash_entry);
46 45
47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
48} 47}
49 48
50int originator_init(struct bat_priv *bat_priv) 49int batadv_originator_init(struct batadv_priv *bat_priv)
51{ 50{
52 if (bat_priv->orig_hash) 51 if (bat_priv->orig_hash)
53 return 1; 52 return 0;
54 53
55 bat_priv->orig_hash = hash_new(1024); 54 bat_priv->orig_hash = batadv_hash_new(1024);
56 55
57 if (!bat_priv->orig_hash) 56 if (!bat_priv->orig_hash)
58 goto err; 57 goto err;
59 58
60 start_purge_timer(bat_priv); 59 batadv_start_purge_timer(bat_priv);
61 return 1; 60 return 0;
62 61
63err: 62err:
64 return 0; 63 return -ENOMEM;
65} 64}
66 65
67void neigh_node_free_ref(struct neigh_node *neigh_node) 66void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
68{ 67{
69 if (atomic_dec_and_test(&neigh_node->refcount)) 68 if (atomic_dec_and_test(&neigh_node->refcount))
70 kfree_rcu(neigh_node, rcu); 69 kfree_rcu(neigh_node, rcu);
71} 70}
72 71
73/* increases the refcounter of a found router */ 72/* increases the refcounter of a found router */
74struct neigh_node *orig_node_get_router(struct orig_node *orig_node) 73struct batadv_neigh_node *
74batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
75{ 75{
76 struct neigh_node *router; 76 struct batadv_neigh_node *router;
77 77
78 rcu_read_lock(); 78 rcu_read_lock();
79 router = rcu_dereference(orig_node->router); 79 router = rcu_dereference(orig_node->router);
@@ -85,12 +85,12 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
85 return router; 85 return router;
86} 86}
87 87
88struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, 88struct batadv_neigh_node *
89 const uint8_t *neigh_addr, 89batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
90 uint32_t seqno) 90 const uint8_t *neigh_addr, uint32_t seqno)
91{ 91{
92 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 92 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
93 struct neigh_node *neigh_node; 93 struct batadv_neigh_node *neigh_node;
94 94
95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); 95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
96 if (!neigh_node) 96 if (!neigh_node)
@@ -104,21 +104,21 @@ struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
104 /* extra reference for return */ 104 /* extra reference for return */
105 atomic_set(&neigh_node->refcount, 2); 105 atomic_set(&neigh_node->refcount, 2);
106 106
107 bat_dbg(DBG_BATMAN, bat_priv, 107 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
108 "Creating new neighbor %pM, initial seqno %d\n", 108 "Creating new neighbor %pM, initial seqno %d\n",
109 neigh_addr, seqno); 109 neigh_addr, seqno);
110 110
111out: 111out:
112 return neigh_node; 112 return neigh_node;
113} 113}
114 114
115static void orig_node_free_rcu(struct rcu_head *rcu) 115static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
116{ 116{
117 struct hlist_node *node, *node_tmp; 117 struct hlist_node *node, *node_tmp;
118 struct neigh_node *neigh_node, *tmp_neigh_node; 118 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
119 struct orig_node *orig_node; 119 struct batadv_orig_node *orig_node;
120 120
121 orig_node = container_of(rcu, struct orig_node, rcu); 121 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
122 122
123 spin_lock_bh(&orig_node->neigh_list_lock); 123 spin_lock_bh(&orig_node->neigh_list_lock);
124 124
@@ -126,21 +126,21 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
126 list_for_each_entry_safe(neigh_node, tmp_neigh_node, 126 list_for_each_entry_safe(neigh_node, tmp_neigh_node,
127 &orig_node->bond_list, bonding_list) { 127 &orig_node->bond_list, bonding_list) {
128 list_del_rcu(&neigh_node->bonding_list); 128 list_del_rcu(&neigh_node->bonding_list);
129 neigh_node_free_ref(neigh_node); 129 batadv_neigh_node_free_ref(neigh_node);
130 } 130 }
131 131
132 /* for all neighbors towards this originator ... */ 132 /* for all neighbors towards this originator ... */
133 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 133 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
134 &orig_node->neigh_list, list) { 134 &orig_node->neigh_list, list) {
135 hlist_del_rcu(&neigh_node->list); 135 hlist_del_rcu(&neigh_node->list);
136 neigh_node_free_ref(neigh_node); 136 batadv_neigh_node_free_ref(neigh_node);
137 } 137 }
138 138
139 spin_unlock_bh(&orig_node->neigh_list_lock); 139 spin_unlock_bh(&orig_node->neigh_list_lock);
140 140
141 frag_list_free(&orig_node->frag_list); 141 batadv_frag_list_free(&orig_node->frag_list);
142 tt_global_del_orig(orig_node->bat_priv, orig_node, 142 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
143 "originator timed out"); 143 "originator timed out");
144 144
145 kfree(orig_node->tt_buff); 145 kfree(orig_node->tt_buff);
146 kfree(orig_node->bcast_own); 146 kfree(orig_node->bcast_own);
@@ -148,19 +148,19 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
148 kfree(orig_node); 148 kfree(orig_node);
149} 149}
150 150
151void orig_node_free_ref(struct orig_node *orig_node) 151void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
152{ 152{
153 if (atomic_dec_and_test(&orig_node->refcount)) 153 if (atomic_dec_and_test(&orig_node->refcount))
154 call_rcu(&orig_node->rcu, orig_node_free_rcu); 154 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
155} 155}
156 156
157void originator_free(struct bat_priv *bat_priv) 157void batadv_originator_free(struct batadv_priv *bat_priv)
158{ 158{
159 struct hashtable_t *hash = bat_priv->orig_hash; 159 struct batadv_hashtable *hash = bat_priv->orig_hash;
160 struct hlist_node *node, *node_tmp; 160 struct hlist_node *node, *node_tmp;
161 struct hlist_head *head; 161 struct hlist_head *head;
162 spinlock_t *list_lock; /* spinlock to protect write access */ 162 spinlock_t *list_lock; /* spinlock to protect write access */
163 struct orig_node *orig_node; 163 struct batadv_orig_node *orig_node;
164 uint32_t i; 164 uint32_t i;
165 165
166 if (!hash) 166 if (!hash)
@@ -179,28 +179,31 @@ void originator_free(struct bat_priv *bat_priv)
179 head, hash_entry) { 179 head, hash_entry) {
180 180
181 hlist_del_rcu(node); 181 hlist_del_rcu(node);
182 orig_node_free_ref(orig_node); 182 batadv_orig_node_free_ref(orig_node);
183 } 183 }
184 spin_unlock_bh(list_lock); 184 spin_unlock_bh(list_lock);
185 } 185 }
186 186
187 hash_destroy(hash); 187 batadv_hash_destroy(hash);
188} 188}
189 189
190/* this function finds or creates an originator entry for the given 190/* this function finds or creates an originator entry for the given
191 * address if it does not exist */ 191 * address if it does not exist
192struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr) 192 */
193struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
194 const uint8_t *addr)
193{ 195{
194 struct orig_node *orig_node; 196 struct batadv_orig_node *orig_node;
195 int size; 197 int size;
196 int hash_added; 198 int hash_added;
199 unsigned long reset_time;
197 200
198 orig_node = orig_hash_find(bat_priv, addr); 201 orig_node = batadv_orig_hash_find(bat_priv, addr);
199 if (orig_node) 202 if (orig_node)
200 return orig_node; 203 return orig_node;
201 204
202 bat_dbg(DBG_BATMAN, bat_priv, 205 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
203 "Creating new originator: %pM\n", addr); 206 "Creating new originator: %pM\n", addr);
204 207
205 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC); 208 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
206 if (!orig_node) 209 if (!orig_node)
@@ -226,14 +229,13 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
226 orig_node->tt_buff = NULL; 229 orig_node->tt_buff = NULL;
227 orig_node->tt_buff_len = 0; 230 orig_node->tt_buff_len = 0;
228 atomic_set(&orig_node->tt_size, 0); 231 atomic_set(&orig_node->tt_size, 0);
229 orig_node->bcast_seqno_reset = jiffies - 1 232 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
230 - msecs_to_jiffies(RESET_PROTECTION_MS); 233 orig_node->bcast_seqno_reset = reset_time;
231 orig_node->batman_seqno_reset = jiffies - 1 234 orig_node->batman_seqno_reset = reset_time;
232 - msecs_to_jiffies(RESET_PROTECTION_MS);
233 235
234 atomic_set(&orig_node->bond_candidates, 0); 236 atomic_set(&orig_node->bond_candidates, 0);
235 237
236 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; 238 size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
237 239
238 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); 240 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
239 if (!orig_node->bcast_own) 241 if (!orig_node->bcast_own)
@@ -248,8 +250,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
248 if (!orig_node->bcast_own_sum) 250 if (!orig_node->bcast_own_sum)
249 goto free_bcast_own; 251 goto free_bcast_own;
250 252
251 hash_added = hash_add(bat_priv->orig_hash, compare_orig, 253 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
252 choose_orig, orig_node, &orig_node->hash_entry); 254 batadv_choose_orig, orig_node,
255 &orig_node->hash_entry);
253 if (hash_added != 0) 256 if (hash_added != 0)
254 goto free_bcast_own_sum; 257 goto free_bcast_own_sum;
255 258
@@ -263,14 +266,16 @@ free_orig_node:
263 return NULL; 266 return NULL;
264} 267}
265 268
266static bool purge_orig_neighbors(struct bat_priv *bat_priv, 269static bool
267 struct orig_node *orig_node, 270batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
268 struct neigh_node **best_neigh_node) 271 struct batadv_orig_node *orig_node,
272 struct batadv_neigh_node **best_neigh_node)
269{ 273{
270 struct hlist_node *node, *node_tmp; 274 struct hlist_node *node, *node_tmp;
271 struct neigh_node *neigh_node; 275 struct batadv_neigh_node *neigh_node;
272 bool neigh_purged = false; 276 bool neigh_purged = false;
273 unsigned long last_seen; 277 unsigned long last_seen;
278 struct batadv_hard_iface *if_incoming;
274 279
275 *best_neigh_node = NULL; 280 *best_neigh_node = NULL;
276 281
@@ -280,34 +285,32 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
280 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 285 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
281 &orig_node->neigh_list, list) { 286 &orig_node->neigh_list, list) {
282 287
283 if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) || 288 last_seen = neigh_node->last_seen;
284 (neigh_node->if_incoming->if_status == IF_INACTIVE) || 289 if_incoming = neigh_node->if_incoming;
285 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || 290
286 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { 291 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
287 292 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
288 last_seen = neigh_node->last_seen; 293 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
289 294 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
290 if ((neigh_node->if_incoming->if_status == 295
291 IF_INACTIVE) || 296 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
292 (neigh_node->if_incoming->if_status == 297 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
293 IF_NOT_IN_USE) || 298 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
294 (neigh_node->if_incoming->if_status == 299 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
295 IF_TO_BE_REMOVED)) 300 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
296 bat_dbg(DBG_BATMAN, bat_priv, 301 orig_node->orig, neigh_node->addr,
297 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", 302 if_incoming->net_dev->name);
298 orig_node->orig, neigh_node->addr,
299 neigh_node->if_incoming->net_dev->name);
300 else 303 else
301 bat_dbg(DBG_BATMAN, bat_priv, 304 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
302 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", 305 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
303 orig_node->orig, neigh_node->addr, 306 orig_node->orig, neigh_node->addr,
304 jiffies_to_msecs(last_seen)); 307 jiffies_to_msecs(last_seen));
305 308
306 neigh_purged = true; 309 neigh_purged = true;
307 310
308 hlist_del_rcu(&neigh_node->list); 311 hlist_del_rcu(&neigh_node->list);
309 bonding_candidate_del(orig_node, neigh_node); 312 batadv_bonding_candidate_del(orig_node, neigh_node);
310 neigh_node_free_ref(neigh_node); 313 batadv_neigh_node_free_ref(neigh_node);
311 } else { 314 } else {
312 if ((!*best_neigh_node) || 315 if ((!*best_neigh_node) ||
313 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) 316 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
@@ -319,33 +322,35 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
319 return neigh_purged; 322 return neigh_purged;
320} 323}
321 324
322static bool purge_orig_node(struct bat_priv *bat_priv, 325static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
323 struct orig_node *orig_node) 326 struct batadv_orig_node *orig_node)
324{ 327{
325 struct neigh_node *best_neigh_node; 328 struct batadv_neigh_node *best_neigh_node;
326 329
327 if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { 330 if (batadv_has_timed_out(orig_node->last_seen,
328 bat_dbg(DBG_BATMAN, bat_priv, 331 2 * BATADV_PURGE_TIMEOUT)) {
329 "Originator timeout: originator %pM, last_seen %u\n", 332 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
330 orig_node->orig, 333 "Originator timeout: originator %pM, last_seen %u\n",
331 jiffies_to_msecs(orig_node->last_seen)); 334 orig_node->orig,
335 jiffies_to_msecs(orig_node->last_seen));
332 return true; 336 return true;
333 } else { 337 } else {
334 if (purge_orig_neighbors(bat_priv, orig_node, 338 if (batadv_purge_orig_neighbors(bat_priv, orig_node,
335 &best_neigh_node)) 339 &best_neigh_node))
336 update_route(bat_priv, orig_node, best_neigh_node); 340 batadv_update_route(bat_priv, orig_node,
341 best_neigh_node);
337 } 342 }
338 343
339 return false; 344 return false;
340} 345}
341 346
342static void _purge_orig(struct bat_priv *bat_priv) 347static void _batadv_purge_orig(struct batadv_priv *bat_priv)
343{ 348{
344 struct hashtable_t *hash = bat_priv->orig_hash; 349 struct batadv_hashtable *hash = bat_priv->orig_hash;
345 struct hlist_node *node, *node_tmp; 350 struct hlist_node *node, *node_tmp;
346 struct hlist_head *head; 351 struct hlist_head *head;
347 spinlock_t *list_lock; /* spinlock to protect write access */ 352 spinlock_t *list_lock; /* spinlock to protect write access */
348 struct orig_node *orig_node; 353 struct batadv_orig_node *orig_node;
349 uint32_t i; 354 uint32_t i;
350 355
351 if (!hash) 356 if (!hash)
@@ -359,58 +364,60 @@ static void _purge_orig(struct bat_priv *bat_priv)
359 spin_lock_bh(list_lock); 364 spin_lock_bh(list_lock);
360 hlist_for_each_entry_safe(orig_node, node, node_tmp, 365 hlist_for_each_entry_safe(orig_node, node, node_tmp,
361 head, hash_entry) { 366 head, hash_entry) {
362 if (purge_orig_node(bat_priv, orig_node)) { 367 if (batadv_purge_orig_node(bat_priv, orig_node)) {
363 if (orig_node->gw_flags) 368 if (orig_node->gw_flags)
364 gw_node_delete(bat_priv, orig_node); 369 batadv_gw_node_delete(bat_priv,
370 orig_node);
365 hlist_del_rcu(node); 371 hlist_del_rcu(node);
366 orig_node_free_ref(orig_node); 372 batadv_orig_node_free_ref(orig_node);
367 continue; 373 continue;
368 } 374 }
369 375
370 if (has_timed_out(orig_node->last_frag_packet, 376 if (batadv_has_timed_out(orig_node->last_frag_packet,
371 FRAG_TIMEOUT)) 377 BATADV_FRAG_TIMEOUT))
372 frag_list_free(&orig_node->frag_list); 378 batadv_frag_list_free(&orig_node->frag_list);
373 } 379 }
374 spin_unlock_bh(list_lock); 380 spin_unlock_bh(list_lock);
375 } 381 }
376 382
377 gw_node_purge(bat_priv); 383 batadv_gw_node_purge(bat_priv);
378 gw_election(bat_priv); 384 batadv_gw_election(bat_priv);
379} 385}
380 386
381static void purge_orig(struct work_struct *work) 387static void batadv_purge_orig(struct work_struct *work)
382{ 388{
383 struct delayed_work *delayed_work = 389 struct delayed_work *delayed_work;
384 container_of(work, struct delayed_work, work); 390 struct batadv_priv *bat_priv;
385 struct bat_priv *bat_priv =
386 container_of(delayed_work, struct bat_priv, orig_work);
387 391
388 _purge_orig(bat_priv); 392 delayed_work = container_of(work, struct delayed_work, work);
389 start_purge_timer(bat_priv); 393 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
394 _batadv_purge_orig(bat_priv);
395 batadv_start_purge_timer(bat_priv);
390} 396}
391 397
392void purge_orig_ref(struct bat_priv *bat_priv) 398void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
393{ 399{
394 _purge_orig(bat_priv); 400 _batadv_purge_orig(bat_priv);
395} 401}
396 402
397int orig_seq_print_text(struct seq_file *seq, void *offset) 403int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
398{ 404{
399 struct net_device *net_dev = (struct net_device *)seq->private; 405 struct net_device *net_dev = (struct net_device *)seq->private;
400 struct bat_priv *bat_priv = netdev_priv(net_dev); 406 struct batadv_priv *bat_priv = netdev_priv(net_dev);
401 struct hashtable_t *hash = bat_priv->orig_hash; 407 struct batadv_hashtable *hash = bat_priv->orig_hash;
402 struct hlist_node *node, *node_tmp; 408 struct hlist_node *node, *node_tmp;
403 struct hlist_head *head; 409 struct hlist_head *head;
404 struct hard_iface *primary_if; 410 struct batadv_hard_iface *primary_if;
405 struct orig_node *orig_node; 411 struct batadv_orig_node *orig_node;
406 struct neigh_node *neigh_node, *neigh_node_tmp; 412 struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
407 int batman_count = 0; 413 int batman_count = 0;
408 int last_seen_secs; 414 int last_seen_secs;
409 int last_seen_msecs; 415 int last_seen_msecs;
416 unsigned long last_seen_jiffies;
410 uint32_t i; 417 uint32_t i;
411 int ret = 0; 418 int ret = 0;
412 419
413 primary_if = primary_if_get_selected(bat_priv); 420 primary_if = batadv_primary_if_get_selected(bat_priv);
414 421
415 if (!primary_if) { 422 if (!primary_if) {
416 ret = seq_printf(seq, 423 ret = seq_printf(seq,
@@ -419,7 +426,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
419 goto out; 426 goto out;
420 } 427 }
421 428
422 if (primary_if->if_status != IF_ACTIVE) { 429 if (primary_if->if_status != BATADV_IF_ACTIVE) {
423 ret = seq_printf(seq, 430 ret = seq_printf(seq,
424 "BATMAN mesh %s disabled - primary interface not active\n", 431 "BATMAN mesh %s disabled - primary interface not active\n",
425 net_dev->name); 432 net_dev->name);
@@ -427,28 +434,28 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
427 } 434 }
428 435
429 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 436 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
430 SOURCE_VERSION, primary_if->net_dev->name, 437 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
431 primary_if->net_dev->dev_addr, net_dev->name); 438 primary_if->net_dev->dev_addr, net_dev->name);
432 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 439 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
433 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 440 "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
434 "outgoingIF", "Potential nexthops"); 441 "Nexthop", "outgoingIF", "Potential nexthops");
435 442
436 for (i = 0; i < hash->size; i++) { 443 for (i = 0; i < hash->size; i++) {
437 head = &hash->table[i]; 444 head = &hash->table[i];
438 445
439 rcu_read_lock(); 446 rcu_read_lock();
440 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 447 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
441 neigh_node = orig_node_get_router(orig_node); 448 neigh_node = batadv_orig_node_get_router(orig_node);
442 if (!neigh_node) 449 if (!neigh_node)
443 continue; 450 continue;
444 451
445 if (neigh_node->tq_avg == 0) 452 if (neigh_node->tq_avg == 0)
446 goto next; 453 goto next;
447 454
448 last_seen_secs = jiffies_to_msecs(jiffies - 455 last_seen_jiffies = jiffies - orig_node->last_seen;
449 orig_node->last_seen) / 1000; 456 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
450 last_seen_msecs = jiffies_to_msecs(jiffies - 457 last_seen_secs = last_seen_msecs / 1000;
451 orig_node->last_seen) % 1000; 458 last_seen_msecs = last_seen_msecs % 1000;
452 459
453 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 460 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
454 orig_node->orig, last_seen_secs, 461 orig_node->orig, last_seen_secs,
@@ -467,7 +474,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
467 batman_count++; 474 batman_count++;
468 475
469next: 476next:
470 neigh_node_free_ref(neigh_node); 477 batadv_neigh_node_free_ref(neigh_node);
471 } 478 }
472 rcu_read_unlock(); 479 rcu_read_unlock();
473 } 480 }
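
The last-seen refactor in the hunk above converts the jiffies delta to milliseconds once and derives the secs/msecs pair from that, instead of calling jiffies_to_msecs() twice on the same delta. A small sketch of the split, with jiffies_to_msecs() stubbed for userspace under an assumed HZ of 100:

#include <stdio.h>

#define HZ 100 /* assumed tick rate for this userspace stub */

/* stand-in for the kernel's jiffies_to_msecs() at this HZ */
static unsigned int jiffies_to_msecs(unsigned long j)
{
	return (unsigned int)(j * (1000 / HZ));
}

int main(void)
{
	unsigned long last_seen_jiffies = 123; /* jiffies - orig_node->last_seen */
	int last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
	int last_seen_secs = last_seen_msecs / 1000;

	last_seen_msecs = last_seen_msecs % 1000;

	/* same "%4i.%03is" shape orig_seq_print_text() emits */
	printf("last-seen: %4i.%03is\n", last_seen_secs, last_seen_msecs);
	return 0;
}
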
@@ -477,27 +484,29 @@ next:
477 484
478out: 485out:
479 if (primary_if) 486 if (primary_if)
480 hardif_free_ref(primary_if); 487 batadv_hardif_free_ref(primary_if);
481 return ret; 488 return ret;
482} 489}
483 490
484static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) 491static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
492 int max_if_num)
485{ 493{
486 void *data_ptr; 494 void *data_ptr;
495 size_t data_size, old_size;
487 496
488 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, 497 data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
489 GFP_ATOMIC); 498 old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
499 data_ptr = kmalloc(data_size, GFP_ATOMIC);
490 if (!data_ptr) 500 if (!data_ptr)
491 return -1; 501 return -ENOMEM;
492 502
493 memcpy(data_ptr, orig_node->bcast_own, 503 memcpy(data_ptr, orig_node->bcast_own, old_size);
494 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
495 kfree(orig_node->bcast_own); 504 kfree(orig_node->bcast_own);
496 orig_node->bcast_own = data_ptr; 505 orig_node->bcast_own = data_ptr;
497 506
498 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 507 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
499 if (!data_ptr) 508 if (!data_ptr)
500 return -1; 509 return -ENOMEM;
501 510
502 memcpy(data_ptr, orig_node->bcast_own_sum, 511 memcpy(data_ptr, orig_node->bcast_own_sum,
503 (max_if_num - 1) * sizeof(uint8_t)); 512 (max_if_num - 1) * sizeof(uint8_t));
@@ -507,28 +516,30 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
507 return 0; 516 return 0;
508} 517}
509 518
510int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) 519int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
520 int max_if_num)
511{ 521{
512 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 522 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
513 struct hashtable_t *hash = bat_priv->orig_hash; 523 struct batadv_hashtable *hash = bat_priv->orig_hash;
514 struct hlist_node *node; 524 struct hlist_node *node;
515 struct hlist_head *head; 525 struct hlist_head *head;
516 struct orig_node *orig_node; 526 struct batadv_orig_node *orig_node;
517 uint32_t i; 527 uint32_t i;
518 int ret; 528 int ret;
519 529
520 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 530 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
521 * if_num */ 531 * if_num
532 */
522 for (i = 0; i < hash->size; i++) { 533 for (i = 0; i < hash->size; i++) {
523 head = &hash->table[i]; 534 head = &hash->table[i];
524 535
525 rcu_read_lock(); 536 rcu_read_lock();
526 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 537 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
527 spin_lock_bh(&orig_node->ogm_cnt_lock); 538 spin_lock_bh(&orig_node->ogm_cnt_lock);
528 ret = orig_node_add_if(orig_node, max_if_num); 539 ret = batadv_orig_node_add_if(orig_node, max_if_num);
529 spin_unlock_bh(&orig_node->ogm_cnt_lock); 540 spin_unlock_bh(&orig_node->ogm_cnt_lock);
530 541
531 if (ret == -1) 542 if (ret == -ENOMEM)
532 goto err; 543 goto err;
533 } 544 }
534 rcu_read_unlock(); 545 rcu_read_unlock();
@@ -541,8 +552,8 @@ err:
541 return -ENOMEM; 552 return -ENOMEM;
542} 553}
543 554
544static int orig_node_del_if(struct orig_node *orig_node, 555static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
545 int max_if_num, int del_if_num) 556 int max_if_num, int del_if_num)
546{ 557{
547 void *data_ptr = NULL; 558 void *data_ptr = NULL;
548 int chunk_size; 559 int chunk_size;
@@ -551,10 +562,10 @@ static int orig_node_del_if(struct orig_node *orig_node,
551 if (max_if_num == 0) 562 if (max_if_num == 0)
552 goto free_bcast_own; 563 goto free_bcast_own;
553 564
554 chunk_size = sizeof(unsigned long) * NUM_WORDS; 565 chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
555 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); 566 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
556 if (!data_ptr) 567 if (!data_ptr)
557 return -1; 568 return -ENOMEM;
558 569
559 /* copy first part */ 570 /* copy first part */
560 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -573,7 +584,7 @@ free_bcast_own:
573 584
574 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 585 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
575 if (!data_ptr) 586 if (!data_ptr)
576 return -1; 587 return -ENOMEM;
577 588
578 memcpy(data_ptr, orig_node->bcast_own_sum, 589 memcpy(data_ptr, orig_node->bcast_own_sum,
579 del_if_num * sizeof(uint8_t)); 590 del_if_num * sizeof(uint8_t));
@@ -589,30 +600,32 @@ free_own_sum:
589 return 0; 600 return 0;
590} 601}
591 602
592int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) 603int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
604 int max_if_num)
593{ 605{
594 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 606 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
595 struct hashtable_t *hash = bat_priv->orig_hash; 607 struct batadv_hashtable *hash = bat_priv->orig_hash;
596 struct hlist_node *node; 608 struct hlist_node *node;
597 struct hlist_head *head; 609 struct hlist_head *head;
598 struct hard_iface *hard_iface_tmp; 610 struct batadv_hard_iface *hard_iface_tmp;
599 struct orig_node *orig_node; 611 struct batadv_orig_node *orig_node;
600 uint32_t i; 612 uint32_t i;
601 int ret; 613 int ret;
602 614
603 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 615 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
604 * if_num */ 616 * if_num
617 */
605 for (i = 0; i < hash->size; i++) { 618 for (i = 0; i < hash->size; i++) {
606 head = &hash->table[i]; 619 head = &hash->table[i];
607 620
608 rcu_read_lock(); 621 rcu_read_lock();
609 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 622 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
610 spin_lock_bh(&orig_node->ogm_cnt_lock); 623 spin_lock_bh(&orig_node->ogm_cnt_lock);
611 ret = orig_node_del_if(orig_node, max_if_num, 624 ret = batadv_orig_node_del_if(orig_node, max_if_num,
612 hard_iface->if_num); 625 hard_iface->if_num);
613 spin_unlock_bh(&orig_node->ogm_cnt_lock); 626 spin_unlock_bh(&orig_node->ogm_cnt_lock);
614 627
615 if (ret == -1) 628 if (ret == -ENOMEM)
616 goto err; 629 goto err;
617 } 630 }
618 rcu_read_unlock(); 631 rcu_read_unlock();
@@ -620,8 +633,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
620 633
621 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ 634 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
622 rcu_read_lock(); 635 rcu_read_lock();
623 list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { 636 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
624 if (hard_iface_tmp->if_status == IF_NOT_IN_USE) 637 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
625 continue; 638 continue;
626 639
627 if (hard_iface == hard_iface_tmp) 640 if (hard_iface == hard_iface_tmp)
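
Besides the batadv_ prefixes, the add_if/del_if helpers above switch their error return from a bare -1 to -ENOMEM, and the callers are updated to match. The resize itself stays an allocate-copy-free sequence (krealloc would not help del_if, which must stitch together the two disjoint chunks around the removed interface). A minimal userspace sketch of the grow case, with the NUM_WORDS value assumed for the demo:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_WORDS 2 /* assumption: bitmap words per interface, demo only */

/* Grow a per-interface array from (max_if_num - 1) to max_if_num slots,
 * preserving the old contents; returns 0 or -ENOMEM, matching the error
 * convention the patch introduces. The new slot is left uninitialized
 * in this sketch. */
static int bitmap_add_if(unsigned long **bcast_own, int max_if_num)
{
	size_t data_size = max_if_num * sizeof(unsigned long) * NUM_WORDS;
	size_t old_size = (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS;
	unsigned long *data_ptr;

	data_ptr = malloc(data_size);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, *bcast_own, old_size); /* keep existing interfaces */
	free(*bcast_own);
	*bcast_own = data_ptr;
	return 0;
}

int main(void)
{
	unsigned long *bcast_own = calloc(NUM_WORDS, sizeof(unsigned long));

	if (!bcast_own)
		return 1;
	bcast_own[0] = 0xff; /* pretend interface 0 already has history */

	if (bitmap_add_if(&bcast_own, 2) == -ENOMEM) {
		fprintf(stderr, "resize failed\n");
		return 1;
	}
	printf("slot 0 preserved: %#lx\n", bcast_own[0]);
	free(bcast_own);
	return 0;
}
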
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index f74d0d693359..9778e656dec7 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ 20#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
@@ -24,24 +22,29 @@
24 22
25#include "hash.h" 23#include "hash.h"
26 24
27int originator_init(struct bat_priv *bat_priv); 25int batadv_originator_init(struct batadv_priv *bat_priv);
28void originator_free(struct bat_priv *bat_priv); 26void batadv_originator_free(struct batadv_priv *bat_priv);
29void purge_orig_ref(struct bat_priv *bat_priv); 27void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
30void orig_node_free_ref(struct orig_node *orig_node); 28void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
31struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); 29struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
32struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, 30 const uint8_t *addr);
33 const uint8_t *neigh_addr, 31struct batadv_neigh_node *
34 uint32_t seqno); 32batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
35void neigh_node_free_ref(struct neigh_node *neigh_node); 33 const uint8_t *neigh_addr, uint32_t seqno);
36struct neigh_node *orig_node_get_router(struct orig_node *orig_node); 34void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
37int orig_seq_print_text(struct seq_file *seq, void *offset); 35struct batadv_neigh_node *
38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); 36batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 37int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
40 38int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
41 39 int max_if_num);
42/* hashfunction to choose an entry in a hash table of given size */ 40int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
43/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 41 int max_if_num);
44static inline uint32_t choose_orig(const void *data, uint32_t size) 42
43
44/* hashfunction to choose an entry in a hash table of given size
45 * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
46 */
47static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
45{ 48{
46 const unsigned char *key = data; 49 const unsigned char *key = data;
47 uint32_t hash = 0; 50 uint32_t hash = 0;
@@ -60,24 +63,24 @@ static inline uint32_t choose_orig(const void *data, uint32_t size)
60 return hash % size; 63 return hash % size;
61} 64}
62 65
63static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv, 66static inline struct batadv_orig_node *
64 const void *data) 67batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
65{ 68{
66 struct hashtable_t *hash = bat_priv->orig_hash; 69 struct batadv_hashtable *hash = bat_priv->orig_hash;
67 struct hlist_head *head; 70 struct hlist_head *head;
68 struct hlist_node *node; 71 struct hlist_node *node;
69 struct orig_node *orig_node, *orig_node_tmp = NULL; 72 struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
70 int index; 73 int index;
71 74
72 if (!hash) 75 if (!hash)
73 return NULL; 76 return NULL;
74 77
75 index = choose_orig(data, hash->size); 78 index = batadv_choose_orig(data, hash->size);
76 head = &hash->table[index]; 79 head = &hash->table[index];
77 80
78 rcu_read_lock(); 81 rcu_read_lock();
79 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 82 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
80 if (!compare_eth(orig_node, data)) 83 if (!batadv_compare_eth(orig_node, data))
81 continue; 84 continue;
82 85
83 if (!atomic_inc_not_zero(&orig_node->refcount)) 86 if (!atomic_inc_not_zero(&orig_node->refcount))
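
The hunk above elides the loop body of batadv_choose_orig(); per the function's own comment it is the one-at-a-time hash from the cited Wikipedia article, applied to the 6-byte originator MAC, and its result picks the bucket that batadv_orig_hash_find() then walks under RCU. The sketch below reconstructs the loop from that published algorithm rather than from the tree, so treat it as illustrative:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* one-at-a-time hash over a MAC address, reduced to a bucket index */
static uint32_t choose_orig(const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	unsigned int i;

	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size; /* index into a table of 'size' bucket heads */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bucket: %u of 1024\n", choose_orig(mac, 1024));
	return 0;
}
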
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 0ee1af770798..8d3e55a96adc 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,171 +15,172 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_PACKET_H_ 20#ifndef _NET_BATMAN_ADV_PACKET_H_
23#define _NET_BATMAN_ADV_PACKET_H_ 21#define _NET_BATMAN_ADV_PACKET_H_
24 22
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 23#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 24
27enum bat_packettype { 25enum batadv_packettype {
28 BAT_IV_OGM = 0x01, 26 BATADV_IV_OGM = 0x01,
29 BAT_ICMP = 0x02, 27 BATADV_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 28 BATADV_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 29 BATADV_BCAST = 0x04,
32 BAT_VIS = 0x05, 30 BATADV_VIS = 0x05,
33 BAT_UNICAST_FRAG = 0x06, 31 BATADV_UNICAST_FRAG = 0x06,
34 BAT_TT_QUERY = 0x07, 32 BATADV_TT_QUERY = 0x07,
35 BAT_ROAM_ADV = 0x08 33 BATADV_ROAM_ADV = 0x08,
36}; 34};
37 35
38/* this file is included by batctl which needs these defines */ 36/* this file is included by batctl which needs these defines */
39#define COMPAT_VERSION 14 37#define BATADV_COMPAT_VERSION 14
40 38
41enum batman_iv_flags { 39enum batadv_iv_flags {
42 NOT_BEST_NEXT_HOP = 1 << 3, 40 BATADV_NOT_BEST_NEXT_HOP = 1 << 3,
43 PRIMARIES_FIRST_HOP = 1 << 4, 41 BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
44 VIS_SERVER = 1 << 5, 42 BATADV_VIS_SERVER = 1 << 5,
45 DIRECTLINK = 1 << 6 43 BATADV_DIRECTLINK = 1 << 6,
46}; 44};
47 45
48/* ICMP message types */ 46/* ICMP message types */
49enum icmp_packettype { 47enum batadv_icmp_packettype {
50 ECHO_REPLY = 0, 48 BATADV_ECHO_REPLY = 0,
51 DESTINATION_UNREACHABLE = 3, 49 BATADV_DESTINATION_UNREACHABLE = 3,
52 ECHO_REQUEST = 8, 50 BATADV_ECHO_REQUEST = 8,
53 TTL_EXCEEDED = 11, 51 BATADV_TTL_EXCEEDED = 11,
54 PARAMETER_PROBLEM = 12 52 BATADV_PARAMETER_PROBLEM = 12,
55}; 53};
56 54
57/* vis defines */ 55/* vis defines */
58enum vis_packettype { 56enum batadv_vis_packettype {
59 VIS_TYPE_SERVER_SYNC = 0, 57 BATADV_VIS_TYPE_SERVER_SYNC = 0,
60 VIS_TYPE_CLIENT_UPDATE = 1 58 BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
61}; 59};
62 60
63/* fragmentation defines */ 61/* fragmentation defines */
64enum unicast_frag_flags { 62enum batadv_unicast_frag_flags {
65 UNI_FRAG_HEAD = 1 << 0, 63 BATADV_UNI_FRAG_HEAD = 1 << 0,
66 UNI_FRAG_LARGETAIL = 1 << 1 64 BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
67}; 65};
68 66
69/* TT_QUERY subtypes */ 67/* TT_QUERY subtypes */
70#define TT_QUERY_TYPE_MASK 0x3 68#define BATADV_TT_QUERY_TYPE_MASK 0x3
71 69
72enum tt_query_packettype { 70enum batadv_tt_query_packettype {
73 TT_REQUEST = 0, 71 BATADV_TT_REQUEST = 0,
74 TT_RESPONSE = 1 72 BATADV_TT_RESPONSE = 1,
75}; 73};
76 74
77/* TT_QUERY flags */ 75/* TT_QUERY flags */
78enum tt_query_flags { 76enum batadv_tt_query_flags {
79 TT_FULL_TABLE = 1 << 2 77 BATADV_TT_FULL_TABLE = 1 << 2,
80}; 78};
81 79
82/* TT_CLIENT flags. 80/* BATADV_TT_CLIENT flags.
83 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to 81 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
84 * 1 << 15 are used for local computation only */ 82 * 1 << 15 are used for local computation only
85enum tt_client_flags { 83 */
86 TT_CLIENT_DEL = 1 << 0, 84enum batadv_tt_client_flags {
87 TT_CLIENT_ROAM = 1 << 1, 85 BATADV_TT_CLIENT_DEL = 1 << 0,
88 TT_CLIENT_WIFI = 1 << 2, 86 BATADV_TT_CLIENT_ROAM = 1 << 1,
89 TT_CLIENT_NOPURGE = 1 << 8, 87 BATADV_TT_CLIENT_WIFI = 1 << 2,
90 TT_CLIENT_NEW = 1 << 9, 88 BATADV_TT_CLIENT_NOPURGE = 1 << 8,
91 TT_CLIENT_PENDING = 1 << 10 89 BATADV_TT_CLIENT_NEW = 1 << 9,
90 BATADV_TT_CLIENT_PENDING = 1 << 10,
92}; 91};
93 92
94/* claim frame types for the bridge loop avoidance */ 93/* claim frame types for the bridge loop avoidance */
95enum bla_claimframe { 94enum batadv_bla_claimframe {
96 CLAIM_TYPE_ADD = 0x00, 95 BATADV_CLAIM_TYPE_ADD = 0x00,
97 CLAIM_TYPE_DEL = 0x01, 96 BATADV_CLAIM_TYPE_DEL = 0x01,
98 CLAIM_TYPE_ANNOUNCE = 0x02, 97 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
99 CLAIM_TYPE_REQUEST = 0x03 98 BATADV_CLAIM_TYPE_REQUEST = 0x03,
100}; 99};
101 100
102/* the destination hardware field in the ARP frame is used to 101/* the destination hardware field in the ARP frame is used to
103 * transport the claim type and the group id 102 * transport the claim type and the group id
104 */ 103 */
105struct bla_claim_dst { 104struct batadv_bla_claim_dst {
106 uint8_t magic[3]; /* FF:43:05 */ 105 uint8_t magic[3]; /* FF:43:05 */
107 uint8_t type; /* bla_claimframe */ 106 uint8_t type; /* bla_claimframe */
108 uint16_t group; /* group id */ 107 __be16 group; /* group id */
109} __packed; 108} __packed;
110 109
111struct batman_header { 110struct batadv_header {
112 uint8_t packet_type; 111 uint8_t packet_type;
113 uint8_t version; /* batman version field */ 112 uint8_t version; /* batman version field */
114 uint8_t ttl; 113 uint8_t ttl;
115} __packed; 114} __packed;
116 115
117struct batman_ogm_packet { 116struct batadv_ogm_packet {
118 struct batman_header header; 117 struct batadv_header header;
119 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 118 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
120 uint32_t seqno; 119 __be32 seqno;
121 uint8_t orig[ETH_ALEN]; 120 uint8_t orig[ETH_ALEN];
122 uint8_t prev_sender[ETH_ALEN]; 121 uint8_t prev_sender[ETH_ALEN];
123 uint8_t gw_flags; /* flags related to gateway class */ 122 uint8_t gw_flags; /* flags related to gateway class */
124 uint8_t tq; 123 uint8_t tq;
125 uint8_t tt_num_changes; 124 uint8_t tt_num_changes;
126 uint8_t ttvn; /* translation table version number */ 125 uint8_t ttvn; /* translation table version number */
127 uint16_t tt_crc; 126 __be16 tt_crc;
128} __packed; 127} __packed;
129 128
130#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet) 129#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
131 130
132struct icmp_packet { 131struct batadv_icmp_packet {
133 struct batman_header header; 132 struct batadv_header header;
134 uint8_t msg_type; /* see ICMP message types above */ 133 uint8_t msg_type; /* see ICMP message types above */
135 uint8_t dst[ETH_ALEN]; 134 uint8_t dst[ETH_ALEN];
136 uint8_t orig[ETH_ALEN]; 135 uint8_t orig[ETH_ALEN];
137 uint16_t seqno; 136 __be16 seqno;
138 uint8_t uid; 137 uint8_t uid;
139 uint8_t reserved; 138 uint8_t reserved;
140} __packed; 139} __packed;
141 140
142#define BAT_RR_LEN 16 141#define BATADV_RR_LEN 16
143 142
144 /* icmp_packet_rr must start with all fields from icmp_packet 143 /* icmp_packet_rr must start with all fields from icmp_packet
145 * as this is assumed by code that handles ICMP packets */ 144 * as this is assumed by code that handles ICMP packets
146struct icmp_packet_rr { 145 */
147 struct batman_header header; 146struct batadv_icmp_packet_rr {
147 struct batadv_header header;
148 uint8_t msg_type; /* see ICMP message types above */ 148 uint8_t msg_type; /* see ICMP message types above */
149 uint8_t dst[ETH_ALEN]; 149 uint8_t dst[ETH_ALEN];
150 uint8_t orig[ETH_ALEN]; 150 uint8_t orig[ETH_ALEN];
151 uint16_t seqno; 151 __be16 seqno;
152 uint8_t uid; 152 uint8_t uid;
153 uint8_t rr_cur; 153 uint8_t rr_cur;
154 uint8_t rr[BAT_RR_LEN][ETH_ALEN]; 154 uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
155} __packed; 155} __packed;
156 156
157struct unicast_packet { 157struct batadv_unicast_packet {
158 struct batman_header header; 158 struct batadv_header header;
159 uint8_t ttvn; /* destination translation table version number */ 159 uint8_t ttvn; /* destination translation table version number */
160 uint8_t dest[ETH_ALEN]; 160 uint8_t dest[ETH_ALEN];
161} __packed; 161} __packed;
162 162
163struct unicast_frag_packet { 163struct batadv_unicast_frag_packet {
164 struct batman_header header; 164 struct batadv_header header;
165 uint8_t ttvn; /* destination translation table version number */ 165 uint8_t ttvn; /* destination translation table version number */
166 uint8_t dest[ETH_ALEN]; 166 uint8_t dest[ETH_ALEN];
167 uint8_t flags; 167 uint8_t flags;
168 uint8_t align; 168 uint8_t align;
169 uint8_t orig[ETH_ALEN]; 169 uint8_t orig[ETH_ALEN];
170 uint16_t seqno; 170 __be16 seqno;
171} __packed; 171} __packed;
172 172
173struct bcast_packet { 173struct batadv_bcast_packet {
174 struct batman_header header; 174 struct batadv_header header;
175 uint8_t reserved; 175 uint8_t reserved;
176 uint32_t seqno; 176 __be32 seqno;
177 uint8_t orig[ETH_ALEN]; 177 uint8_t orig[ETH_ALEN];
178} __packed; 178} __packed;
179 179
180struct vis_packet { 180struct batadv_vis_packet {
181 struct batman_header header; 181 struct batadv_header header;
182 uint8_t vis_type; /* which type of vis-participant sent this? */ 182 uint8_t vis_type; /* which type of vis-participant sent this? */
183 uint32_t seqno; /* sequence number */ 183 __be32 seqno; /* sequence number */
184 uint8_t entries; /* number of entries behind this struct */ 184 uint8_t entries; /* number of entries behind this struct */
185 uint8_t reserved; 185 uint8_t reserved;
186 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ 186 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
@@ -188,11 +188,12 @@ struct vis_packet {
188 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ 188 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
189} __packed; 189} __packed;
190 190
191struct tt_query_packet { 191struct batadv_tt_query_packet {
192 struct batman_header header; 192 struct batadv_header header;
193 /* the flag field is a combination of: 193 /* the flag field is a combination of:
194 * - TT_REQUEST or TT_RESPONSE 194 * - TT_REQUEST or TT_RESPONSE
195 * - TT_FULL_TABLE */ 195 * - TT_FULL_TABLE
196 */
196 uint8_t flags; 197 uint8_t flags;
197 uint8_t dst[ETH_ALEN]; 198 uint8_t dst[ETH_ALEN];
198 uint8_t src[ETH_ALEN]; 199 uint8_t src[ETH_ALEN];
@@ -200,24 +201,26 @@ struct tt_query_packet {
200 * if TT_REQUEST: ttvn that triggered the 201 * if TT_REQUEST: ttvn that triggered the
201 * request 202 * request
202 * if TT_RESPONSE: new ttvn for the src 203 * if TT_RESPONSE: new ttvn for the src
203 * orig_node */ 204 * orig_node
205 */
204 uint8_t ttvn; 206 uint8_t ttvn;
205 /* tt_data field is: 207 /* tt_data field is:
206 * if TT_REQUEST: crc associated with the 208 * if TT_REQUEST: crc associated with the
207 * ttvn 209 * ttvn
208 * if TT_RESPONSE: table_size */ 210 * if TT_RESPONSE: table_size
209 uint16_t tt_data; 211 */
212 __be16 tt_data;
210} __packed; 213} __packed;
211 214
212struct roam_adv_packet { 215struct batadv_roam_adv_packet {
213 struct batman_header header; 216 struct batadv_header header;
214 uint8_t reserved; 217 uint8_t reserved;
215 uint8_t dst[ETH_ALEN]; 218 uint8_t dst[ETH_ALEN];
216 uint8_t src[ETH_ALEN]; 219 uint8_t src[ETH_ALEN];
217 uint8_t client[ETH_ALEN]; 220 uint8_t client[ETH_ALEN];
218} __packed; 221} __packed;
219 222
220struct tt_change { 223struct batadv_tt_change {
221 uint8_t flags; 224 uint8_t flags;
222 uint8_t addr[ETH_ALEN]; 225 uint8_t addr[ETH_ALEN];
223} __packed; 226} __packed;
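
The substantive change in packet.h, beyond the prefix rename, is that every multi-byte on-wire field (seqno, tt_crc, tt_data, group) is retyped from uint16_t/uint32_t to __be16/__be32, so sparse can flag a missing byte-order conversion. A userspace sketch of the discipline this enforces, using htonl/ntohl as stand-ins for the kernel's cpu_to_be32()/be32_to_cpu(); the struct here is trimmed and illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* trimmed wire struct; in the kernel the seqno field is declared __be32,
 * which makes an unconverted assignment a sparse warning */
struct ogm_wire {
	uint8_t packet_type;
	uint8_t version;
	uint8_t ttl;
	uint8_t flags;
	uint32_t seqno; /* stored big-endian, as __be32 documents */
} __attribute__((packed));

int main(void)
{
	struct ogm_wire pkt = { .packet_type = 0x01, .version = 14 };
	unsigned char raw[sizeof(pkt)];

	pkt.seqno = htonl(0x12345678); /* host -> wire; cpu_to_be32() in-kernel */
	memcpy(raw, &pkt, sizeof(pkt));

	/* the wire byte order is now the same on every host */
	printf("seqno bytes: %02x %02x %02x %02x\n",
	       raw[4], raw[5], raw[6], raw[7]); /* 12 34 56 78 everywhere */
	printf("host order again: %#x\n", (unsigned)ntohl(pkt.seqno));
	return 0;
}
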
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index fd63951d118d..c8f61e395b74 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,26 +15,26 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "ring_buffer.h" 21#include "ring_buffer.h"
24 22
25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value) 23void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
24 uint8_t value)
26{ 25{
27 lq_recv[*lq_index] = value; 26 lq_recv[*lq_index] = value;
28 *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE; 27 *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
29} 28}
30 29
31uint8_t ring_buffer_avg(const uint8_t lq_recv[]) 30uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
32{ 31{
33 const uint8_t *ptr; 32 const uint8_t *ptr;
34 uint16_t count = 0, i = 0, sum = 0; 33 uint16_t count = 0, i = 0, sum = 0;
35 34
36 ptr = lq_recv; 35 ptr = lq_recv;
37 36
38 while (i < TQ_GLOBAL_WINDOW_SIZE) { 37 while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
39 if (*ptr != 0) { 38 if (*ptr != 0) {
40 count++; 39 count++;
41 sum += *ptr; 40 sum += *ptr;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 8b58bd82767d..fda8c17df273 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,13 +15,13 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_ 20#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
23#define _NET_BATMAN_ADV_RING_BUFFER_H_ 21#define _NET_BATMAN_ADV_RING_BUFFER_H_
24 22
25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value); 23void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
26uint8_t ring_buffer_avg(const uint8_t lq_recv[]); 24 uint8_t value);
25uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]);
27 26
28#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */ 27#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
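
The two ring-buffer helpers maintain a fixed sliding window of received link-quality samples; the average deliberately skips zero slots so a half-filled window is not dragged down by its empty entries. A userspace port for illustration: the loop tail cut off by the hunk above is reconstructed from the obvious intent, and the window size of 5 is an assumption standing in for BATADV_TQ_GLOBAL_WINDOW_SIZE from main.h, which is outside this diff.

#include <stdint.h>
#include <stdio.h>

#define TQ_GLOBAL_WINDOW_SIZE 5 /* assumed for the demo */

/* store one sample and advance the write index, wrapping at window end */
static void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
			    uint8_t value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
}

/* average over non-zero slots only, so unfilled entries carry no weight */
static uint8_t ring_buffer_avg(const uint8_t lq_recv[])
{
	const uint8_t *ptr = lq_recv;
	uint16_t count = 0, i = 0, sum = 0;

	while (i < TQ_GLOBAL_WINDOW_SIZE) {
		if (*ptr != 0) {
			count++;
			sum += *ptr;
		}
		i++;
		ptr++;
	}

	return count == 0 ? 0 : (uint8_t)(sum / count);
}

int main(void)
{
	uint8_t window[TQ_GLOBAL_WINDOW_SIZE] = { 0 };
	uint8_t idx = 0;

	ring_buffer_set(window, &idx, 200);
	ring_buffer_set(window, &idx, 100);
	printf("avg after 2 samples: %u\n", ring_buffer_avg(window)); /* 150 */
	return 0;
}
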
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 840e2c64a301..bc2b88bbea1f 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -31,19 +29,20 @@
31#include "unicast.h" 29#include "unicast.h"
32#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
33 31
34static int route_unicast_packet(struct sk_buff *skb, 32static int batadv_route_unicast_packet(struct sk_buff *skb,
35 struct hard_iface *recv_if); 33 struct batadv_hard_iface *recv_if);
36 34
37void slide_own_bcast_window(struct hard_iface *hard_iface) 35void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
38{ 36{
39 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 37 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
40 struct hashtable_t *hash = bat_priv->orig_hash; 38 struct batadv_hashtable *hash = bat_priv->orig_hash;
41 struct hlist_node *node; 39 struct hlist_node *node;
42 struct hlist_head *head; 40 struct hlist_head *head;
43 struct orig_node *orig_node; 41 struct batadv_orig_node *orig_node;
44 unsigned long *word; 42 unsigned long *word;
45 uint32_t i; 43 uint32_t i;
46 size_t word_index; 44 size_t word_index;
45 uint8_t *w;
47 46
48 for (i = 0; i < hash->size; i++) { 47 for (i = 0; i < hash->size; i++) {
49 head = &hash->table[i]; 48 head = &hash->table[i];
@@ -51,49 +50,49 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
51 rcu_read_lock(); 50 rcu_read_lock();
52 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 51 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
53 spin_lock_bh(&orig_node->ogm_cnt_lock); 52 spin_lock_bh(&orig_node->ogm_cnt_lock);
54 word_index = hard_iface->if_num * NUM_WORDS; 53 word_index = hard_iface->if_num * BATADV_NUM_WORDS;
55 word = &(orig_node->bcast_own[word_index]); 54 word = &(orig_node->bcast_own[word_index]);
56 55
57 bit_get_packet(bat_priv, word, 1, 0); 56 batadv_bit_get_packet(bat_priv, word, 1, 0);
58 orig_node->bcast_own_sum[hard_iface->if_num] = 57 w = &orig_node->bcast_own_sum[hard_iface->if_num];
59 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); 58 *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
60 spin_unlock_bh(&orig_node->ogm_cnt_lock); 59 spin_unlock_bh(&orig_node->ogm_cnt_lock);
61 } 60 }
62 rcu_read_unlock(); 61 rcu_read_unlock();
63 } 62 }
64} 63}
65 64
66static void _update_route(struct bat_priv *bat_priv, 65static void _batadv_update_route(struct batadv_priv *bat_priv,
67 struct orig_node *orig_node, 66 struct batadv_orig_node *orig_node,
68 struct neigh_node *neigh_node) 67 struct batadv_neigh_node *neigh_node)
69{ 68{
70 struct neigh_node *curr_router; 69 struct batadv_neigh_node *curr_router;
71 70
72 curr_router = orig_node_get_router(orig_node); 71 curr_router = batadv_orig_node_get_router(orig_node);
73 72
74 /* route deleted */ 73 /* route deleted */
75 if ((curr_router) && (!neigh_node)) { 74 if ((curr_router) && (!neigh_node)) {
76 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 75 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
77 orig_node->orig); 76 "Deleting route towards: %pM\n", orig_node->orig);
78 tt_global_del_orig(bat_priv, orig_node, 77 batadv_tt_global_del_orig(bat_priv, orig_node,
79 "Deleted route towards originator"); 78 "Deleted route towards originator");
80 79
81 /* route added */ 80 /* route added */
82 } else if ((!curr_router) && (neigh_node)) { 81 } else if ((!curr_router) && (neigh_node)) {
83 82
84 bat_dbg(DBG_ROUTES, bat_priv, 83 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
85 "Adding route towards: %pM (via %pM)\n", 84 "Adding route towards: %pM (via %pM)\n",
86 orig_node->orig, neigh_node->addr); 85 orig_node->orig, neigh_node->addr);
87 /* route changed */ 86 /* route changed */
88 } else if (neigh_node && curr_router) { 87 } else if (neigh_node && curr_router) {
89 bat_dbg(DBG_ROUTES, bat_priv, 88 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
90 "Changing route towards: %pM (now via %pM - was via %pM)\n", 89 "Changing route towards: %pM (now via %pM - was via %pM)\n",
91 orig_node->orig, neigh_node->addr, 90 orig_node->orig, neigh_node->addr,
92 curr_router->addr); 91 curr_router->addr);
93 } 92 }
94 93
95 if (curr_router) 94 if (curr_router)
96 neigh_node_free_ref(curr_router); 95 batadv_neigh_node_free_ref(curr_router);
97 96
98 /* increase refcount of new best neighbor */ 97 /* increase refcount of new best neighbor */
99 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 98 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
@@ -105,30 +104,31 @@ static void _update_route(struct bat_priv *bat_priv,
105 104
106 /* decrease refcount of previous best neighbor */ 105 /* decrease refcount of previous best neighbor */
107 if (curr_router) 106 if (curr_router)
108 neigh_node_free_ref(curr_router); 107 batadv_neigh_node_free_ref(curr_router);
109} 108}
110 109
111void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 110void batadv_update_route(struct batadv_priv *bat_priv,
112 struct neigh_node *neigh_node) 111 struct batadv_orig_node *orig_node,
112 struct batadv_neigh_node *neigh_node)
113{ 113{
114 struct neigh_node *router = NULL; 114 struct batadv_neigh_node *router = NULL;
115 115
116 if (!orig_node) 116 if (!orig_node)
117 goto out; 117 goto out;
118 118
119 router = orig_node_get_router(orig_node); 119 router = batadv_orig_node_get_router(orig_node);
120 120
121 if (router != neigh_node) 121 if (router != neigh_node)
122 _update_route(bat_priv, orig_node, neigh_node); 122 _batadv_update_route(bat_priv, orig_node, neigh_node);
123 123
124out: 124out:
125 if (router) 125 if (router)
126 neigh_node_free_ref(router); 126 batadv_neigh_node_free_ref(router);
127} 127}
128 128
129/* caller must hold the neigh_list_lock */ 129/* caller must hold the neigh_list_lock */
130void bonding_candidate_del(struct orig_node *orig_node, 130void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
131 struct neigh_node *neigh_node) 131 struct batadv_neigh_node *neigh_node)
132{ 132{
133 /* this neighbor is not part of our candidate list */ 133 /* this neighbor is not part of our candidate list */
134 if (list_empty(&neigh_node->bonding_list)) 134 if (list_empty(&neigh_node->bonding_list))
@@ -136,37 +136,36 @@ void bonding_candidate_del(struct orig_node *orig_node,
136 136
137 list_del_rcu(&neigh_node->bonding_list); 137 list_del_rcu(&neigh_node->bonding_list);
138 INIT_LIST_HEAD(&neigh_node->bonding_list); 138 INIT_LIST_HEAD(&neigh_node->bonding_list);
139 neigh_node_free_ref(neigh_node); 139 batadv_neigh_node_free_ref(neigh_node);
140 atomic_dec(&orig_node->bond_candidates); 140 atomic_dec(&orig_node->bond_candidates);
141 141
142out: 142out:
143 return; 143 return;
144} 144}
145 145
146void bonding_candidate_add(struct orig_node *orig_node, 146void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
147 struct neigh_node *neigh_node) 147 struct batadv_neigh_node *neigh_node)
148{ 148{
149 struct hlist_node *node; 149 struct hlist_node *node;
150 struct neigh_node *tmp_neigh_node, *router = NULL; 150 struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
151 uint8_t interference_candidate = 0; 151 uint8_t interference_candidate = 0;
152 152
153 spin_lock_bh(&orig_node->neigh_list_lock); 153 spin_lock_bh(&orig_node->neigh_list_lock);
154 154
155 /* only consider if it has the same primary address ... */ 155 /* only consider if it has the same primary address ... */
156 if (!compare_eth(orig_node->orig, 156 if (!batadv_compare_eth(orig_node->orig,
157 neigh_node->orig_node->primary_addr)) 157 neigh_node->orig_node->primary_addr))
158 goto candidate_del; 158 goto candidate_del;
159 159
160 router = orig_node_get_router(orig_node); 160 router = batadv_orig_node_get_router(orig_node);
161 if (!router) 161 if (!router)
162 goto candidate_del; 162 goto candidate_del;
163 163
164 /* ... and is good enough to be considered */ 164 /* ... and is good enough to be considered */
165 if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD) 165 if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
166 goto candidate_del; 166 goto candidate_del;
167 167
168 /** 168 /* check if we have another candidate with the same mac address or
169 * check if we have another candidate with the same mac address or
170 * interface. If we do, we won't select this candidate because of 169 * interface. If we do, we won't select this candidate because of
171 * possible interference. 170 * possible interference.
172 */ 171 */
@@ -177,12 +176,14 @@ void bonding_candidate_add(struct orig_node *orig_node,
177 continue; 176 continue;
178 177
179 /* we only care if the other candidate is even 178 /* we only care if the other candidate is even
180 * considered as candidate. */ 179 * considered as candidate.
180 */
181 if (list_empty(&tmp_neigh_node->bonding_list)) 181 if (list_empty(&tmp_neigh_node->bonding_list))
182 continue; 182 continue;
183 183
184 if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) || 184 if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
185 (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) { 185 (batadv_compare_eth(neigh_node->addr,
186 tmp_neigh_node->addr))) {
186 interference_candidate = 1; 187 interference_candidate = 1;
187 break; 188 break;
188 } 189 }
@@ -204,21 +205,22 @@ void bonding_candidate_add(struct orig_node *orig_node,
204 goto out; 205 goto out;
205 206
206candidate_del: 207candidate_del:
207 bonding_candidate_del(orig_node, neigh_node); 208 batadv_bonding_candidate_del(orig_node, neigh_node);
208 209
209out: 210out:
210 spin_unlock_bh(&orig_node->neigh_list_lock); 211 spin_unlock_bh(&orig_node->neigh_list_lock);
211 212
212 if (router) 213 if (router)
213 neigh_node_free_ref(router); 214 batadv_neigh_node_free_ref(router);
214} 215}
215 216
216/* copy primary address for bonding */ 217/* copy primary address for bonding */
217void bonding_save_primary(const struct orig_node *orig_node, 218void
218 struct orig_node *orig_neigh_node, 219batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
219 const struct batman_ogm_packet *batman_ogm_packet) 220 struct batadv_orig_node *orig_neigh_node,
221 const struct batadv_ogm_packet *batman_ogm_packet)
220{ 222{
221 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 223 if (!(batman_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
222 return; 224 return;
223 225
224 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); 226 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
@@ -229,25 +231,26 @@ void bonding_save_primary(const struct orig_node *orig_node,
229 * 0 if the packet is to be accepted 231 * 0 if the packet is to be accepted
230 * 1 if the packet is to be ignored. 232 * 1 if the packet is to be ignored.
231 */ 233 */
232int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 234int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
233 unsigned long *last_reset) 235 unsigned long *last_reset)
234{ 236{
235 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || 237 if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
236 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 238 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
237 if (!has_timed_out(*last_reset, RESET_PROTECTION_MS)) 239 if (!batadv_has_timed_out(*last_reset,
240 BATADV_RESET_PROTECTION_MS))
238 return 1; 241 return 1;
239 242
240 *last_reset = jiffies; 243 *last_reset = jiffies;
241 bat_dbg(DBG_BATMAN, bat_priv, 244 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
242 "old packet received, start protection\n"); 245 "old packet received, start protection\n");
243 } 246 }
244 247
245 return 0; 248 return 0;
246} 249}
247 250
248bool check_management_packet(struct sk_buff *skb, 251bool batadv_check_management_packet(struct sk_buff *skb,
249 struct hard_iface *hard_iface, 252 struct batadv_hard_iface *hard_iface,
250 int header_len) 253 int header_len)
251{ 254{
252 struct ethhdr *ethhdr; 255 struct ethhdr *ethhdr;
253 256
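
batadv_window_protected(), renamed in the hunk above, guards the per-originator sequence-number state: a packet whose seqno differs too wildly from the current window is ignored unless the protection interval has already elapsed, in which case one reset is allowed and the timer restarts. A standalone sketch with assumed constants (the real values live in main.h, outside this diff):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TQ_LOCAL_WINDOW_SIZE 64   /* assumed demo values */
#define EXPECTED_SEQNO_RANGE 65536
#define RESET_PROTECTION_MS 30000

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* returns 1 if the packet is to be ignored, 0 if it is to be accepted,
 * the same contract the kernel function documents */
static int window_protected(int32_t seq_num_diff, long *last_reset)
{
	if (seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE ||
	    seq_num_diff >= EXPECTED_SEQNO_RANGE) {
		/* still inside the protection window: ignore the packet */
		if (now_ms() - *last_reset < RESET_PROTECTION_MS)
			return 1;

		/* protection expired: accept once and restart the timer */
		*last_reset = now_ms();
		printf("old packet received, start protection\n");
	}

	return 0;
}

int main(void)
{
	long last_reset = now_ms();

	printf("in-window diff: %d\n", window_protected(3, &last_reset));
	printf("wild diff:      %d\n", window_protected(-1000, &last_reset));
	return 0;
}
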
@@ -276,34 +279,34 @@ bool check_management_packet(struct sk_buff *skb,
276 return true; 279 return true;
277} 280}
278 281
279static int recv_my_icmp_packet(struct bat_priv *bat_priv, 282static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
280 struct sk_buff *skb, size_t icmp_len) 283 struct sk_buff *skb, size_t icmp_len)
281{ 284{
282 struct hard_iface *primary_if = NULL; 285 struct batadv_hard_iface *primary_if = NULL;
283 struct orig_node *orig_node = NULL; 286 struct batadv_orig_node *orig_node = NULL;
284 struct neigh_node *router = NULL; 287 struct batadv_neigh_node *router = NULL;
285 struct icmp_packet_rr *icmp_packet; 288 struct batadv_icmp_packet_rr *icmp_packet;
286 int ret = NET_RX_DROP; 289 int ret = NET_RX_DROP;
287 290
288 icmp_packet = (struct icmp_packet_rr *)skb->data; 291 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
289 292
290 /* add data to device queue */ 293 /* add data to device queue */
291 if (icmp_packet->msg_type != ECHO_REQUEST) { 294 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
292 bat_socket_receive_packet(icmp_packet, icmp_len); 295 batadv_socket_receive_packet(icmp_packet, icmp_len);
293 goto out; 296 goto out;
294 } 297 }
295 298
296 primary_if = primary_if_get_selected(bat_priv); 299 primary_if = batadv_primary_if_get_selected(bat_priv);
297 if (!primary_if) 300 if (!primary_if)
298 goto out; 301 goto out;
299 302
300 /* answer echo request (ping) */ 303 /* answer echo request (ping) */
301 /* get routing information */ 304 /* get routing information */
302 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 305 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
303 if (!orig_node) 306 if (!orig_node)
304 goto out; 307 goto out;
305 308
306 router = orig_node_get_router(orig_node); 309 router = batadv_orig_node_get_router(orig_node);
307 if (!router) 310 if (!router)
308 goto out; 311 goto out;
309 312
@@ -311,54 +314,54 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
311 if (skb_cow(skb, ETH_HLEN) < 0) 314 if (skb_cow(skb, ETH_HLEN) < 0)
312 goto out; 315 goto out;
313 316
314 icmp_packet = (struct icmp_packet_rr *)skb->data; 317 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
315 318
316 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 319 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
317 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 320 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
318 icmp_packet->msg_type = ECHO_REPLY; 321 icmp_packet->msg_type = BATADV_ECHO_REPLY;
319 icmp_packet->header.ttl = TTL; 322 icmp_packet->header.ttl = BATADV_TTL;
320 323
321 send_skb_packet(skb, router->if_incoming, router->addr); 324 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
322 ret = NET_RX_SUCCESS; 325 ret = NET_RX_SUCCESS;
323 326
324out: 327out:
325 if (primary_if) 328 if (primary_if)
326 hardif_free_ref(primary_if); 329 batadv_hardif_free_ref(primary_if);
327 if (router) 330 if (router)
328 neigh_node_free_ref(router); 331 batadv_neigh_node_free_ref(router);
329 if (orig_node) 332 if (orig_node)
330 orig_node_free_ref(orig_node); 333 batadv_orig_node_free_ref(orig_node);
331 return ret; 334 return ret;
332} 335}
333 336
334static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, 337static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
335 struct sk_buff *skb) 338 struct sk_buff *skb)
336{ 339{
337 struct hard_iface *primary_if = NULL; 340 struct batadv_hard_iface *primary_if = NULL;
338 struct orig_node *orig_node = NULL; 341 struct batadv_orig_node *orig_node = NULL;
339 struct neigh_node *router = NULL; 342 struct batadv_neigh_node *router = NULL;
340 struct icmp_packet *icmp_packet; 343 struct batadv_icmp_packet *icmp_packet;
341 int ret = NET_RX_DROP; 344 int ret = NET_RX_DROP;
342 345
343 icmp_packet = (struct icmp_packet *)skb->data; 346 icmp_packet = (struct batadv_icmp_packet *)skb->data;
344 347
345 /* send TTL exceeded if packet is an echo request (traceroute) */ 348 /* send TTL exceeded if packet is an echo request (traceroute) */
346 if (icmp_packet->msg_type != ECHO_REQUEST) { 349 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
347 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", 350 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
348 icmp_packet->orig, icmp_packet->dst); 351 icmp_packet->orig, icmp_packet->dst);
349 goto out; 352 goto out;
350 } 353 }
351 354
352 primary_if = primary_if_get_selected(bat_priv); 355 primary_if = batadv_primary_if_get_selected(bat_priv);
353 if (!primary_if) 356 if (!primary_if)
354 goto out; 357 goto out;
355 358
356 /* get routing information */ 359 /* get routing information */
357 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 360 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
358 if (!orig_node) 361 if (!orig_node)
359 goto out; 362 goto out;
360 363
361 router = orig_node_get_router(orig_node); 364 router = batadv_orig_node_get_router(orig_node);
362 if (!router) 365 if (!router)
363 goto out; 366 goto out;
364 367
@@ -366,42 +369,41 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
366 if (skb_cow(skb, ETH_HLEN) < 0) 369 if (skb_cow(skb, ETH_HLEN) < 0)
367 goto out; 370 goto out;
368 371
369 icmp_packet = (struct icmp_packet *)skb->data; 372 icmp_packet = (struct batadv_icmp_packet *)skb->data;
370 373
371 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 374 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
372 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 375 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
373 icmp_packet->msg_type = TTL_EXCEEDED; 376 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
374 icmp_packet->header.ttl = TTL; 377 icmp_packet->header.ttl = BATADV_TTL;
375 378
376 send_skb_packet(skb, router->if_incoming, router->addr); 379 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
377 ret = NET_RX_SUCCESS; 380 ret = NET_RX_SUCCESS;
378 381
379out: 382out:
380 if (primary_if) 383 if (primary_if)
381 hardif_free_ref(primary_if); 384 batadv_hardif_free_ref(primary_if);
382 if (router) 385 if (router)
383 neigh_node_free_ref(router); 386 batadv_neigh_node_free_ref(router);
384 if (orig_node) 387 if (orig_node)
385 orig_node_free_ref(orig_node); 388 batadv_orig_node_free_ref(orig_node);
386 return ret; 389 return ret;
387} 390}
388 391
389 392
390int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) 393int batadv_recv_icmp_packet(struct sk_buff *skb,
394 struct batadv_hard_iface *recv_if)
391{ 395{
392 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 396 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
393 struct icmp_packet_rr *icmp_packet; 397 struct batadv_icmp_packet_rr *icmp_packet;
394 struct ethhdr *ethhdr; 398 struct ethhdr *ethhdr;
395 struct orig_node *orig_node = NULL; 399 struct batadv_orig_node *orig_node = NULL;
396 struct neigh_node *router = NULL; 400 struct batadv_neigh_node *router = NULL;
397 int hdr_size = sizeof(struct icmp_packet); 401 int hdr_size = sizeof(struct batadv_icmp_packet);
398 int ret = NET_RX_DROP; 402 int ret = NET_RX_DROP;
399 403
400 /** 404 /* we truncate all incoming icmp packets if they don't match our size */
401 * we truncate all incoming icmp packets if they don't match our size 405 if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
402 */ 406 hdr_size = sizeof(struct batadv_icmp_packet_rr);
403 if (skb->len >= sizeof(struct icmp_packet_rr))
404 hdr_size = sizeof(struct icmp_packet_rr);
405 407
406 /* drop packet if it has not necessary minimum size */ 408 /* drop packet if it has not necessary minimum size */
407 if (unlikely(!pskb_may_pull(skb, hdr_size))) 409 if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -418,33 +420,33 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
418 goto out; 420 goto out;
419 421
420 /* not for me */ 422 /* not for me */
421 if (!is_my_mac(ethhdr->h_dest)) 423 if (!batadv_is_my_mac(ethhdr->h_dest))
422 goto out; 424 goto out;
423 425
424 icmp_packet = (struct icmp_packet_rr *)skb->data; 426 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
425 427
426 /* add record route information if not full */ 428 /* add record route information if not full */
427 if ((hdr_size == sizeof(struct icmp_packet_rr)) && 429 if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
428 (icmp_packet->rr_cur < BAT_RR_LEN)) { 430 (icmp_packet->rr_cur < BATADV_RR_LEN)) {
429 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), 431 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
430 ethhdr->h_dest, ETH_ALEN); 432 ethhdr->h_dest, ETH_ALEN);
431 icmp_packet->rr_cur++; 433 icmp_packet->rr_cur++;
432 } 434 }
433 435
434 /* packet for me */ 436 /* packet for me */
435 if (is_my_mac(icmp_packet->dst)) 437 if (batadv_is_my_mac(icmp_packet->dst))
436 return recv_my_icmp_packet(bat_priv, skb, hdr_size); 438 return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
437 439
438 /* TTL exceeded */ 440 /* TTL exceeded */
439 if (icmp_packet->header.ttl < 2) 441 if (icmp_packet->header.ttl < 2)
440 return recv_icmp_ttl_exceeded(bat_priv, skb); 442 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
441 443
442 /* get routing information */ 444 /* get routing information */
443 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 445 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
444 if (!orig_node) 446 if (!orig_node)
445 goto out; 447 goto out;
446 448
447 router = orig_node_get_router(orig_node); 449 router = batadv_orig_node_get_router(orig_node);
448 if (!router) 450 if (!router)
449 goto out; 451 goto out;
450 452
@@ -452,20 +454,20 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
452 if (skb_cow(skb, ETH_HLEN) < 0) 454 if (skb_cow(skb, ETH_HLEN) < 0)
453 goto out; 455 goto out;
454 456
455 icmp_packet = (struct icmp_packet_rr *)skb->data; 457 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
456 458
457 /* decrement ttl */ 459 /* decrement ttl */
458 icmp_packet->header.ttl--; 460 icmp_packet->header.ttl--;
459 461
460 /* route it */ 462 /* route it */
461 send_skb_packet(skb, router->if_incoming, router->addr); 463 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
462 ret = NET_RX_SUCCESS; 464 ret = NET_RX_SUCCESS;
463 465
464out: 466out:
465 if (router) 467 if (router)
466 neigh_node_free_ref(router); 468 batadv_neigh_node_free_ref(router);
467 if (orig_node) 469 if (orig_node)
468 orig_node_free_ref(orig_node); 470 batadv_orig_node_free_ref(orig_node);
469 return ret; 471 return ret;
470} 472}
471 473
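The record-route block above copies the receiving interface's MAC into a fixed array inside the ICMP packet and bumps rr_cur, stopping once BATADV_RR_LEN slots are used so forwarding continues even when the route log is full. A standalone C sketch of that bounds-checked append (struct layout and names are illustrative, not the kernel's):

    /* Model of the record-route append in batadv_recv_icmp_packet(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RR_LEN   16
    #define ETH_ALEN 6

    struct icmp_rr {
        uint8_t rr_cur;                 /* next free slot */
        uint8_t rr[RR_LEN][ETH_ALEN];   /* recorded hops */
    };

    static void rr_append(struct icmp_rr *p, const uint8_t *mac)
    {
        if (p->rr_cur >= RR_LEN)        /* log full: keep forwarding, stop recording */
            return;
        memcpy(p->rr[p->rr_cur], mac, ETH_ALEN);
        p->rr_cur++;
    }

    int main(void)
    {
        struct icmp_rr pkt = { 0 };
        const uint8_t hop[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        rr_append(&pkt, hop);
        printf("hops recorded: %d\n", pkt.rr_cur);
        return 0;
    }
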
@@ -473,12 +475,14 @@ out:
473 * robin fashion over the remaining interfaces. 475 * robin fashion over the remaining interfaces.
474 * 476 *
475 * This method rotates the bonding list and increases the 477 * This method rotates the bonding list and increases the
476 * returned router's refcount. */ 478 * returned router's refcount.
477static struct neigh_node *find_bond_router(struct orig_node *primary_orig, 479 */
478 const struct hard_iface *recv_if) 480static struct batadv_neigh_node *
481batadv_find_bond_router(struct batadv_orig_node *primary_orig,
482 const struct batadv_hard_iface *recv_if)
479{ 483{
480 struct neigh_node *tmp_neigh_node; 484 struct batadv_neigh_node *tmp_neigh_node;
481 struct neigh_node *router = NULL, *first_candidate = NULL; 485 struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
482 486
483 rcu_read_lock(); 487 rcu_read_lock();
484 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, 488 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -506,10 +510,12 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
506 goto out; 510 goto out;
507 511
508 /* selected should point to the next element 512 /* selected should point to the next element
509 * after the current router */ 513 * after the current router
514 */
510 spin_lock_bh(&primary_orig->neigh_list_lock); 515 spin_lock_bh(&primary_orig->neigh_list_lock);
511 /* this is a list_move(), which unfortunately 516 /* this is a list_move(), which unfortunately
512 * does not exist as rcu version */ 517 * does not exist as rcu version
518 */
513 list_del_rcu(&primary_orig->bond_list); 519 list_del_rcu(&primary_orig->bond_list);
514 list_add_rcu(&primary_orig->bond_list, 520 list_add_rcu(&primary_orig->bond_list,
515 &router->bonding_list); 521 &router->bonding_list);
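The list_del_rcu()/list_add_rcu() pair above emulates the missing RCU list_move(): the bonding head is re-inserted after the chosen router so the next lookup starts at the following neighbor. A userspace model of that round-robin rotation, with a plain array standing in for the RCU list and all locking and refcounting omitted:

    /* Round-robin candidate selection as in batadv_find_bond_router(). */
    #include <stdio.h>

    #define NUM_CAND 3

    static const char *cand[NUM_CAND] = { "neigh-A", "neigh-B", "neigh-C" };
    static int next_idx;    /* index of the next candidate to try */

    static const char *pick_bond_router(void)
    {
        const char *chosen = cand[next_idx];

        /* rotate: the element after the chosen one goes first next time */
        next_idx = (next_idx + 1) % NUM_CAND;
        return chosen;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("packet %d -> %s\n", i, pick_bond_router());
        return 0;
    }
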
@@ -524,12 +530,14 @@ out:
524 * remaining candidates which are not using 530 * remaining candidates which are not using
525 * this interface. 531 * this interface.
526 * 532 *
527 * Increases the returned router's refcount */ 533 * Increases the returned router's refcount
528static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, 534 */
529 const struct hard_iface *recv_if) 535static struct batadv_neigh_node *
536batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
537 const struct batadv_hard_iface *recv_if)
530{ 538{
531 struct neigh_node *tmp_neigh_node; 539 struct batadv_neigh_node *tmp_neigh_node;
532 struct neigh_node *router = NULL, *first_candidate = NULL; 540 struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
533 541
534 rcu_read_lock(); 542 rcu_read_lock();
535 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, 543 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -545,19 +553,21 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
545 continue; 553 continue;
546 554
547 /* if we don't have a router yet 555 /* if we don't have a router yet
548 * or this one is better, choose it. */ 556 * or this one is better, choose it.
557 */
549 if ((!router) || 558 if ((!router) ||
550 (tmp_neigh_node->tq_avg > router->tq_avg)) { 559 (tmp_neigh_node->tq_avg > router->tq_avg)) {
551 /* decrement refcount of 560 /* decrement refcount of
552 * previously selected router */ 561 * previously selected router
562 */
553 if (router) 563 if (router)
554 neigh_node_free_ref(router); 564 batadv_neigh_node_free_ref(router);
555 565
556 router = tmp_neigh_node; 566 router = tmp_neigh_node;
557 atomic_inc_not_zero(&router->refcount); 567 atomic_inc_not_zero(&router->refcount);
558 } 568 }
559 569
560 neigh_node_free_ref(tmp_neigh_node); 570 batadv_neigh_node_free_ref(tmp_neigh_node);
561 } 571 }
562 572
563 /* use the first candidate if nothing was found. */ 573 /* use the first candidate if nothing was found. */
@@ -569,19 +579,22 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
569 return router; 579 return router;
570} 580}
571 581
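batadv_find_ifalter_router() scans the bonding candidates, skips any that would send the packet back out the interface it arrived on, keeps the highest tq_avg, and falls back to the first candidate when nothing qualifies. A minimal sketch of that selection, with the data layout invented for the example and the refcount handling dropped:

    /* Interface-alternating router selection, modeled in plain C. */
    #include <stdio.h>

    struct cand {
        const char *name;
        int if_index;   /* interface the neighbor is reached on */
        int tq_avg;     /* transmit quality average, higher is better */
    };

    static const struct cand *pick_ifalter(const struct cand *c, int n, int recv_if)
    {
        const struct cand *best = NULL, *first = NULL;

        for (int i = 0; i < n; i++) {
            if (!first)
                first = &c[i];
            if (c[i].if_index == recv_if)
                continue;           /* came in here, don't send back out */
            if (!best || c[i].tq_avg > best->tq_avg)
                best = &c[i];
        }
        return best ? best : first; /* first candidate as last resort */
    }

    int main(void)
    {
        struct cand cands[] = {
            { "neigh-A", 1, 200 }, { "neigh-B", 2, 180 }, { "neigh-C", 1, 255 },
        };

        printf("chosen: %s\n", pick_ifalter(cands, 3, 1)->name);
        return 0;
    }
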
572int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) 582int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
573{ 583{
574 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 584 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
575 struct tt_query_packet *tt_query; 585 struct batadv_tt_query_packet *tt_query;
576 uint16_t tt_len; 586 uint16_t tt_size;
577 struct ethhdr *ethhdr; 587 struct ethhdr *ethhdr;
588 char tt_flag;
589 size_t packet_size;
578 590
 579 /* drop packet if it does not have the necessary minimum size */ 591 /* drop packet if it does not have the necessary minimum size */
580 if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet)))) 592 if (unlikely(!pskb_may_pull(skb,
593 sizeof(struct batadv_tt_query_packet))))
581 goto out; 594 goto out;
582 595
583 /* I could need to modify it */ 596 /* I could need to modify it */
584 if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0) 597 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
585 goto out; 598 goto out;
586 599
587 ethhdr = (struct ethhdr *)skb_mac_header(skb); 600 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -594,45 +607,59 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
594 if (is_broadcast_ether_addr(ethhdr->h_source)) 607 if (is_broadcast_ether_addr(ethhdr->h_source))
595 goto out; 608 goto out;
596 609
597 tt_query = (struct tt_query_packet *)skb->data; 610 tt_query = (struct batadv_tt_query_packet *)skb->data;
598 611
599 tt_query->tt_data = ntohs(tt_query->tt_data); 612 switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
613 case BATADV_TT_REQUEST:
614 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
600 615
601 switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
602 case TT_REQUEST:
 603 /* If we cannot provide an answer, the tt_request is 616 /* If we cannot provide an answer, the tt_request is
604 * forwarded */ 617 * forwarded
605 if (!send_tt_response(bat_priv, tt_query)) { 618 */
606 bat_dbg(DBG_TT, bat_priv, 619 if (!batadv_send_tt_response(bat_priv, tt_query)) {
607 "Routing TT_REQUEST to %pM [%c]\n", 620 if (tt_query->flags & BATADV_TT_FULL_TABLE)
608 tt_query->dst, 621 tt_flag = 'F';
609 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); 622 else
610 tt_query->tt_data = htons(tt_query->tt_data); 623 tt_flag = '.';
611 return route_unicast_packet(skb, recv_if); 624
625 batadv_dbg(BATADV_DBG_TT, bat_priv,
626 "Routing TT_REQUEST to %pM [%c]\n",
627 tt_query->dst,
628 tt_flag);
629 return batadv_route_unicast_packet(skb, recv_if);
612 } 630 }
613 break; 631 break;
614 case TT_RESPONSE: 632 case BATADV_TT_RESPONSE:
615 if (is_my_mac(tt_query->dst)) { 633 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
634
635 if (batadv_is_my_mac(tt_query->dst)) {
616 /* packet needs to be linearized to access the TT 636 /* packet needs to be linearized to access the TT
617 * changes */ 637 * changes
638 */
618 if (skb_linearize(skb) < 0) 639 if (skb_linearize(skb) < 0)
619 goto out; 640 goto out;
641 /* skb_linearize() possibly changed skb->data */
642 tt_query = (struct batadv_tt_query_packet *)skb->data;
620 643
621 tt_len = tt_query->tt_data * sizeof(struct tt_change); 644 tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
622 645
623 /* Ensure we have all the claimed data */ 646 /* Ensure we have all the claimed data */
624 if (unlikely(skb_headlen(skb) < 647 packet_size = sizeof(struct batadv_tt_query_packet);
625 sizeof(struct tt_query_packet) + tt_len)) 648 packet_size += tt_size;
649 if (unlikely(skb_headlen(skb) < packet_size))
626 goto out; 650 goto out;
627 651
628 handle_tt_response(bat_priv, tt_query); 652 batadv_handle_tt_response(bat_priv, tt_query);
629 } else { 653 } else {
630 bat_dbg(DBG_TT, bat_priv, 654 if (tt_query->flags & BATADV_TT_FULL_TABLE)
631 "Routing TT_RESPONSE to %pM [%c]\n", 655 tt_flag = 'F';
632 tt_query->dst, 656 else
633 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); 657 tt_flag = '.';
634 tt_query->tt_data = htons(tt_query->tt_data); 658 batadv_dbg(BATADV_DBG_TT, bat_priv,
635 return route_unicast_packet(skb, recv_if); 659 "Routing TT_RESPONSE to %pM [%c]\n",
660 tt_query->dst,
661 tt_flag);
662 return batadv_route_unicast_packet(skb, recv_if);
636 } 663 }
637 break; 664 break;
638 } 665 }
@@ -642,15 +669,16 @@ out:
642 return NET_RX_DROP; 669 return NET_RX_DROP;
643} 670}
644 671
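Note the byte-order change in batadv_recv_tt_query(): the old code converted tt_data to host order in place on entry and had to htons() it back before forwarding; the new code leaves the on-wire field in network order and applies ntohs() only at the point of use, so a forwarded packet never needs fixing up. A small illustration of the pattern (field and helper names are illustrative):

    /* Convert at the point of use, never in the packet buffer itself. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tt_query {
        uint16_t tt_data;   /* network byte order, exactly as on the wire */
    };

    static size_t tt_len(uint16_t tt_num_changes)
    {
        return tt_num_changes * 8;  /* stand-in for sizeof(struct tt_change) */
    }

    int main(void)
    {
        struct tt_query pkt = { .tt_data = htons(5) };

        /* consume: convert exactly where the host value is needed */
        printf("claimed payload: %zu bytes\n", tt_len(ntohs(pkt.tt_data)));

        /* forward: pkt.tt_data is still network order, nothing to undo */
        return 0;
    }
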
645int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) 672int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
646{ 673{
647 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 674 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
648 struct roam_adv_packet *roam_adv_packet; 675 struct batadv_roam_adv_packet *roam_adv_packet;
649 struct orig_node *orig_node; 676 struct batadv_orig_node *orig_node;
650 struct ethhdr *ethhdr; 677 struct ethhdr *ethhdr;
651 678
 652 /* drop packet if it does not have the necessary minimum size */ 679 /* drop packet if it does not have the necessary minimum size */
653 if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet)))) 680 if (unlikely(!pskb_may_pull(skb,
681 sizeof(struct batadv_roam_adv_packet))))
654 goto out; 682 goto out;
655 683
656 ethhdr = (struct ethhdr *)skb_mac_header(skb); 684 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -663,35 +691,39 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
663 if (is_broadcast_ether_addr(ethhdr->h_source)) 691 if (is_broadcast_ether_addr(ethhdr->h_source))
664 goto out; 692 goto out;
665 693
666 roam_adv_packet = (struct roam_adv_packet *)skb->data; 694 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
695
696 roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
667 697
668 if (!is_my_mac(roam_adv_packet->dst)) 698 if (!batadv_is_my_mac(roam_adv_packet->dst))
669 return route_unicast_packet(skb, recv_if); 699 return batadv_route_unicast_packet(skb, recv_if);
670 700
671 /* check if it is a backbone gateway. we don't accept 701 /* check if it is a backbone gateway. we don't accept
672 * roaming advertisement from it, as it has the same 702 * roaming advertisement from it, as it has the same
673 * entries as we have. 703 * entries as we have.
674 */ 704 */
675 if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) 705 if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
676 goto out; 706 goto out;
677 707
678 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); 708 orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
679 if (!orig_node) 709 if (!orig_node)
680 goto out; 710 goto out;
681 711
682 bat_dbg(DBG_TT, bat_priv, 712 batadv_dbg(BATADV_DBG_TT, bat_priv,
683 "Received ROAMING_ADV from %pM (client %pM)\n", 713 "Received ROAMING_ADV from %pM (client %pM)\n",
684 roam_adv_packet->src, roam_adv_packet->client); 714 roam_adv_packet->src, roam_adv_packet->client);
685 715
686 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 716 batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
687 atomic_read(&orig_node->last_ttvn) + 1, true, false); 717 BATADV_TT_CLIENT_ROAM,
718 atomic_read(&orig_node->last_ttvn) + 1);
688 719
689 /* Roaming phase starts: I have new information but the ttvn has not 720 /* Roaming phase starts: I have new information but the ttvn has not
690 * been incremented yet. This flag will make me check all the incoming 721 * been incremented yet. This flag will make me check all the incoming
691 * packets for the correct destination. */ 722 * packets for the correct destination.
723 */
692 bat_priv->tt_poss_change = true; 724 bat_priv->tt_poss_change = true;
693 725
694 orig_node_free_ref(orig_node); 726 batadv_orig_node_free_ref(orig_node);
695out: 727out:
696 /* returning NET_RX_DROP will make the caller function kfree the skb */ 728 /* returning NET_RX_DROP will make the caller function kfree the skb */
697 return NET_RX_DROP; 729 return NET_RX_DROP;
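Every receive handler in this file follows the same discipline: prove with pskb_may_pull() that the buffer holds a complete header before casting skb->data to a packet struct. A userspace model of that check, with a plain buffer and explicit length in place of the skb:

    /* Bounds check before cast, as done with pskb_may_pull() above. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct roam_adv {        /* illustrative layout, not the kernel's */
        uint8_t dst[6];
        uint8_t src[6];
        uint8_t client[6];
    };

    static const struct roam_adv *parse_roam_adv(const uint8_t *buf, size_t len)
    {
        if (len < sizeof(struct roam_adv))  /* truncated: drop, don't read */
            return NULL;
        return (const struct roam_adv *)buf;
    }

    int main(void)
    {
        uint8_t wire[sizeof(struct roam_adv)] = { 0 };

        printf("full packet:  %s\n",
               parse_roam_adv(wire, sizeof(wire)) ? "parsed" : "dropped");
        printf("short packet: %s\n",
               parse_roam_adv(wire, 5) ? "parsed" : "dropped");
        return 0;
    }
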
@@ -699,26 +731,30 @@ out:
699 731
700/* find a suitable router for this originator, and use 732/* find a suitable router for this originator, and use
701 * bonding if possible. increases the found neighbors 733 * bonding if possible. increases the found neighbors
702 * refcount.*/ 734 * refcount.
703struct neigh_node *find_router(struct bat_priv *bat_priv, 735 */
704 struct orig_node *orig_node, 736struct batadv_neigh_node *
705 const struct hard_iface *recv_if) 737batadv_find_router(struct batadv_priv *bat_priv,
738 struct batadv_orig_node *orig_node,
739 const struct batadv_hard_iface *recv_if)
706{ 740{
707 struct orig_node *primary_orig_node; 741 struct batadv_orig_node *primary_orig_node;
708 struct orig_node *router_orig; 742 struct batadv_orig_node *router_orig;
709 struct neigh_node *router; 743 struct batadv_neigh_node *router;
710 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 744 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
711 int bonding_enabled; 745 int bonding_enabled;
746 uint8_t *primary_addr;
712 747
713 if (!orig_node) 748 if (!orig_node)
714 return NULL; 749 return NULL;
715 750
716 router = orig_node_get_router(orig_node); 751 router = batadv_orig_node_get_router(orig_node);
717 if (!router) 752 if (!router)
718 goto err; 753 goto err;
719 754
720 /* without bonding, the first node should 755 /* without bonding, the first node should
721 * always choose the default router. */ 756 * always choose the default router.
757 */
722 bonding_enabled = atomic_read(&bat_priv->bonding); 758 bonding_enabled = atomic_read(&bat_priv->bonding);
723 759
724 rcu_read_lock(); 760 rcu_read_lock();
@@ -730,43 +766,47 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
730 if ((!recv_if) && (!bonding_enabled)) 766 if ((!recv_if) && (!bonding_enabled))
731 goto return_router; 767 goto return_router;
732 768
769 primary_addr = router_orig->primary_addr;
770
733 /* if we have something in the primary_addr, we can search 771 /* if we have something in the primary_addr, we can search
734 * for a potential bonding candidate. */ 772 * for a potential bonding candidate.
735 if (compare_eth(router_orig->primary_addr, zero_mac)) 773 */
774 if (batadv_compare_eth(primary_addr, zero_mac))
736 goto return_router; 775 goto return_router;
737 776
738 /* find the orig_node which has the primary interface. might 777 /* find the orig_node which has the primary interface. might
739 * even be the same as our router_orig in many cases */ 778 * even be the same as our router_orig in many cases
740 779 */
741 if (compare_eth(router_orig->primary_addr, router_orig->orig)) { 780 if (batadv_compare_eth(primary_addr, router_orig->orig)) {
742 primary_orig_node = router_orig; 781 primary_orig_node = router_orig;
743 } else { 782 } else {
744 primary_orig_node = orig_hash_find(bat_priv, 783 primary_orig_node = batadv_orig_hash_find(bat_priv,
745 router_orig->primary_addr); 784 primary_addr);
746 if (!primary_orig_node) 785 if (!primary_orig_node)
747 goto return_router; 786 goto return_router;
748 787
749 orig_node_free_ref(primary_orig_node); 788 batadv_orig_node_free_ref(primary_orig_node);
750 } 789 }
751 790
752 /* with less than 2 candidates, we can't do any 791 /* with less than 2 candidates, we can't do any
753 * bonding and prefer the original router. */ 792 * bonding and prefer the original router.
793 */
754 if (atomic_read(&primary_orig_node->bond_candidates) < 2) 794 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
755 goto return_router; 795 goto return_router;
756 796
757 /* all nodes between should choose a candidate which 797 /* all nodes between should choose a candidate which
 758 * is not on the interface where the packet came 798 * is not on the interface where the packet came
759 * in. */ 799 * in.
760 800 */
761 neigh_node_free_ref(router); 801 batadv_neigh_node_free_ref(router);
762 802
763 if (bonding_enabled) 803 if (bonding_enabled)
764 router = find_bond_router(primary_orig_node, recv_if); 804 router = batadv_find_bond_router(primary_orig_node, recv_if);
765 else 805 else
766 router = find_ifalter_router(primary_orig_node, recv_if); 806 router = batadv_find_ifalter_router(primary_orig_node, recv_if);
767 807
768return_router: 808return_router:
769 if (router && router->if_incoming->if_status != IF_ACTIVE) 809 if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
770 goto err_unlock; 810 goto err_unlock;
771 811
772 rcu_read_unlock(); 812 rcu_read_unlock();
@@ -775,11 +815,11 @@ err_unlock:
775 rcu_read_unlock(); 815 rcu_read_unlock();
776err: 816err:
777 if (router) 817 if (router)
778 neigh_node_free_ref(router); 818 batadv_neigh_node_free_ref(router);
779 return NULL; 819 return NULL;
780} 820}
781 821
782static int check_unicast_packet(struct sk_buff *skb, int hdr_size) 822static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
783{ 823{
784 struct ethhdr *ethhdr; 824 struct ethhdr *ethhdr;
785 825
@@ -798,23 +838,24 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
798 return -1; 838 return -1;
799 839
800 /* not for me */ 840 /* not for me */
801 if (!is_my_mac(ethhdr->h_dest)) 841 if (!batadv_is_my_mac(ethhdr->h_dest))
802 return -1; 842 return -1;
803 843
804 return 0; 844 return 0;
805} 845}
806 846
807static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 847static int batadv_route_unicast_packet(struct sk_buff *skb,
848 struct batadv_hard_iface *recv_if)
808{ 849{
809 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 850 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
810 struct orig_node *orig_node = NULL; 851 struct batadv_orig_node *orig_node = NULL;
811 struct neigh_node *neigh_node = NULL; 852 struct batadv_neigh_node *neigh_node = NULL;
812 struct unicast_packet *unicast_packet; 853 struct batadv_unicast_packet *unicast_packet;
813 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); 854 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
814 int ret = NET_RX_DROP; 855 int ret = NET_RX_DROP;
815 struct sk_buff *new_skb; 856 struct sk_buff *new_skb;
816 857
817 unicast_packet = (struct unicast_packet *)skb->data; 858 unicast_packet = (struct batadv_unicast_packet *)skb->data;
818 859
819 /* TTL exceeded */ 860 /* TTL exceeded */
820 if (unicast_packet->header.ttl < 2) { 861 if (unicast_packet->header.ttl < 2) {
@@ -824,13 +865,13 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
824 } 865 }
825 866
826 /* get routing information */ 867 /* get routing information */
827 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 868 orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
828 869
829 if (!orig_node) 870 if (!orig_node)
830 goto out; 871 goto out;
831 872
832 /* find_router() increases neigh_nodes refcount if found. */ 873 /* find_router() increases neigh_nodes refcount if found. */
833 neigh_node = find_router(bat_priv, orig_node, recv_if); 874 neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
834 875
835 if (!neigh_node) 876 if (!neigh_node)
836 goto out; 877 goto out;
@@ -839,20 +880,22 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
839 if (skb_cow(skb, ETH_HLEN) < 0) 880 if (skb_cow(skb, ETH_HLEN) < 0)
840 goto out; 881 goto out;
841 882
842 unicast_packet = (struct unicast_packet *)skb->data; 883 unicast_packet = (struct batadv_unicast_packet *)skb->data;
843 884
844 if (unicast_packet->header.packet_type == BAT_UNICAST && 885 if (unicast_packet->header.packet_type == BATADV_UNICAST &&
845 atomic_read(&bat_priv->fragmentation) && 886 atomic_read(&bat_priv->fragmentation) &&
846 skb->len > neigh_node->if_incoming->net_dev->mtu) { 887 skb->len > neigh_node->if_incoming->net_dev->mtu) {
847 ret = frag_send_skb(skb, bat_priv, 888 ret = batadv_frag_send_skb(skb, bat_priv,
848 neigh_node->if_incoming, neigh_node->addr); 889 neigh_node->if_incoming,
890 neigh_node->addr);
849 goto out; 891 goto out;
850 } 892 }
851 893
852 if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG && 894 if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
853 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { 895 batadv_frag_can_reassemble(skb,
896 neigh_node->if_incoming->net_dev->mtu)) {
854 897
855 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 898 ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
856 899
857 if (ret == NET_RX_DROP) 900 if (ret == NET_RX_DROP)
858 goto out; 901 goto out;
@@ -864,141 +907,153 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
864 } 907 }
865 908
866 skb = new_skb; 909 skb = new_skb;
867 unicast_packet = (struct unicast_packet *)skb->data; 910 unicast_packet = (struct batadv_unicast_packet *)skb->data;
868 } 911 }
869 912
870 /* decrement ttl */ 913 /* decrement ttl */
871 unicast_packet->header.ttl--; 914 unicast_packet->header.ttl--;
872 915
916 /* Update stats counter */
917 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
918 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
919 skb->len + ETH_HLEN);
920
873 /* route it */ 921 /* route it */
874 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 922 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
875 ret = NET_RX_SUCCESS; 923 ret = NET_RX_SUCCESS;
876 924
877out: 925out:
878 if (neigh_node) 926 if (neigh_node)
879 neigh_node_free_ref(neigh_node); 927 batadv_neigh_node_free_ref(neigh_node);
880 if (orig_node) 928 if (orig_node)
881 orig_node_free_ref(orig_node); 929 batadv_orig_node_free_ref(orig_node);
882 return ret; 930 return ret;
883} 931}
884 932
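The forwarding path above fragments a full-size unicast when fragmentation is enabled and the packet exceeds the next hop's MTU, and reassembles an incoming fragment when the rebuilt packet would fit. A sketch of just that decision, with constants and types invented and batadv_frag_can_reassemble() reduced to a length test:

    /* Fragment/reassemble/forward decision, modeled standalone. */
    #include <stdbool.h>
    #include <stdio.h>

    enum pkt_type { UNICAST, UNICAST_FRAG };
    enum action   { FORWARD, FRAGMENT, REASSEMBLE };

    static enum action route_action(enum pkt_type type, bool frag_enabled,
                                    int pkt_len, int out_mtu)
    {
        if (type == UNICAST && frag_enabled && pkt_len > out_mtu)
            return FRAGMENT;        /* too big for the next hop */
        if (type == UNICAST_FRAG && pkt_len <= out_mtu)
            return REASSEMBLE;      /* next hop could carry the whole packet */
        return FORWARD;
    }

    int main(void)
    {
        printf("%d\n", route_action(UNICAST, true, 1600, 1500));     /* 1: FRAGMENT */
        printf("%d\n", route_action(UNICAST_FRAG, true, 700, 1500)); /* 2: REASSEMBLE */
        printf("%d\n", route_action(UNICAST, true, 1200, 1500));     /* 0: FORWARD */
        return 0;
    }
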
885static int check_unicast_ttvn(struct bat_priv *bat_priv, 933static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
886 struct sk_buff *skb) { 934 struct sk_buff *skb) {
887 uint8_t curr_ttvn; 935 uint8_t curr_ttvn;
888 struct orig_node *orig_node; 936 struct batadv_orig_node *orig_node;
889 struct ethhdr *ethhdr; 937 struct ethhdr *ethhdr;
890 struct hard_iface *primary_if; 938 struct batadv_hard_iface *primary_if;
891 struct unicast_packet *unicast_packet; 939 struct batadv_unicast_packet *unicast_packet;
892 bool tt_poss_change; 940 bool tt_poss_change;
941 int is_old_ttvn;
893 942
894 /* I could need to modify it */ 943 /* I could need to modify it */
895 if (skb_cow(skb, sizeof(struct unicast_packet)) < 0) 944 if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0)
896 return 0; 945 return 0;
897 946
898 unicast_packet = (struct unicast_packet *)skb->data; 947 unicast_packet = (struct batadv_unicast_packet *)skb->data;
899 948
900 if (is_my_mac(unicast_packet->dest)) { 949 if (batadv_is_my_mac(unicast_packet->dest)) {
901 tt_poss_change = bat_priv->tt_poss_change; 950 tt_poss_change = bat_priv->tt_poss_change;
902 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 951 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
903 } else { 952 } else {
904 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 953 orig_node = batadv_orig_hash_find(bat_priv,
954 unicast_packet->dest);
905 955
906 if (!orig_node) 956 if (!orig_node)
907 return 0; 957 return 0;
908 958
909 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 959 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
910 tt_poss_change = orig_node->tt_poss_change; 960 tt_poss_change = orig_node->tt_poss_change;
911 orig_node_free_ref(orig_node); 961 batadv_orig_node_free_ref(orig_node);
912 } 962 }
913 963
914 /* Check whether I have to reroute the packet */ 964 /* Check whether I have to reroute the packet */
915 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { 965 is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
966 if (is_old_ttvn || tt_poss_change) {
916 /* check if there is enough data before accessing it */ 967 /* check if there is enough data before accessing it */
917 if (pskb_may_pull(skb, sizeof(struct unicast_packet) + 968 if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
918 ETH_HLEN) < 0) 969 ETH_HLEN) < 0)
919 return 0; 970 return 0;
920 971
921 ethhdr = (struct ethhdr *)(skb->data + 972 ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
922 sizeof(struct unicast_packet));
923 973
924 /* we don't have an updated route for this client, so we should 974 /* we don't have an updated route for this client, so we should
925 * not try to reroute the packet!! 975 * not try to reroute the packet!!
926 */ 976 */
927 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) 977 if (batadv_tt_global_client_is_roaming(bat_priv,
978 ethhdr->h_dest))
928 return 1; 979 return 1;
929 980
930 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); 981 orig_node = batadv_transtable_search(bat_priv, NULL,
982 ethhdr->h_dest);
931 983
932 if (!orig_node) { 984 if (!orig_node) {
933 if (!is_my_client(bat_priv, ethhdr->h_dest)) 985 if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
934 return 0; 986 return 0;
935 primary_if = primary_if_get_selected(bat_priv); 987 primary_if = batadv_primary_if_get_selected(bat_priv);
936 if (!primary_if) 988 if (!primary_if)
937 return 0; 989 return 0;
938 memcpy(unicast_packet->dest, 990 memcpy(unicast_packet->dest,
939 primary_if->net_dev->dev_addr, ETH_ALEN); 991 primary_if->net_dev->dev_addr, ETH_ALEN);
940 hardif_free_ref(primary_if); 992 batadv_hardif_free_ref(primary_if);
941 } else { 993 } else {
942 memcpy(unicast_packet->dest, orig_node->orig, 994 memcpy(unicast_packet->dest, orig_node->orig,
943 ETH_ALEN); 995 ETH_ALEN);
944 curr_ttvn = (uint8_t) 996 curr_ttvn = (uint8_t)
945 atomic_read(&orig_node->last_ttvn); 997 atomic_read(&orig_node->last_ttvn);
946 orig_node_free_ref(orig_node); 998 batadv_orig_node_free_ref(orig_node);
947 } 999 }
948 1000
949 bat_dbg(DBG_ROUTES, bat_priv, 1001 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
950 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n", 1002 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
951 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, 1003 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
952 unicast_packet->dest); 1004 unicast_packet->dest);
953 1005
954 unicast_packet->ttvn = curr_ttvn; 1006 unicast_packet->ttvn = curr_ttvn;
955 } 1007 }
956 return 1; 1008 return 1;
957} 1009}
958 1010
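batadv_check_unicast_ttvn() decides staleness with batadv_seq_before() on an 8-bit translation-table version number, which has to survive wraparound. One common way to express such a comparison (a sketch, not the kernel macro) is signed serial-number arithmetic:

    /* Wraparound-safe "x is older than y" for 8-bit version numbers. */
    #include <stdint.h>
    #include <stdio.h>

    static int seq_before(uint8_t x, uint8_t y)
    {
        return (int8_t)(x - y) < 0;  /* true if x is older than y, mod 256 */
    }

    int main(void)
    {
        printf("%d\n", seq_before(10, 11));  /* 1: plainly older */
        printf("%d\n", seq_before(250, 3));  /* 1: older across the wrap */
        printf("%d\n", seq_before(3, 250));  /* 0: newer across the wrap */
        return 0;
    }
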
959int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1011int batadv_recv_unicast_packet(struct sk_buff *skb,
1012 struct batadv_hard_iface *recv_if)
960{ 1013{
961 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1014 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
962 struct unicast_packet *unicast_packet; 1015 struct batadv_unicast_packet *unicast_packet;
963 int hdr_size = sizeof(*unicast_packet); 1016 int hdr_size = sizeof(*unicast_packet);
964 1017
965 if (check_unicast_packet(skb, hdr_size) < 0) 1018 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
966 return NET_RX_DROP; 1019 return NET_RX_DROP;
967 1020
968 if (!check_unicast_ttvn(bat_priv, skb)) 1021 if (!batadv_check_unicast_ttvn(bat_priv, skb))
969 return NET_RX_DROP; 1022 return NET_RX_DROP;
970 1023
971 unicast_packet = (struct unicast_packet *)skb->data; 1024 unicast_packet = (struct batadv_unicast_packet *)skb->data;
972 1025
973 /* packet for me */ 1026 /* packet for me */
974 if (is_my_mac(unicast_packet->dest)) { 1027 if (batadv_is_my_mac(unicast_packet->dest)) {
975 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1028 batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
1029 hdr_size);
976 return NET_RX_SUCCESS; 1030 return NET_RX_SUCCESS;
977 } 1031 }
978 1032
979 return route_unicast_packet(skb, recv_if); 1033 return batadv_route_unicast_packet(skb, recv_if);
980} 1034}
981 1035
982int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1036int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
1037 struct batadv_hard_iface *recv_if)
983{ 1038{
984 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1039 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
985 struct unicast_frag_packet *unicast_packet; 1040 struct batadv_unicast_frag_packet *unicast_packet;
986 int hdr_size = sizeof(*unicast_packet); 1041 int hdr_size = sizeof(*unicast_packet);
987 struct sk_buff *new_skb = NULL; 1042 struct sk_buff *new_skb = NULL;
988 int ret; 1043 int ret;
989 1044
990 if (check_unicast_packet(skb, hdr_size) < 0) 1045 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
991 return NET_RX_DROP; 1046 return NET_RX_DROP;
992 1047
993 if (!check_unicast_ttvn(bat_priv, skb)) 1048 if (!batadv_check_unicast_ttvn(bat_priv, skb))
994 return NET_RX_DROP; 1049 return NET_RX_DROP;
995 1050
996 unicast_packet = (struct unicast_frag_packet *)skb->data; 1051 unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
997 1052
998 /* packet for me */ 1053 /* packet for me */
999 if (is_my_mac(unicast_packet->dest)) { 1054 if (batadv_is_my_mac(unicast_packet->dest)) {
1000 1055
1001 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 1056 ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
1002 1057
1003 if (ret == NET_RX_DROP) 1058 if (ret == NET_RX_DROP)
1004 return NET_RX_DROP; 1059 return NET_RX_DROP;
@@ -1007,20 +1062,21 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1007 if (!new_skb) 1062 if (!new_skb)
1008 return NET_RX_SUCCESS; 1063 return NET_RX_SUCCESS;
1009 1064
1010 interface_rx(recv_if->soft_iface, new_skb, recv_if, 1065 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
1011 sizeof(struct unicast_packet)); 1066 sizeof(struct batadv_unicast_packet));
1012 return NET_RX_SUCCESS; 1067 return NET_RX_SUCCESS;
1013 } 1068 }
1014 1069
1015 return route_unicast_packet(skb, recv_if); 1070 return batadv_route_unicast_packet(skb, recv_if);
1016} 1071}
1017 1072
1018 1073
1019int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1074int batadv_recv_bcast_packet(struct sk_buff *skb,
1075 struct batadv_hard_iface *recv_if)
1020{ 1076{
1021 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1077 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1022 struct orig_node *orig_node = NULL; 1078 struct batadv_orig_node *orig_node = NULL;
1023 struct bcast_packet *bcast_packet; 1079 struct batadv_bcast_packet *bcast_packet;
1024 struct ethhdr *ethhdr; 1080 struct ethhdr *ethhdr;
1025 int hdr_size = sizeof(*bcast_packet); 1081 int hdr_size = sizeof(*bcast_packet);
1026 int ret = NET_RX_DROP; 1082 int ret = NET_RX_DROP;
@@ -1041,19 +1097,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1041 goto out; 1097 goto out;
1042 1098
1043 /* ignore broadcasts sent by myself */ 1099 /* ignore broadcasts sent by myself */
1044 if (is_my_mac(ethhdr->h_source)) 1100 if (batadv_is_my_mac(ethhdr->h_source))
1045 goto out; 1101 goto out;
1046 1102
1047 bcast_packet = (struct bcast_packet *)skb->data; 1103 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1048 1104
1049 /* ignore broadcasts originated by myself */ 1105 /* ignore broadcasts originated by myself */
1050 if (is_my_mac(bcast_packet->orig)) 1106 if (batadv_is_my_mac(bcast_packet->orig))
1051 goto out; 1107 goto out;
1052 1108
1053 if (bcast_packet->header.ttl < 2) 1109 if (bcast_packet->header.ttl < 2)
1054 goto out; 1110 goto out;
1055 1111
1056 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1112 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
1057 1113
1058 if (!orig_node) 1114 if (!orig_node)
1059 goto out; 1115 goto out;
@@ -1061,39 +1117,40 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1061 spin_lock_bh(&orig_node->bcast_seqno_lock); 1117 spin_lock_bh(&orig_node->bcast_seqno_lock);
1062 1118
1063 /* check whether the packet is a duplicate */ 1119 /* check whether the packet is a duplicate */
1064 if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno, 1120 if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1065 ntohl(bcast_packet->seqno))) 1121 ntohl(bcast_packet->seqno)))
1066 goto spin_unlock; 1122 goto spin_unlock;
1067 1123
1068 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; 1124 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1069 1125
1070 /* check whether the packet is old and the host just restarted. */ 1126 /* check whether the packet is old and the host just restarted. */
1071 if (window_protected(bat_priv, seq_diff, 1127 if (batadv_window_protected(bat_priv, seq_diff,
1072 &orig_node->bcast_seqno_reset)) 1128 &orig_node->bcast_seqno_reset))
1073 goto spin_unlock; 1129 goto spin_unlock;
1074 1130
1075 /* mark broadcast in flood history, update window position 1131 /* mark broadcast in flood history, update window position
1076 * if required. */ 1132 * if required.
1077 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) 1133 */
1134 if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1078 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); 1135 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1079 1136
1080 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1137 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1081 1138
1082 /* check whether this has been sent by another originator before */ 1139 /* check whether this has been sent by another originator before */
1083 if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) 1140 if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
1084 goto out; 1141 goto out;
1085 1142
1086 /* rebroadcast packet */ 1143 /* rebroadcast packet */
1087 add_bcast_packet_to_list(bat_priv, skb, 1); 1144 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
1088 1145
1089 /* don't hand the broadcast up if it is from an originator 1146 /* don't hand the broadcast up if it is from an originator
1090 * from the same backbone. 1147 * from the same backbone.
1091 */ 1148 */
1092 if (bla_is_backbone_gw(skb, orig_node, hdr_size)) 1149 if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
1093 goto out; 1150 goto out;
1094 1151
1095 /* broadcast for me */ 1152 /* broadcast for me */
1096 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1153 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1097 ret = NET_RX_SUCCESS; 1154 ret = NET_RX_SUCCESS;
1098 goto out; 1155 goto out;
1099 1156
@@ -1101,15 +1158,16 @@ spin_unlock:
1101 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1158 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1102out: 1159out:
1103 if (orig_node) 1160 if (orig_node)
1104 orig_node_free_ref(orig_node); 1161 batadv_orig_node_free_ref(orig_node);
1105 return ret; 1162 return ret;
1106} 1163}
1107 1164
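The duplicate check in batadv_recv_bcast_packet() keeps a per-originator bitmap of recently seen broadcast sequence numbers and slides the window forward as newer seqnos arrive. A userspace model of that window; the kernel version adds locking and the window-protection restart handling, both omitted here:

    /* Sliding-window duplicate detection over a 64-bit bitmap. */
    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW 64

    static uint64_t seen_bits;  /* bit i = seqno (last_seqno - i) seen */
    static uint32_t last_seqno;

    static int bcast_is_dup(uint32_t seqno)
    {
        int32_t diff = (int32_t)(seqno - last_seqno);

        if (diff <= 0) {                    /* old or current seqno */
            if (diff <= -WINDOW)
                return 0;                   /* too old to track: let it pass */
            if (seen_bits & (1ULL << -diff))
                return 1;                   /* already seen */
            seen_bits |= 1ULL << -diff;     /* mark it within the window */
            return 0;
        }
        /* newer seqno: slide the window forward and mark it */
        seen_bits = (diff >= WINDOW) ? 0 : seen_bits << diff;
        seen_bits |= 1;
        last_seqno = seqno;
        return 0;
    }

    int main(void)
    {
        int a = bcast_is_dup(100);
        int b = bcast_is_dup(100);
        int c = bcast_is_dup(101);

        printf("%d %d %d\n", a, b, c);  /* prints: 0 1 0 */
        return 0;
    }
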
1108int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1165int batadv_recv_vis_packet(struct sk_buff *skb,
1166 struct batadv_hard_iface *recv_if)
1109{ 1167{
1110 struct vis_packet *vis_packet; 1168 struct batadv_vis_packet *vis_packet;
1111 struct ethhdr *ethhdr; 1169 struct ethhdr *ethhdr;
1112 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1170 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1113 int hdr_size = sizeof(*vis_packet); 1171 int hdr_size = sizeof(*vis_packet);
1114 1172
1115 /* keep skb linear */ 1173 /* keep skb linear */
@@ -1119,29 +1177,29 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1119 if (unlikely(!pskb_may_pull(skb, hdr_size))) 1177 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1120 return NET_RX_DROP; 1178 return NET_RX_DROP;
1121 1179
1122 vis_packet = (struct vis_packet *)skb->data; 1180 vis_packet = (struct batadv_vis_packet *)skb->data;
1123 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1181 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1124 1182
1125 /* not for me */ 1183 /* not for me */
1126 if (!is_my_mac(ethhdr->h_dest)) 1184 if (!batadv_is_my_mac(ethhdr->h_dest))
1127 return NET_RX_DROP; 1185 return NET_RX_DROP;
1128 1186
1129 /* ignore own packets */ 1187 /* ignore own packets */
1130 if (is_my_mac(vis_packet->vis_orig)) 1188 if (batadv_is_my_mac(vis_packet->vis_orig))
1131 return NET_RX_DROP; 1189 return NET_RX_DROP;
1132 1190
1133 if (is_my_mac(vis_packet->sender_orig)) 1191 if (batadv_is_my_mac(vis_packet->sender_orig))
1134 return NET_RX_DROP; 1192 return NET_RX_DROP;
1135 1193
1136 switch (vis_packet->vis_type) { 1194 switch (vis_packet->vis_type) {
1137 case VIS_TYPE_SERVER_SYNC: 1195 case BATADV_VIS_TYPE_SERVER_SYNC:
1138 receive_server_sync_packet(bat_priv, vis_packet, 1196 batadv_receive_server_sync_packet(bat_priv, vis_packet,
1139 skb_headlen(skb)); 1197 skb_headlen(skb));
1140 break; 1198 break;
1141 1199
1142 case VIS_TYPE_CLIENT_UPDATE: 1200 case BATADV_VIS_TYPE_CLIENT_UPDATE:
1143 receive_client_update_packet(bat_priv, vis_packet, 1201 batadv_receive_client_update_packet(bat_priv, vis_packet,
1144 skb_headlen(skb)); 1202 skb_headlen(skb));
1145 break; 1203 break;
1146 1204
1147 default: /* ignore unknown packet */ 1205 default: /* ignore unknown packet */
@@ -1149,6 +1207,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1149 } 1207 }
1150 1208
1151 /* We take a copy of the data in the packet, so we should 1209 /* We take a copy of the data in the packet, so we should
 1152 always free the skb. */ 1210 * always free the skb.
1211 */
1153 return NET_RX_DROP; 1212 return NET_RX_DROP;
1154} 1213}
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index d6bbbebb6567..9262279ea667 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,36 +15,45 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ROUTING_H_ 20#ifndef _NET_BATMAN_ADV_ROUTING_H_
23#define _NET_BATMAN_ADV_ROUTING_H_ 21#define _NET_BATMAN_ADV_ROUTING_H_
24 22
25void slide_own_bcast_window(struct hard_iface *hard_iface); 23void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface);
26bool check_management_packet(struct sk_buff *skb, 24bool batadv_check_management_packet(struct sk_buff *skb,
27 struct hard_iface *hard_iface, 25 struct batadv_hard_iface *hard_iface,
28 int header_len); 26 int header_len);
29void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 27void batadv_update_route(struct batadv_priv *bat_priv,
30 struct neigh_node *neigh_node); 28 struct batadv_orig_node *orig_node,
31int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 29 struct batadv_neigh_node *neigh_node);
32int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 30int batadv_recv_icmp_packet(struct sk_buff *skb,
33int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 31 struct batadv_hard_iface *recv_if);
34int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int batadv_recv_unicast_packet(struct sk_buff *skb,
35int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33 struct batadv_hard_iface *recv_if);
36int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); 34int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
37int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); 35 struct batadv_hard_iface *recv_if);
38struct neigh_node *find_router(struct bat_priv *bat_priv, 36int batadv_recv_bcast_packet(struct sk_buff *skb,
39 struct orig_node *orig_node, 37 struct batadv_hard_iface *recv_if);
40 const struct hard_iface *recv_if); 38int batadv_recv_vis_packet(struct sk_buff *skb,
41void bonding_candidate_del(struct orig_node *orig_node, 39 struct batadv_hard_iface *recv_if);
42 struct neigh_node *neigh_node); 40int batadv_recv_tt_query(struct sk_buff *skb,
43void bonding_candidate_add(struct orig_node *orig_node, 41 struct batadv_hard_iface *recv_if);
44 struct neigh_node *neigh_node); 42int batadv_recv_roam_adv(struct sk_buff *skb,
45void bonding_save_primary(const struct orig_node *orig_node, 43 struct batadv_hard_iface *recv_if);
46 struct orig_node *orig_neigh_node, 44struct batadv_neigh_node *
47 const struct batman_ogm_packet *batman_ogm_packet); 45batadv_find_router(struct batadv_priv *bat_priv,
48int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 46 struct batadv_orig_node *orig_node,
49 unsigned long *last_reset); 47 const struct batadv_hard_iface *recv_if);
48void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
49 struct batadv_neigh_node *neigh_node);
50void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
51 struct batadv_neigh_node *neigh_node);
52void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
53 struct batadv_orig_node *orig_neigh_node,
54 const struct batadv_ogm_packet
55 *batman_ogm_packet);
56int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
57 unsigned long *last_reset);
50 58
51#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ 59#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f22c68..3b4b2daa3b3e 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -29,16 +27,18 @@
29#include "gateway_common.h" 27#include "gateway_common.h"
30#include "originator.h" 28#include "originator.h"
31 29
32static void send_outstanding_bcast_packet(struct work_struct *work); 30static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
33 31
34/* send out an already prepared packet to the given address via the 32/* send out an already prepared packet to the given address via the
35 * specified batman interface */ 33 * specified batman interface
36int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 34 */
37 const uint8_t *dst_addr) 35int batadv_send_skb_packet(struct sk_buff *skb,
36 struct batadv_hard_iface *hard_iface,
37 const uint8_t *dst_addr)
38{ 38{
39 struct ethhdr *ethhdr; 39 struct ethhdr *ethhdr;
40 40
41 if (hard_iface->if_status != IF_ACTIVE) 41 if (hard_iface->if_status != BATADV_IF_ACTIVE)
42 goto send_skb_err; 42 goto send_skb_err;
43 43
44 if (unlikely(!hard_iface->net_dev)) 44 if (unlikely(!hard_iface->net_dev))
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
51 } 51 }
52 52
53 /* push to the ethernet header. */ 53 /* push to the ethernet header. */
54 if (my_skb_head_push(skb, ETH_HLEN) < 0) 54 if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
55 goto send_skb_err; 55 goto send_skb_err;
56 56
57 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
@@ -59,129 +59,57 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
59 ethhdr = (struct ethhdr *)skb_mac_header(skb); 59 ethhdr = (struct ethhdr *)skb_mac_header(skb);
60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); 60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
62 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 62 ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
63 63
64 skb_set_network_header(skb, ETH_HLEN); 64 skb_set_network_header(skb, ETH_HLEN);
65 skb->priority = TC_PRIO_CONTROL; 65 skb->priority = TC_PRIO_CONTROL;
66 skb->protocol = __constant_htons(ETH_P_BATMAN); 66 skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
67 67
68 skb->dev = hard_iface->net_dev; 68 skb->dev = hard_iface->net_dev;
69 69
70 /* dev_queue_xmit() returns a negative result on error. However on 70 /* dev_queue_xmit() returns a negative result on error. However on
71 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 71 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
72 * (which is > 0). This will not be treated as an error. */ 72 * (which is > 0). This will not be treated as an error.
73 73 */
74 return dev_queue_xmit(skb); 74 return dev_queue_xmit(skb);
75send_skb_err: 75send_skb_err:
76 kfree_skb(skb); 76 kfree_skb(skb);
77 return NET_XMIT_DROP; 77 return NET_XMIT_DROP;
78} 78}
79 79
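batadv_send_skb_packet() prepends a 14-byte Ethernet header: source MAC from the outgoing hard interface, destination from the chosen neighbor, and the batman-adv ethertype 0x4305. The same construction without skbs, as a simplified sketch:

    /* Building the Ethernet header in front of a payload buffer. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define ETH_HLEN 14

    struct ethhdr_s {
        uint8_t  h_dest[ETH_ALEN];
        uint8_t  h_source[ETH_ALEN];
        uint16_t h_proto;
    } __attribute__((packed));

    int main(void)
    {
        uint8_t frame[ETH_HLEN + 64];
        uint8_t *payload = frame + ETH_HLEN;    /* headroom already reserved */
        struct ethhdr_s *eth = (struct ethhdr_s *)frame;

        memset(payload, 0, 64);                 /* stand-in batman payload */
        memcpy(eth->h_source, (uint8_t[]){ 0x02, 0, 0, 0, 0, 1 }, ETH_ALEN);
        memcpy(eth->h_dest,   (uint8_t[]){ 0x02, 0, 0, 0, 0, 2 }, ETH_ALEN);
        eth->h_proto = htons(0x4305);           /* ETH_P_BATMAN */

        printf("frame ready: %d byte header + payload\n", ETH_HLEN);
        return 0;
    }
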
80static void realloc_packet_buffer(struct hard_iface *hard_iface, 80void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
81 int new_len)
82{ 81{
83 unsigned char *new_buff; 82 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
84
85 new_buff = kmalloc(new_len, GFP_ATOMIC);
86
87 /* keep old buffer if kmalloc should fail */
88 if (new_buff) {
89 memcpy(new_buff, hard_iface->packet_buff,
90 BATMAN_OGM_HLEN);
91
92 kfree(hard_iface->packet_buff);
93 hard_iface->packet_buff = new_buff;
94 hard_iface->packet_len = new_len;
95 }
96}
97 83
98/* when calling this function (hard_iface == primary_if) has to be true */ 84 if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
99static int prepare_packet_buffer(struct bat_priv *bat_priv, 85 (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
100 struct hard_iface *hard_iface)
101{
102 int new_len;
103
104 new_len = BATMAN_OGM_HLEN +
105 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
106
107 /* if we have too many changes for one packet don't send any
108 * and wait for the tt table request which will be fragmented */
109 if (new_len > hard_iface->soft_iface->mtu)
110 new_len = BATMAN_OGM_HLEN;
111
112 realloc_packet_buffer(hard_iface, new_len);
113
114 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
115
116 /* reset the sending counter */
117 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
118
119 return tt_changes_fill_buffer(bat_priv,
120 hard_iface->packet_buff + BATMAN_OGM_HLEN,
121 hard_iface->packet_len - BATMAN_OGM_HLEN);
122}
123
124static int reset_packet_buffer(struct bat_priv *bat_priv,
125 struct hard_iface *hard_iface)
126{
127 realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
128 return 0;
129}
130
131void schedule_bat_ogm(struct hard_iface *hard_iface)
132{
133 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
134 struct hard_iface *primary_if;
135 int tt_num_changes = -1;
136
137 if ((hard_iface->if_status == IF_NOT_IN_USE) ||
138 (hard_iface->if_status == IF_TO_BE_REMOVED))
139 return; 86 return;
140 87
141 /** 88 /* the interface gets activated here to avoid race conditions between
142 * the interface gets activated here to avoid race conditions between
143 * the moment of activating the interface in 89 * the moment of activating the interface in
144 * hardif_activate_interface() where the originator mac is set and 90 * hardif_activate_interface() where the originator mac is set and
145 * outdated packets (especially uninitialized mac addresses) in the 91 * outdated packets (especially uninitialized mac addresses) in the
146 * packet queue 92 * packet queue
147 */ 93 */
148 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 94 if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
149 hard_iface->if_status = IF_ACTIVE; 95 hard_iface->if_status = BATADV_IF_ACTIVE;
150
151 primary_if = primary_if_get_selected(bat_priv);
152
153 if (hard_iface == primary_if) {
154 /* if at least one change happened */
155 if (atomic_read(&bat_priv->tt_local_changes) > 0) {
156 tt_commit_changes(bat_priv);
157 tt_num_changes = prepare_packet_buffer(bat_priv,
158 hard_iface);
159 }
160
161 /* if the changes have been sent often enough */
162 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
163 tt_num_changes = reset_packet_buffer(bat_priv,
164 hard_iface);
165 }
166
167 if (primary_if)
168 hardif_free_ref(primary_if);
169 96
170 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes); 97 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
171} 98}
172 99
173static void forw_packet_free(struct forw_packet *forw_packet) 100static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
174{ 101{
175 if (forw_packet->skb) 102 if (forw_packet->skb)
176 kfree_skb(forw_packet->skb); 103 kfree_skb(forw_packet->skb);
177 if (forw_packet->if_incoming) 104 if (forw_packet->if_incoming)
178 hardif_free_ref(forw_packet->if_incoming); 105 batadv_hardif_free_ref(forw_packet->if_incoming);
179 kfree(forw_packet); 106 kfree(forw_packet);
180} 107}
181 108
182static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, 109static void
183 struct forw_packet *forw_packet, 110_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
184 unsigned long send_time) 111 struct batadv_forw_packet *forw_packet,
112 unsigned long send_time)
185{ 113{
186 INIT_HLIST_NODE(&forw_packet->list); 114 INIT_HLIST_NODE(&forw_packet->list);
187 115
@@ -192,8 +120,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
192 120
193 /* start timer for this packet */ 121 /* start timer for this packet */
194 INIT_DELAYED_WORK(&forw_packet->delayed_work, 122 INIT_DELAYED_WORK(&forw_packet->delayed_work,
195 send_outstanding_bcast_packet); 123 batadv_send_outstanding_bcast_packet);
196 queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work, 124 queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
197 send_time); 125 send_time);
198} 126}
199 127
@@ -204,21 +132,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
204 * errors. 132 * errors.
205 * 133 *
206 * The skb is not consumed, so the caller should make sure that the 134 * The skb is not consumed, so the caller should make sure that the
207 * skb is freed. */ 135 * skb is freed.
208int add_bcast_packet_to_list(struct bat_priv *bat_priv, 136 */
209 const struct sk_buff *skb, unsigned long delay) 137int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
138 const struct sk_buff *skb,
139 unsigned long delay)
210{ 140{
211 struct hard_iface *primary_if = NULL; 141 struct batadv_hard_iface *primary_if = NULL;
212 struct forw_packet *forw_packet; 142 struct batadv_forw_packet *forw_packet;
213 struct bcast_packet *bcast_packet; 143 struct batadv_bcast_packet *bcast_packet;
214 struct sk_buff *newskb; 144 struct sk_buff *newskb;
215 145
216 if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { 146 if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
217 bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n"); 147 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
148 "bcast packet queue full\n");
218 goto out; 149 goto out;
219 } 150 }
220 151
221 primary_if = primary_if_get_selected(bat_priv); 152 primary_if = batadv_primary_if_get_selected(bat_priv);
222 if (!primary_if) 153 if (!primary_if)
223 goto out_and_inc; 154 goto out_and_inc;
224 155
@@ -232,7 +163,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
232 goto packet_free; 163 goto packet_free;
233 164
234 /* as we have a copy now, it is safe to decrease the TTL */ 165 /* as we have a copy now, it is safe to decrease the TTL */
235 bcast_packet = (struct bcast_packet *)newskb->data; 166 bcast_packet = (struct batadv_bcast_packet *)newskb->data;
236 bcast_packet->header.ttl--; 167 bcast_packet->header.ttl--;
237 168
238 skb_reset_mac_header(newskb); 169 skb_reset_mac_header(newskb);
@@ -243,7 +174,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
 243 /* how often did we send the bcast packet? */ 174 /* how often did we send the bcast packet? */
244 forw_packet->num_packets = 0; 175 forw_packet->num_packets = 0;
245 176
246 _add_bcast_packet_to_list(bat_priv, forw_packet, delay); 177 _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
247 return NETDEV_TX_OK; 178 return NETDEV_TX_OK;
248 179
249packet_free: 180packet_free:
@@ -252,38 +183,43 @@ out_and_inc:
252 atomic_inc(&bat_priv->bcast_queue_left); 183 atomic_inc(&bat_priv->bcast_queue_left);
253out: 184out:
254 if (primary_if) 185 if (primary_if)
255 hardif_free_ref(primary_if); 186 batadv_hardif_free_ref(primary_if);
256 return NETDEV_TX_BUSY; 187 return NETDEV_TX_BUSY;
257} 188}
258 189
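batadv_add_bcast_packet_to_list() gates admission on a free-slot counter: decrement it only while it is still positive, and give the slot back on every failure path. A C11-atomics model of that batadv_atomic_dec_not_zero() pattern:

    /* Bounded queue admission with a try-decrement counter. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int bcast_queue_left = 3;  /* free slots */

    static bool atomic_dec_not_zero(atomic_int *v)
    {
        int cur = atomic_load(v);

        while (cur > 0) {
            /* on failure, cur is reloaded and the check repeats */
            if (atomic_compare_exchange_weak(v, &cur, cur - 1))
                return true;    /* took a slot */
        }
        return false;           /* queue full */
    }

    static bool enqueue_bcast(bool alloc_fails)
    {
        if (!atomic_dec_not_zero(&bcast_queue_left))
            return false;       /* "bcast packet queue full" */
        if (alloc_fails) {
            atomic_fetch_add(&bcast_queue_left, 1);  /* give the slot back */
            return false;
        }
        return true;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("enqueue %d: %s\n", i, enqueue_bcast(false) ? "ok" : "busy");
        return 0;
    }
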
259static void send_outstanding_bcast_packet(struct work_struct *work) 190static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
260{ 191{
261 struct hard_iface *hard_iface; 192 struct batadv_hard_iface *hard_iface;
262 struct delayed_work *delayed_work = 193 struct delayed_work *delayed_work =
263 container_of(work, struct delayed_work, work); 194 container_of(work, struct delayed_work, work);
264 struct forw_packet *forw_packet = 195 struct batadv_forw_packet *forw_packet;
265 container_of(delayed_work, struct forw_packet, delayed_work);
266 struct sk_buff *skb1; 196 struct sk_buff *skb1;
267 struct net_device *soft_iface = forw_packet->if_incoming->soft_iface; 197 struct net_device *soft_iface;
268 struct bat_priv *bat_priv = netdev_priv(soft_iface); 198 struct batadv_priv *bat_priv;
199
200 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
201 delayed_work);
202 soft_iface = forw_packet->if_incoming->soft_iface;
203 bat_priv = netdev_priv(soft_iface);
269 204
270 spin_lock_bh(&bat_priv->forw_bcast_list_lock); 205 spin_lock_bh(&bat_priv->forw_bcast_list_lock);
271 hlist_del(&forw_packet->list); 206 hlist_del(&forw_packet->list);
272 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 207 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
273 208
274 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 209 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
275 goto out; 210 goto out;
276 211
277 /* rebroadcast packet */ 212 /* rebroadcast packet */
278 rcu_read_lock(); 213 rcu_read_lock();
279 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 214 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
280 if (hard_iface->soft_iface != soft_iface) 215 if (hard_iface->soft_iface != soft_iface)
281 continue; 216 continue;
282 217
283 /* send a copy of the saved skb */ 218 /* send a copy of the saved skb */
284 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); 219 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
285 if (skb1) 220 if (skb1)
286 send_skb_packet(skb1, hard_iface, broadcast_addr); 221 batadv_send_skb_packet(skb1, hard_iface,
222 batadv_broadcast_addr);
287 } 223 }
288 rcu_read_unlock(); 224 rcu_read_unlock();
289 225
@@ -291,72 +227,72 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
 	/* if we still have some more bcasts to send */
 	if (forw_packet->num_packets < 3) {
-		_add_bcast_packet_to_list(bat_priv, forw_packet,
-					  msecs_to_jiffies(5));
+		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
+						 msecs_to_jiffies(5));
 		return;
 	}
 
 out:
-	forw_packet_free(forw_packet);
+	batadv_forw_packet_free(forw_packet);
 	atomic_inc(&bat_priv->bcast_queue_left);
 }
 
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
-	struct forw_packet *forw_packet =
-		container_of(delayed_work, struct forw_packet, delayed_work);
-	struct bat_priv *bat_priv;
+	struct batadv_forw_packet *forw_packet;
+	struct batadv_priv *bat_priv;
 
+	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+				   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
 
 	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
 
-	/**
-	 * we have to have at least one packet in the queue
+	/* we have to have at least one packet in the queue
 	 * to determine the queues wake up time unless we are
 	 * shutting down
 	 */
 	if (forw_packet->own)
-		schedule_bat_ogm(forw_packet->if_incoming);
+		batadv_schedule_bat_ogm(forw_packet->if_incoming);
 
 out:
 	/* don't count own packet */
 	if (!forw_packet->own)
 		atomic_inc(&bat_priv->batman_queue_left);
 
-	forw_packet_free(forw_packet);
+	batadv_forw_packet_free(forw_packet);
 }
 
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       const struct hard_iface *hard_iface)
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+				 const struct batadv_hard_iface *hard_iface)
 {
-	struct forw_packet *forw_packet;
+	struct batadv_forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
 	bool pending;
 
 	if (hard_iface)
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets(): %s\n",
-			hard_iface->net_dev->name);
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "purge_outstanding_packets(): %s\n",
+			   hard_iface->net_dev->name);
 	else
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets()\n");
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "purge_outstanding_packets()\n");
 
 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &bat_priv->forw_bcast_list, list) {
 
-		/**
-		 * if purge_outstanding_packets() was called with an argument
+		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
@@ -365,8 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
-		/**
-		 * send_outstanding_bcast_packet() will lock the list to
+		/* batadv_send_outstanding_bcast_packet() will lock the list to
 		 * delete the item from the list
 		 */
 		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +309,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
-			forw_packet_free(forw_packet);
+			batadv_forw_packet_free(forw_packet);
 		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +319,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &bat_priv->forw_bat_list, list) {
 
-		/**
-		 * if purge_outstanding_packets() was called with an argument
+		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
@@ -394,8 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-		/**
-		 * send_outstanding_bat_packet() will lock the list to
+		/* send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
 		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +336,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
-			forw_packet_free(forw_packet);
+			batadv_forw_packet_free(forw_packet);
 		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
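
(Note on the rebroadcast logic kept by the hunks above: a queued broadcast is re-armed through its delayed work until num_packets reaches 3, with msecs_to_jiffies(5) between copies; the counter increment itself sits outside the hunks shown here. The stand-alone C sketch below models only that re-arm loop; demo_forw_packet and demo_send_outstanding are invented names for illustration, not kernel API.)

#include <stdio.h>

#define REBROADCAST_COUNT 3   /* same limit the kernel code checks */
#define REBROADCAST_GAP_MS 5  /* msecs_to_jiffies(5) in the kernel */

struct demo_forw_packet {
	int num_packets; /* copies sent so far */
};

/* Stand-in for the delayed-work handler: send one copy, then report
 * whether the timer would be re-armed or the packet freed. */
static int demo_send_outstanding(struct demo_forw_packet *fp)
{
	printf("copy %d sent on all interfaces\n", fp->num_packets + 1);
	fp->num_packets++;
	if (fp->num_packets < REBROADCAST_COUNT) {
		printf("re-armed, next copy in %d ms\n", REBROADCAST_GAP_MS);
		return 1; /* still pending */
	}
	return 0; /* done, the kernel would free the packet here */
}

int main(void)
{
	struct demo_forw_packet fp = { .num_packets = 0 };

	while (demo_send_outstanding(&fp))
		; /* the kernel waits via delayed work; we just loop */
	return 0;
}
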
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 824ef06f9b01..643329b787ed 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -16,19 +15,21 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
-		    const uint8_t *dst_addr);
-void schedule_bat_ogm(struct hard_iface *hard_iface);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
-			     const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_ogm_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       const struct hard_iface *hard_iface);
+int batadv_send_skb_packet(struct sk_buff *skb,
+			   struct batadv_hard_iface *hard_iface,
+			   const uint8_t *dst_addr);
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+				    const struct sk_buff *skb,
+				    unsigned long delay);
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+				 const struct batadv_hard_iface *hard_iface);
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6e2530b02043..109ea2aae96c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -24,12 +22,12 @@
24#include "hard-interface.h" 22#include "hard-interface.h"
25#include "routing.h" 23#include "routing.h"
26#include "send.h" 24#include "send.h"
27#include "bat_debugfs.h" 25#include "debugfs.h"
28#include "translation-table.h" 26#include "translation-table.h"
29#include "hash.h" 27#include "hash.h"
30#include "gateway_common.h" 28#include "gateway_common.h"
31#include "gateway_client.h" 29#include "gateway_client.h"
32#include "bat_sysfs.h" 30#include "sysfs.h"
33#include "originator.h" 31#include "originator.h"
34#include <linux/slab.h> 32#include <linux/slab.h>
35#include <linux/ethtool.h> 33#include <linux/ethtool.h>
@@ -39,27 +37,33 @@
39#include "bridge_loop_avoidance.h" 37#include "bridge_loop_avoidance.h"
40 38
41 39
42static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 40static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
43static void bat_get_drvinfo(struct net_device *dev, 41static void batadv_get_drvinfo(struct net_device *dev,
44 struct ethtool_drvinfo *info); 42 struct ethtool_drvinfo *info);
45static u32 bat_get_msglevel(struct net_device *dev); 43static u32 batadv_get_msglevel(struct net_device *dev);
46static void bat_set_msglevel(struct net_device *dev, u32 value); 44static void batadv_set_msglevel(struct net_device *dev, u32 value);
47static u32 bat_get_link(struct net_device *dev); 45static u32 batadv_get_link(struct net_device *dev);
48 46static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
49static const struct ethtool_ops bat_ethtool_ops = { 47static void batadv_get_ethtool_stats(struct net_device *dev,
50 .get_settings = bat_get_settings, 48 struct ethtool_stats *stats, u64 *data);
51 .get_drvinfo = bat_get_drvinfo, 49static int batadv_get_sset_count(struct net_device *dev, int stringset);
52 .get_msglevel = bat_get_msglevel, 50
53 .set_msglevel = bat_set_msglevel, 51static const struct ethtool_ops batadv_ethtool_ops = {
54 .get_link = bat_get_link, 52 .get_settings = batadv_get_settings,
53 .get_drvinfo = batadv_get_drvinfo,
54 .get_msglevel = batadv_get_msglevel,
55 .set_msglevel = batadv_set_msglevel,
56 .get_link = batadv_get_link,
57 .get_strings = batadv_get_strings,
58 .get_ethtool_stats = batadv_get_ethtool_stats,
59 .get_sset_count = batadv_get_sset_count,
55}; 60};
56 61
57int my_skb_head_push(struct sk_buff *skb, unsigned int len) 62int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
58{ 63{
59 int result; 64 int result;
60 65
61 /** 66 /* TODO: We must check if we can release all references to non-payload
62 * TODO: We must check if we can release all references to non-payload
63 * data using skb_header_release in our skbs to allow skb_cow_header to 67 * data using skb_header_release in our skbs to allow skb_cow_header to
64 * work optimally. This means that those skbs are not allowed to read 68 * work optimally. This means that those skbs are not allowed to read
65 * or write any data which is before the current position of skb->data 69 * or write any data which is before the current position of skb->data
@@ -74,37 +78,37 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 	return 0;
 }
 
-static int interface_open(struct net_device *dev)
+static int batadv_interface_open(struct net_device *dev)
 {
 	netif_start_queue(dev);
 	return 0;
 }
 
-static int interface_release(struct net_device *dev)
+static int batadv_interface_release(struct net_device *dev)
 {
 	netif_stop_queue(dev);
 	return 0;
 }
 
-static struct net_device_stats *interface_stats(struct net_device *dev)
+static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
-	struct bat_priv *bat_priv = netdev_priv(dev);
+	struct batadv_priv *bat_priv = netdev_priv(dev);
 	return &bat_priv->stats;
 }
 
-static int interface_set_mac_addr(struct net_device *dev, void *p)
+static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 {
-	struct bat_priv *bat_priv = netdev_priv(dev);
+	struct batadv_priv *bat_priv = netdev_priv(dev);
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* only modify transtable if it has been initialized before */
-	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
-		tt_local_remove(bat_priv, dev->dev_addr,
-				"mac address changed", false);
-		tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
+	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
+		batadv_tt_local_remove(bat_priv, dev->dev_addr,
+				       "mac address changed", false);
+		batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
 	}
 
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -112,10 +116,10 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }
 
-static int interface_change_mtu(struct net_device *dev, int new_mtu)
+static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 {
 	/* check ranges */
-	if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
+	if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
@@ -123,13 +127,15 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int batadv_interface_tx(struct sk_buff *skb,
+			       struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct hard_iface *primary_if = NULL;
-	struct bcast_packet *bcast_packet;
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
+	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
 	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
 						   0x00};
 	unsigned int header_len = 0;
@@ -137,7 +143,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 	short vid __maybe_unused = -1;
 	bool do_bcast = false;
 
-	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
 
 	soft_iface->trans_start = jiffies;
@@ -147,45 +153,47 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 		vhdr = (struct vlan_ethhdr *)skb->data;
 		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
 
-		if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+		if (vhdr->h_vlan_encapsulated_proto != ethertype)
 			break;
 
 		/* fall through */
-	case ETH_P_BATMAN:
+	case BATADV_ETH_P_BATMAN:
 		goto dropped;
 	}
 
-	if (bla_tx(bat_priv, skb, vid))
+	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
 	/* Register the client MAC in the transtable */
-	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+	batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
 	/* don't accept stp packets. STP does not help in meshes.
 	 * better use the bridge loop avoidance ...
 	 */
-	if (compare_eth(ethhdr->h_dest, stp_addr))
+	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
 		goto dropped;
 
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		do_bcast = true;
 
 		switch (atomic_read(&bat_priv->gw_mode)) {
-		case GW_MODE_SERVER:
+		case BATADV_GW_MODE_SERVER:
 			/* gateway servers should not send dhcp
-			 * requests into the mesh */
-			ret = gw_is_dhcp_target(skb, &header_len);
+			 * requests into the mesh
+			 */
+			ret = batadv_gw_is_dhcp_target(skb, &header_len);
 			if (ret)
 				goto dropped;
 			break;
-		case GW_MODE_CLIENT:
+		case BATADV_GW_MODE_CLIENT:
 			/* gateway clients should send dhcp requests
-			 * via unicast to their gateway */
-			ret = gw_is_dhcp_target(skb, &header_len);
+			 * via unicast to their gateway
+			 */
+			ret = batadv_gw_is_dhcp_target(skb, &header_len);
 			if (ret)
 				do_bcast = false;
 			break;
-		case GW_MODE_OFF:
+		case BATADV_GW_MODE_OFF:
 		default:
 			break;
 		}
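
(The ethertype checks rewritten in this hunk trade a per-packet ntohs() for a single __be16 prepared once with __constant_htons(), so big-endian values are compared directly. The small user-space C sketch below demonstrates why the two comparisons are equivalent; DEMO_ETH_P_BATMAN is an invented name, and 0x4305 mirrors the batman ethertype value.)

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ETH_P_BATMAN 0x4305 /* batman ethertype, host byte order */

int main(void)
{
	/* field as it sits on the wire (big-endian), like
	 * vhdr->h_vlan_encapsulated_proto in the kernel code */
	uint16_t wire = htons(DEMO_ETH_P_BATMAN);

	/* old style: byte-swap the packet field for every comparison */
	int old_match = (ntohs(wire) == DEMO_ETH_P_BATMAN);

	/* new style: swap the constant once (the kernel uses
	 * __constant_htons for this), then compare raw values */
	uint16_t ethertype = htons(DEMO_ETH_P_BATMAN);
	int new_match = (wire == ethertype);

	assert(old_match && new_match);
	printf("comparisons agree: %d == %d\n", old_match, new_match);
	return 0;
}

Since the swapped constant folds at compile time, the fast path is reduced to a single 16-bit compare.
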
@@ -193,22 +201,24 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 
 	/* ethernet packet should be broadcasted */
 	if (do_bcast) {
-		primary_if = primary_if_get_selected(bat_priv);
+		primary_if = batadv_primary_if_get_selected(bat_priv);
 		if (!primary_if)
 			goto dropped;
 
-		if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 			goto dropped;
 
-		bcast_packet = (struct bcast_packet *)skb->data;
-		bcast_packet->header.version = COMPAT_VERSION;
-		bcast_packet->header.ttl = TTL;
+		bcast_packet = (struct batadv_bcast_packet *)skb->data;
+		bcast_packet->header.version = BATADV_COMPAT_VERSION;
+		bcast_packet->header.ttl = BATADV_TTL;
 
 		/* batman packet type: broadcast */
-		bcast_packet->header.packet_type = BAT_BCAST;
+		bcast_packet->header.packet_type = BATADV_BCAST;
+		bcast_packet->reserved = 0;
 
 		/* hw address of first interface is the orig mac because only
-		 * this mac is known throughout the mesh */
+		 * this mac is known throughout the mesh
+		 */
 		memcpy(bcast_packet->orig,
 		       primary_if->net_dev->dev_addr, ETH_ALEN);
 
@@ -216,21 +226,22 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 		bcast_packet->seqno =
 			htonl(atomic_inc_return(&bat_priv->bcast_seqno));
 
-		add_bcast_packet_to_list(bat_priv, skb, 1);
+		batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 
 		/* a copy is stored in the bcast list, therefore removing
-		 * the original skb. */
+		 * the original skb.
+		 */
 		kfree_skb(skb);
 
 	/* unicast packet */
 	} else {
-		if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
-			ret = gw_out_of_range(bat_priv, skb, ethhdr);
+		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
+			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
 			if (ret)
 				goto dropped;
 		}
 
-		ret = unicast_send_skb(skb, bat_priv);
+		ret = batadv_unicast_send_skb(skb, bat_priv);
 		if (ret != 0)
 			goto dropped_freed;
 	}
@@ -245,18 +256,23 @@ dropped_freed:
 	bat_priv->stats.tx_dropped++;
 end:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	return NETDEV_TX_OK;
 }
 
-void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, struct hard_iface *recv_if,
-		  int hdr_size)
+void batadv_interface_rx(struct net_device *soft_iface,
+			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+			 int hdr_size)
 {
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
 	short vid __maybe_unused = -1;
+	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -272,11 +288,11 @@ void interface_rx(struct net_device *soft_iface,
 		vhdr = (struct vlan_ethhdr *)skb->data;
 		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
 
-		if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+		if (vhdr->h_vlan_encapsulated_proto != ethertype)
 			break;
 
 		/* fall through */
-	case ETH_P_BATMAN:
+	case BATADV_ETH_P_BATMAN:
 		goto dropped;
 	}
 
@@ -287,22 +303,23 @@ void interface_rx(struct net_device *soft_iface,
 
 	/* should not be necessary anymore as we use skb_pull_rcsum()
 	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich */
+	 * -- Dec 21st 2009, Simon Wunderlich
+	 */
 
-/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
+	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
 
 	bat_priv->stats.rx_packets++;
 	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	soft_iface->last_rx = jiffies;
 
-	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 		goto dropped;
 
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (bla_rx(bat_priv, skb, vid))
+	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 
 	netif_rx(skb);
@@ -314,49 +331,50 @@ out:
 	return;
 }
 
-static const struct net_device_ops bat_netdev_ops = {
-	.ndo_open = interface_open,
-	.ndo_stop = interface_release,
-	.ndo_get_stats = interface_stats,
-	.ndo_set_mac_address = interface_set_mac_addr,
-	.ndo_change_mtu = interface_change_mtu,
-	.ndo_start_xmit = interface_tx,
+static const struct net_device_ops batadv_netdev_ops = {
+	.ndo_open = batadv_interface_open,
+	.ndo_stop = batadv_interface_release,
+	.ndo_get_stats = batadv_interface_stats,
+	.ndo_set_mac_address = batadv_interface_set_mac_addr,
+	.ndo_change_mtu = batadv_interface_change_mtu,
+	.ndo_start_xmit = batadv_interface_tx,
 	.ndo_validate_addr = eth_validate_addr
 };
 
-static void interface_setup(struct net_device *dev)
+static void batadv_interface_setup(struct net_device *dev)
 {
-	struct bat_priv *priv = netdev_priv(dev);
+	struct batadv_priv *priv = netdev_priv(dev);
 
 	ether_setup(dev);
 
-	dev->netdev_ops = &bat_netdev_ops;
+	dev->netdev_ops = &batadv_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->tx_queue_len = 0;
 
-	/**
-	 * can't call min_mtu, because the needed variables
+	/* can't call min_mtu, because the needed variables
 	 * have not been initialized yet
 	 */
 	dev->mtu = ETH_DATA_LEN;
 	/* reserve more space in the skbuff for our header */
-	dev->hard_header_len = BAT_HEADER_LEN;
+	dev->hard_header_len = BATADV_HEADER_LEN;
 
 	/* generate random address */
 	eth_hw_addr_random(dev);
 
-	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
+	SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
 
 	memset(priv, 0, sizeof(*priv));
 }
 
-struct net_device *softif_create(const char *name)
+struct net_device *batadv_softif_create(const char *name)
 {
 	struct net_device *soft_iface;
-	struct bat_priv *bat_priv;
+	struct batadv_priv *bat_priv;
 	int ret;
+	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
 
-	soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
+	soft_iface = alloc_netdev(sizeof(*bat_priv), name,
+				  batadv_interface_setup);
 
 	if (!soft_iface)
 		goto out;
@@ -374,18 +392,18 @@ struct net_device *softif_create(const char *name)
 	atomic_set(&bat_priv->bonding, 0);
 	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
 	atomic_set(&bat_priv->ap_isolation, 0);
-	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
-	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
+	atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
+	atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
 	atomic_set(&bat_priv->gw_sel_class, 20);
 	atomic_set(&bat_priv->gw_bandwidth, 41);
 	atomic_set(&bat_priv->orig_interval, 1000);
 	atomic_set(&bat_priv->hop_penalty, 30);
 	atomic_set(&bat_priv->log_level, 0);
 	atomic_set(&bat_priv->fragmentation, 1);
-	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
-	atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
+	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
-	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 	atomic_set(&bat_priv->bcast_seqno, 1);
 	atomic_set(&bat_priv->ttvn, 0);
 	atomic_set(&bat_priv->tt_local_changes, 0);
@@ -399,28 +417,34 @@ struct net_device *softif_create(const char *name)
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
 
-	ret = bat_algo_select(bat_priv, bat_routing_algo);
-	if (ret < 0)
-		goto unreg_soft_iface;
+	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+	if (!bat_priv->bat_counters)
+		goto unreg_soft_iface;
 
-	ret = sysfs_add_meshif(soft_iface);
-	if (ret < 0)
-		goto unreg_soft_iface;
+	ret = batadv_algo_select(bat_priv, batadv_routing_algo);
+	if (ret < 0)
+		goto free_bat_counters;
 
-	ret = debugfs_add_meshif(soft_iface);
+	ret = batadv_sysfs_add_meshif(soft_iface);
+	if (ret < 0)
+		goto free_bat_counters;
+
+	ret = batadv_debugfs_add_meshif(soft_iface);
 	if (ret < 0)
 		goto unreg_sysfs;
 
-	ret = mesh_init(soft_iface);
+	ret = batadv_mesh_init(soft_iface);
 	if (ret < 0)
 		goto unreg_debugfs;
 
 	return soft_iface;
 
 unreg_debugfs:
-	debugfs_del_meshif(soft_iface);
+	batadv_debugfs_del_meshif(soft_iface);
 unreg_sysfs:
-	sysfs_del_meshif(soft_iface);
+	batadv_sysfs_del_meshif(soft_iface);
+free_bat_counters:
+	free_percpu(bat_priv->bat_counters);
 unreg_soft_iface:
 	unregister_netdevice(soft_iface);
 	return NULL;
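
(The softif_create() hunk above threads the new free_bat_counters label into the existing goto unwind ladder, so a failure after the per-CPU allocation releases the counters before the device is unregistered. Below is a compact user-space sketch of the same error-unwind idiom, with invented resource names.)

#include <stdio.h>
#include <stdlib.h>

/* Acquire resources in order; on failure, jump to the label that
 * releases everything acquired so far, mirroring how the patch
 * slots free_bat_counters between sysfs setup and unregistration. */
static int demo_create(void)
{
	void *counters = NULL, *sysfs = NULL;

	counters = malloc(64);
	if (!counters)
		goto err_out;

	sysfs = malloc(64);
	if (!sysfs)
		goto err_free_counters; /* counters already exist: free them */

	printf("all resources acquired\n");
	free(sysfs);
	free(counters);
	return 0;

err_free_counters:
	free(counters);
err_out:
	return -1;
}

int main(void)
{
	return demo_create() ? 1 : 0;
}
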
@@ -431,24 +455,24 @@ out:
 	return NULL;
 }
 
-void softif_destroy(struct net_device *soft_iface)
+void batadv_softif_destroy(struct net_device *soft_iface)
 {
-	debugfs_del_meshif(soft_iface);
-	sysfs_del_meshif(soft_iface);
-	mesh_free(soft_iface);
+	batadv_debugfs_del_meshif(soft_iface);
+	batadv_sysfs_del_meshif(soft_iface);
+	batadv_mesh_free(soft_iface);
 	unregister_netdevice(soft_iface);
 }
 
-int softif_is_valid(const struct net_device *net_dev)
+int batadv_softif_is_valid(const struct net_device *net_dev)
 {
-	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
 		return 1;
 
 	return 0;
 }
 
 /* ethtool */
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	cmd->supported = 0;
 	cmd->advertising = 0;
@@ -464,25 +488,73 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return 0;
 }
 
-static void bat_get_drvinfo(struct net_device *dev,
-			    struct ethtool_drvinfo *info)
+static void batadv_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
 {
 	strcpy(info->driver, "B.A.T.M.A.N. advanced");
-	strcpy(info->version, SOURCE_VERSION);
+	strcpy(info->version, BATADV_SOURCE_VERSION);
 	strcpy(info->fw_version, "N/A");
 	strcpy(info->bus_info, "batman");
 }
 
-static u32 bat_get_msglevel(struct net_device *dev)
+static u32 batadv_get_msglevel(struct net_device *dev)
 {
 	return -EOPNOTSUPP;
 }
 
-static void bat_set_msglevel(struct net_device *dev, u32 value)
+static void batadv_set_msglevel(struct net_device *dev, u32 value)
 {
 }
 
-static u32 bat_get_link(struct net_device *dev)
+static u32 batadv_get_link(struct net_device *dev)
 {
 	return 1;
 }
+
+/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
+ * Declare each description string in struct.name[] to get fixed sized buffer
+ * and compile time checking for strings longer than ETH_GSTRING_LEN.
+ */
+static const struct {
+	const char name[ETH_GSTRING_LEN];
+} batadv_counters_strings[] = {
+	{ "forward" },
+	{ "forward_bytes" },
+	{ "mgmt_tx" },
+	{ "mgmt_tx_bytes" },
+	{ "mgmt_rx" },
+	{ "mgmt_rx_bytes" },
+	{ "tt_request_tx" },
+	{ "tt_request_rx" },
+	{ "tt_response_tx" },
+	{ "tt_response_rx" },
+	{ "tt_roam_adv_tx" },
+	{ "tt_roam_adv_rx" },
+};
+
+static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
+			       uint8_t *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, batadv_counters_strings,
+		       sizeof(batadv_counters_strings));
+}
+
+static void batadv_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats,
+				     uint64_t *data)
+{
+	struct batadv_priv *bat_priv = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < BATADV_CNT_NUM; i++)
+		data[i] = batadv_sum_counter(bat_priv, i);
+}
+
+static int batadv_get_sset_count(struct net_device *dev, int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return BATADV_CNT_NUM;
+
+	return -EOPNOTSUPP;
+}
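
(batadv_get_ethtool_stats() above folds the new per-CPU counters into one value per statistic through batadv_sum_counter(), which is defined elsewhere in this series; the usual shape of such a helper is a sum over one uint64_t slot per CPU. The user-space model below sketches that read path under that assumption; CPU count and counter indices are invented for the demo.)

#include <stdint.h>
#include <stdio.h>

#define DEMO_NR_CPUS 4
#define DEMO_CNT_NUM 2 /* stands in for BATADV_CNT_NUM */

/* one counter array per "CPU": each writer touches only its own row,
 * so increments need no shared lock */
static uint64_t demo_counters[DEMO_NR_CPUS][DEMO_CNT_NUM];

/* read-side fold, modelled on what a batadv_sum_counter() style
 * helper has to do over the per-CPU slots */
static uint64_t demo_sum_counter(int idx)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		sum += demo_counters[cpu][idx];
	return sum;
}

int main(void)
{
	demo_counters[0][0] = 3; /* e.g. "forward" hits on CPU 0 */
	demo_counters[2][0] = 5; /* ... and on CPU 2 */

	printf("stat 0 total: %llu\n",
	       (unsigned long long)demo_sum_counter(0));
	return 0;
}
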
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 020300673884..852c683b06a1 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -16,18 +15,16 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
-int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, struct hard_iface *recv_if,
-		  int hdr_size);
-struct net_device *softif_create(const char *name);
-void softif_destroy(struct net_device *soft_iface);
-int softif_is_valid(const struct net_device *net_dev);
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
+void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
+			 struct batadv_hard_iface *recv_if, int hdr_size);
+struct net_device *batadv_softif_create(const char *name);
+void batadv_softif_destroy(struct net_device *soft_iface);
+int batadv_softif_is_valid(const struct net_device *net_dev);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
new file mode 100644
index 000000000000..66518c75c217
--- /dev/null
+++ b/net/batman-adv/sysfs.c
@@ -0,0 +1,787 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include "main.h"
21#include "sysfs.h"
22#include "translation-table.h"
23#include "originator.h"
24#include "hard-interface.h"
25#include "gateway_common.h"
26#include "gateway_client.h"
27#include "vis.h"
28
29static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
30{
31 struct device *dev = container_of(obj->parent, struct device, kobj);
32 return to_net_dev(dev);
33}
34
35static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
36{
37 struct net_device *net_dev = batadv_kobj_to_netdev(obj);
38 return netdev_priv(net_dev);
39}
40
41#define BATADV_UEV_TYPE_VAR "BATTYPE="
42#define BATADV_UEV_ACTION_VAR "BATACTION="
43#define BATADV_UEV_DATA_VAR "BATDATA="
44
45static char *batadv_uev_action_str[] = {
46 "add",
47 "del",
48 "change"
49};
50
51static char *batadv_uev_type_str[] = {
52 "gw"
53};
54
55/* Use this, if you have customized show and store functions */
56#define BATADV_ATTR(_name, _mode, _show, _store) \
57struct batadv_attribute batadv_attr_##_name = { \
58 .attr = {.name = __stringify(_name), \
59 .mode = _mode }, \
60 .show = _show, \
61 .store = _store, \
62};
63
64#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
65ssize_t batadv_store_##_name(struct kobject *kobj, \
66 struct attribute *attr, char *buff, \
67 size_t count) \
68{ \
69 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
70 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
71 return __batadv_store_bool_attr(buff, count, _post_func, attr, \
72 &bat_priv->_name, net_dev); \
73}
74
75#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
76ssize_t batadv_show_##_name(struct kobject *kobj, \
77 struct attribute *attr, char *buff) \
78{ \
79 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
80 return sprintf(buff, "%s\n", \
81 atomic_read(&bat_priv->_name) == 0 ? \
82 "disabled" : "enabled"); \
83} \
84
85/* Use this, if you are going to turn a [name] in the soft-interface
86 * (bat_priv) on or off
87 */
88#define BATADV_ATTR_SIF_BOOL(_name, _mode, _post_func) \
89 static BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
90 static BATADV_ATTR_SIF_SHOW_BOOL(_name) \
91 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
92 batadv_store_##_name)
93
94
95#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
96ssize_t batadv_store_##_name(struct kobject *kobj, \
97 struct attribute *attr, char *buff, \
98 size_t count) \
99{ \
100 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
101 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
102 return __batadv_store_uint_attr(buff, count, _min, _max, \
103 _post_func, attr, \
104 &bat_priv->_name, net_dev); \
105}
106
107#define BATADV_ATTR_SIF_SHOW_UINT(_name) \
108ssize_t batadv_show_##_name(struct kobject *kobj, \
109 struct attribute *attr, char *buff) \
110{ \
111 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
112 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
113} \
114
115/* Use this, if you are going to set [name] in the soft-interface
116 * (bat_priv) to an unsigned integer value
117 */
118#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
119 static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
120 static BATADV_ATTR_SIF_SHOW_UINT(_name) \
121 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
122 batadv_store_##_name)
123
124
125#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
126ssize_t batadv_store_##_name(struct kobject *kobj, \
127 struct attribute *attr, char *buff, \
128 size_t count) \
129{ \
130 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
131 struct batadv_hard_iface *hard_iface; \
132 ssize_t length; \
133 \
134 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
135 if (!hard_iface) \
136 return 0; \
137 \
138 length = __batadv_store_uint_attr(buff, count, _min, _max, \
139 _post_func, attr, \
140 &hard_iface->_name, net_dev); \
141 \
142 batadv_hardif_free_ref(hard_iface); \
143 return length; \
144}
145
146#define BATADV_ATTR_HIF_SHOW_UINT(_name) \
147ssize_t batadv_show_##_name(struct kobject *kobj, \
148 struct attribute *attr, char *buff) \
149{ \
150 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
151 struct batadv_hard_iface *hard_iface; \
152 ssize_t length; \
153 \
154 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
155 if (!hard_iface) \
156 return 0; \
157 \
158 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
159 \
160 batadv_hardif_free_ref(hard_iface); \
161 return length; \
162}
163
164/* Use this, if you are going to set [name] in hard_iface to an
165 * unsigned integer value
166 */
167#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
168 static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
169 static BATADV_ATTR_HIF_SHOW_UINT(_name) \
170 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
171 batadv_store_##_name)
172
173
174static int batadv_store_bool_attr(char *buff, size_t count,
175 struct net_device *net_dev,
176 const char *attr_name, atomic_t *attr)
177{
178 int enabled = -1;
179
180 if (buff[count - 1] == '\n')
181 buff[count - 1] = '\0';
182
183 if ((strncmp(buff, "1", 2) == 0) ||
184 (strncmp(buff, "enable", 7) == 0) ||
185 (strncmp(buff, "enabled", 8) == 0))
186 enabled = 1;
187
188 if ((strncmp(buff, "0", 2) == 0) ||
189 (strncmp(buff, "disable", 8) == 0) ||
190 (strncmp(buff, "disabled", 9) == 0))
191 enabled = 0;
192
193 if (enabled < 0) {
194 batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
195 attr_name, buff);
196 return -EINVAL;
197 }
198
199 if (atomic_read(attr) == enabled)
200 return count;
201
202 batadv_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
203 atomic_read(attr) == 1 ? "enabled" : "disabled",
204 enabled == 1 ? "enabled" : "disabled");
205
206 atomic_set(attr, (unsigned int)enabled);
207 return count;
208}
209
210static inline ssize_t
211__batadv_store_bool_attr(char *buff, size_t count,
212 void (*post_func)(struct net_device *),
213 struct attribute *attr,
214 atomic_t *attr_store, struct net_device *net_dev)
215{
216 int ret;
217
218 ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
219 attr_store);
220 if (post_func && ret)
221 post_func(net_dev);
222
223 return ret;
224}
225
226static int batadv_store_uint_attr(const char *buff, size_t count,
227 struct net_device *net_dev,
228 const char *attr_name,
229 unsigned int min, unsigned int max,
230 atomic_t *attr)
231{
232 unsigned long uint_val;
233 int ret;
234
235 ret = kstrtoul(buff, 10, &uint_val);
236 if (ret) {
237 batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
238 attr_name, buff);
239 return -EINVAL;
240 }
241
242 if (uint_val < min) {
243 batadv_info(net_dev, "%s: Value is too small: %lu min: %u\n",
244 attr_name, uint_val, min);
245 return -EINVAL;
246 }
247
248 if (uint_val > max) {
249 batadv_info(net_dev, "%s: Value is too big: %lu max: %u\n",
250 attr_name, uint_val, max);
251 return -EINVAL;
252 }
253
254 if (atomic_read(attr) == uint_val)
255 return count;
256
257 batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
258 attr_name, atomic_read(attr), uint_val);
259
260 atomic_set(attr, uint_val);
261 return count;
262}
263
264static inline ssize_t
265__batadv_store_uint_attr(const char *buff, size_t count,
266 int min, int max,
267 void (*post_func)(struct net_device *),
268 const struct attribute *attr,
269 atomic_t *attr_store, struct net_device *net_dev)
270{
271 int ret;
272
273 ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
274 attr_store);
275 if (post_func && ret)
276 post_func(net_dev);
277
278 return ret;
279}
280
281static ssize_t batadv_show_vis_mode(struct kobject *kobj,
282 struct attribute *attr, char *buff)
283{
284 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
285 int vis_mode = atomic_read(&bat_priv->vis_mode);
286 const char *mode;
287
288 if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
289 mode = "client";
290 else
291 mode = "server";
292
293 return sprintf(buff, "%s\n", mode);
294}
295
296static ssize_t batadv_store_vis_mode(struct kobject *kobj,
297 struct attribute *attr, char *buff,
298 size_t count)
299{
300 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
301 struct batadv_priv *bat_priv = netdev_priv(net_dev);
302 unsigned long val;
303 int ret, vis_mode_tmp = -1;
304 const char *old_mode, *new_mode;
305
306 ret = kstrtoul(buff, 10, &val);
307
308 if (((count == 2) && (!ret) &&
309 (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
310 (strncmp(buff, "client", 6) == 0) ||
311 (strncmp(buff, "off", 3) == 0))
312 vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
313
314 if (((count == 2) && (!ret) &&
315 (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
316 (strncmp(buff, "server", 6) == 0))
317 vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
318
319 if (vis_mode_tmp < 0) {
320 if (buff[count - 1] == '\n')
321 buff[count - 1] = '\0';
322
323 batadv_info(net_dev,
324 "Invalid parameter for 'vis mode' setting received: %s\n",
325 buff);
326 return -EINVAL;
327 }
328
329 if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
330 return count;
331
332 if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
333 old_mode = "client";
334 else
335 old_mode = "server";
336
337 if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
338 new_mode = "client";
339 else
340 new_mode = "server";
341
342 batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
343 new_mode);
344
345 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
346 return count;
347}
348
349static ssize_t batadv_show_bat_algo(struct kobject *kobj,
350 struct attribute *attr, char *buff)
351{
352 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
353 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
354}
355
356static void batadv_post_gw_deselect(struct net_device *net_dev)
357{
358 struct batadv_priv *bat_priv = netdev_priv(net_dev);
359 batadv_gw_deselect(bat_priv);
360}
361
362static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
363 char *buff)
364{
365 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
366 int bytes_written;
367
368 switch (atomic_read(&bat_priv->gw_mode)) {
369 case BATADV_GW_MODE_CLIENT:
370 bytes_written = sprintf(buff, "%s\n",
371 BATADV_GW_MODE_CLIENT_NAME);
372 break;
373 case BATADV_GW_MODE_SERVER:
374 bytes_written = sprintf(buff, "%s\n",
375 BATADV_GW_MODE_SERVER_NAME);
376 break;
377 default:
378 bytes_written = sprintf(buff, "%s\n",
379 BATADV_GW_MODE_OFF_NAME);
380 break;
381 }
382
383 return bytes_written;
384}
385
386static ssize_t batadv_store_gw_mode(struct kobject *kobj,
387 struct attribute *attr, char *buff,
388 size_t count)
389{
390 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
391 struct batadv_priv *bat_priv = netdev_priv(net_dev);
392 char *curr_gw_mode_str;
393 int gw_mode_tmp = -1;
394
395 if (buff[count - 1] == '\n')
396 buff[count - 1] = '\0';
397
398 if (strncmp(buff, BATADV_GW_MODE_OFF_NAME,
399 strlen(BATADV_GW_MODE_OFF_NAME)) == 0)
400 gw_mode_tmp = BATADV_GW_MODE_OFF;
401
402 if (strncmp(buff, BATADV_GW_MODE_CLIENT_NAME,
403 strlen(BATADV_GW_MODE_CLIENT_NAME)) == 0)
404 gw_mode_tmp = BATADV_GW_MODE_CLIENT;
405
406 if (strncmp(buff, BATADV_GW_MODE_SERVER_NAME,
407 strlen(BATADV_GW_MODE_SERVER_NAME)) == 0)
408 gw_mode_tmp = BATADV_GW_MODE_SERVER;
409
410 if (gw_mode_tmp < 0) {
411 batadv_info(net_dev,
412 "Invalid parameter for 'gw mode' setting received: %s\n",
413 buff);
414 return -EINVAL;
415 }
416
417 if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
418 return count;
419
420 switch (atomic_read(&bat_priv->gw_mode)) {
421 case BATADV_GW_MODE_CLIENT:
422 curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
423 break;
424 case BATADV_GW_MODE_SERVER:
425 curr_gw_mode_str = BATADV_GW_MODE_SERVER_NAME;
426 break;
427 default:
428 curr_gw_mode_str = BATADV_GW_MODE_OFF_NAME;
429 break;
430 }
431
432 batadv_info(net_dev, "Changing gw mode from: %s to: %s\n",
433 curr_gw_mode_str, buff);
434
435 batadv_gw_deselect(bat_priv);
436 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
437 return count;
438}
439
440static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
441 struct attribute *attr, char *buff)
442{
443 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
444 int down, up;
445 int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
446
447 batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
448 return sprintf(buff, "%i%s/%i%s\n",
449 (down > 2048 ? down / 1024 : down),
450 (down > 2048 ? "MBit" : "KBit"),
451 (up > 2048 ? up / 1024 : up),
452 (up > 2048 ? "MBit" : "KBit"));
453}
454
455static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
456 struct attribute *attr, char *buff,
457 size_t count)
458{
459 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
460
461 if (buff[count - 1] == '\n')
462 buff[count - 1] = '\0';
463
464 return batadv_gw_bandwidth_set(net_dev, buff, count);
465}
466
467BATADV_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
468BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
469#ifdef CONFIG_BATMAN_ADV_BLA
470BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
471#endif
472BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
473BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
474static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
475 batadv_store_vis_mode);
476static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
477static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
478 batadv_store_gw_mode);
479BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
480 INT_MAX, NULL);
481BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
482 NULL);
483BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
484 batadv_post_gw_deselect);
485static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
486 batadv_store_gw_bwidth);
487#ifdef CONFIG_BATMAN_ADV_DEBUG
488BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
489#endif
490
491static struct batadv_attribute *batadv_mesh_attrs[] = {
492 &batadv_attr_aggregated_ogms,
493 &batadv_attr_bonding,
494#ifdef CONFIG_BATMAN_ADV_BLA
495 &batadv_attr_bridge_loop_avoidance,
496#endif
497 &batadv_attr_fragmentation,
498 &batadv_attr_ap_isolation,
499 &batadv_attr_vis_mode,
500 &batadv_attr_routing_algo,
501 &batadv_attr_gw_mode,
502 &batadv_attr_orig_interval,
503 &batadv_attr_hop_penalty,
504 &batadv_attr_gw_sel_class,
505 &batadv_attr_gw_bandwidth,
506#ifdef CONFIG_BATMAN_ADV_DEBUG
507 &batadv_attr_log_level,
508#endif
509 NULL,
510};
511
512int batadv_sysfs_add_meshif(struct net_device *dev)
513{
514 struct kobject *batif_kobject = &dev->dev.kobj;
515 struct batadv_priv *bat_priv = netdev_priv(dev);
516 struct batadv_attribute **bat_attr;
517 int err;
518
519 bat_priv->mesh_obj = kobject_create_and_add(BATADV_SYSFS_IF_MESH_SUBDIR,
520 batif_kobject);
521 if (!bat_priv->mesh_obj) {
522 batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
523 BATADV_SYSFS_IF_MESH_SUBDIR);
524 goto out;
525 }
526
527 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) {
528 err = sysfs_create_file(bat_priv->mesh_obj,
529 &((*bat_attr)->attr));
530 if (err) {
531 batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
532 dev->name, BATADV_SYSFS_IF_MESH_SUBDIR,
533 ((*bat_attr)->attr).name);
534 goto rem_attr;
535 }
536 }
537
538 return 0;
539
540rem_attr:
541 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
542 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
543
544 kobject_put(bat_priv->mesh_obj);
545 bat_priv->mesh_obj = NULL;
546out:
547 return -ENOMEM;
548}
549
550void batadv_sysfs_del_meshif(struct net_device *dev)
551{
552 struct batadv_priv *bat_priv = netdev_priv(dev);
553 struct batadv_attribute **bat_attr;
554
555 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
556 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
557
558 kobject_put(bat_priv->mesh_obj);
559 bat_priv->mesh_obj = NULL;
560}
561
562static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
563 struct attribute *attr, char *buff)
564{
565 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
566 struct batadv_hard_iface *hard_iface;
567 ssize_t length;
568 const char *ifname;
569
570 hard_iface = batadv_hardif_get_by_netdev(net_dev);
571 if (!hard_iface)
572 return 0;
573
574 if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
575 ifname = "none";
576 else
577 ifname = hard_iface->soft_iface->name;
578
579 length = sprintf(buff, "%s\n", ifname);
580
581 batadv_hardif_free_ref(hard_iface);
582
583 return length;
584}
585
586static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
587 struct attribute *attr, char *buff,
588 size_t count)
589{
590 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
591 struct batadv_hard_iface *hard_iface;
592 int status_tmp = -1;
593 int ret = count;
594
595 hard_iface = batadv_hardif_get_by_netdev(net_dev);
596 if (!hard_iface)
597 return count;
598
599 if (buff[count - 1] == '\n')
600 buff[count - 1] = '\0';
601
602 if (strlen(buff) >= IFNAMSIZ) {
603 pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
604 buff);
605 batadv_hardif_free_ref(hard_iface);
606 return -EINVAL;
607 }
608
609 if (strncmp(buff, "none", 4) == 0)
610 status_tmp = BATADV_IF_NOT_IN_USE;
611 else
612 status_tmp = BATADV_IF_I_WANT_YOU;
613
614 if (hard_iface->if_status == status_tmp)
615 goto out;
616
617 if ((hard_iface->soft_iface) &&
618 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
619 goto out;
620
621 if (!rtnl_trylock()) {
622 ret = -ERESTARTSYS;
623 goto out;
624 }
625
626 if (status_tmp == BATADV_IF_NOT_IN_USE) {
627 batadv_hardif_disable_interface(hard_iface);
628 goto unlock;
629 }
630
631 /* if the interface already is in use */
632 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
633 batadv_hardif_disable_interface(hard_iface);
634
635 ret = batadv_hardif_enable_interface(hard_iface, buff);
636
637unlock:
638 rtnl_unlock();
639out:
640 batadv_hardif_free_ref(hard_iface);
641 return ret;
642}

static ssize_t batadv_show_iface_status(struct kobject *kobj,
					struct attribute *attr, char *buff)
{
	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
	struct batadv_hard_iface *hard_iface;
	ssize_t length;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);
	if (!hard_iface)
		return 0;

	switch (hard_iface->if_status) {
	case BATADV_IF_TO_BE_REMOVED:
		length = sprintf(buff, "disabling\n");
		break;
	case BATADV_IF_INACTIVE:
		length = sprintf(buff, "inactive\n");
		break;
	case BATADV_IF_ACTIVE:
		length = sprintf(buff, "active\n");
		break;
	case BATADV_IF_TO_BE_ACTIVATED:
		length = sprintf(buff, "enabling\n");
		break;
	case BATADV_IF_NOT_IN_USE:
	default:
		length = sprintf(buff, "not in use\n");
		break;
	}

	batadv_hardif_free_ref(hard_iface);

	return length;
}

static BATADV_ATTR(mesh_iface, S_IRUGO | S_IWUSR, batadv_show_mesh_iface,
		   batadv_store_mesh_iface);
static BATADV_ATTR(iface_status, S_IRUGO, batadv_show_iface_status, NULL);

static struct batadv_attribute *batadv_batman_attrs[] = {
	&batadv_attr_mesh_iface,
	&batadv_attr_iface_status,
	NULL,
};

int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
{
	struct kobject *hardif_kobject = &dev->dev.kobj;
	struct batadv_attribute **bat_attr;
	int err;

	*hardif_obj = kobject_create_and_add(BATADV_SYSFS_IF_BAT_SUBDIR,
					     hardif_kobject);

	if (!*hardif_obj) {
		batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
			   BATADV_SYSFS_IF_BAT_SUBDIR);
		goto out;
	}

	for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr) {
		err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
		if (err) {
			batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
				   dev->name, BATADV_SYSFS_IF_BAT_SUBDIR,
				   ((*bat_attr)->attr).name);
			goto rem_attr;
		}
	}

	return 0;

rem_attr:
	for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr)
		sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
out:
	return -ENOMEM;
}

void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
{
	kobject_put(*hardif_obj);
	*hardif_obj = NULL;
}

int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
			enum batadv_uev_action action, const char *data)
{
	int ret = -ENOMEM;
	struct batadv_hard_iface *primary_if = NULL;
	struct kobject *bat_kobj;
	char *uevent_env[4] = { NULL, NULL, NULL, NULL };

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	bat_kobj = &primary_if->soft_iface->dev.kobj;

	uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
				strlen(batadv_uev_type_str[type]) + 1,
				GFP_ATOMIC);
	if (!uevent_env[0])
		goto out;

	sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
		batadv_uev_type_str[type]);

	uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
				strlen(batadv_uev_action_str[action]) + 1,
				GFP_ATOMIC);
	if (!uevent_env[1])
		goto out;

	sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
		batadv_uev_action_str[action]);

	/* If the event is DEL, ignore the data field */
	if (action != BATADV_UEV_DEL) {
		uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
					strlen(data) + 1, GFP_ATOMIC);
		if (!uevent_env[2])
			goto out;

		sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
	}

	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
out:
	kfree(uevent_env[0]);
	kfree(uevent_env[1]);
	kfree(uevent_env[2]);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	if (ret)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
			   batadv_uev_type_str[type],
			   batadv_uev_action_str[action],
			   (action == BATADV_UEV_DEL ? "NULL" : data), ret);
	return ret;
}
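batadv_throw_uevent() above emits a KOBJ_CHANGE event on the soft interface's device kobject, carrying up to three extra environment strings (type, action and, except for BATADV_UEV_DEL, a data value) next to the ones the kernel adds itself. A rough userspace sketch that dumps such events; this is a generic NETLINK_KOBJECT_UEVENT listener, nothing batman-adv specific:

/* Hypothetical illustration: print every kobject uevent, including the
 * extra KEY=value strings built by batadv_throw_uevent(). The payload is
 * a block of NUL-separated strings.
 */
#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1, /* kernel uevent multicast group */
	};
	char buf[4096];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
	if (fd < 0 || bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return 1;

	while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
		buf[len] = '\0';
		for (char *p = buf; p < buf + len; p += strlen(p) + 1)
			printf("%s\n", p);
		printf("--\n");
	}
	close(fd);
	return 0;
}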
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/sysfs.h
index fece77ae586e..3fd1412b0620 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -16,17 +15,15 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
-
 #ifndef _NET_BATMAN_ADV_SYSFS_H_
 #define _NET_BATMAN_ADV_SYSFS_H_
 
-#define SYSFS_IF_MESH_SUBDIR "mesh"
-#define SYSFS_IF_BAT_SUBDIR "batman_adv"
+#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
+#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
 
-struct bat_attribute {
+struct batadv_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
 			char *buf);
@@ -34,11 +31,12 @@ struct bat_attribute {
 			 char *buf, size_t count);
 };
 
-int sysfs_add_meshif(struct net_device *dev);
-void sysfs_del_meshif(struct net_device *dev);
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
-void sysfs_del_hardif(struct kobject **hardif_obj);
-int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
-		 enum uev_action action, const char *data);
+int batadv_sysfs_add_meshif(struct net_device *dev);
+void batadv_sysfs_del_meshif(struct net_device *dev);
+int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
+			    struct net_device *dev);
+void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+			enum batadv_uev_action action, const char *data);
 
 #endif /* _NET_BATMAN_ADV_SYSFS_H_ */
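The BATADV_ATTR() lines in sysfs.c above produce the batadv_attr_<name> objects collected in batadv_batman_attrs[], built around struct batadv_attribute from this header. The macro definition itself is not part of this hunk; a plausible expansion, shown only to make the wiring explicit, would be:

/* Sketch of the BATADV_ATTR() macro assumed by the call sites above; the
 * real definition lives in sysfs.c and may differ in detail. Call sites
 * add their own storage class, e.g. "static BATADV_ATTR(...);".
 */
#include <linux/stringify.h>

#define BATADV_ATTR(_name, _mode, _show, _store)		\
struct batadv_attribute batadv_attr_##_name = {			\
	.attr = { .name = __stringify(_name), .mode = _mode },	\
	.show = _show,						\
	.store = _store,					\
}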
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a66c2dcd1088..a438f4b582fc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -31,44 +29,46 @@
 
 #include <linux/crc16.h>
 
-static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-			  struct orig_node *orig_node);
-static void tt_purge(struct work_struct *work);
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+				 struct batadv_orig_node *orig_node);
+static void batadv_tt_purge(struct work_struct *work);
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
 
 /* returns 1 if they are the same mac addr */
-static int compare_tt(const struct hlist_node *node, const void *data2)
+static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 {
-	const void *data1 = container_of(node, struct tt_common_entry,
+	const void *data1 = container_of(node, struct batadv_tt_common_entry,
 					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
-static void tt_start_timer(struct bat_priv *bat_priv)
+static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
+	INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
 			   msecs_to_jiffies(5000));
 }
 
-static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
-					    const void *data)
+static struct batadv_tt_common_entry *
+batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
 	uint32_t index;
 
 	if (!hash)
 		return NULL;
 
-	index = choose_orig(data, hash->size);
+	index = batadv_choose_orig(data, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
-		if (!compare_eth(tt_common_entry, data))
+		if (!batadv_compare_eth(tt_common_entry, data))
 			continue;
 
 		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
@@ -82,79 +82,87 @@ static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
 	return tt_common_entry_tmp;
 }
 
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-						 const void *data)
+static struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-	struct tt_common_entry *tt_common_entry;
-	struct tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
 	if (tt_common_entry)
 		tt_local_entry = container_of(tt_common_entry,
-					      struct tt_local_entry, common);
+					      struct batadv_tt_local_entry,
+					      common);
 	return tt_local_entry;
 }
 
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
-						   const void *data)
+static struct batadv_tt_global_entry *
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
 	if (tt_common_entry)
 		tt_global_entry = container_of(tt_common_entry,
-					       struct tt_global_entry, common);
+					       struct batadv_tt_global_entry,
+					       common);
 	return tt_global_entry;
 
 }
 
-static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+static void
+batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
 {
 	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
 		kfree_rcu(tt_local_entry, common.rcu);
 }
 
-static void tt_global_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
 {
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global_entry;
 
-	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
-	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
-				       common);
+	tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
+	tt_global_entry = container_of(tt_common_entry,
+				       struct batadv_tt_global_entry, common);
 
 	kfree(tt_global_entry);
 }
 
-static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 {
 	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
-		tt_global_del_orig_list(tt_global_entry);
+		batadv_tt_global_del_orig_list(tt_global_entry);
 		call_rcu(&tt_global_entry->common.rcu,
-			 tt_global_entry_free_rcu);
+			 batadv_tt_global_entry_free_rcu);
 	}
 }
 
-static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 {
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 
-	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
-	atomic_dec(&orig_entry->orig_node->tt_size);
-	orig_node_free_ref(orig_entry->orig_node);
+	orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+	batadv_orig_node_free_ref(orig_entry->orig_node);
 	kfree(orig_entry);
 }
 
-static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+static void
+batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
-	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
+	/* to avoid race conditions, immediately decrease the tt counter */
+	atomic_dec(&orig_entry->orig_node->tt_size);
+	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
 }
 
-static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
-			   uint8_t flags)
+static void batadv_tt_local_event(struct batadv_priv *bat_priv,
+				  const uint8_t *addr, uint8_t flags)
 {
-	struct tt_change_node *tt_change_node;
+	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
+	bool event_removed = false;
+	bool del_op_requested, del_op_entry;
 
 	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
 
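The free_ref/free_rcu pairs in the hunk above all follow one discipline: lookups take a reference via atomic_inc_not_zero() so a dying entry is never resurrected, and the final put defers the actual kfree() through call_rcu()/kfree_rcu() so lockless hash walkers can finish first. A compact userspace analogue of the counting half (C11 atomics; the RCU grace period is reduced to a comment, since plain userspace has no drop-in equivalent):

/* Hypothetical userspace analogue of the tt entry refcount discipline. */
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	atomic_int refcount;
	/* ... payload ... */
};

static struct entry *entry_get(struct entry *e)
{
	/* mirrors atomic_inc_not_zero(): never revive a zero refcount */
	int old = atomic_load(&e->refcount);

	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&e->refcount, &old, old + 1));
	return e;
}

static void entry_put(struct entry *e)
{
	/* mirrors atomic_dec_and_test(); the kernel code would defer the
	 * free through call_rcu()/kfree_rcu() instead of freeing at once.
	 */
	if (atomic_fetch_sub(&e->refcount, 1) == 1)
		free(e);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	atomic_store(&e->refcount, 1);	/* the hash table's reference */
	if (entry_get(e))		/* a reader takes its own reference */
		entry_put(e);		/* ... and drops it when done */
	entry_put(e);			/* hash removal drops the last one */
	return 0;
}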
@@ -164,50 +172,82 @@ static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
 	tt_change_node->change.flags = flags;
 	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
 
+	del_op_requested = flags & BATADV_TT_CLIENT_DEL;
+
+	/* check for ADD+DEL or DEL+ADD events */
 	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+				 list) {
+		if (!batadv_compare_eth(entry->change.addr, addr))
+			continue;
+
+		/* DEL+ADD in the same orig interval have no effect and can be
+		 * removed to avoid silly behaviour on the receiver side. The
+		 * other way around (ADD+DEL) can happen in case of roaming of
+		 * a client still in the NEW state. Roaming of NEW clients is
+		 * now possible due to automatic recognition of "temporary"
+		 * clients
+		 */
+		del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
+		if (!del_op_requested && del_op_entry)
+			goto del;
+		if (del_op_requested && !del_op_entry)
+			goto del;
+		continue;
+del:
+		list_del(&entry->list);
+		kfree(entry);
+		event_removed = true;
+		goto unlock;
+	}
+
 	/* track the change in the OGM interval list */
 	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
-	atomic_inc(&bat_priv->tt_local_changes);
+
+unlock:
 	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 
-	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+	if (event_removed)
+		atomic_dec(&bat_priv->tt_local_changes);
+	else
+		atomic_inc(&bat_priv->tt_local_changes);
 }
 
-int tt_len(int changes_num)
+int batadv_tt_len(int changes_num)
 {
-	return changes_num * sizeof(struct tt_change);
+	return changes_num * sizeof(struct batadv_tt_change);
 }
 
-static int tt_local_init(struct bat_priv *bat_priv)
+static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
 	if (bat_priv->tt_local_hash)
-		return 1;
+		return 0;
 
-	bat_priv->tt_local_hash = hash_new(1024);
+	bat_priv->tt_local_hash = batadv_hash_new(1024);
 
 	if (!bat_priv->tt_local_hash)
-		return 0;
+		return -ENOMEM;
 
-	return 1;
+	return 0;
 }
 
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-		  int ifindex)
+void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+			 int ifindex)
 {
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct tt_local_entry *tt_local_entry = NULL;
-	struct tt_global_entry *tt_global_entry = NULL;
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	struct batadv_tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added;
 
-	tt_local_entry = tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
 
 	if (tt_local_entry) {
 		tt_local_entry->last_seen = jiffies;
-		/* possibly unset the TT_CLIENT_PENDING flag */
-		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
+		/* possibly unset the BATADV_TT_CLIENT_PENDING flag */
+		tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
 		goto out;
 	}
 
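The list walk added to batadv_tt_local_event() above encodes a small invariant: within one OGM interval, a queued ADD for a client cancels against a newly requested DEL for the same client (and vice versa), so neither change is transmitted; only same-direction events pass through, and tt_local_changes is decremented rather than incremented when a cancellation happens. Stated on its own (illustration, not kernel code):

/* Hypothetical restatement of the coalescing rule: a new change cancels
 * a queued change for the same MAC iff exactly one of the two is a DEL.
 */
#include <stdbool.h>

static bool tt_events_cancel(bool queued_is_del, bool new_is_del)
{
	return queued_is_del != new_is_del;
}

int main(void)
{
	/* ADD then DEL in the same interval: net effect none -> cancel */
	return tt_events_cancel(false, true) ? 0 : 1;
}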
@@ -215,40 +255,42 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	if (!tt_local_entry)
 		goto out;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-		(uint8_t)atomic_read(&bat_priv->ttvn));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+		   (uint8_t)atomic_read(&bat_priv->ttvn));
 
 	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
-	tt_local_entry->common.flags = NO_FLAGS;
-	if (is_wifi_iface(ifindex))
-		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+	tt_local_entry->common.flags = BATADV_NO_FLAGS;
+	if (batadv_is_wifi_iface(ifindex))
+		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
 	atomic_set(&tt_local_entry->common.refcount, 2);
 	tt_local_entry->last_seen = jiffies;
 
 	/* the batman interface mac address should never be purged */
-	if (compare_eth(addr, soft_iface->dev_addr))
-		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
+	if (batadv_compare_eth(addr, soft_iface->dev_addr))
+		tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
 	/* The local entry has to be marked as NEW to avoid sending it in
 	 * a full table response going out before the next ttvn increment
-	 * (consistency check) */
-	tt_local_entry->common.flags |= TT_CLIENT_NEW;
+	 * (consistency check)
+	 */
+	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
-			      &tt_local_entry->common,
-			      &tt_local_entry->common.hash_entry);
+	hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+				     batadv_choose_orig,
+				     &tt_local_entry->common,
+				     &tt_local_entry->common.hash_entry);
 
 	if (unlikely(hash_added != 0)) {
 		/* remove the reference for the hash */
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 		goto out;
 	}
 
-	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
+	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
 
 	/* remove address from global hash if present */
-	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
 
 	/* Check whether it is roaming! */
 	if (tt_global_entry) {
@@ -258,31 +300,85 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
 			orig_entry->orig_node->tt_poss_change = true;
 
-			send_roam_adv(bat_priv, tt_global_entry->common.addr,
-				      orig_entry->orig_node);
+			batadv_send_roam_adv(bat_priv,
+					     tt_global_entry->common.addr,
+					     orig_entry->orig_node);
 		}
 		rcu_read_unlock();
 		/* The global entry has to be marked as ROAMING and
 		 * has to be kept for consistency purpose
 		 */
-		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
 		tt_global_entry->roam_at = jiffies;
 	}
 out:
 	if (tt_local_entry)
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
+		batadv_tt_global_entry_free_ref(tt_global_entry);
+}
+
+static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
+					  int *packet_buff_len,
+					  int min_packet_len,
+					  int new_packet_len)
+{
+	unsigned char *new_buff;
+
+	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+
+	/* keep old buffer if kmalloc should fail */
+	if (new_buff) {
+		memcpy(new_buff, *packet_buff, min_packet_len);
+		kfree(*packet_buff);
+		*packet_buff = new_buff;
+		*packet_buff_len = new_packet_len;
+	}
+}
+
+static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
+					  unsigned char **packet_buff,
+					  int *packet_buff_len,
+					  int min_packet_len)
+{
+	struct batadv_hard_iface *primary_if;
+	int req_len;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+
+	req_len = min_packet_len;
+	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet don't send any
+	 * and wait for the tt table request which will be fragmented
+	 */
+	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
+		req_len = min_packet_len;
+
+	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
+				      min_packet_len, req_len);
+
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
 }
 
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
-			   unsigned char *buff, int buff_len)
+static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
+				       unsigned char **packet_buff,
+				       int *packet_buff_len,
+				       int min_packet_len)
 {
-	int count = 0, tot_changes = 0;
-	struct tt_change_node *entry, *safe;
+	struct batadv_tt_change_node *entry, *safe;
+	int count = 0, tot_changes = 0, new_len;
+	unsigned char *tt_buff;
+
+	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
+				      packet_buff_len, min_packet_len);
+
+	new_len = *packet_buff_len - min_packet_len;
+	tt_buff = *packet_buff + min_packet_len;
 
-	if (buff_len > 0)
-		tot_changes = buff_len / tt_len(1);
+	if (new_len > 0)
+		tot_changes = new_len / batadv_tt_len(1);
 
 	spin_lock_bh(&bat_priv->tt_changes_list_lock);
 	atomic_set(&bat_priv->tt_local_changes, 0);
@@ -290,8 +386,8 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
 	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
 				 list) {
 		if (count < tot_changes) {
-			memcpy(buff + tt_len(count),
-			       &entry->change, sizeof(struct tt_change));
+			memcpy(tt_buff + batadv_tt_len(count),
+			       &entry->change, sizeof(struct batadv_tt_change));
 			count++;
 		}
 		list_del(&entry->list);
@@ -304,37 +400,35 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
 	kfree(bat_priv->tt_buff);
 	bat_priv->tt_buff_len = 0;
 	bat_priv->tt_buff = NULL;
-	/* We check whether this new OGM has no changes due to size
-	 * problems */
-	if (buff_len > 0) {
-		/**
-		 * if kmalloc() fails we will reply with the full table
+	/* check whether this new OGM has no changes due to size problems */
+	if (new_len > 0) {
+		/* if kmalloc() fails we will reply with the full table
 		 * instead of providing the diff
 		 */
-		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
 		if (bat_priv->tt_buff) {
-			memcpy(bat_priv->tt_buff, buff, buff_len);
-			bat_priv->tt_buff_len = buff_len;
+			memcpy(bat_priv->tt_buff, tt_buff, new_len);
+			bat_priv->tt_buff_len = new_len;
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt_buff_lock);
 
-	return tot_changes;
+	return count;
 }
 
-int tt_local_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_common_entry *tt_common_entry;
-	struct hard_iface *primary_if;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	int ret = 0;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if) {
 		ret = seq_printf(seq,
 				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -342,7 +436,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
 		goto out;
 	}
 
-	if (primary_if->if_status != IF_ACTIVE) {
+	if (primary_if->if_status != BATADV_IF_ACTIVE) {
 		ret = seq_printf(seq,
 				 "BATMAN mesh %s disabled - primary interface not active\n",
 				 net_dev->name);
@@ -362,63 +456,94 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
 		seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
 			   tt_common_entry->addr,
 			   (tt_common_entry->flags &
-			    TT_CLIENT_ROAM ? 'R' : '.'),
+			    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
 			   (tt_common_entry->flags &
-			    TT_CLIENT_NOPURGE ? 'P' : '.'),
+			    BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
 			   (tt_common_entry->flags &
-			    TT_CLIENT_NEW ? 'N' : '.'),
+			    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
 			   (tt_common_entry->flags &
-			    TT_CLIENT_PENDING ? 'X' : '.'),
+			    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
 			   (tt_common_entry->flags &
-			    TT_CLIENT_WIFI ? 'W' : '.'));
+			    BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
 		}
 		rcu_read_unlock();
 	}
 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	return ret;
 }
 
-static void tt_local_set_pending(struct bat_priv *bat_priv,
-				 struct tt_local_entry *tt_local_entry,
-				 uint16_t flags, const char *message)
+static void
+batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
+			    struct batadv_tt_local_entry *tt_local_entry,
+			    uint16_t flags, const char *message)
 {
-	tt_local_event(bat_priv, tt_local_entry->common.addr,
-		       tt_local_entry->common.flags | flags);
+	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
+			      tt_local_entry->common.flags | flags);
 
 	/* The local client has to be marked as "pending to be removed" but has
 	 * to be kept in the table in order to send it in a full table
-	 * response issued before the next ttvn increment (consistency check) */
-	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
+	 * response issued before the next ttvn increment (consistency check)
+	 */
+	tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Local tt entry (%pM) pending to be removed: %s\n",
-		tt_local_entry->common.addr, message);
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Local tt entry (%pM) pending to be removed: %s\n",
+		   tt_local_entry->common.addr, message);
 }
 
-void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
-		     const char *message, bool roaming)
+void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
+			    const char *message, bool roaming)
 {
-	struct tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_local_entry *tt_local_entry = NULL;
+	uint16_t flags;
 
-	tt_local_entry = tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
 	if (!tt_local_entry)
 		goto out;
 
-	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
-			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
+	flags = BATADV_TT_CLIENT_DEL;
+	if (roaming)
+		flags |= BATADV_TT_CLIENT_ROAM;
+
+	batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
 out:
 	if (tt_local_entry)
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 }
 
-static void tt_local_purge(struct bat_priv *bat_priv)
+static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
+				       struct hlist_head *head)
 {
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_local_entry *tt_local_entry;
-	struct tt_common_entry *tt_common_entry;
+	struct batadv_tt_local_entry *tt_local_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
 	struct hlist_node *node, *node_tmp;
+
+	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+				  hash_entry) {
+		tt_local_entry = container_of(tt_common_entry,
+					      struct batadv_tt_local_entry,
+					      common);
+		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
+			continue;
+
+		/* entry already marked for deletion */
+		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
+			continue;
+
+		if (!batadv_has_timed_out(tt_local_entry->last_seen,
+					  BATADV_TT_LOCAL_TIMEOUT))
+			continue;
+
+		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
+					    BATADV_TT_CLIENT_DEL, "timed out");
+	}
+}
+
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+{
+	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	uint32_t i;
@@ -428,36 +553,18 @@ static void tt_local_purge(struct bat_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
-					  head, hash_entry) {
-			tt_local_entry = container_of(tt_common_entry,
-						      struct tt_local_entry,
-						      common);
-			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
-				continue;
-
-			/* entry already marked for deletion */
-			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
-				continue;
-
-			if (!has_timed_out(tt_local_entry->last_seen,
-					   TT_LOCAL_TIMEOUT))
-				continue;
-
-			tt_local_set_pending(bat_priv, tt_local_entry,
-					     TT_CLIENT_DEL, "timed out");
-		}
+		batadv_tt_local_purge_list(bat_priv, head);
 		spin_unlock_bh(list_lock);
 	}
 
 }
 
-static void tt_local_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 {
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	struct tt_common_entry *tt_common_entry;
-	struct tt_local_entry *tt_local_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_local_entry *tt_local;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;
@@ -475,35 +582,35 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
 		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
 			hlist_del_rcu(node);
-			tt_local_entry = container_of(tt_common_entry,
-						      struct tt_local_entry,
-						      common);
-			tt_local_entry_free_ref(tt_local_entry);
+			tt_local = container_of(tt_common_entry,
+						struct batadv_tt_local_entry,
+						common);
+			batadv_tt_local_entry_free_ref(tt_local);
 		}
 		spin_unlock_bh(list_lock);
 	}
 
-	hash_destroy(hash);
+	batadv_hash_destroy(hash);
 
 	bat_priv->tt_local_hash = NULL;
 }
 
-static int tt_global_init(struct bat_priv *bat_priv)
+static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
 	if (bat_priv->tt_global_hash)
-		return 1;
+		return 0;
 
-	bat_priv->tt_global_hash = hash_new(1024);
+	bat_priv->tt_global_hash = batadv_hash_new(1024);
 
 	if (!bat_priv->tt_global_hash)
-		return 0;
+		return -ENOMEM;
 
-	return 1;
+	return 0;
 }
 
-static void tt_changes_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 {
-	struct tt_change_node *entry, *safe;
+	struct batadv_tt_change_node *entry, *safe;
 
 	spin_lock_bh(&bat_priv->tt_changes_list_lock);
 
@@ -520,10 +627,11 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
 /* find out if an orig_node is already in the list of a tt_global_entry.
  * returns 1 if found, 0 otherwise
  */
-static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
-				     const struct orig_node *orig_node)
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+				const struct batadv_orig_node *orig_node)
 {
-	struct tt_orig_list_entry *tmp_orig_entry;
+	struct batadv_tt_orig_list_entry *tmp_orig_entry;
 	const struct hlist_head *head;
 	struct hlist_node *node;
 	bool found = false;
@@ -540,11 +648,11 @@ static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
 	return found;
 }
 
-static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
-				     struct orig_node *orig_node,
-				     int ttvn)
+static void
+batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+				struct batadv_orig_node *orig_node, int ttvn)
 {
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 
 	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
 	if (!orig_entry)
@@ -563,91 +671,95 @@ static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
 }
 
 /* caller must hold orig_node refcount */
-int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
-		  bool wifi)
+int batadv_tt_global_add(struct batadv_priv *bat_priv,
+			 struct batadv_orig_node *orig_node,
+			 const unsigned char *tt_addr, uint8_t flags,
+			 uint8_t ttvn)
 {
-	struct tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry = NULL;
 	int ret = 0;
 	int hash_added;
+	struct batadv_tt_common_entry *common;
 
-	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
 
 	if (!tt_global_entry) {
-		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
-					  GFP_ATOMIC);
+		tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
 		if (!tt_global_entry)
 			goto out;
 
-		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+		common = &tt_global_entry->common;
+		memcpy(common->addr, tt_addr, ETH_ALEN);
 
-		tt_global_entry->common.flags = NO_FLAGS;
+		common->flags = flags;
 		tt_global_entry->roam_at = 0;
-		atomic_set(&tt_global_entry->common.refcount, 2);
+		atomic_set(&common->refcount, 2);
 
 		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
 		spin_lock_init(&tt_global_entry->list_lock);
 
-		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
-				      choose_orig, &tt_global_entry->common,
-				      &tt_global_entry->common.hash_entry);
+		hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+					     batadv_compare_tt,
+					     batadv_choose_orig, common,
+					     &common->hash_entry);
 
 		if (unlikely(hash_added != 0)) {
 			/* remove the reference for the hash */
-			tt_global_entry_free_ref(tt_global_entry);
+			batadv_tt_global_entry_free_ref(tt_global_entry);
 			goto out_remove;
 		}
 
-		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
+		batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
+						ttvn);
 	} else {
 		/* there is already a global entry, use this one. */
 
-		/* If there is the TT_CLIENT_ROAM flag set, there is only one
-		 * originator left in the list and we previously received a
+		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
+		 * one originator left in the list and we previously received a
 		 * delete + roaming change for this originator.
 		 *
 		 * We should first delete the old originator before adding the
 		 * new one.
 		 */
-		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
-			tt_global_del_orig_list(tt_global_entry);
-			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+		if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
+			batadv_tt_global_del_orig_list(tt_global_entry);
+			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
 			tt_global_entry->roam_at = 0;
 		}
 
-		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
-			tt_global_add_orig_entry(tt_global_entry, orig_node,
-						 ttvn);
+		if (!batadv_tt_global_entry_has_orig(tt_global_entry,
+						     orig_node))
+			batadv_tt_global_add_orig_entry(tt_global_entry,
+							orig_node, ttvn);
 	}
 
-	if (wifi)
-		tt_global_entry->common.flags |= TT_CLIENT_WIFI;
-
-	bat_dbg(DBG_TT, bat_priv,
-		"Creating new global tt entry: %pM (via %pM)\n",
-		tt_global_entry->common.addr, orig_node->orig);
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Creating new global tt entry: %pM (via %pM)\n",
+		   tt_global_entry->common.addr, orig_node->orig);
 
 out_remove:
 	/* remove address from local hash if present */
-	tt_local_remove(bat_priv, tt_global_entry->common.addr,
-			"global tt received", roaming);
+	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
+			       "global tt received",
+			       flags & BATADV_TT_CLIENT_ROAM);
 	ret = 1;
 out:
 	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
+		batadv_tt_global_entry_free_ref(tt_global_entry);
 	return ret;
 }
 
 /* print all orig nodes who announce the address for this global entry.
  * it is assumed that the caller holds rcu_read_lock();
  */
-static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
-				  struct seq_file *seq)
+static void
+batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+			     struct seq_file *seq)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_orig_list_entry *orig_entry;
-	struct tt_common_entry *tt_common_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
 	uint16_t flags;
 	uint8_t last_ttvn;
 
@@ -661,25 +773,25 @@ static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
 		seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
 			   tt_global_entry->common.addr, orig_entry->ttvn,
 			   orig_entry->orig_node->orig, last_ttvn,
-			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
-			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
 	}
 }
 
-int tt_global_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->tt_global_hash;
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry;
-	struct hard_iface *primary_if;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global;
+	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	int ret = 0;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if) {
 		ret = seq_printf(seq,
 				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -687,7 +799,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 		goto out;
 	}
 
-	if (primary_if->if_status != IF_ACTIVE) {
+	if (primary_if->if_status != BATADV_IF_ACTIVE) {
 		ret = seq_printf(seq,
 				 "BATMAN mesh %s disabled - primary interface not active\n",
 				 net_dev->name);
@@ -706,87 +818,91 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
-			tt_global_entry = container_of(tt_common_entry,
-						       struct tt_global_entry,
-						       common);
-			tt_global_print_entry(tt_global_entry, seq);
+			tt_global = container_of(tt_common_entry,
+						 struct batadv_tt_global_entry,
+						 common);
+			batadv_tt_global_print_entry(tt_global, seq);
 		}
 		rcu_read_unlock();
 	}
 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	return ret;
 }
 
 /* deletes the orig list of a tt_global_entry */
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 {
 	struct hlist_head *head;
 	struct hlist_node *node, *safe;
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
 		hlist_del_rcu(node);
-		tt_orig_list_entry_free_ref(orig_entry);
+		batadv_tt_orig_list_entry_free_ref(orig_entry);
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
 
 }
 
-static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
-				     struct tt_global_entry *tt_global_entry,
-				     struct orig_node *orig_node,
-				     const char *message)
+static void
+batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
+				struct batadv_tt_global_entry *tt_global_entry,
+				struct batadv_orig_node *orig_node,
+				const char *message)
 {
 	struct hlist_head *head;
 	struct hlist_node *node, *safe;
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
 		if (orig_entry->orig_node == orig_node) {
-			bat_dbg(DBG_TT, bat_priv,
-				"Deleting %pM from global tt entry %pM: %s\n",
-				orig_node->orig, tt_global_entry->common.addr,
-				message);
+			batadv_dbg(BATADV_DBG_TT, bat_priv,
+				   "Deleting %pM from global tt entry %pM: %s\n",
+				   orig_node->orig,
+				   tt_global_entry->common.addr, message);
 			hlist_del_rcu(node);
-			tt_orig_list_entry_free_ref(orig_entry);
+			batadv_tt_orig_list_entry_free_ref(orig_entry);
 		}
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
 }
 
-static void tt_global_del_struct(struct bat_priv *bat_priv,
-				 struct tt_global_entry *tt_global_entry,
-				 const char *message)
+static void
+batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
+			    struct batadv_tt_global_entry *tt_global_entry,
+			    const char *message)
 {
-	bat_dbg(DBG_TT, bat_priv,
-		"Deleting global tt entry %pM: %s\n",
-		tt_global_entry->common.addr, message);
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Deleting global tt entry %pM: %s\n",
+		   tt_global_entry->common.addr, message);
 
-	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
-		    tt_global_entry->common.addr);
-	tt_global_entry_free_ref(tt_global_entry);
+	batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+			   batadv_choose_orig, tt_global_entry->common.addr);
+	batadv_tt_global_entry_free_ref(tt_global_entry);
 
 }
 
 /* If the client is to be deleted, we check if it is the last originator entry
- * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
- * otherwise we simply remove the originator scheduled for deletion.
+ * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
+ * timer, otherwise we simply remove the originator scheduled for deletion.
  */
-static void tt_global_del_roaming(struct bat_priv *bat_priv,
-				  struct tt_global_entry *tt_global_entry,
-				  struct orig_node *orig_node,
-				  const char *message)
+static void
+batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
+			     struct batadv_tt_global_entry *tt_global_entry,
+			     struct batadv_orig_node *orig_node,
+			     const char *message)
 {
 	bool last_entry = true;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 
 	/* no local entry exists, case 1:
 	 * Check if this is the last one or if other entries exist.
@@ -804,37 +920,37 @@ static void tt_global_del_roaming(struct bat_priv *bat_priv,
 
 	if (last_entry) {
 		/* it's the last one, mark for roaming. */
-		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
 		tt_global_entry->roam_at = jiffies;
 	} else
 		/* there is another entry, we can simply delete this
 		 * one and can still use the other one.
 		 */
-		tt_global_del_orig_entry(bat_priv, tt_global_entry,
-					 orig_node, message);
+		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
+						orig_node, message);
 }
 
 
 
-static void tt_global_del(struct bat_priv *bat_priv,
-			  struct orig_node *orig_node,
-			  const unsigned char *addr,
-			  const char *message, bool roaming)
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+				 struct batadv_orig_node *orig_node,
+				 const unsigned char *addr,
+				 const char *message, bool roaming)
 {
-	struct tt_global_entry *tt_global_entry = NULL;
-	struct tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_local_entry *local_entry = NULL;
 
-	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
 	if (!tt_global_entry)
 		goto out;
 
 	if (!roaming) {
-		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
-					 message);
+		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);
 
 		if (hlist_empty(&tt_global_entry->orig_list))
-			tt_global_del_struct(bat_priv, tt_global_entry,
-					     message);
+			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
+						    message);
 
 		goto out;
 	}
@@ -843,41 +959,42 @@ static void tt_global_del(struct bat_priv *bat_priv,
 	 * event, there are two possibilities:
 	 * 1) the client roamed from node A to node B => if there
 	 *    is only one originator left for this client, we mark
-	 *    it with TT_CLIENT_ROAM, we start a timer and we
+	 *    it with BATADV_TT_CLIENT_ROAM, we start a timer and we
 	 *    wait for node B to claim it. In case of timeout
 	 *    the entry is purged.
 	 *
 	 * If there are other originators left, we directly delete
 	 *    the originator.
 	 * 2) the client roamed to us => we can directly delete
-	 *    the global entry, since it is useless now. */
-
-	tt_local_entry = tt_local_hash_find(bat_priv,
-					    tt_global_entry->common.addr);
-	if (tt_local_entry) {
+	 *    the global entry, since it is useless now.
+	 */
+	local_entry = batadv_tt_local_hash_find(bat_priv,
+						tt_global_entry->common.addr);
+	if (local_entry) {
 		/* local entry exists, case 2: client roamed to us. */
-		tt_global_del_orig_list(tt_global_entry);
-		tt_global_del_struct(bat_priv, tt_global_entry, message);
+		batadv_tt_global_del_orig_list(tt_global_entry);
+		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
 	} else
 		/* no local entry exists, case 1: check for roaming */
-		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
-				      message);
+		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
+					     orig_node, message);
 
 
 out:
 	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
-	if (tt_local_entry)
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_global_entry_free_ref(tt_global_entry);
+	if (local_entry)
+		batadv_tt_local_entry_free_ref(local_entry);
 }
 
-void tt_global_del_orig(struct bat_priv *bat_priv,
-			struct orig_node *orig_node, const char *message)
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+			       struct batadv_orig_node *orig_node,
+			       const char *message)
 {
-	struct tt_global_entry *tt_global_entry;
-	struct tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global;
+	struct batadv_tt_common_entry *tt_common_entry;
 	uint32_t i;
-	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
 	struct hlist_node *node, *safe;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -892,34 +1009,56 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_common_entry, node, safe,
 					  head, hash_entry) {
-			tt_global_entry = container_of(tt_common_entry,
-						       struct tt_global_entry,
-						       common);
+			tt_global = container_of(tt_common_entry,
+						 struct batadv_tt_global_entry,
+						 common);
 
-			tt_global_del_orig_entry(bat_priv, tt_global_entry,
-						 orig_node, message);
+			batadv_tt_global_del_orig_entry(bat_priv, tt_global,
+							orig_node, message);
 
-			if (hlist_empty(&tt_global_entry->orig_list)) {
-				bat_dbg(DBG_TT, bat_priv,
-					"Deleting global tt entry %pM: %s\n",
-					tt_global_entry->common.addr,
-					message);
+			if (hlist_empty(&tt_global->orig_list)) {
+				batadv_dbg(BATADV_DBG_TT, bat_priv,
+					   "Deleting global tt entry %pM: %s\n",
+					   tt_global->common.addr, message);
 				hlist_del_rcu(node);
-				tt_global_entry_free_ref(tt_global_entry);
+				batadv_tt_global_entry_free_ref(tt_global);
 			}
 		}
 		spin_unlock_bh(list_lock);
 	}
-	atomic_set(&orig_node->tt_size, 0);
 	orig_node->tt_initialised = false;
 }
 
-static void tt_global_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
+					     struct hlist_head *head)
 {
-	struct hashtable_t *hash = bat_priv->tt_global_hash;
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global_entry;
 	struct hlist_node *node, *node_tmp;
+
+	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+				  hash_entry) {
+		tt_global_entry = container_of(tt_common_entry,
+					       struct batadv_tt_global_entry,
+					       common);
+		if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
+			continue;
+		if (!batadv_has_timed_out(tt_global_entry->roam_at,
+					  BATADV_TT_CLIENT_ROAM_TIMEOUT))
+			continue;
+
+		batadv_dbg(BATADV_DBG_TT, bat_priv,
+			   "Deleting global tt entry (%pM): Roaming timeout\n",
+			   tt_global_entry->common.addr);
+
+		hlist_del_rcu(node);
+		batadv_tt_global_entry_free_ref(tt_global_entry);
+	}
+}
+
+static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+{
+	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
 	struct hlist_head *head;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 	uint32_t i;
@@ -929,35 +1068,18 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
-					  head, hash_entry) {
-			tt_global_entry = container_of(tt_common_entry,
-						       struct tt_global_entry,
-						       common);
-			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
-				continue;
-			if (!has_timed_out(tt_global_entry->roam_at,
-					   TT_CLIENT_ROAM_TIMEOUT))
-				continue;
-
-			bat_dbg(DBG_TT, bat_priv,
-				"Deleting global tt entry (%pM): Roaming timeout\n",
-				tt_global_entry->common.addr);
-
-			hlist_del_rcu(node);
-			tt_global_entry_free_ref(tt_global_entry);
-		}
+		batadv_tt_global_roam_purge_list(bat_priv, head);
 		spin_unlock_bh(list_lock);
 	}
 
 }
 
-static void tt_global_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 {
-	struct hashtable_t *hash;
+	struct batadv_hashtable *hash;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_global_entry *tt_global;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;
@@ -975,56 +1097,60 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
 		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
 			hlist_del_rcu(node);
-			tt_global_entry = container_of(tt_common_entry,
-						       struct tt_global_entry,
-						       common);
-			tt_global_entry_free_ref(tt_global_entry);
+			tt_global = container_of(tt_common_entry,
+						 struct batadv_tt_global_entry,
+						 common);
+			batadv_tt_global_entry_free_ref(tt_global);
 		}
 		spin_unlock_bh(list_lock);
 	}
 
-	hash_destroy(hash);
+	batadv_hash_destroy(hash);
 
 	bat_priv->tt_global_hash = NULL;
 }
 
-static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
-			    struct tt_global_entry *tt_global_entry)
+static bool
+_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
+		       struct batadv_tt_global_entry *tt_global_entry)
 {
 	bool ret = false;
 
-	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
-	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
+	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
+	    tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
 		ret = true;
 
 	return ret;
 }
 
-struct orig_node *transtable_search(struct bat_priv *bat_priv,
-				    const uint8_t *src, const uint8_t *addr)
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+						  const uint8_t *src,
+						  const uint8_t *addr)
 {
-	struct tt_local_entry *tt_local_entry = NULL;
-	struct tt_global_entry *tt_global_entry = NULL;
-	struct orig_node *orig_node = NULL;
-	struct neigh_node *router = NULL;
+	struct batadv_tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_orig_node *orig_node = NULL;
+	struct batadv_neigh_node *router = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry;
 	int best_tq;
 
 	if (src && atomic_read(&bat_priv->ap_isolation)) {
-		tt_local_entry = tt_local_hash_find(bat_priv, src);
+		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
 		if (!tt_local_entry)
 			goto out;
 	}
 
-	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
 	if (!tt_global_entry)
 		goto out;
 
 	/* check whether the clients should not communicate due to AP
-	 * isolation */
-	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
+	 * isolation
+	 */
+	if (tt_local_entry &&
+	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
 		goto out;
 
 	best_tq = 0;
@@ -1032,7 +1158,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 	rcu_read_lock();
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
-		router = orig_node_get_router(orig_entry->orig_node);
+		router = batadv_orig_node_get_router(orig_entry->orig_node);
 		if (!router)
 			continue;
 
@@ -1040,7 +1166,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 			orig_node = orig_entry->orig_node;
 			best_tq = router->tq_avg;
 		}
-		neigh_node_free_ref(router);
+		batadv_neigh_node_free_ref(router);
 	}
 	/* found anything? */
 	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
@@ -1048,21 +1174,21 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 	rcu_read_unlock();
 out:
 	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
+		batadv_tt_global_entry_free_ref(tt_global_entry);
 	if (tt_local_entry)
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 
 	return orig_node;
 }
 
 /* Calculates the checksum of the local table of a given orig_node */
-static uint16_t tt_global_crc(struct bat_priv *bat_priv,
-			      struct orig_node *orig_node)
+static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+				     struct batadv_orig_node *orig_node)
 {
 	uint16_t total = 0, total_one;
-	struct hashtable_t *hash = bat_priv->tt_global_hash;
-	struct tt_common_entry *tt_common_entry;
-	struct tt_global_entry *tt_global_entry;
+	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_tt_common_entry *tt_common;
+	struct batadv_tt_global_entry *tt_global;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
@@ -1072,30 +1198,29 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node,
-					 head, hash_entry) {
-			tt_global_entry = container_of(tt_common_entry,
-						       struct tt_global_entry,
-						       common);
+		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+			tt_global = container_of(tt_common,
+						 struct batadv_tt_global_entry,
+						 common);
 			/* Roaming clients are in the global table for
 			 * consistency only. They don't have to be
 			 * taken into account while computing the
 			 * global crc
 			 */
-			if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+			if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
 				continue;
 
 			/* find out if this global entry is announced by this
 			 * originator
 			 */
-			if (!tt_global_entry_has_orig(tt_global_entry,
-						      orig_node))
+			if (!batadv_tt_global_entry_has_orig(tt_global,
+							     orig_node))
 				continue;
 
 			total_one = 0;
 			for (j = 0; j < ETH_ALEN; j++)
-				total_one = crc16_byte(total_one,
-					tt_global_entry->common.addr[j]);
+				total_one = crc16_byte(total_one,
+						       tt_common->addr[j]);
 			total ^= total_one;
 		}
 		rcu_read_unlock();
@@ -1105,11 +1230,11 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
 }
 
 /* Calculates the checksum of the local table */
-uint16_t tt_local_crc(struct bat_priv *bat_priv)
+static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
 	uint16_t total = 0, total_one;
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_common_entry *tt_common_entry;
+	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_tt_common_entry *tt_common;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
@@ -1119,16 +1244,16 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node,
-					 head, hash_entry) {
+		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
 			/* not yet committed clients have not to be taken into
-			 * account while computing the CRC */
-			if (tt_common_entry->flags & TT_CLIENT_NEW)
+			 * account while computing the CRC
+			 */
+			if (tt_common->flags & BATADV_TT_CLIENT_NEW)
 				continue;
 			total_one = 0;
 			for (j = 0; j < ETH_ALEN; j++)
-				total_one = crc16_byte(total_one,
-						       tt_common_entry->addr[j]);
+				total_one = crc16_byte(total_one,
+						       tt_common->addr[j]);
 			total ^= total_one;
 		}
 		rcu_read_unlock();
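
A note on the checksum scheme in the two functions above: both the global and the local table are summarized as an XOR of one CRC16 per client MAC address, so the result is independent of hash-bucket iteration order and two nodes can compare whole tables through a single 16-bit value. What follows is a minimal userspace sketch of that idea, not the kernel code itself; crc16_byte() is reimplemented bitwise here, whereas the kernel uses the table-driven helper from lib/crc16.

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* CRC-16/ARC processed bit by bit (polynomial 0x8005, reflected) */
static uint16_t crc16_byte(uint16_t crc, uint8_t data)
{
	int k;

	crc ^= data;
	for (k = 0; k < 8; k++)
		crc = (crc >> 1) ^ ((crc & 1) ? 0xa001 : 0);
	return crc;
}

/* XOR of per-client CRCs: adding or removing one client flips exactly
 * that client's term, regardless of the order entries are visited in.
 */
uint16_t table_crc(const uint8_t (*clients)[ETH_ALEN], size_t num)
{
	uint16_t total = 0, total_one;
	size_t i;
	int j;

	for (i = 0; i < num; i++) {
		total_one = 0;
		for (j = 0; j < ETH_ALEN; j++)
			total_one = crc16_byte(total_one, clients[i][j]);
		total ^= total_one;
	}
	return total;
}

The XOR aggregation is also what lets ROAM and NEW entries be skipped cheaply: leaving an entry out simply omits its term from the sum.
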
@@ -1137,9 +1262,9 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
 	return total;
 }
 
-static void tt_req_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
-	struct tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node, *safe;
 
 	spin_lock_bh(&bat_priv->tt_req_list_lock);
 
@@ -1151,15 +1276,16 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
 }
 
-static void tt_save_orig_buffer(struct bat_priv *bat_priv,
-				struct orig_node *orig_node,
-				const unsigned char *tt_buff,
-				uint8_t tt_num_changes)
+static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
+				       struct batadv_orig_node *orig_node,
+				       const unsigned char *tt_buff,
+				       uint8_t tt_num_changes)
 {
-	uint16_t tt_buff_len = tt_len(tt_num_changes);
+	uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
 
 	/* Replace the old buffer only if I received something in the
-	 * last OGM (the OGM could carry no changes) */
+	 * last OGM (the OGM could carry no changes)
+	 */
 	spin_lock_bh(&orig_node->tt_buff_lock);
 	if (tt_buff_len > 0) {
 		kfree(orig_node->tt_buff);
@@ -1173,13 +1299,14 @@ static void tt_save_orig_buffer(struct bat_priv *bat_priv,
 	spin_unlock_bh(&orig_node->tt_buff_lock);
 }
 
-static void tt_req_purge(struct bat_priv *bat_priv)
+static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
-	struct tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node, *safe;
 
 	spin_lock_bh(&bat_priv->tt_req_list_lock);
 	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
-		if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
+		if (batadv_has_timed_out(node->issued_at,
+					 BATADV_TT_REQUEST_TIMEOUT)) {
 			list_del(&node->list);
 			kfree(node);
 		}
@@ -1188,17 +1315,19 @@ static void tt_req_purge(struct bat_priv *bat_priv)
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise */
-static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
-					   struct orig_node *orig_node)
+ * has already been issued for this orig_node, NULL otherwise
+ */
+static struct batadv_tt_req_node *
+batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+		       struct batadv_orig_node *orig_node)
 {
-	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+	struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
 	spin_lock_bh(&bat_priv->tt_req_list_lock);
 	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
-		if (compare_eth(tt_req_node_tmp, orig_node) &&
-		    !has_timed_out(tt_req_node_tmp->issued_at,
-				   TT_REQUEST_TIMEOUT))
+		if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
+		    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
+					  BATADV_TT_REQUEST_TIMEOUT))
 			goto unlock;
 	}
 
@@ -1216,63 +1345,67 @@ unlock:
 }
 
 /* data_ptr is useless here, but has to be kept to respect the prototype */
-static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_local_valid_entry(const void *entry_ptr,
+				       const void *data_ptr)
 {
-	const struct tt_common_entry *tt_common_entry = entry_ptr;
+	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
-	if (tt_common_entry->flags & TT_CLIENT_NEW)
+	if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
 		return 0;
 	return 1;
 }
 
-static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_global_valid(const void *entry_ptr,
+				  const void *data_ptr)
 {
-	const struct tt_common_entry *tt_common_entry = entry_ptr;
-	const struct tt_global_entry *tt_global_entry;
-	const struct orig_node *orig_node = data_ptr;
+	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+	const struct batadv_tt_global_entry *tt_global_entry;
+	const struct batadv_orig_node *orig_node = data_ptr;
 
-	if (tt_common_entry->flags & TT_CLIENT_ROAM)
+	if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
 		return 0;
 
-	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+	tt_global_entry = container_of(tt_common_entry,
+				       struct batadv_tt_global_entry,
 				       common);
 
-	return tt_global_entry_has_orig(tt_global_entry, orig_node);
+	return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
-static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
-					      struct hashtable_t *hash,
-					      struct hard_iface *primary_if,
-					      int (*valid_cb)(const void *,
-							      const void *),
-					      void *cb_data)
+static struct sk_buff *
+batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
+			      struct batadv_hashtable *hash,
+			      struct batadv_hard_iface *primary_if,
+			      int (*valid_cb)(const void *, const void *),
+			      void *cb_data)
 {
-	struct tt_common_entry *tt_common_entry;
-	struct tt_query_packet *tt_response;
-	struct tt_change *tt_change;
+	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_query_packet *tt_response;
+	struct batadv_tt_change *tt_change;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct sk_buff *skb = NULL;
 	uint16_t tt_tot, tt_count;
-	ssize_t tt_query_size = sizeof(struct tt_query_packet);
+	ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
 	uint32_t i;
+	size_t len;
 
 	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
 		tt_len = primary_if->soft_iface->mtu - tt_query_size;
-		tt_len -= tt_len % sizeof(struct tt_change);
+		tt_len -= tt_len % sizeof(struct batadv_tt_change);
 	}
-	tt_tot = tt_len / sizeof(struct tt_change);
+	tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
+	len = tt_query_size + tt_len;
+	skb = dev_alloc_skb(len + ETH_HLEN);
 	if (!skb)
 		goto out;
 
 	skb_reserve(skb, ETH_HLEN);
-	tt_response = (struct tt_query_packet *)skb_put(skb,
-						tt_query_size + tt_len);
+	tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
 	tt_response->ttvn = ttvn;
 
-	tt_change = (struct tt_change *)(skb->data + tt_query_size);
+	tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
 	tt_count = 0;
 
 	rcu_read_lock();
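
Since TT fragmentation is not implemented at this stage (as the comments in the surrounding hunks note), the function above caps the TT payload at the soft-interface MTU and rounds it down to a whole number of change records. Below is a standalone sketch of just that sizing step; hdr_size and record_size are illustrative stand-ins for the query-packet header and struct batadv_tt_change sizes, and hdr_size <= mtu is assumed.

#include <stddef.h>

/* Clamp a requested TT payload so header + payload fit in one frame,
 * then round down so no change record is ever truncated.
 */
size_t clamp_tt_len(size_t tt_len, size_t mtu, size_t hdr_size,
		    size_t record_size)
{
	if (hdr_size + tt_len > mtu)
		tt_len = mtu - hdr_size;

	tt_len -= tt_len % record_size;	/* whole records only */
	return tt_len;
}

For example, with mtu = 1500 and a 7-byte record, a request for 2000 bytes of payload would be cut to the largest multiple of 7 that still fits after the header.
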
@@ -1289,7 +1422,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 
 			memcpy(tt_change->addr, tt_common_entry->addr,
 			       ETH_ALEN);
-			tt_change->flags = NO_FLAGS;
+			tt_change->flags = BATADV_NO_FLAGS;
 
 			tt_count++;
 			tt_change++;
@@ -1298,72 +1431,78 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 	rcu_read_unlock();
 
 	/* store in the message the number of entries we have successfully
-	 * copied */
+	 * copied
+	 */
 	tt_response->tt_data = htons(tt_count);
 
 out:
 	return skb;
 }
 
-static int send_tt_request(struct bat_priv *bat_priv,
-			   struct orig_node *dst_orig_node,
-			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
+static int batadv_send_tt_request(struct batadv_priv *bat_priv,
+				  struct batadv_orig_node *dst_orig_node,
+				  uint8_t ttvn, uint16_t tt_crc,
+				  bool full_table)
 {
 	struct sk_buff *skb = NULL;
-	struct tt_query_packet *tt_request;
-	struct neigh_node *neigh_node = NULL;
-	struct hard_iface *primary_if;
-	struct tt_req_node *tt_req_node = NULL;
+	struct batadv_tt_query_packet *tt_request;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_hard_iface *primary_if;
+	struct batadv_tt_req_node *tt_req_node = NULL;
 	int ret = 1;
+	size_t tt_req_len;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
 	/* The new tt_req will be issued only if I'm not waiting for a
-	 * reply from the same orig_node yet */
-	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
+	 * reply from the same orig_node yet
+	 */
+	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
 	if (!tt_req_node)
 		goto out;
 
-	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
+	skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
 	if (!skb)
 		goto out;
 
 	skb_reserve(skb, ETH_HLEN);
 
-	tt_request = (struct tt_query_packet *)skb_put(skb,
-					sizeof(struct tt_query_packet));
+	tt_req_len = sizeof(*tt_request);
+	tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
 
-	tt_request->header.packet_type = BAT_TT_QUERY;
-	tt_request->header.version = COMPAT_VERSION;
+	tt_request->header.packet_type = BATADV_TT_QUERY;
+	tt_request->header.version = BATADV_COMPAT_VERSION;
 	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
-	tt_request->header.ttl = TTL;
+	tt_request->header.ttl = BATADV_TTL;
 	tt_request->ttvn = ttvn;
 	tt_request->tt_data = htons(tt_crc);
-	tt_request->flags = TT_REQUEST;
+	tt_request->flags = BATADV_TT_REQUEST;
 
 	if (full_table)
-		tt_request->flags |= TT_FULL_TABLE;
+		tt_request->flags |= BATADV_TT_FULL_TABLE;
 
-	neigh_node = orig_node_get_router(dst_orig_node);
+	neigh_node = batadv_orig_node_get_router(dst_orig_node);
 	if (!neigh_node)
 		goto out;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Sending TT_REQUEST to %pM via %pM [%c]\n",
-		dst_orig_node->orig, neigh_node->addr,
-		(full_table ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
+		   dst_orig_node->orig, neigh_node->addr,
+		   (full_table ? 'F' : '.'));
+
+	batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
 
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	ret = 0;
 
 out:
 	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	if (ret)
 		kfree_skb(skb);
 	if (ret && tt_req_node) {
@@ -1375,39 +1514,42 @@ out:
 	return ret;
 }
 
-static bool send_other_tt_response(struct bat_priv *bat_priv,
-				   struct tt_query_packet *tt_request)
+static bool
+batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+			      struct batadv_tt_query_packet *tt_request)
 {
-	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
-	struct neigh_node *neigh_node = NULL;
-	struct hard_iface *primary_if = NULL;
+	struct batadv_orig_node *req_dst_orig_node = NULL;
+	struct batadv_orig_node *res_dst_orig_node = NULL;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_hard_iface *primary_if = NULL;
 	uint8_t orig_ttvn, req_ttvn, ttvn;
 	int ret = false;
 	unsigned char *tt_buff;
 	bool full_table;
 	uint16_t tt_len, tt_tot;
 	struct sk_buff *skb = NULL;
-	struct tt_query_packet *tt_response;
+	struct batadv_tt_query_packet *tt_response;
+	size_t len;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
-		tt_request->src, tt_request->ttvn, tt_request->dst,
-		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
+		   tt_request->src, tt_request->ttvn, tt_request->dst,
+		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 	/* Let's get the orig node of the REAL destination */
-	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
+	req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
 	if (!req_dst_orig_node)
 		goto out;
 
-	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
+	res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
 	if (!res_dst_orig_node)
 		goto out;
 
-	neigh_node = orig_node_get_router(res_dst_orig_node);
+	neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
 	if (!neigh_node)
 		goto out;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
@@ -1416,71 +1558,75 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
 
 	/* I don't have the requested data */
 	if (orig_ttvn != req_ttvn ||
-	    tt_request->tt_data != req_dst_orig_node->tt_crc)
+	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
 		goto out;
 
 	/* If the full table has been explicitly requested */
-	if (tt_request->flags & TT_FULL_TABLE ||
+	if (tt_request->flags & BATADV_TT_FULL_TABLE ||
 	    !req_dst_orig_node->tt_buff)
 		full_table = true;
 	else
 		full_table = false;
 
 	/* In this version, fragmentation is not implemented, then
-	 * I'll send only one packet with as much TT entries as I can */
+	 * I'll send only one packet with as much TT entries as I can
+	 */
 	if (!full_table) {
 		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
 		tt_len = req_dst_orig_node->tt_buff_len;
-		tt_tot = tt_len / sizeof(struct tt_change);
+		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
-				    tt_len + ETH_HLEN);
+		len = sizeof(*tt_response) + tt_len;
+		skb = dev_alloc_skb(len + ETH_HLEN);
 		if (!skb)
 			goto unlock;
 
 		skb_reserve(skb, ETH_HLEN);
-		tt_response = (struct tt_query_packet *)skb_put(skb,
-				sizeof(struct tt_query_packet) + tt_len);
+		tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+								       len);
 		tt_response->ttvn = req_ttvn;
 		tt_response->tt_data = htons(tt_tot);
 
-		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		tt_buff = skb->data + sizeof(*tt_response);
 		/* Copy the last orig_node's OGM buffer */
 		memcpy(tt_buff, req_dst_orig_node->tt_buff,
 		       req_dst_orig_node->tt_buff_len);
 
 		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
 	} else {
-		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
-			 sizeof(struct tt_change);
+		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
+		tt_len *= sizeof(struct batadv_tt_change);
 		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
-		skb = tt_response_fill_table(tt_len, ttvn,
-					     bat_priv->tt_global_hash,
-					     primary_if, tt_global_valid_entry,
-					     req_dst_orig_node);
+		skb = batadv_tt_response_fill_table(tt_len, ttvn,
+						    bat_priv->tt_global_hash,
+						    primary_if,
+						    batadv_tt_global_valid,
+						    req_dst_orig_node);
 		if (!skb)
 			goto out;
 
-		tt_response = (struct tt_query_packet *)skb->data;
+		tt_response = (struct batadv_tt_query_packet *)skb->data;
 	}
 
-	tt_response->header.packet_type = BAT_TT_QUERY;
-	tt_response->header.version = COMPAT_VERSION;
-	tt_response->header.ttl = TTL;
+	tt_response->header.packet_type = BATADV_TT_QUERY;
+	tt_response->header.version = BATADV_COMPAT_VERSION;
+	tt_response->header.ttl = BATADV_TTL;
 	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
 	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-	tt_response->flags = TT_RESPONSE;
+	tt_response->flags = BATADV_TT_RESPONSE;
 
 	if (full_table)
-		tt_response->flags |= TT_FULL_TABLE;
+		tt_response->flags |= BATADV_TT_FULL_TABLE;
+
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
+		   res_dst_orig_node->orig, neigh_node->addr,
+		   req_dst_orig_node->orig, req_ttvn);
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
-		res_dst_orig_node->orig, neigh_node->addr,
-		req_dst_orig_node->orig, req_ttvn);
+	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	ret = true;
 	goto out;
 
@@ -1489,114 +1635,122 @@ unlock:
 
 out:
 	if (res_dst_orig_node)
-		orig_node_free_ref(res_dst_orig_node);
+		batadv_orig_node_free_ref(res_dst_orig_node);
 	if (req_dst_orig_node)
-		orig_node_free_ref(req_dst_orig_node);
+		batadv_orig_node_free_ref(req_dst_orig_node);
 	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	if (!ret)
 		kfree_skb(skb);
 	return ret;
 
 }
-static bool send_my_tt_response(struct bat_priv *bat_priv,
-				struct tt_query_packet *tt_request)
+
+static bool
+batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+			   struct batadv_tt_query_packet *tt_request)
 {
-	struct orig_node *orig_node = NULL;
-	struct neigh_node *neigh_node = NULL;
-	struct hard_iface *primary_if = NULL;
+	struct batadv_orig_node *orig_node = NULL;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_hard_iface *primary_if = NULL;
 	uint8_t my_ttvn, req_ttvn, ttvn;
 	int ret = false;
 	unsigned char *tt_buff;
 	bool full_table;
 	uint16_t tt_len, tt_tot;
 	struct sk_buff *skb = NULL;
-	struct tt_query_packet *tt_response;
+	struct batadv_tt_query_packet *tt_response;
+	size_t len;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
-		tt_request->src, tt_request->ttvn,
-		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
+		   tt_request->src, tt_request->ttvn,
+		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
 	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
 	req_ttvn = tt_request->ttvn;
 
-	orig_node = orig_hash_find(bat_priv, tt_request->src);
+	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
 	if (!orig_node)
 		goto out;
 
-	neigh_node = orig_node_get_router(orig_node);
+	neigh_node = batadv_orig_node_get_router(orig_node);
 	if (!neigh_node)
 		goto out;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
 	/* If the full table has been explicitly requested or the gap
-	 * is too big send the whole local translation table */
-	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
+	 * is too big send the whole local translation table
+	 */
+	if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
 	    !bat_priv->tt_buff)
 		full_table = true;
 	else
 		full_table = false;
 
 	/* In this version, fragmentation is not implemented, then
-	 * I'll send only one packet with as much TT entries as I can */
+	 * I'll send only one packet with as much TT entries as I can
+	 */
 	if (!full_table) {
 		spin_lock_bh(&bat_priv->tt_buff_lock);
 		tt_len = bat_priv->tt_buff_len;
-		tt_tot = tt_len / sizeof(struct tt_change);
+		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
-				    tt_len + ETH_HLEN);
+		len = sizeof(*tt_response) + tt_len;
+		skb = dev_alloc_skb(len + ETH_HLEN);
 		if (!skb)
 			goto unlock;
 
 		skb_reserve(skb, ETH_HLEN);
-		tt_response = (struct tt_query_packet *)skb_put(skb,
-				sizeof(struct tt_query_packet) + tt_len);
+		tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+								       len);
 		tt_response->ttvn = req_ttvn;
 		tt_response->tt_data = htons(tt_tot);
 
-		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		tt_buff = skb->data + sizeof(*tt_response);
 		memcpy(tt_buff, bat_priv->tt_buff,
 		       bat_priv->tt_buff_len);
 		spin_unlock_bh(&bat_priv->tt_buff_lock);
 	} else {
-		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
-			 sizeof(struct tt_change);
+		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+		tt_len *= sizeof(struct batadv_tt_change);
 		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
 
-		skb = tt_response_fill_table(tt_len, ttvn,
-					     bat_priv->tt_local_hash,
-					     primary_if, tt_local_valid_entry,
-					     NULL);
+		skb = batadv_tt_response_fill_table(tt_len, ttvn,
+						    bat_priv->tt_local_hash,
+						    primary_if,
+						    batadv_tt_local_valid_entry,
+						    NULL);
 		if (!skb)
 			goto out;
 
-		tt_response = (struct tt_query_packet *)skb->data;
+		tt_response = (struct batadv_tt_query_packet *)skb->data;
 	}
 
-	tt_response->header.packet_type = BAT_TT_QUERY;
-	tt_response->header.version = COMPAT_VERSION;
-	tt_response->header.ttl = TTL;
+	tt_response->header.packet_type = BATADV_TT_QUERY;
+	tt_response->header.version = BATADV_COMPAT_VERSION;
+	tt_response->header.ttl = BATADV_TTL;
 	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-	tt_response->flags = TT_RESPONSE;
+	tt_response->flags = BATADV_TT_RESPONSE;
 
 	if (full_table)
-		tt_response->flags |= TT_FULL_TABLE;
+		tt_response->flags |= BATADV_TT_FULL_TABLE;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
-		orig_node->orig, neigh_node->addr,
-		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
+		   orig_node->orig, neigh_node->addr,
+		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	ret = true;
 	goto out;
 
@@ -1604,49 +1758,50 @@ unlock:
 	spin_unlock_bh(&bat_priv->tt_buff_lock);
 out:
 	if (orig_node)
-		orig_node_free_ref(orig_node);
+		batadv_orig_node_free_ref(orig_node);
 	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	if (!ret)
 		kfree_skb(skb);
 	/* This packet was for me, so it doesn't need to be re-routed */
 	return true;
 }
 
-bool send_tt_response(struct bat_priv *bat_priv,
-		      struct tt_query_packet *tt_request)
+bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+			     struct batadv_tt_query_packet *tt_request)
 {
-	if (is_my_mac(tt_request->dst)) {
+	if (batadv_is_my_mac(tt_request->dst)) {
 		/* don't answer backbone gws! */
-		if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
 			return true;
 
-		return send_my_tt_response(bat_priv, tt_request);
+		return batadv_send_my_tt_response(bat_priv, tt_request);
 	} else {
-		return send_other_tt_response(bat_priv, tt_request);
+		return batadv_send_other_tt_response(bat_priv, tt_request);
 	}
 }
 
-static void _tt_update_changes(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node,
-			       struct tt_change *tt_change,
-			       uint16_t tt_num_changes, uint8_t ttvn)
+static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
+				      struct batadv_orig_node *orig_node,
+				      struct batadv_tt_change *tt_change,
+				      uint16_t tt_num_changes, uint8_t ttvn)
 {
 	int i;
+	int roams;
 
 	for (i = 0; i < tt_num_changes; i++) {
-		if ((tt_change + i)->flags & TT_CLIENT_DEL)
-			tt_global_del(bat_priv, orig_node,
-				      (tt_change + i)->addr,
-				      "tt removed by changes",
-				      (tt_change + i)->flags & TT_CLIENT_ROAM);
-		else
-			if (!tt_global_add(bat_priv, orig_node,
-					   (tt_change + i)->addr, ttvn, false,
-					   (tt_change + i)->flags &
-					   TT_CLIENT_WIFI))
+		if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
+			roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
+			batadv_tt_global_del(bat_priv, orig_node,
+					     (tt_change + i)->addr,
+					     "tt removed by changes",
+					     roams);
+		} else {
+			if (!batadv_tt_global_add(bat_priv, orig_node,
+						  (tt_change + i)->addr,
+						  (tt_change + i)->flags, ttvn))
 				/* In case of problem while storing a
 				 * global_entry, we stop the updating
 				 * procedure without committing the
@@ -1654,25 +1809,27 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
 				 * corrupted data on tt_request
 				 */
 				return;
+		}
 	}
 	orig_node->tt_initialised = true;
 }
 
-static void tt_fill_gtable(struct bat_priv *bat_priv,
-			   struct tt_query_packet *tt_response)
+static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
+				  struct batadv_tt_query_packet *tt_response)
 {
-	struct orig_node *orig_node = NULL;
+	struct batadv_orig_node *orig_node = NULL;
 
-	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
 	if (!orig_node)
 		goto out;
 
 	/* Purge the old table first.. */
-	tt_global_del_orig(bat_priv, orig_node, "Received full table");
+	batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
 
-	_tt_update_changes(bat_priv, orig_node,
-			   (struct tt_change *)(tt_response + 1),
-			   tt_response->tt_data, tt_response->ttvn);
+	_batadv_tt_update_changes(bat_priv, orig_node,
+				  (struct batadv_tt_change *)(tt_response + 1),
+				  ntohs(tt_response->tt_data),
+				  tt_response->ttvn);
 
 	spin_lock_bh(&orig_node->tt_buff_lock);
 	kfree(orig_node->tt_buff);
@@ -1684,71 +1841,76 @@ static void tt_fill_gtable(struct bat_priv *bat_priv,
 
 out:
 	if (orig_node)
-		orig_node_free_ref(orig_node);
+		batadv_orig_node_free_ref(orig_node);
 }
 
-static void tt_update_changes(struct bat_priv *bat_priv,
-			      struct orig_node *orig_node,
-			      uint16_t tt_num_changes, uint8_t ttvn,
-			      struct tt_change *tt_change)
+static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
+				     struct batadv_orig_node *orig_node,
+				     uint16_t tt_num_changes, uint8_t ttvn,
+				     struct batadv_tt_change *tt_change)
 {
-	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
-			   ttvn);
+	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
+				  tt_num_changes, ttvn);
 
-	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
-			    tt_num_changes);
+	batadv_tt_save_orig_buffer(bat_priv, orig_node,
+				   (unsigned char *)tt_change, tt_num_changes);
 	atomic_set(&orig_node->last_ttvn, ttvn);
 }
 
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
 {
-	struct tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	bool ret = false;
 
-	tt_local_entry = tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
 	if (!tt_local_entry)
 		goto out;
 	/* Check if the client has been logically deleted (but is kept for
-	 * consistency purpose) */
-	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
+	 * consistency purpose)
+	 */
+	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
 		goto out;
 	ret = true;
 out:
 	if (tt_local_entry)
-		tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 	return ret;
 }
 
-void handle_tt_response(struct bat_priv *bat_priv,
-			struct tt_query_packet *tt_response)
+void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+			       struct batadv_tt_query_packet *tt_response)
 {
-	struct tt_req_node *node, *safe;
-	struct orig_node *orig_node = NULL;
+	struct batadv_tt_req_node *node, *safe;
+	struct batadv_orig_node *orig_node = NULL;
+	struct batadv_tt_change *tt_change;
 
-	bat_dbg(DBG_TT, bat_priv,
-		"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
-		tt_response->src, tt_response->ttvn, tt_response->tt_data,
-		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
+		   tt_response->src, tt_response->ttvn,
+		   ntohs(tt_response->tt_data),
+		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 	/* we should have never asked a backbone gw */
-	if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
 		goto out;
 
-	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
 	if (!orig_node)
 		goto out;
 
-	if (tt_response->flags & TT_FULL_TABLE)
-		tt_fill_gtable(bat_priv, tt_response);
-	else
-		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
-				  tt_response->ttvn,
-				  (struct tt_change *)(tt_response + 1));
+	if (tt_response->flags & BATADV_TT_FULL_TABLE) {
+		batadv_tt_fill_gtable(bat_priv, tt_response);
+	} else {
+		tt_change = (struct batadv_tt_change *)(tt_response + 1);
+		batadv_tt_update_changes(bat_priv, orig_node,
+					 ntohs(tt_response->tt_data),
+					 tt_response->ttvn, tt_change);
+	}
 
 	/* Delete the tt_req_node from pending tt_requests list */
 	spin_lock_bh(&bat_priv->tt_req_list_lock);
 	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
-		if (!compare_eth(node->addr, tt_response->src))
+		if (!batadv_compare_eth(node->addr, tt_response->src))
 			continue;
 		list_del(&node->list);
 		kfree(node);
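
A recurring fix in these hunks is byte order: tt_data is a 16-bit on-wire field written with htons() by the sending paths, so the receive paths here and in batadv_tt_fill_gtable() above now convert it back with ntohs() before using it as an entry count. A userspace sketch of the convention follows; the struct and function names are illustrative, not the batman-adv packet layout.

#include <arpa/inet.h>
#include <stdint.h>

struct wire_hdr {
	uint16_t tt_data;	/* big-endian (network order) on the wire */
};

/* sender side: store in network order */
void put_count(struct wire_hdr *hdr, uint16_t count)
{
	hdr->tt_data = htons(count);
}

/* receiver side: convert before use; skipping the conversion only
 * happens to work on big-endian hosts
 */
uint16_t get_count(const struct wire_hdr *hdr)
{
	return ntohs(hdr->tt_data);
}
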
@@ -1756,31 +1918,36 @@ void handle_tt_response(struct bat_priv *bat_priv,
 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
 
 	/* Recalculate the CRC for this orig_node and store it */
-	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
 	/* Roaming phase is over: tables are in sync again. I can
-	 * unset the flag */
+	 * unset the flag
+	 */
 	orig_node->tt_poss_change = false;
 out:
 	if (orig_node)
-		orig_node_free_ref(orig_node);
+		batadv_orig_node_free_ref(orig_node);
 }
 
-int tt_init(struct bat_priv *bat_priv)
+int batadv_tt_init(struct batadv_priv *bat_priv)
 {
-	if (!tt_local_init(bat_priv))
-		return 0;
+	int ret;
 
-	if (!tt_global_init(bat_priv))
-		return 0;
+	ret = batadv_tt_local_init(bat_priv);
+	if (ret < 0)
+		return ret;
 
-	tt_start_timer(bat_priv);
+	ret = batadv_tt_global_init(bat_priv);
+	if (ret < 0)
+		return ret;
+
+	batadv_tt_start_timer(bat_priv);
 
 	return 1;
 }
 
-static void tt_roam_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
-	struct tt_roam_node *node, *safe;
+	struct batadv_tt_roam_node *node, *safe;
 
 	spin_lock_bh(&bat_priv->tt_roam_list_lock);
 
@@ -1792,13 +1959,14 @@ static void tt_roam_list_free(struct bat_priv *bat_priv)
 	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
 }
 
-static void tt_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
-	struct tt_roam_node *node, *safe;
+	struct batadv_tt_roam_node *node, *safe;
 
 	spin_lock_bh(&bat_priv->tt_roam_list_lock);
 	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
-		if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
+		if (!batadv_has_timed_out(node->first_time,
+					  BATADV_ROAMING_MAX_TIME))
 			continue;
 
 		list_del(&node->list);
@@ -1811,24 +1979,27 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
  * maximum number of possible roaming phases. In this case the ROAMING_ADV
  * will not be sent.
  *
- * returns true if the ROAMING_ADV can be sent, false otherwise */
-static bool tt_check_roam_count(struct bat_priv *bat_priv,
-				uint8_t *client)
+ * returns true if the ROAMING_ADV can be sent, false otherwise
+ */
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
+				       uint8_t *client)
 {
-	struct tt_roam_node *tt_roam_node;
+	struct batadv_tt_roam_node *tt_roam_node;
 	bool ret = false;
 
 	spin_lock_bh(&bat_priv->tt_roam_list_lock);
 	/* The new tt_req will be issued only if I'm not waiting for a
-	 * reply from the same orig_node yet */
+	 * reply from the same orig_node yet
+	 */
 	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
-		if (!compare_eth(tt_roam_node->addr, client))
+		if (!batadv_compare_eth(tt_roam_node->addr, client))
 			continue;
 
-		if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
+		if (batadv_has_timed_out(tt_roam_node->first_time,
+					 BATADV_ROAMING_MAX_TIME))
 			continue;
 
-		if (!atomic_dec_not_zero(&tt_roam_node->counter))
+		if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
 			/* Sorry, you roamed too many times! */
 			goto unlock;
 		ret = true;
@@ -1841,7 +2012,8 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
 			goto unlock;
 
 		tt_roam_node->first_time = jiffies;
-		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+		atomic_set(&tt_roam_node->counter,
+			   BATADV_ROAMING_MAX_COUNT - 1);
 		memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
 		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
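
The roaming bookkeeping above rate-limits ROAMING_ADV messages per client: a fresh roaming phase arms a counter at BATADV_ROAMING_MAX_COUNT - 1, each further event inside BATADV_ROAMING_MAX_TIME decrements it, and once it reaches zero further advertisements are suppressed until the window expires. The following is a simplified userspace sketch of the same pattern, using plain integers and time_t where the kernel uses atomics and jiffies; the constants are illustrative.

#include <stdbool.h>
#include <time.h>

#define ROAMING_MAX_COUNT 5
#define ROAMING_MAX_TIME  300	/* seconds */

struct roam_node {
	time_t first_time;	/* start of the current roaming phase */
	int counter;		/* events still allowed in this phase */
};

/* returns true if an advertisement may still be sent for this client */
bool roam_allowed(struct roam_node *node, time_t now)
{
	if (now - node->first_time > ROAMING_MAX_TIME) {
		/* window expired: start a fresh roaming phase */
		node->first_time = now;
		node->counter = ROAMING_MAX_COUNT - 1;
		return true;
	}
	if (node->counter == 0)
		return false;	/* roamed too many times */
	node->counter--;
	return true;
}
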
@@ -1853,97 +2025,103 @@ unlock:
1853 return ret; 2025 return ret;
1854} 2026}
1855 2027
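
batadv_tt_check_roam_count() implements a per-client budget: at most BATADV_ROAMING_MAX_COUNT roaming advertisements may be triggered within one BATADV_ROAMING_MAX_TIME window, after which further events are suppressed until the window restarts. A runnable user-space model of that policy (constants and names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define ROAM_MAX_COUNT 5      /* advertisements allowed per window */
#define ROAM_WINDOW_MS 20000  /* window length in milliseconds */

struct roam_budget {
	unsigned long first_time_ms; /* start of the current window */
	int counter;                 /* advertisements left in it */
};

/* Returns true if a ROAMING_ADV may still be sent at time now_ms. */
static bool roam_allowed(struct roam_budget *b, unsigned long now_ms)
{
	if (now_ms - b->first_time_ms >= ROAM_WINDOW_MS) {
		/* window expired: restart it with a fresh budget */
		b->first_time_ms = now_ms;
		b->counter = ROAM_MAX_COUNT - 1; /* this call uses one */
		return true;
	}
	if (b->counter == 0)
		return false; /* "Sorry, you roamed too many times!" */
	b->counter--;
	return true;
}

int main(void)
{
	struct roam_budget b = { 0, 0 };

	for (int i = 0; i < 7; i++)
		printf("attempt %d: %s\n", i,
		       roam_allowed(&b, 25000) ? "sent" : "suppressed");
	return 0; /* prints "sent" five times, then "suppressed" twice */
}
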
1856static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 2028static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
1857 struct orig_node *orig_node) 2029 struct batadv_orig_node *orig_node)
1858{ 2030{
1859 struct neigh_node *neigh_node = NULL; 2031 struct batadv_neigh_node *neigh_node = NULL;
1860 struct sk_buff *skb = NULL; 2032 struct sk_buff *skb = NULL;
1861 struct roam_adv_packet *roam_adv_packet; 2033 struct batadv_roam_adv_packet *roam_adv_packet;
1862 int ret = 1; 2034 int ret = 1;
1863 struct hard_iface *primary_if; 2035 struct batadv_hard_iface *primary_if;
2036 size_t len = sizeof(*roam_adv_packet);
1864 2037
1865 /* before going on we have to check whether the client has 2038 /* before going on we have to check whether the client has
1866 * already roamed to us too many times */ 2039 * already roamed to us too many times
1867 if (!tt_check_roam_count(bat_priv, client)) 2040 */
2041 if (!batadv_tt_check_roam_count(bat_priv, client))
1868 goto out; 2042 goto out;
1869 2043
1870 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN); 2044 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
1871 if (!skb) 2045 if (!skb)
1872 goto out; 2046 goto out;
1873 2047
1874 skb_reserve(skb, ETH_HLEN); 2048 skb_reserve(skb, ETH_HLEN);
1875 2049
1876 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, 2050 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
1877 sizeof(struct roam_adv_packet));
1878 2051
1879 roam_adv_packet->header.packet_type = BAT_ROAM_ADV; 2052 roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
1880 roam_adv_packet->header.version = COMPAT_VERSION; 2053 roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
1881 roam_adv_packet->header.ttl = TTL; 2054 roam_adv_packet->header.ttl = BATADV_TTL;
1882 primary_if = primary_if_get_selected(bat_priv); 2055 roam_adv_packet->reserved = 0;
2056 primary_if = batadv_primary_if_get_selected(bat_priv);
1883 if (!primary_if) 2057 if (!primary_if)
1884 goto out; 2058 goto out;
1885 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); 2059 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1886 hardif_free_ref(primary_if); 2060 batadv_hardif_free_ref(primary_if);
1887 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); 2061 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1888 memcpy(roam_adv_packet->client, client, ETH_ALEN); 2062 memcpy(roam_adv_packet->client, client, ETH_ALEN);
1889 2063
1890 neigh_node = orig_node_get_router(orig_node); 2064 neigh_node = batadv_orig_node_get_router(orig_node);
1891 if (!neigh_node) 2065 if (!neigh_node)
1892 goto out; 2066 goto out;
1893 2067
1894 bat_dbg(DBG_TT, bat_priv, 2068 batadv_dbg(BATADV_DBG_TT, bat_priv,
1895 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", 2069 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1896 orig_node->orig, client, neigh_node->addr); 2070 orig_node->orig, client, neigh_node->addr);
2071
2072 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
1897 2073
1898 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 2074 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1899 ret = 0; 2075 ret = 0;
1900 2076
1901out: 2077out:
1902 if (neigh_node) 2078 if (neigh_node)
1903 neigh_node_free_ref(neigh_node); 2079 batadv_neigh_node_free_ref(neigh_node);
1904 if (ret) 2080 if (ret)
1905 kfree_skb(skb); 2081 kfree_skb(skb);
1906 return; 2082 return;
1907} 2083}
1908 2084
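
The allocation in batadv_send_roam_adv() follows the standard pattern for small control frames: allocate with ETH_HLEN of headroom, reserve that headroom, append the protocol header with skb_put(), and clear the reserved byte (the new roam_adv_packet->reserved = 0 line). A condensed sketch of just that pattern, with an illustrative stand-in header layout (the real code then fills in type, version, TTL and the three addresses):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

struct example_roam_adv {	/* illustrative stand-in for the header */
	u8 packet_type;
	u8 version;
	u8 ttl;
	u8 reserved;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	u8 client[ETH_ALEN];
} __packed;

static struct sk_buff *example_build_roam_adv(void)
{
	struct example_roam_adv *pkt;
	size_t len = sizeof(*pkt);
	struct sk_buff *skb;

	skb = dev_alloc_skb(len + ETH_HLEN); /* payload + eth headroom */
	if (!skb)
		return NULL;

	skb_reserve(skb, ETH_HLEN);          /* leave room for eth header */
	pkt = (struct example_roam_adv *)skb_put(skb, len);
	memset(pkt, 0, len);                 /* incl. the reserved byte */
	return skb;
}
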
1909static void tt_purge(struct work_struct *work) 2085static void batadv_tt_purge(struct work_struct *work)
1910{ 2086{
1911 struct delayed_work *delayed_work = 2087 struct delayed_work *delayed_work;
1912 container_of(work, struct delayed_work, work); 2088 struct batadv_priv *bat_priv;
1913 struct bat_priv *bat_priv =
1914 container_of(delayed_work, struct bat_priv, tt_work);
1915 2089
1916 tt_local_purge(bat_priv); 2090 delayed_work = container_of(work, struct delayed_work, work);
1917 tt_global_roam_purge(bat_priv); 2091 bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
1918 tt_req_purge(bat_priv);
1919 tt_roam_purge(bat_priv);
1920 2092
1921 tt_start_timer(bat_priv); 2093 batadv_tt_local_purge(bat_priv);
2094 batadv_tt_global_roam_purge(bat_priv);
2095 batadv_tt_req_purge(bat_priv);
2096 batadv_tt_roam_purge(bat_priv);
2097
2098 batadv_tt_start_timer(bat_priv);
1922} 2099}
1923 2100
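
batadv_tt_purge() is rewritten here into the canonical shape for self-rearming delayed work: recover the embedding structure with two container_of() steps, run the periodic cleanups, then requeue. A generic sketch of that shape (the ten-second interval and all names are invented for illustration; batadv_tt_start_timer() presumably does the requeue in the real code):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_priv {
	struct delayed_work tt_work;
	/* ... tables, list heads, locks ... */
};

static void example_tt_purge(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct example_priv *priv;

	/* work is embedded in a delayed_work, which is embedded in priv */
	dwork = container_of(work, struct delayed_work, work);
	priv = container_of(dwork, struct example_priv, tt_work);

	/* ... purge local/global/request/roam state here ... */

	/* re-arm ourselves: run again in roughly ten seconds */
	schedule_delayed_work(&priv->tt_work, 10 * HZ);
}
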
1924void tt_free(struct bat_priv *bat_priv) 2101void batadv_tt_free(struct batadv_priv *bat_priv)
1925{ 2102{
1926 cancel_delayed_work_sync(&bat_priv->tt_work); 2103 cancel_delayed_work_sync(&bat_priv->tt_work);
1927 2104
1928 tt_local_table_free(bat_priv); 2105 batadv_tt_local_table_free(bat_priv);
1929 tt_global_table_free(bat_priv); 2106 batadv_tt_global_table_free(bat_priv);
1930 tt_req_list_free(bat_priv); 2107 batadv_tt_req_list_free(bat_priv);
1931 tt_changes_list_free(bat_priv); 2108 batadv_tt_changes_list_free(bat_priv);
1932 tt_roam_list_free(bat_priv); 2109 batadv_tt_roam_list_free(bat_priv);
1933 2110
1934 kfree(bat_priv->tt_buff); 2111 kfree(bat_priv->tt_buff);
1935} 2112}
1936 2113
1937/* This function enables or disables the specified flags for all the entries 2114/* This function enables or disables the specified flags for all the entries
1938 * in the given hash table and returns the number of modified entries */ 2115 * in the given hash table and returns the number of modified entries
1939static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, 2116 */
1940 bool enable) 2117static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2118 uint16_t flags, bool enable)
1941{ 2119{
1942 uint32_t i; 2120 uint32_t i;
1943 uint16_t changed_num = 0; 2121 uint16_t changed_num = 0;
1944 struct hlist_head *head; 2122 struct hlist_head *head;
1945 struct hlist_node *node; 2123 struct hlist_node *node;
1946 struct tt_common_entry *tt_common_entry; 2124 struct batadv_tt_common_entry *tt_common_entry;
1947 2125
1948 if (!hash) 2126 if (!hash)
1949 goto out; 2127 goto out;
@@ -1971,12 +2149,12 @@ out:
1971 return changed_num; 2149 return changed_num;
1972} 2150}
1973 2151
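
batadv_tt_set_flags() visits every hash bucket and flips one flag bit per entry, counting how many entries actually changed so the caller can adjust num_local_tt. The bit logic in isolation, as a runnable user-space model (the flag value is illustrative and the hash walk is elided):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TT_CLIENT_NEW 0x0008 /* illustrative flag value */

/* Set or clear `flag` in *flags; return true if the word changed. */
static bool set_flag(uint16_t *flags, uint16_t flag, bool enable)
{
	uint16_t old = *flags;

	if (enable)
		*flags |= flag;
	else
		*flags &= ~flag;

	return *flags != old;
}

int main(void)
{
	uint16_t entries[3] = { 0x0000, TT_CLIENT_NEW, 0x0010 };
	uint16_t changed = 0;

	for (int i = 0; i < 3; i++)
		if (set_flag(&entries[i], TT_CLIENT_NEW, false))
			changed++;

	printf("changed entries: %u\n", changed); /* prints 1 */
	return 0;
}
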
1974/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */ 2152/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
1975static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) 2153static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
1976{ 2154{
1977 struct hashtable_t *hash = bat_priv->tt_local_hash; 2155 struct batadv_hashtable *hash = bat_priv->tt_local_hash;
1978 struct tt_common_entry *tt_common_entry; 2156 struct batadv_tt_common_entry *tt_common;
1979 struct tt_local_entry *tt_local_entry; 2157 struct batadv_tt_local_entry *tt_local;
1980 struct hlist_node *node, *node_tmp; 2158 struct hlist_node *node, *node_tmp;
1981 struct hlist_head *head; 2159 struct hlist_head *head;
1982 spinlock_t *list_lock; /* protects write access to the hash lists */ 2160 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1990,103 +2168,149 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1990 list_lock = &hash->list_locks[i]; 2168 list_lock = &hash->list_locks[i];
1991 2169
1992 spin_lock_bh(list_lock); 2170 spin_lock_bh(list_lock);
1993 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 2171 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1994 head, hash_entry) { 2172 hash_entry) {
1995 if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) 2173 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
1996 continue; 2174 continue;
1997 2175
1998 bat_dbg(DBG_TT, bat_priv, 2176 batadv_dbg(BATADV_DBG_TT, bat_priv,
1999 "Deleting local tt entry (%pM): pending\n", 2177 "Deleting local tt entry (%pM): pending\n",
2000 tt_common_entry->addr); 2178 tt_common->addr);
2001 2179
2002 atomic_dec(&bat_priv->num_local_tt); 2180 atomic_dec(&bat_priv->num_local_tt);
2003 hlist_del_rcu(node); 2181 hlist_del_rcu(node);
2004 tt_local_entry = container_of(tt_common_entry, 2182 tt_local = container_of(tt_common,
2005 struct tt_local_entry, 2183 struct batadv_tt_local_entry,
2006 common); 2184 common);
2007 tt_local_entry_free_ref(tt_local_entry); 2185 batadv_tt_local_entry_free_ref(tt_local);
2008 } 2186 }
2009 spin_unlock_bh(list_lock); 2187 spin_unlock_bh(list_lock);
2010 } 2188 }
2011 2189
2012} 2190}
2013 2191
2014void tt_commit_changes(struct bat_priv *bat_priv) 2192static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2193 unsigned char **packet_buff,
2194 int *packet_buff_len, int packet_min_len)
2015{ 2195{
2016 uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, 2196 uint16_t changed_num = 0;
2017 TT_CLIENT_NEW, false); 2197
2018 /* all the reset entries have now to be effectively counted as local 2198 if (atomic_read(&bat_priv->tt_local_changes) < 1)
2019 * entries */ 2199 return -ENOENT;
2200
2201 changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
2202 BATADV_TT_CLIENT_NEW, false);
2203
2204 /* all reset entries have to be counted as local entries */
2020 atomic_add(changed_num, &bat_priv->num_local_tt); 2205 atomic_add(changed_num, &bat_priv->num_local_tt);
2021 tt_local_purge_pending_clients(bat_priv); 2206 batadv_tt_local_purge_pending_clients(bat_priv);
2207 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
2022 2208
2023 /* Increment the TTVN only once per OGM interval */ 2209 /* Increment the TTVN only once per OGM interval */
2024 atomic_inc(&bat_priv->ttvn); 2210 atomic_inc(&bat_priv->ttvn);
2025 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", 2211 batadv_dbg(BATADV_DBG_TT, bat_priv,
2026 (uint8_t)atomic_read(&bat_priv->ttvn)); 2212 "Local changes committed, updating to ttvn %u\n",
2213 (uint8_t)atomic_read(&bat_priv->ttvn));
2027 bat_priv->tt_poss_change = false; 2214 bat_priv->tt_poss_change = false;
2215
2216 /* reset the sending counter */
2217 atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2218
2219 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2220 packet_buff_len, packet_min_len);
2028} 2221}
2029 2222
2030bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) 2223/* when calling this function (hard_iface == primary_if) has to be true */
2224int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2225 unsigned char **packet_buff, int *packet_buff_len,
2226 int packet_min_len)
2031{ 2227{
2032 struct tt_local_entry *tt_local_entry = NULL; 2228 int tt_num_changes;
2033 struct tt_global_entry *tt_global_entry = NULL; 2229
2034 bool ret = true; 2230 /* if at least one change happened */
2231 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2232 packet_buff_len,
2233 packet_min_len);
2234
2235 /* if the changes have been sent often enough */
2236 if ((tt_num_changes < 0) &&
2237 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2238 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2239 packet_min_len, packet_min_len);
2240 tt_num_changes = 0;
2241 }
2242
2243 return tt_num_changes;
2244}
2245
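
The new batadv_tt_append_diff() gives each committed diff a bounded lifetime on the wire: a fresh commit resets tt_ogm_append_cnt to BATADV_TT_OGM_APPEND_MAX, subsequent OGMs decrement it while re-appending the same diff, and once the counter is exhausted the packet buffer is shrunk back to its minimum size. A runnable model of that counter logic (the constant value is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TT_OGM_APPEND_MAX 3 /* how often one diff is re-sent */

static int append_cnt;      /* models bat_priv->tt_ogm_append_cnt */

/* Called once per OGM. new_changes: did a commit produce a fresh diff?
 * Returns true if the (old or new) diff should ride on this OGM.
 */
static bool append_diff(bool new_changes)
{
	if (new_changes) {
		append_cnt = TT_OGM_APPEND_MAX; /* commit resets budget */
		return true;
	}
	if (append_cnt > 0) {
		append_cnt--;   /* re-send the previous diff */
		return true;
	}
	return false;           /* budget exhausted: shrink the buffer */
}

int main(void)
{
	bool ogms[6] = { true, false, false, false, false, false };

	/* one commit: the diff rides on four OGMs, then is dropped */
	for (int i = 0; i < 6; i++)
		printf("OGM %d: %s\n", i,
		       append_diff(ogms[i]) ? "diff appended" : "no diff");
	return 0;
}
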
2246bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
2247 uint8_t *dst)
2248{
2249 struct batadv_tt_local_entry *tt_local_entry = NULL;
2250 struct batadv_tt_global_entry *tt_global_entry = NULL;
2251 bool ret = false;
2035 2252
2036 if (!atomic_read(&bat_priv->ap_isolation)) 2253 if (!atomic_read(&bat_priv->ap_isolation))
2037 return false; 2254 goto out;
2038 2255
2039 tt_local_entry = tt_local_hash_find(bat_priv, dst); 2256 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2040 if (!tt_local_entry) 2257 if (!tt_local_entry)
2041 goto out; 2258 goto out;
2042 2259
2043 tt_global_entry = tt_global_hash_find(bat_priv, src); 2260 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2044 if (!tt_global_entry) 2261 if (!tt_global_entry)
2045 goto out; 2262 goto out;
2046 2263
2047 if (_is_ap_isolated(tt_local_entry, tt_global_entry)) 2264 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2048 goto out; 2265 goto out;
2049 2266
2050 ret = false; 2267 ret = true;
2051 2268
2052out: 2269out:
2053 if (tt_global_entry) 2270 if (tt_global_entry)
2054 tt_global_entry_free_ref(tt_global_entry); 2271 batadv_tt_global_entry_free_ref(tt_global_entry);
2055 if (tt_local_entry) 2272 if (tt_local_entry)
2056 tt_local_entry_free_ref(tt_local_entry); 2273 batadv_tt_local_entry_free_ref(tt_local_entry);
2057 return ret; 2274 return ret;
2058} 2275}
2059 2276
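
batadv_is_ap_isolated() reports isolation only when both lookups succeed: the destination must be one of our local clients and the source a known global client, and _batadv_is_ap_isolated() then compares the two entries' flags. Presumably both must carry a wifi-client flag, since AP isolation is about keeping two wireless clients from talking through the mesh; a user-space model of that flag test (the flag value and the exact rule are assumptions, not taken from this diff):

#include <stdbool.h>
#include <stdint.h>

#define TT_CLIENT_WIFI 0x0010 /* assumed flag: entry learned on wifi */

struct tt_entry { uint16_t flags; };

/* Model: traffic is dropped only when both endpoints are wifi clients. */
static bool is_ap_isolated(const struct tt_entry *local_dst,
			   const struct tt_entry *global_src)
{
	if (!local_dst || !global_src)
		return false; /* unknown client: let routing decide */

	return (local_dst->flags & TT_CLIENT_WIFI) &&
	       (global_src->flags & TT_CLIENT_WIFI);
}
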
2060void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 2277void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2061 const unsigned char *tt_buff, uint8_t tt_num_changes, 2278 struct batadv_orig_node *orig_node,
2062 uint8_t ttvn, uint16_t tt_crc) 2279 const unsigned char *tt_buff, uint8_t tt_num_changes,
2280 uint8_t ttvn, uint16_t tt_crc)
2063{ 2281{
2064 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 2282 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2065 bool full_table = true; 2283 bool full_table = true;
2284 struct batadv_tt_change *tt_change;
2066 2285
2067 /* don't care about a backbone gateway's updates. */ 2286 /* don't care about a backbone gateway's updates. */
2068 if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) 2287 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2069 return; 2288 return;
2070 2289
2071 /* orig table not initialised AND first diff is in the OGM OR the ttvn 2290 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2072 * increased by one -> we can apply the attached changes */ 2291 * increased by one -> we can apply the attached changes
2292 */
2073 if ((!orig_node->tt_initialised && ttvn == 1) || 2293 if ((!orig_node->tt_initialised && ttvn == 1) ||
2074 ttvn - orig_ttvn == 1) { 2294 ttvn - orig_ttvn == 1) {
2075 /* the OGM could not contain the changes due to their size or 2295 /* the OGM could not contain the changes due to their size or
2076 * because they have already been sent TT_OGM_APPEND_MAX times. 2296 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
2077 * In this case send a tt request */ 2297 * times.
2298 * In this case send a tt request
2299 */
2078 if (!tt_num_changes) { 2300 if (!tt_num_changes) {
2079 full_table = false; 2301 full_table = false;
2080 goto request_table; 2302 goto request_table;
2081 } 2303 }
2082 2304
2083 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, 2305 tt_change = (struct batadv_tt_change *)tt_buff;
2084 (struct tt_change *)tt_buff); 2306 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
2307 ttvn, tt_change);
2085 2308
2086 /* Even if we received the precomputed crc with the OGM, we 2309 /* Even if we received the precomputed crc with the OGM, we
2087 * prefer to recompute it to spot any possible inconsistency 2310 * prefer to recompute it to spot any possible inconsistency
2088 * in the global table */ 2311 * in the global table
2089 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); 2312 */
2313 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2090 2314
2091 /* The ttvn alone is not enough to guarantee consistency 2315 /* The ttvn alone is not enough to guarantee consistency
2092 * because a single value could represent different states 2316 * because a single value could represent different states
@@ -2095,26 +2319,28 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
2095 * consistent or not. E.g. a node could disconnect while its 2319 * consistent or not. E.g. a node could disconnect while its
2096 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case 2320 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2097 * checking the CRC value is mandatory to detect the 2321 * checking the CRC value is mandatory to detect the
2098 * inconsistency */ 2322 * inconsistency
2323 */
2099 if (orig_node->tt_crc != tt_crc) 2324 if (orig_node->tt_crc != tt_crc)
2100 goto request_table; 2325 goto request_table;
2101 2326
2102 /* Roaming phase is over: tables are in sync again. I can 2327 /* Roaming phase is over: tables are in sync again. I can
2103 * unset the flag */ 2328 * unset the flag
2329 */
2104 orig_node->tt_poss_change = false; 2330 orig_node->tt_poss_change = false;
2105 } else { 2331 } else {
2106 /* if we missed more than one change or our tables are not 2332 /* if we missed more than one change or our tables are not
2107 * in sync anymore -> request fresh tt data */ 2333 * in sync anymore -> request fresh tt data
2108 2334 */
2109 if (!orig_node->tt_initialised || ttvn != orig_ttvn || 2335 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2110 orig_node->tt_crc != tt_crc) { 2336 orig_node->tt_crc != tt_crc) {
2111request_table: 2337request_table:
2112 bat_dbg(DBG_TT, bat_priv, 2338 batadv_dbg(BATADV_DBG_TT, bat_priv,
2113 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n", 2339 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2114 orig_node->orig, ttvn, orig_ttvn, tt_crc, 2340 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2115 orig_node->tt_crc, tt_num_changes); 2341 orig_node->tt_crc, tt_num_changes);
2116 send_tt_request(bat_priv, orig_node, ttvn, tt_crc, 2342 batadv_send_tt_request(bat_priv, orig_node, ttvn,
2117 full_table); 2343 tt_crc, full_table);
2118 return; 2344 return;
2119 } 2345 }
2120 } 2346 }
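
batadv_tt_update_orig() reduces to a small decision over (initialised, TTVN delta, CRC): a single-step TTVN advance lets the attached diff be applied, the recomputed CRC then arbitrates whether the tables really converged, and everything else falls back to a full TT request. A runnable model of that decision; the uint8_t cast makes the single-step test explicit about 8-bit wraparound, which is a modeling choice here, not a claim about the kernel's exact integer promotions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum tt_action { TT_APPLY_DIFF, TT_REQUEST_TABLE, TT_NOOP };

static enum tt_action tt_decide(bool initialised, uint8_t orig_ttvn,
				uint8_t ttvn, bool have_changes,
				bool crc_matches)
{
	/* modular delta: 255 -> 0 still counts as a single step */
	bool single_step = (!initialised && ttvn == 1) ||
			   (uint8_t)(ttvn - orig_ttvn) == 1;

	if (single_step) {
		if (!have_changes)
			return TT_REQUEST_TABLE; /* diff missing from OGM */
		/* apply the diff, then let the CRC arbitrate */
		return crc_matches ? TT_APPLY_DIFF : TT_REQUEST_TABLE;
	}

	if (!initialised || ttvn != orig_ttvn || !crc_matches)
		return TT_REQUEST_TABLE; /* missed changes / out of sync */

	return TT_NOOP;
}

int main(void)
{
	/* TTVN wrapped from 255 to 0: still applies the attached diff */
	printf("%d\n", tt_decide(true, 255, 0, true, true) == TT_APPLY_DIFF);
	return 0;
}
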
@@ -2124,17 +2350,18 @@ request_table:
2124 * originator to another one. This entry is still kept for consistency 2350
2125 * purposes 2351 * purposes
2126 */ 2352 */
2127bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) 2353bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2354 uint8_t *addr)
2128{ 2355{
2129 struct tt_global_entry *tt_global_entry; 2356 struct batadv_tt_global_entry *tt_global_entry;
2130 bool ret = false; 2357 bool ret = false;
2131 2358
2132 tt_global_entry = tt_global_hash_find(bat_priv, addr); 2359 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2133 if (!tt_global_entry) 2360 if (!tt_global_entry)
2134 goto out; 2361 goto out;
2135 2362
2136 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM; 2363 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2137 tt_global_entry_free_ref(tt_global_entry); 2364 batadv_tt_global_entry_free_ref(tt_global_entry);
2138out: 2365out:
2139 return ret; 2366 return ret;
2140} 2367}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c43374dc364d..ffa87355096b 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich, Antonio Quartulli 3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * 4 *
@@ -16,44 +15,50 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 20#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 21#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
24 22
25int tt_len(int changes_num); 23int batadv_tt_len(int changes_num);
26int tt_changes_fill_buffer(struct bat_priv *bat_priv, 24int batadv_tt_init(struct batadv_priv *bat_priv);
27 unsigned char *buff, int buff_len); 25void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
28int tt_init(struct bat_priv *bat_priv); 26 int ifindex);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 27void batadv_tt_local_remove(struct batadv_priv *bat_priv,
30 int ifindex); 28 const uint8_t *addr, const char *message,
31void tt_local_remove(struct bat_priv *bat_priv, 29 bool roaming);
32 const uint8_t *addr, const char *message, bool roaming); 30int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
33int tt_local_seq_print_text(struct seq_file *seq, void *offset); 31void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
34void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 32 struct batadv_orig_node *orig_node,
35 const unsigned char *tt_buff, int tt_buff_len); 33 const unsigned char *tt_buff, int tt_buff_len);
36int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 34int batadv_tt_global_add(struct batadv_priv *bat_priv,
37 const unsigned char *addr, uint8_t ttvn, bool roaming, 35 struct batadv_orig_node *orig_node,
38 bool wifi); 36 const unsigned char *addr, uint8_t flags,
39int tt_global_seq_print_text(struct seq_file *seq, void *offset); 37 uint8_t ttvn);
40void tt_global_del_orig(struct bat_priv *bat_priv, 38int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
41 struct orig_node *orig_node, const char *message); 39void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
42struct orig_node *transtable_search(struct bat_priv *bat_priv, 40 struct batadv_orig_node *orig_node,
43 const uint8_t *src, const uint8_t *addr); 41 const char *message);
44uint16_t tt_local_crc(struct bat_priv *bat_priv); 42struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
45void tt_free(struct bat_priv *bat_priv); 43 const uint8_t *src,
46bool send_tt_response(struct bat_priv *bat_priv, 44 const uint8_t *addr);
47 struct tt_query_packet *tt_request); 45void batadv_tt_free(struct batadv_priv *bat_priv);
48bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 46bool batadv_send_tt_response(struct batadv_priv *bat_priv,
49void handle_tt_response(struct bat_priv *bat_priv, 47 struct batadv_tt_query_packet *tt_request);
50 struct tt_query_packet *tt_response); 48bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
51void tt_commit_changes(struct bat_priv *bat_priv); 49void batadv_handle_tt_response(struct batadv_priv *bat_priv,
52bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); 50 struct batadv_tt_query_packet *tt_response);
53void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 51bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
54 const unsigned char *tt_buff, uint8_t tt_num_changes, 52 uint8_t *dst);
55 uint8_t ttvn, uint16_t tt_crc); 53void batadv_tt_update_orig(struct batadv_priv *bat_priv,
56bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); 54 struct batadv_orig_node *orig_node,
55 const unsigned char *tt_buff, uint8_t tt_num_changes,
56 uint8_t ttvn, uint16_t tt_crc);
57int batadv_tt_append_diff(struct batadv_priv *bat_priv,
58 unsigned char **packet_buff, int *packet_buff_len,
59 int packet_min_len);
60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
61 uint8_t *addr);
57 62
58 63
59#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 64#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 61308e8016ff..12635fd2c3d3 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,24 +15,20 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22
23
24#ifndef _NET_BATMAN_ADV_TYPES_H_ 20#ifndef _NET_BATMAN_ADV_TYPES_H_
25#define _NET_BATMAN_ADV_TYPES_H_ 21#define _NET_BATMAN_ADV_TYPES_H_
26 22
27#include "packet.h" 23#include "packet.h"
28#include "bitarray.h" 24#include "bitarray.h"
25#include <linux/kernel.h>
29 26
30#define BAT_HEADER_LEN (ETH_HLEN + \ 27#define BATADV_HEADER_LEN \
31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ 28 (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
32 sizeof(struct unicast_packet) : \ 29 sizeof(struct batadv_bcast_packet)))
33 sizeof(struct bcast_packet))))
34
35 30
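
The BATADV_HEADER_LEN rewrite replaces an open-coded ternary with the kernel's max() macro from <linux/kernel.h> (hence the new include a few lines up); both forms reserve headroom for whichever of the two packet headers is larger. The equivalence, as a self-contained user-space check with stand-in struct sizes (kernel max() additionally type-checks its operands, which both sizeof expressions satisfy):

#include <stdio.h>

#define ETH_HLEN 14

struct unicast_packet { unsigned char hdr[10]; }; /* sizes illustrative */
struct bcast_packet   { unsigned char hdr[14]; };

#define MAX(a, b) ((a) > (b) ? (a) : (b)) /* stand-in for kernel max() */

#define HEADER_LEN_OLD (ETH_HLEN + \
	((sizeof(struct unicast_packet) > sizeof(struct bcast_packet)) ? \
	 sizeof(struct unicast_packet) : sizeof(struct bcast_packet)))

#define HEADER_LEN_NEW \
	(ETH_HLEN + MAX(sizeof(struct unicast_packet), \
			sizeof(struct bcast_packet)))

int main(void)
{
	printf("old=%zu new=%zu\n",
	       (size_t)HEADER_LEN_OLD, (size_t)HEADER_LEN_NEW); /* 28 28 */
	return 0;
}
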
36struct hard_iface { 31struct batadv_hard_iface {
37 struct list_head list; 32 struct list_head list;
38 int16_t if_num; 33 int16_t if_num;
39 char if_status; 34 char if_status;
@@ -50,7 +45,7 @@ struct hard_iface {
50}; 45};
51 46
52/** 47/**
53 * orig_node - structure for orig_list maintaining nodes of mesh 48 * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
54 * @primary_addr: hosts primary interface address 49 * @primary_addr: hosts primary interface address
55 * @last_seen: when last packet from this node was received 50 * @last_seen: when last packet from this node was received
56 * @bcast_seqno_reset: time when the broadcast seqno window was reset 51 * @bcast_seqno_reset: time when the broadcast seqno window was reset
@@ -64,10 +59,10 @@ struct hard_iface {
64 * @candidates: how many candidates are available 59 * @candidates: how many candidates are available
65 * @selected: next bonding candidate 60 * @selected: next bonding candidate
66 */ 61 */
67struct orig_node { 62struct batadv_orig_node {
68 uint8_t orig[ETH_ALEN]; 63 uint8_t orig[ETH_ALEN];
69 uint8_t primary_addr[ETH_ALEN]; 64 uint8_t primary_addr[ETH_ALEN];
70 struct neigh_node __rcu *router; /* rcu protected pointer */ 65 struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 66 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 67 uint8_t *bcast_own_sum;
73 unsigned long last_seen; 68 unsigned long last_seen;
@@ -86,11 +81,12 @@ struct orig_node {
86 * If true, then I sent a Roaming_adv to this orig_node and I have to 81 * If true, then I sent a Roaming_adv to this orig_node and I have to
87 * inspect every packet directed to it to check whether it is still 82 * inspect every packet directed to it to check whether it is still
88 * the true destination or not. This flag will be reset to false as 83 * the true destination or not. This flag will be reset to false as
89 * soon as I receive a new TTVN from this orig_node */ 84 * soon as I receive a new TTVN from this orig_node
85 */
90 bool tt_poss_change; 86 bool tt_poss_change;
91 uint32_t last_real_seqno; 87 uint32_t last_real_seqno;
92 uint8_t last_ttl; 88 uint8_t last_ttl;
93 DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE); 89 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
94 uint32_t last_bcast_seqno; 90 uint32_t last_bcast_seqno;
95 struct hlist_head neigh_list; 91 struct hlist_head neigh_list;
96 struct list_head frag_list; 92 struct list_head frag_list;
@@ -98,10 +94,11 @@ struct orig_node {
98 atomic_t refcount; 94 atomic_t refcount;
99 struct rcu_head rcu; 95 struct rcu_head rcu;
100 struct hlist_node hash_entry; 96 struct hlist_node hash_entry;
101 struct bat_priv *bat_priv; 97 struct batadv_priv *bat_priv;
102 unsigned long last_frag_packet; 98 unsigned long last_frag_packet;
103 /* ogm_cnt_lock protects: bcast_own, bcast_own_sum, 99 /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
104 * neigh_node->real_bits, neigh_node->real_packet_count */ 100 * neigh_node->real_bits, neigh_node->real_packet_count
101 */
105 spinlock_t ogm_cnt_lock; 102 spinlock_t ogm_cnt_lock;
106 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ 103 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
107 spinlock_t bcast_seqno_lock; 104 spinlock_t bcast_seqno_lock;
@@ -110,47 +107,63 @@ struct orig_node {
110 struct list_head bond_list; 107 struct list_head bond_list;
111}; 108};
112 109
113struct gw_node { 110struct batadv_gw_node {
114 struct hlist_node list; 111 struct hlist_node list;
115 struct orig_node *orig_node; 112 struct batadv_orig_node *orig_node;
116 unsigned long deleted; 113 unsigned long deleted;
117 atomic_t refcount; 114 atomic_t refcount;
118 struct rcu_head rcu; 115 struct rcu_head rcu;
119}; 116};
120 117
121/** 118/* batadv_neigh_node
122 * neigh_node
123 * @last_seen: when last packet via this neighbor was received 119 * @last_seen: when last packet via this neighbor was received
124 */ 120 */
125struct neigh_node { 121struct batadv_neigh_node {
126 struct hlist_node list; 122 struct hlist_node list;
127 uint8_t addr[ETH_ALEN]; 123 uint8_t addr[ETH_ALEN];
128 uint8_t real_packet_count; 124 uint8_t real_packet_count;
129 uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; 125 uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
130 uint8_t tq_index; 126 uint8_t tq_index;
131 uint8_t tq_avg; 127 uint8_t tq_avg;
132 uint8_t last_ttl; 128 uint8_t last_ttl;
133 struct list_head bonding_list; 129 struct list_head bonding_list;
134 unsigned long last_seen; 130 unsigned long last_seen;
135 DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); 131 DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
136 atomic_t refcount; 132 atomic_t refcount;
137 struct rcu_head rcu; 133 struct rcu_head rcu;
138 struct orig_node *orig_node; 134 struct batadv_orig_node *orig_node;
139 struct hard_iface *if_incoming; 135 struct batadv_hard_iface *if_incoming;
140 spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */ 136 spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
141}; 137};
142 138
143#ifdef CONFIG_BATMAN_ADV_BLA 139#ifdef CONFIG_BATMAN_ADV_BLA
144struct bcast_duplist_entry { 140struct batadv_bcast_duplist_entry {
145 uint8_t orig[ETH_ALEN]; 141 uint8_t orig[ETH_ALEN];
146 uint16_t crc; 142 uint16_t crc;
147 unsigned long entrytime; 143 unsigned long entrytime;
148}; 144};
149#endif 145#endif
150 146
151struct bat_priv { 147enum batadv_counters {
148 BATADV_CNT_FORWARD,
149 BATADV_CNT_FORWARD_BYTES,
150 BATADV_CNT_MGMT_TX,
151 BATADV_CNT_MGMT_TX_BYTES,
152 BATADV_CNT_MGMT_RX,
153 BATADV_CNT_MGMT_RX_BYTES,
154 BATADV_CNT_TT_REQUEST_TX,
155 BATADV_CNT_TT_REQUEST_RX,
156 BATADV_CNT_TT_RESPONSE_TX,
157 BATADV_CNT_TT_RESPONSE_RX,
158 BATADV_CNT_TT_ROAM_ADV_TX,
159 BATADV_CNT_TT_ROAM_ADV_RX,
160 BATADV_CNT_NUM,
161};
162
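
The new bat_counters field is a per-CPU array indexed by enum batadv_counters, so hot paths such as batadv_inc_counter() can bump a statistic without a shared atomic. A sketch of the usual alloc/increment/sum pattern for such a field (the helper names are illustrative; only the per-CPU primitives are real kernel API):

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/errno.h>

#define EXAMPLE_CNT_NUM 12 /* stand-in for BATADV_CNT_NUM */

struct example_priv {
	u64 __percpu *counters; /* EXAMPLE_CNT_NUM slots per CPU */
};

static int example_counters_alloc(struct example_priv *priv)
{
	priv->counters = __alloc_percpu(EXAMPLE_CNT_NUM * sizeof(u64),
					__alignof__(u64));
	return priv->counters ? 0 : -ENOMEM;
}

/* hot path: lock-free, touches only the local CPU's slot */
static void example_inc_counter(struct example_priv *priv, size_t idx)
{
	this_cpu_add(priv->counters[idx], 1);
}

/* slow path (e.g. stats readout): fold every CPU's slot together */
static u64 example_sum_counter(struct example_priv *priv, size_t idx)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(priv->counters, cpu)[idx];
	return sum;
}
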
163struct batadv_priv {
152 atomic_t mesh_state; 164 atomic_t mesh_state;
153 struct net_device_stats stats; 165 struct net_device_stats stats;
166 uint64_t __percpu *bat_counters; /* Per cpu counters */
154 atomic_t aggregated_ogms; /* boolean */ 167 atomic_t aggregated_ogms; /* boolean */
155 atomic_t bonding; /* boolean */ 168 atomic_t bonding; /* boolean */
156 atomic_t fragmentation; /* boolean */ 169 atomic_t fragmentation; /* boolean */
@@ -174,10 +187,11 @@ struct bat_priv {
174 * If true, then I received a Roaming_adv and I have to inspect every 187 * If true, then I received a Roaming_adv and I have to inspect every
175 * packet directed to me to check whether I am still the true 188 * packet directed to me to check whether I am still the true
176 * destination or not. This flag will be reset to false as soon as I 189 * destination or not. This flag will be reset to false as soon as I
177 * increase my TTVN */ 190 * increase my TTVN
191 */
178 bool tt_poss_change; 192 bool tt_poss_change;
179 char num_ifaces; 193 char num_ifaces;
180 struct debug_log *debug_log; 194 struct batadv_debug_log *debug_log;
181 struct kobject *mesh_obj; 195 struct kobject *mesh_obj;
182 struct dentry *debug_dir; 196 struct dentry *debug_dir;
183 struct hlist_head forw_bat_list; 197 struct hlist_head forw_bat_list;
@@ -185,20 +199,20 @@ struct bat_priv {
185 struct hlist_head gw_list; 199 struct hlist_head gw_list;
186 struct list_head tt_changes_list; /* tracks changes in a OGM int */ 200 struct list_head tt_changes_list; /* tracks changes in a OGM int */
187 struct list_head vis_send_list; 201 struct list_head vis_send_list;
188 struct hashtable_t *orig_hash; 202 struct batadv_hashtable *orig_hash;
189 struct hashtable_t *tt_local_hash; 203 struct batadv_hashtable *tt_local_hash;
190 struct hashtable_t *tt_global_hash; 204 struct batadv_hashtable *tt_global_hash;
191#ifdef CONFIG_BATMAN_ADV_BLA 205#ifdef CONFIG_BATMAN_ADV_BLA
192 struct hashtable_t *claim_hash; 206 struct batadv_hashtable *claim_hash;
193 struct hashtable_t *backbone_hash; 207 struct batadv_hashtable *backbone_hash;
194#endif 208#endif
195 struct list_head tt_req_list; /* list of pending tt_requests */ 209 struct list_head tt_req_list; /* list of pending tt_requests */
196 struct list_head tt_roam_list; 210 struct list_head tt_roam_list;
197 struct hashtable_t *vis_hash; 211 struct batadv_hashtable *vis_hash;
198#ifdef CONFIG_BATMAN_ADV_BLA 212#ifdef CONFIG_BATMAN_ADV_BLA
199 struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; 213 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
200 int bcast_duplist_curr; 214 int bcast_duplist_curr;
201 struct bla_claim_dst claim_dest; 215 struct batadv_bla_claim_dst claim_dest;
202#endif 216#endif
203 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 217 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
204 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 218
@@ -210,7 +224,7 @@ struct bat_priv {
210 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 224 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
211 atomic_t num_local_tt; 225 atomic_t num_local_tt;
212 /* Checksum of the local table, recomputed before sending a new OGM */ 226 /* Checksum of the local table, recomputed before sending a new OGM */
213 atomic_t tt_crc; 227 uint16_t tt_crc;
214 unsigned char *tt_buff; 228 unsigned char *tt_buff;
215 int16_t tt_buff_len; 229 int16_t tt_buff_len;
216 spinlock_t tt_buff_lock; /* protects tt_buff */ 230 spinlock_t tt_buff_lock; /* protects tt_buff */
@@ -218,29 +232,29 @@ struct bat_priv {
218 struct delayed_work orig_work; 232 struct delayed_work orig_work;
219 struct delayed_work vis_work; 233 struct delayed_work vis_work;
220 struct delayed_work bla_work; 234 struct delayed_work bla_work;
221 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 235 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
222 atomic_t gw_reselect; 236 atomic_t gw_reselect;
223 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 237 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
224 struct vis_info *my_vis_info; 238 struct batadv_vis_info *my_vis_info;
225 struct bat_algo_ops *bat_algo_ops; 239 struct batadv_algo_ops *bat_algo_ops;
226}; 240};
227 241
228struct socket_client { 242struct batadv_socket_client {
229 struct list_head queue_list; 243 struct list_head queue_list;
230 unsigned int queue_len; 244 unsigned int queue_len;
231 unsigned char index; 245 unsigned char index;
232 spinlock_t lock; /* protects queue_list, queue_len, index */ 246 spinlock_t lock; /* protects queue_list, queue_len, index */
233 wait_queue_head_t queue_wait; 247 wait_queue_head_t queue_wait;
234 struct bat_priv *bat_priv; 248 struct batadv_priv *bat_priv;
235}; 249};
236 250
237struct socket_packet { 251struct batadv_socket_packet {
238 struct list_head list; 252 struct list_head list;
239 size_t icmp_len; 253 size_t icmp_len;
240 struct icmp_packet_rr icmp_packet; 254 struct batadv_icmp_packet_rr icmp_packet;
241}; 255};
242 256
243struct tt_common_entry { 257struct batadv_tt_common_entry {
244 uint8_t addr[ETH_ALEN]; 258 uint8_t addr[ETH_ALEN];
245 struct hlist_node hash_entry; 259 struct hlist_node hash_entry;
246 uint16_t flags; 260 uint16_t flags;
@@ -248,31 +262,31 @@ struct tt_common_entry {
248 struct rcu_head rcu; 262 struct rcu_head rcu;
249}; 263};
250 264
251struct tt_local_entry { 265struct batadv_tt_local_entry {
252 struct tt_common_entry common; 266 struct batadv_tt_common_entry common;
253 unsigned long last_seen; 267 unsigned long last_seen;
254}; 268};
255 269
256struct tt_global_entry { 270struct batadv_tt_global_entry {
257 struct tt_common_entry common; 271 struct batadv_tt_common_entry common;
258 struct hlist_head orig_list; 272 struct hlist_head orig_list;
259 spinlock_t list_lock; /* protects the list */ 273 spinlock_t list_lock; /* protects the list */
260 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 274 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
261}; 275};
262 276
263struct tt_orig_list_entry { 277struct batadv_tt_orig_list_entry {
264 struct orig_node *orig_node; 278 struct batadv_orig_node *orig_node;
265 uint8_t ttvn; 279 uint8_t ttvn;
266 struct rcu_head rcu; 280 struct rcu_head rcu;
267 struct hlist_node list; 281 struct hlist_node list;
268}; 282};
269 283
270#ifdef CONFIG_BATMAN_ADV_BLA 284#ifdef CONFIG_BATMAN_ADV_BLA
271struct backbone_gw { 285struct batadv_backbone_gw {
272 uint8_t orig[ETH_ALEN]; 286 uint8_t orig[ETH_ALEN];
273 short vid; /* used VLAN ID */ 287 short vid; /* used VLAN ID */
274 struct hlist_node hash_entry; 288 struct hlist_node hash_entry;
275 struct bat_priv *bat_priv; 289 struct batadv_priv *bat_priv;
276 unsigned long lasttime; /* last time we heard of this backbone gw */ 290 unsigned long lasttime; /* last time we heard of this backbone gw */
277 atomic_t request_sent; 291 atomic_t request_sent;
278 atomic_t refcount; 292 atomic_t refcount;
@@ -280,10 +294,10 @@ struct backbone_gw {
280 uint16_t crc; /* crc checksum over all claims */ 294 uint16_t crc; /* crc checksum over all claims */
281}; 295};
282 296
283struct claim { 297struct batadv_claim {
284 uint8_t addr[ETH_ALEN]; 298 uint8_t addr[ETH_ALEN];
285 short vid; 299 short vid;
286 struct backbone_gw *backbone_gw; 300 struct batadv_backbone_gw *backbone_gw;
287 unsigned long lasttime; /* last time we heard of claim (locals only) */ 301 unsigned long lasttime; /* last time we heard of claim (locals only) */
288 struct rcu_head rcu; 302 struct rcu_head rcu;
289 atomic_t refcount; 303 atomic_t refcount;
@@ -291,29 +305,28 @@ struct claim {
291}; 305};
292#endif 306#endif
293 307
294struct tt_change_node { 308struct batadv_tt_change_node {
295 struct list_head list; 309 struct list_head list;
296 struct tt_change change; 310 struct batadv_tt_change change;
297}; 311};
298 312
299struct tt_req_node { 313struct batadv_tt_req_node {
300 uint8_t addr[ETH_ALEN]; 314 uint8_t addr[ETH_ALEN];
301 unsigned long issued_at; 315 unsigned long issued_at;
302 struct list_head list; 316 struct list_head list;
303}; 317};
304 318
305struct tt_roam_node { 319struct batadv_tt_roam_node {
306 uint8_t addr[ETH_ALEN]; 320 uint8_t addr[ETH_ALEN];
307 atomic_t counter; 321 atomic_t counter;
308 unsigned long first_time; 322 unsigned long first_time;
309 struct list_head list; 323 struct list_head list;
310}; 324};
311 325
312/** 326/* forw_packet - structure for forw_list maintaining packets to be
313 * forw_packet - structure for forw_list maintaining packets to be
314 * send/forwarded 327 * send/forwarded
315 */ 328 */
316struct forw_packet { 329struct batadv_forw_packet {
317 struct hlist_node list; 330 struct hlist_node list;
318 unsigned long send_time; 331 unsigned long send_time;
319 uint8_t own; 332 uint8_t own;
@@ -322,76 +335,76 @@ struct forw_packet {
322 uint32_t direct_link_flags; 335 uint32_t direct_link_flags;
323 uint8_t num_packets; 336 uint8_t num_packets;
324 struct delayed_work delayed_work; 337 struct delayed_work delayed_work;
325 struct hard_iface *if_incoming; 338 struct batadv_hard_iface *if_incoming;
326}; 339};
327 340
328/* While scanning for vis-entries of a particular vis-originator 341/* While scanning for vis-entries of a particular vis-originator
329 * this list collects its interfaces to create a subgraph/cluster 342 * this list collects its interfaces to create a subgraph/cluster
330 * out of them later 343 * out of them later
331 */ 344 */
332struct if_list_entry { 345struct batadv_if_list_entry {
333 uint8_t addr[ETH_ALEN]; 346 uint8_t addr[ETH_ALEN];
334 bool primary; 347 bool primary;
335 struct hlist_node list; 348 struct hlist_node list;
336}; 349};
337 350
338struct debug_log { 351struct batadv_debug_log {
339 char log_buff[LOG_BUF_LEN]; 352 char log_buff[BATADV_LOG_BUF_LEN];
340 unsigned long log_start; 353 unsigned long log_start;
341 unsigned long log_end; 354 unsigned long log_end;
342 spinlock_t lock; /* protects log_buff, log_start and log_end */ 355 spinlock_t lock; /* protects log_buff, log_start and log_end */
343 wait_queue_head_t queue_wait; 356 wait_queue_head_t queue_wait;
344}; 357};
345 358
346struct frag_packet_list_entry { 359struct batadv_frag_packet_list_entry {
347 struct list_head list; 360 struct list_head list;
348 uint16_t seqno; 361 uint16_t seqno;
349 struct sk_buff *skb; 362 struct sk_buff *skb;
350}; 363};
351 364
352struct vis_info { 365struct batadv_vis_info {
353 unsigned long first_seen; 366 unsigned long first_seen;
354 /* list of server-neighbors we received a vis-packet 367 /* list of server-neighbors we received a vis-packet
355 * from. we should not reply to them. */ 368 * from. we should not reply to them.
369 */
356 struct list_head recv_list; 370 struct list_head recv_list;
357 struct list_head send_list; 371 struct list_head send_list;
358 struct kref refcount; 372 struct kref refcount;
359 struct hlist_node hash_entry; 373 struct hlist_node hash_entry;
360 struct bat_priv *bat_priv; 374 struct batadv_priv *bat_priv;
361 /* this packet might be part of the vis send queue. */ 375 /* this packet might be part of the vis send queue. */
362 struct sk_buff *skb_packet; 376 struct sk_buff *skb_packet;
363 /* vis_info may follow here*/ 377 /* vis_info may follow here */
364} __packed; 378} __packed;
365 379
366struct vis_info_entry { 380struct batadv_vis_info_entry {
367 uint8_t src[ETH_ALEN]; 381 uint8_t src[ETH_ALEN];
368 uint8_t dest[ETH_ALEN]; 382 uint8_t dest[ETH_ALEN];
369 uint8_t quality; /* quality = 0 client */ 383 uint8_t quality; /* quality = 0 client */
370} __packed; 384} __packed;
371 385
372struct recvlist_node { 386struct batadv_recvlist_node {
373 struct list_head list; 387 struct list_head list;
374 uint8_t mac[ETH_ALEN]; 388 uint8_t mac[ETH_ALEN];
375}; 389};
376 390
377struct bat_algo_ops { 391struct batadv_algo_ops {
378 struct hlist_node list; 392 struct hlist_node list;
379 char *name; 393 char *name;
380 /* init routing info when hard-interface is enabled */ 394 /* init routing info when hard-interface is enabled */
381 int (*bat_iface_enable)(struct hard_iface *hard_iface); 395 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
382 /* de-init routing info when hard-interface is disabled */ 396 /* de-init routing info when hard-interface is disabled */
383 void (*bat_iface_disable)(struct hard_iface *hard_iface); 397 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
384 /* (re-)init mac addresses of the protocol information 398 /* (re-)init mac addresses of the protocol information
385 * belonging to this hard-interface 399 * belonging to this hard-interface
386 */ 400 */
387 void (*bat_iface_update_mac)(struct hard_iface *hard_iface); 401 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
388 /* called when primary interface is selected / changed */ 402 /* called when primary interface is selected / changed */
389 void (*bat_primary_iface_set)(struct hard_iface *hard_iface); 403 void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
390 /* prepare a new outgoing OGM for the send queue */ 404 /* prepare a new outgoing OGM for the send queue */
391 void (*bat_ogm_schedule)(struct hard_iface *hard_iface, 405 void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
392 int tt_num_changes);
393 /* send scheduled OGM */ 406 /* send scheduled OGM */
394 void (*bat_ogm_emit)(struct forw_packet *forw_packet); 407 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
395}; 408};
396 409
397#endif /* _NET_BATMAN_ADV_TYPES_H_ */ 410#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 74175c210858..00164645b3f7 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Andreas Langer 3 * Andreas Langer
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -31,19 +29,20 @@
31#include "hard-interface.h" 29#include "hard-interface.h"
32 30
33 31
34static struct sk_buff *frag_merge_packet(struct list_head *head, 32static struct sk_buff *
35 struct frag_packet_list_entry *tfp, 33batadv_frag_merge_packet(struct list_head *head,
36 struct sk_buff *skb) 34 struct batadv_frag_packet_list_entry *tfp,
35 struct sk_buff *skb)
37{ 36{
38 struct unicast_frag_packet *up = 37 struct batadv_unicast_frag_packet *up;
39 (struct unicast_frag_packet *)skb->data;
40 struct sk_buff *tmp_skb; 38 struct sk_buff *tmp_skb;
41 struct unicast_packet *unicast_packet; 39 struct batadv_unicast_packet *unicast_packet;
42 int hdr_len = sizeof(*unicast_packet); 40 int hdr_len = sizeof(*unicast_packet);
43 int uni_diff = sizeof(*up) - hdr_len; 41 int uni_diff = sizeof(*up) - hdr_len;
44 42
43 up = (struct batadv_unicast_frag_packet *)skb->data;
45 /* set skb to the first part and tmp_skb to the second part */ 44 /* set skb to the first part and tmp_skb to the second part */
46 if (up->flags & UNI_FRAG_HEAD) { 45 if (up->flags & BATADV_UNI_FRAG_HEAD) {
47 tmp_skb = tfp->skb; 46 tmp_skb = tfp->skb;
48 } else { 47 } else {
49 tmp_skb = skb; 48 tmp_skb = skb;
@@ -66,8 +65,9 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
66 kfree_skb(tmp_skb); 65 kfree_skb(tmp_skb);
67 66
68 memmove(skb->data + uni_diff, skb->data, hdr_len); 67 memmove(skb->data + uni_diff, skb->data, hdr_len);
69 unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff); 68 unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
70 unicast_packet->header.packet_type = BAT_UNICAST; 69 uni_diff);
70 unicast_packet->header.packet_type = BATADV_UNICAST;
71 71
72 return skb; 72 return skb;
73 73
@@ -77,11 +77,13 @@ err:
77 return NULL; 77 return NULL;
78} 78}
79 79
80static void frag_create_entry(struct list_head *head, struct sk_buff *skb) 80static void batadv_frag_create_entry(struct list_head *head,
81 struct sk_buff *skb)
81{ 82{
82 struct frag_packet_list_entry *tfp; 83 struct batadv_frag_packet_list_entry *tfp;
83 struct unicast_frag_packet *up = 84 struct batadv_unicast_frag_packet *up;
84 (struct unicast_frag_packet *)skb->data; 85
86 up = (struct batadv_unicast_frag_packet *)skb->data;
85 87
86 /* free slots and the oldest packets stand at the end */ 88
87 tfp = list_entry((head)->prev, typeof(*tfp), list); 89 tfp = list_entry((head)->prev, typeof(*tfp), list);
@@ -93,15 +95,15 @@ static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
93 return; 95 return;
94} 96}
95 97
96static int frag_create_buffer(struct list_head *head) 98static int batadv_frag_create_buffer(struct list_head *head)
97{ 99{
98 int i; 100 int i;
99 struct frag_packet_list_entry *tfp; 101 struct batadv_frag_packet_list_entry *tfp;
100 102
101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) { 103 for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
102 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC); 104 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
103 if (!tfp) { 105 if (!tfp) {
104 frag_list_free(head); 106 batadv_frag_list_free(head);
105 return -ENOMEM; 107 return -ENOMEM;
106 } 108 }
107 tfp->skb = NULL; 109 tfp->skb = NULL;
@@ -113,14 +115,15 @@ static int frag_create_buffer(struct list_head *head)
113 return 0; 115 return 0;
114} 116}
115 117
116static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, 118static struct batadv_frag_packet_list_entry *
117 const struct unicast_frag_packet *up) 119batadv_frag_search_packet(struct list_head *head,
120 const struct batadv_unicast_frag_packet *up)
118{ 121{
119 struct frag_packet_list_entry *tfp; 122 struct batadv_frag_packet_list_entry *tfp;
120 struct unicast_frag_packet *tmp_up = NULL; 123 struct batadv_unicast_frag_packet *tmp_up = NULL;
121 uint16_t search_seqno; 124 uint16_t search_seqno;
122 125
123 if (up->flags & UNI_FRAG_HEAD) 126 if (up->flags & BATADV_UNI_FRAG_HEAD)
124 search_seqno = ntohs(up->seqno)+1; 127 search_seqno = ntohs(up->seqno)+1;
125 else 128 else
126 search_seqno = ntohs(up->seqno)-1; 129 search_seqno = ntohs(up->seqno)-1;
@@ -133,12 +136,12 @@ static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
133 if (tfp->seqno == ntohs(up->seqno)) 136 if (tfp->seqno == ntohs(up->seqno))
134 goto mov_tail; 137 goto mov_tail;
135 138
136 tmp_up = (struct unicast_frag_packet *)tfp->skb->data; 139 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
137 140
138 if (tfp->seqno == search_seqno) { 141 if (tfp->seqno == search_seqno) {
139 142
140 if ((tmp_up->flags & UNI_FRAG_HEAD) != 143 if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
141 (up->flags & UNI_FRAG_HEAD)) 144 (up->flags & BATADV_UNI_FRAG_HEAD))
142 return tfp; 145 return tfp;
143 else 146 else
144 goto mov_tail; 147 goto mov_tail;
@@ -151,9 +154,9 @@ mov_tail:
151 return NULL; 154 return NULL;
152} 155}
153 156
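
batadv_frag_search_packet() exploits how fragment sequence numbers are assigned in pairs: the head fragment carries seqno n and the tail n+1, so the partner of an incoming fragment is found by searching the buffer for seqno±1 with the opposite head flag. A runnable model of the pairing rule (flag value illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRAG_HEAD 0x01

struct frag { uint16_t seqno; uint8_t flags; };

/* Does `stored` complete the pair for incoming fragment `in`? */
static bool is_partner(const struct frag *stored, const struct frag *in)
{
	uint16_t want = (in->flags & FRAG_HEAD) ? (uint16_t)(in->seqno + 1)
						: (uint16_t)(in->seqno - 1);

	return stored->seqno == want &&
	       (stored->flags & FRAG_HEAD) != (in->flags & FRAG_HEAD);
}

int main(void)
{
	struct frag head = { 42, FRAG_HEAD }, tail = { 43, 0 };

	printf("%d %d\n", is_partner(&tail, &head),  /* 1: match */
	       is_partner(&head, &head));            /* 0: same half */
	return 0;
}
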
154void frag_list_free(struct list_head *head) 157void batadv_frag_list_free(struct list_head *head)
155{ 158{
156 struct frag_packet_list_entry *pf, *tmp_pf; 159 struct batadv_frag_packet_list_entry *pf, *tmp_pf;
157 160
158 if (!list_empty(head)) { 161 if (!list_empty(head)) {
159 162
@@ -172,64 +175,66 @@ void frag_list_free(struct list_head *head)
172 * or the skb could be reassembled (skb_new will point to the new packet and 175 * or the skb could be reassembled (skb_new will point to the new packet and
173 * skb was freed) 176 * skb was freed)
174 */ 177 */
175int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 178int batadv_frag_reassemble_skb(struct sk_buff *skb,
176 struct sk_buff **new_skb) 179 struct batadv_priv *bat_priv,
180 struct sk_buff **new_skb)
177{ 181{
178 struct orig_node *orig_node; 182 struct batadv_orig_node *orig_node;
179 struct frag_packet_list_entry *tmp_frag_entry; 183 struct batadv_frag_packet_list_entry *tmp_frag_entry;
180 int ret = NET_RX_DROP; 184 int ret = NET_RX_DROP;
181 struct unicast_frag_packet *unicast_packet = 185 struct batadv_unicast_frag_packet *unicast_packet;
182 (struct unicast_frag_packet *)skb->data;
183 186
187 unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
184 *new_skb = NULL; 188 *new_skb = NULL;
185 189
186 orig_node = orig_hash_find(bat_priv, unicast_packet->orig); 190 orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
187 if (!orig_node) 191 if (!orig_node)
188 goto out; 192 goto out;
189 193
190 orig_node->last_frag_packet = jiffies; 194 orig_node->last_frag_packet = jiffies;
191 195
192 if (list_empty(&orig_node->frag_list) && 196 if (list_empty(&orig_node->frag_list) &&
193 frag_create_buffer(&orig_node->frag_list)) { 197 batadv_frag_create_buffer(&orig_node->frag_list)) {
194 pr_debug("couldn't create frag buffer\n"); 198 pr_debug("couldn't create frag buffer\n");
195 goto out; 199 goto out;
196 } 200 }
197 201
198 tmp_frag_entry = frag_search_packet(&orig_node->frag_list, 202 tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
199 unicast_packet); 203 unicast_packet);
200 204
201 if (!tmp_frag_entry) { 205 if (!tmp_frag_entry) {
202 frag_create_entry(&orig_node->frag_list, skb); 206 batadv_frag_create_entry(&orig_node->frag_list, skb);
203 ret = NET_RX_SUCCESS; 207 ret = NET_RX_SUCCESS;
204 goto out; 208 goto out;
205 } 209 }
206 210
207 *new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry, 211 *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
208 skb); 212 tmp_frag_entry, skb);
209 /* if not, merge failed */ 213 /* if not, merge failed */
210 if (*new_skb) 214 if (*new_skb)
211 ret = NET_RX_SUCCESS; 215 ret = NET_RX_SUCCESS;
212 216
213out: 217out:
214 if (orig_node) 218 if (orig_node)
215 orig_node_free_ref(orig_node); 219 batadv_orig_node_free_ref(orig_node);
216 return ret; 220 return ret;
217} 221}
218 222
219int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 223int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
220 struct hard_iface *hard_iface, const uint8_t dstaddr[]) 224 struct batadv_hard_iface *hard_iface,
225 const uint8_t dstaddr[])
221{ 226{
222 struct unicast_packet tmp_uc, *unicast_packet; 227 struct batadv_unicast_packet tmp_uc, *unicast_packet;
223 struct hard_iface *primary_if; 228 struct batadv_hard_iface *primary_if;
224 struct sk_buff *frag_skb; 229 struct sk_buff *frag_skb;
225 struct unicast_frag_packet *frag1, *frag2; 230 struct batadv_unicast_frag_packet *frag1, *frag2;
226 int uc_hdr_len = sizeof(*unicast_packet); 231 int uc_hdr_len = sizeof(*unicast_packet);
227 int ucf_hdr_len = sizeof(*frag1); 232 int ucf_hdr_len = sizeof(*frag1);
228 int data_len = skb->len - uc_hdr_len; 233 int data_len = skb->len - uc_hdr_len;
229 int large_tail = 0, ret = NET_RX_DROP; 234 int large_tail = 0, ret = NET_RX_DROP;
230 uint16_t seqno; 235 uint16_t seqno;
231 236
232 primary_if = primary_if_get_selected(bat_priv); 237 primary_if = batadv_primary_if_get_selected(bat_priv);
233 if (!primary_if) 238 if (!primary_if)
234 goto dropped; 239 goto dropped;
235 240
@@ -238,38 +243,38 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
238 goto dropped; 243 goto dropped;
239 skb_reserve(frag_skb, ucf_hdr_len); 244 skb_reserve(frag_skb, ucf_hdr_len);
240 245
241 unicast_packet = (struct unicast_packet *)skb->data; 246 unicast_packet = (struct batadv_unicast_packet *)skb->data;
242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len); 247 memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); 248 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
244 249
245 if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || 250 if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
246 my_skb_head_push(frag_skb, ucf_hdr_len) < 0) 251 batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
247 goto drop_frag; 252 goto drop_frag;
248 253
249 frag1 = (struct unicast_frag_packet *)skb->data; 254 frag1 = (struct batadv_unicast_frag_packet *)skb->data;
250 frag2 = (struct unicast_frag_packet *)frag_skb->data; 255 frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
251 256
252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc)); 257 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
253 258
254 frag1->header.ttl--; 259 frag1->header.ttl--;
255 frag1->header.version = COMPAT_VERSION; 260 frag1->header.version = BATADV_COMPAT_VERSION;
256 frag1->header.packet_type = BAT_UNICAST_FRAG; 261 frag1->header.packet_type = BATADV_UNICAST_FRAG;
257 262
258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 263 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
259 memcpy(frag2, frag1, sizeof(*frag2)); 264 memcpy(frag2, frag1, sizeof(*frag2));
260 265
261 if (data_len & 1) 266 if (data_len & 1)
262 large_tail = UNI_FRAG_LARGETAIL; 267 large_tail = BATADV_UNI_FRAG_LARGETAIL;
263 268
264 frag1->flags = UNI_FRAG_HEAD | large_tail; 269 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
265 frag2->flags = large_tail; 270 frag2->flags = large_tail;
266 271
267 seqno = atomic_add_return(2, &hard_iface->frag_seqno); 272 seqno = atomic_add_return(2, &hard_iface->frag_seqno);
268 frag1->seqno = htons(seqno - 1); 273 frag1->seqno = htons(seqno - 1);
269 frag2->seqno = htons(seqno); 274 frag2->seqno = htons(seqno);
270 275
271 send_skb_packet(skb, hard_iface, dstaddr); 276 batadv_send_skb_packet(skb, hard_iface, dstaddr);
272 send_skb_packet(frag_skb, hard_iface, dstaddr); 277 batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
273 ret = NET_RX_SUCCESS; 278 ret = NET_RX_SUCCESS;
274 goto out; 279 goto out;
275 280
@@ -279,52 +284,53 @@ dropped:
279 kfree_skb(skb); 284 kfree_skb(skb);
280out: 285out:
281 if (primary_if) 286 if (primary_if)
282 hardif_free_ref(primary_if); 287 batadv_hardif_free_ref(primary_if);
283 return ret; 288 return ret;
284} 289}
285 290
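
Note how frag_send_skb reserves both sequence numbers of a pair with a single atomic_add_return(2, ...): concurrent senders on the same interface can never interleave halves of different pairs, since each caller owns (seqno - 1, seqno). A runnable model using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint frag_seqno; /* models hard_iface->frag_seqno */

/* Reserve a (head, tail) seqno pair in one atomic step. */
static void alloc_seqno_pair(uint16_t *head, uint16_t *tail)
{
	/* atomic_fetch_add returns the old value; +2 yields the new one,
	 * matching the kernel's atomic_add_return() semantics
	 */
	unsigned int s = atomic_fetch_add(&frag_seqno, 2) + 2;

	*head = (uint16_t)(s - 1); /* goes into frag1->seqno */
	*tail = (uint16_t)s;       /* goes into frag2->seqno */
}

int main(void)
{
	uint16_t h, t;

	alloc_seqno_pair(&h, &t);
	printf("pair: %u,%u\n", h, t); /* 1,2 */
	alloc_seqno_pair(&h, &t);
	printf("pair: %u,%u\n", h, t); /* 3,4 */
	return 0;
}
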
286int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) 291int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv)
287{ 292{
288 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 293 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
289 struct unicast_packet *unicast_packet; 294 struct batadv_unicast_packet *unicast_packet;
290 struct orig_node *orig_node; 295 struct batadv_orig_node *orig_node;
291 struct neigh_node *neigh_node; 296 struct batadv_neigh_node *neigh_node;
292 int data_len = skb->len; 297 int data_len = skb->len;
293 int ret = 1; 298 int ret = 1;
299 unsigned int dev_mtu;
294 300
295 /* get routing information */ 301 /* get routing information */
296 if (is_multicast_ether_addr(ethhdr->h_dest)) { 302 if (is_multicast_ether_addr(ethhdr->h_dest)) {
297 orig_node = gw_get_selected_orig(bat_priv); 303 orig_node = batadv_gw_get_selected_orig(bat_priv);
298 if (orig_node) 304 if (orig_node)
299 goto find_router; 305 goto find_router;
300 } 306 }
301 307
302 /* check for tt host - increases orig_node refcount. 308 /* check for tt host - increases orig_node refcount.
303 * returns NULL in case of AP isolation */ 309 * returns NULL in case of AP isolation
304 orig_node = transtable_search(bat_priv, ethhdr->h_source, 310 */
305 ethhdr->h_dest); 311 orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
312 ethhdr->h_dest);
306 313
307find_router: 314find_router:
308 /** 315 /* find_router():
309 * find_router():
310 * - if orig_node is NULL it returns NULL 316 * - if orig_node is NULL it returns NULL
311 * - increases neigh_nodes refcount if found. 317 * - increases neigh_nodes refcount if found.
312 */ 318 */
313 neigh_node = find_router(bat_priv, orig_node, NULL); 319 neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
314 320
315 if (!neigh_node) 321 if (!neigh_node)
316 goto out; 322 goto out;
317 323
318 if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0) 324 if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
319 goto out; 325 goto out;
320 326
321 unicast_packet = (struct unicast_packet *)skb->data; 327 unicast_packet = (struct batadv_unicast_packet *)skb->data;
322 328
323 unicast_packet->header.version = COMPAT_VERSION; 329 unicast_packet->header.version = BATADV_COMPAT_VERSION;
324 /* batman packet type: unicast */ 330 /* batman packet type: unicast */
325 unicast_packet->header.packet_type = BAT_UNICAST; 331 unicast_packet->header.packet_type = BATADV_UNICAST;
326 /* set unicast ttl */ 332 /* set unicast ttl */
327 unicast_packet->header.ttl = TTL; 333 unicast_packet->header.ttl = BATADV_TTL;
328 /* copy the destination for faster routing */ 334 /* copy the destination for faster routing */
329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 335 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
330 /* set the destination tt version number */ 336 /* set the destination tt version number */
@@ -336,28 +342,29 @@ find_router:
336 * try to reroute it because the ttvn contained in the header is less 342 * try to reroute it because the ttvn contained in the header is less
337 * than the current one 343 * than the current one
338 */ 344 */
339 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) 345 if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
340 unicast_packet->ttvn = unicast_packet->ttvn - 1; 346 unicast_packet->ttvn = unicast_packet->ttvn - 1;
341 347
348 dev_mtu = neigh_node->if_incoming->net_dev->mtu;
342 if (atomic_read(&bat_priv->fragmentation) && 349 if (atomic_read(&bat_priv->fragmentation) &&
343 data_len + sizeof(*unicast_packet) > 350 data_len + sizeof(*unicast_packet) > dev_mtu) {
344 neigh_node->if_incoming->net_dev->mtu) {
345 /* send frag skb decreases ttl */ 351 /* send frag skb decreases ttl */
346 unicast_packet->header.ttl++; 352 unicast_packet->header.ttl++;
347 ret = frag_send_skb(skb, bat_priv, 353 ret = batadv_frag_send_skb(skb, bat_priv,
348 neigh_node->if_incoming, neigh_node->addr); 354 neigh_node->if_incoming,
355 neigh_node->addr);
349 goto out; 356 goto out;
350 } 357 }
351 358
352 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 359 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
353 ret = 0; 360 ret = 0;
354 goto out; 361 goto out;
355 362
356out: 363out:
357 if (neigh_node) 364 if (neigh_node)
358 neigh_node_free_ref(neigh_node); 365 batadv_neigh_node_free_ref(neigh_node);
359 if (orig_node) 366 if (orig_node)
360 orig_node_free_ref(orig_node); 367 batadv_orig_node_free_ref(orig_node);
361 if (ret == 1) 368 if (ret == 1)
362 kfree_skb(skb); 369 kfree_skb(skb);
363 return ret; 370 return ret;
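
Editor's note on the hunks above: batadv_frag_send_skb() halves an oversized frame with skb_split(), prefixes both halves with a fragment header, flags both fragments BATADV_UNI_FRAG_LARGETAIL when the payload length is odd (the tail keeps the extra byte), and stamps the pair with two consecutive sequence numbers taken in one atomic_add_return(2). The sketch below models only that bookkeeping in plain userspace C; the split_payload() helper and the constants are invented for illustration and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define UNI_FRAG_HEAD      0x01
#define UNI_FRAG_LARGETAIL 0x02

static uint16_t frag_seqno;	/* stands in for hard_iface->frag_seqno */

/* model how one payload of data_len bytes becomes two fragments */
static void split_payload(int data_len)
{
	int head_len = data_len / 2;		/* first half, as skb_split() leaves it */
	int tail_len = data_len - head_len;	/* the tail keeps the odd byte, if any */
	uint8_t flags1 = UNI_FRAG_HEAD, flags2 = 0;
	uint16_t seqno;

	if (data_len & 1) {			/* odd length: mark the larger tail */
		flags1 |= UNI_FRAG_LARGETAIL;
		flags2 |= UNI_FRAG_LARGETAIL;
	}

	seqno = (frag_seqno += 2);		/* two consecutive sequence numbers */
	printf("frag1: %d bytes, flags 0x%02x, seqno %u\n",
	       head_len, flags1, (unsigned int)(seqno - 1));
	printf("frag2: %d bytes, flags 0x%02x, seqno %u\n",
	       tail_len, flags2, (unsigned int)seqno);
}

int main(void)
{
	split_payload(1400);	/* even: no LARGETAIL */
	split_payload(1401);	/* odd: LARGETAIL set on both halves */
	return 0;
}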
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index a9faf6b1db19..1c46e2eb1ef9 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #ifndef _NET_BATMAN_ADV_UNICAST_H_
@@ -24,33 +22,35 @@
 
 #include "packet.h"
 
-#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
+#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
+#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
 
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-			struct sk_buff **new_skb);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-		  struct hard_iface *hard_iface, const uint8_t dstaddr[]);
+int batadv_frag_reassemble_skb(struct sk_buff *skb,
+			       struct batadv_priv *bat_priv,
+			       struct sk_buff **new_skb);
+void batadv_frag_list_free(struct list_head *head);
+int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
+int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+			 struct batadv_hard_iface *hard_iface,
+			 const uint8_t dstaddr[]);
 
-static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
+static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
 {
-	const struct unicast_frag_packet *unicast_packet;
+	const struct batadv_unicast_frag_packet *unicast_packet;
 	int uneven_correction = 0;
 	unsigned int merged_size;
 
-	unicast_packet = (struct unicast_frag_packet *)skb->data;
+	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
 
-	if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
-		if (unicast_packet->flags & UNI_FRAG_HEAD)
+	if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
+		if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
 			uneven_correction = 1;
 		else
 			uneven_correction = -1;
 	}
 
 	merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
-	merged_size += sizeof(struct unicast_packet) + uneven_correction;
+	merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
 
 	return merged_size <= mtu;
 }
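
The inline helper above answers "would the two fragments, merged, fit the MTU?" from a single fragment: double its payload, swap the fragment header for the plain unicast header, and apply a one-byte correction when the halves are uneven. Below is a hedged userspace replica of that arithmetic; the header sizes are made up and merely stand in for the real struct sizes.

#include <stdbool.h>
#include <stdio.h>

#define FRAG_HDR_SIZE	20	/* assumed sizeof(unicast_frag_packet) */
#define UC_HDR_SIZE	10	/* assumed sizeof(unicast_packet) */
#define FLAG_HEAD	0x01
#define FLAG_LARGETAIL	0x02

static bool can_reassemble(unsigned int skb_len, int flags, unsigned int mtu)
{
	int uneven_correction = 0;
	unsigned int merged_size;

	/* an odd payload leaves the extra byte in the tail; correct the
	 * doubled estimate by +1/-1 depending on which half we hold */
	if (flags & FLAG_LARGETAIL)
		uneven_correction = (flags & FLAG_HEAD) ? 1 : -1;

	merged_size = (skb_len - FRAG_HDR_SIZE) * 2;
	merged_size += UC_HDR_SIZE + uneven_correction;
	return merged_size <= mtu;
}

int main(void)
{
	/* a 720-byte fragment doubles to a 1410-byte merged packet */
	printf("%d\n", can_reassemble(720, FLAG_HEAD, 1500));	/* 1 */
	printf("%d\n", can_reassemble(820, FLAG_HEAD, 1500));	/* 0 */
	return 0;
}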
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cec216fb77c7..2a2ea0681469 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -28,16 +26,19 @@
 #include "hash.h"
 #include "originator.h"
 
-#define MAX_VIS_PACKET_SIZE 1000
+#define BATADV_MAX_VIS_PACKET_SIZE 1000
 
-static void start_vis_timer(struct bat_priv *bat_priv);
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
 
 /* free the info */
-static void free_info(struct kref *ref)
+static void batadv_free_info(struct kref *ref)
 {
-	struct vis_info *info = container_of(ref, struct vis_info, refcount);
-	struct bat_priv *bat_priv = info->bat_priv;
-	struct recvlist_node *entry, *tmp;
+	struct batadv_vis_info *info;
+	struct batadv_priv *bat_priv;
+	struct batadv_recvlist_node *entry, *tmp;
+
+	info = container_of(ref, struct batadv_vis_info, refcount);
+	bat_priv = info->bat_priv;
 
 	list_del_init(&info->send_list);
 	spin_lock_bh(&bat_priv->vis_list_lock);
@@ -52,29 +53,30 @@ static void free_info(struct kref *ref)
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(const struct hlist_node *node, const void *data2)
+static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
 {
-	const struct vis_info *d1, *d2;
-	const struct vis_packet *p1, *p2;
+	const struct batadv_vis_info *d1, *d2;
+	const struct batadv_vis_packet *p1, *p2;
 
-	d1 = container_of(node, struct vis_info, hash_entry);
+	d1 = container_of(node, struct batadv_vis_info, hash_entry);
 	d2 = data2;
-	p1 = (struct vis_packet *)d1->skb_packet->data;
-	p2 = (struct vis_packet *)d2->skb_packet->data;
-	return compare_eth(p1->vis_orig, p2->vis_orig);
+	p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
+	p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
+	return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
 }
 
-/* hash function to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static uint32_t vis_info_choose(const void *data, uint32_t size)
+/* hash function to choose an entry in a hash table of given size
+ * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+ */
+static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
 {
-	const struct vis_info *vis_info = data;
-	const struct vis_packet *packet;
+	const struct batadv_vis_info *vis_info = data;
+	const struct batadv_vis_packet *packet;
 	const unsigned char *key;
 	uint32_t hash = 0;
 	size_t i;
 
-	packet = (struct vis_packet *)vis_info->skb_packet->data;
+	packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
 	key = packet->vis_orig;
 	for (i = 0; i < ETH_ALEN; i++) {
 		hash += key[i];
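
Before the next hunk, a note on the bucket choice above: it is the classic Jenkins one-at-a-time hash fed with the 6-byte originator MAC (the comment's Wikipedia reference points at the same algorithm). The mixing rounds sit in context lines the diff does not show, so the standalone sketch below assumes the standard one-at-a-time mixing and finalization steps.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint32_t choose_bucket(const unsigned char *key, uint32_t size)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);	/* assumed mixing rounds */
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);		/* assumed finalization */
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;		/* vis_hash is created with size 256 */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };

	printf("bucket: %u of 256\n", choose_bucket(mac, 256));
	return 0;
}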
@@ -89,24 +91,24 @@ static uint32_t vis_info_choose(const void *data, uint32_t size)
 	return hash % size;
 }
 
-static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
-				      const void *data)
+static struct batadv_vis_info *
+batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-	struct hashtable_t *hash = bat_priv->vis_hash;
+	struct batadv_hashtable *hash = bat_priv->vis_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct vis_info *vis_info, *vis_info_tmp = NULL;
+	struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
 	uint32_t index;
 
 	if (!hash)
 		return NULL;
 
-	index = vis_info_choose(data, hash->size);
+	index = batadv_vis_info_choose(data, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
-		if (!vis_info_cmp(node, data))
+		if (!batadv_vis_info_cmp(node, data))
 			continue;
 
 		vis_info_tmp = vis_info;
@@ -118,16 +120,17 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
 }
 
 /* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list */
-static void vis_data_insert_interface(const uint8_t *interface,
-				      struct hlist_head *if_list,
-				      bool primary)
+ * does not already exist in the list
+ */
+static void batadv_vis_data_insert_interface(const uint8_t *interface,
+					     struct hlist_head *if_list,
+					     bool primary)
 {
-	struct if_list_entry *entry;
+	struct batadv_if_list_entry *entry;
 	struct hlist_node *pos;
 
 	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (compare_eth(entry->addr, interface))
+		if (batadv_compare_eth(entry->addr, interface))
 			return;
 	}
 
@@ -140,195 +143,145 @@ static void vis_data_insert_interface(const uint8_t *interface,
 	hlist_add_head(&entry->list, if_list);
 }
 
-static ssize_t vis_data_read_prim_sec(char *buff,
-				      const struct hlist_head *if_list)
+static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
+					  const struct hlist_head *if_list)
 {
-	struct if_list_entry *entry;
+	struct batadv_if_list_entry *entry;
 	struct hlist_node *pos;
-	size_t len = 0;
 
 	hlist_for_each_entry(entry, pos, if_list, list) {
 		if (entry->primary)
-			len += sprintf(buff + len, "PRIMARY, ");
+			seq_printf(seq, "PRIMARY, ");
 		else
-			len += sprintf(buff + len, "SEC %pM, ", entry->addr);
+			seq_printf(seq, "SEC %pM, ", entry->addr);
 	}
+}
 
-	return len;
+/* read an entry */
+static ssize_t
+batadv_vis_data_read_entry(struct seq_file *seq,
+			   const struct batadv_vis_info_entry *entry,
			   const uint8_t *src, bool primary)
+{
+	if (primary && entry->quality == 0)
+		return seq_printf(seq, "TT %pM, ", entry->dest);
+	else if (batadv_compare_eth(entry->src, src))
+		return seq_printf(seq, "TQ %pM %d, ", entry->dest,
+				  entry->quality);
+
+	return 0;
 }
 
-static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
+static void
+batadv_vis_data_insert_interfaces(struct hlist_head *list,
+				  struct batadv_vis_packet *packet,
+				  struct batadv_vis_info_entry *entries)
 {
-	struct if_list_entry *entry;
-	struct hlist_node *pos;
-	size_t count = 0;
+	int i;
 
-	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (entry->primary)
-			count += 9;
-		else
-			count += 23;
-	}
+	for (i = 0; i < packet->entries; i++) {
		if (entries[i].quality == 0)
+			continue;
 
-	return count;
+		if (batadv_compare_eth(entries[i].src, packet->vis_orig))
+			continue;
+
+		batadv_vis_data_insert_interface(entries[i].src, list, false);
+	}
 }
 
-/* read an entry */
-static ssize_t vis_data_read_entry(char *buff,
-				   const struct vis_info_entry *entry,
-				   const uint8_t *src, bool primary)
+static void batadv_vis_data_read_entries(struct seq_file *seq,
+					 struct hlist_head *list,
+					 struct batadv_vis_packet *packet,
+					 struct batadv_vis_info_entry *entries)
 {
-	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
-	if (primary && entry->quality == 0)
-		return sprintf(buff, "TT %pM, ", entry->dest);
-	else if (compare_eth(entry->src, src))
-		return sprintf(buff, "TQ %pM %d, ", entry->dest,
-			       entry->quality);
+	int i;
+	struct batadv_if_list_entry *entry;
+	struct hlist_node *pos;
 
-	return 0;
+	hlist_for_each_entry(entry, pos, list, list) {
+		seq_printf(seq, "%pM,", entry->addr);
+
+		for (i = 0; i < packet->entries; i++)
+			batadv_vis_data_read_entry(seq, &entries[i],
+						   entry->addr, entry->primary);
+
+		/* add primary/secondary records */
+		if (batadv_compare_eth(entry->addr, packet->vis_orig))
+			batadv_vis_data_read_prim_sec(seq, list);
+
+		seq_printf(seq, "\n");
+	}
 }
 
-int vis_seq_print_text(struct seq_file *seq, void *offset)
+static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
+					     const struct hlist_head *head)
 {
-	struct hard_iface *primary_if;
 	struct hlist_node *node;
+	struct batadv_vis_info *info;
+	struct batadv_vis_packet *packet;
+	uint8_t *entries_pos;
+	struct batadv_vis_info_entry *entries;
+	struct batadv_if_list_entry *entry;
+	struct hlist_node *pos, *n;
+
+	HLIST_HEAD(vis_if_list);
+
+	hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+		packet = (struct batadv_vis_packet *)info->skb_packet->data;
+		entries_pos = (uint8_t *)packet + sizeof(*packet);
+		entries = (struct batadv_vis_info_entry *)entries_pos;
+
+		batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
+						 true);
+		batadv_vis_data_insert_interfaces(&vis_if_list, packet,
+						  entries);
+		batadv_vis_data_read_entries(seq, &vis_if_list, packet,
+					     entries);
+
+		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+			hlist_del(&entry->list);
+			kfree(entry);
+		}
+	}
+}
+
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	struct vis_info *info;
-	struct vis_packet *packet;
-	struct vis_info_entry *entries;
 	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->vis_hash;
-	HLIST_HEAD(vis_if_list);
-	struct if_list_entry *entry;
-	struct hlist_node *pos, *n;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hashtable *hash = bat_priv->vis_hash;
 	uint32_t i;
-	int j, ret = 0;
+	int ret = 0;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
-	size_t buff_pos, buf_size;
-	char *buff;
-	int compare;
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
-	if (vis_server == VIS_TYPE_CLIENT_UPDATE)
+	if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
 		goto out;
 
-	buf_size = 1;
-	/* Estimate length */
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
-			packet = (struct vis_packet *)info->skb_packet->data;
-			entries = (struct vis_info_entry *)
-				((char *)packet + sizeof(*packet));
-
-			for (j = 0; j < packet->entries; j++) {
-				if (entries[j].quality == 0)
-					continue;
-				compare =
-				 compare_eth(entries[j].src, packet->vis_orig);
-				vis_data_insert_interface(entries[j].src,
-							  &vis_if_list,
-							  compare);
-			}
-
-			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
-				buf_size += 18 + 26 * packet->entries;
-
-				/* add primary/secondary records */
-				if (compare_eth(entry->addr, packet->vis_orig))
-					buf_size +=
-					  vis_data_count_prim_sec(&vis_if_list);
-
-				buf_size += 1;
-			}
-
-			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
-						  list) {
-				hlist_del(&entry->list);
-				kfree(entry);
-			}
-		}
-		rcu_read_unlock();
-	}
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		spin_unlock_bh(&bat_priv->vis_hash_lock);
-		ret = -ENOMEM;
-		goto out;
-	}
-	buff[0] = '\0';
-	buff_pos = 0;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
-			packet = (struct vis_packet *)info->skb_packet->data;
-			entries = (struct vis_info_entry *)
-				((char *)packet + sizeof(*packet));
-
-			for (j = 0; j < packet->entries; j++) {
-				if (entries[j].quality == 0)
-					continue;
-				compare =
-				 compare_eth(entries[j].src, packet->vis_orig);
-				vis_data_insert_interface(entries[j].src,
-							  &vis_if_list,
-							  compare);
-			}
-
-			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
-				buff_pos += sprintf(buff + buff_pos, "%pM,",
-						    entry->addr);
-
-				for (j = 0; j < packet->entries; j++)
-					buff_pos += vis_data_read_entry(
-							buff + buff_pos,
-							&entries[j],
-							entry->addr,
-							entry->primary);
-
-				/* add primary/secondary records */
-				if (compare_eth(entry->addr, packet->vis_orig))
-					buff_pos +=
-					 vis_data_read_prim_sec(buff + buff_pos,
-								&vis_if_list);
-
-				buff_pos += sprintf(buff + buff_pos, "\n");
-			}
-
-			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
-						  list) {
-				hlist_del(&entry->list);
-				kfree(entry);
-			}
-		}
-		rcu_read_unlock();
+		batadv_vis_seq_print_text_bucket(seq, head);
 	}
-
 	spin_unlock_bh(&bat_priv->vis_hash_lock);
 
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
-
out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	return ret;
 }
 
 /* add the info packet to the send list, if it was not
- * already linked in. */
-static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
+ * already linked in.
+ */
+static void batadv_send_list_add(struct batadv_priv *bat_priv,
+				 struct batadv_vis_info *info)
 {
 	if (list_empty(&info->send_list)) {
 		kref_get(&info->refcount);
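
The large hunk above is the heart of this file's change: the old vis_seq_print_text() walked the hash twice, first to estimate buf_size and kmalloc() a buffer, then to sprintf() into it, while the new code prints straight into the seq_file, which manages its own buffer. A minimal sketch of that seq_file pattern follows, framed as a hypothetical debugfs module; the file name, output, and demo_* identifiers are invented and only illustrate the API shape.

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *seq, void *offset)
{
	int i;

	/* seq_printf() appends to the seq_file's own buffer; no sizing pass */
	for (i = 0; i < 4; i++)
		seq_printf(seq, "entry %d\n", i);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static struct dentry *demo_dentry;

static int __init demo_init(void)
{
	demo_dentry = debugfs_create_file("vis_demo", 0400, NULL, NULL,
					  &demo_fops);
	return demo_dentry ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_dentry);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");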
@@ -337,20 +290,21 @@ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
 }
 
 /* delete the info packet from the send list, if it was
- * linked in. */
-static void send_list_del(struct vis_info *info)
+ * linked in.
+ */
+static void batadv_send_list_del(struct batadv_vis_info *info)
 {
 	if (!list_empty(&info->send_list)) {
 		list_del_init(&info->send_list);
-		kref_put(&info->refcount, free_info);
+		kref_put(&info->refcount, batadv_free_info);
 	}
 }
 
 /* tries to add one entry to the receive list. */
-static void recv_list_add(struct bat_priv *bat_priv,
-			  struct list_head *recv_list, const char *mac)
+static void batadv_recv_list_add(struct batadv_priv *bat_priv,
+				 struct list_head *recv_list, const char *mac)
 {
-	struct recvlist_node *entry;
+	struct batadv_recvlist_node *entry;
 
 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
@@ -363,14 +317,15 @@ static void recv_list_add(struct bat_priv *bat_priv,
 }
 
 /* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct bat_priv *bat_priv,
-			   const struct list_head *recv_list, const char *mac)
+static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
+				  const struct list_head *recv_list,
+				  const char *mac)
 {
-	const struct recvlist_node *entry;
+	const struct batadv_recvlist_node *entry;
 
 	spin_lock_bh(&bat_priv->vis_list_lock);
 	list_for_each_entry(entry, recv_list, list) {
-		if (compare_eth(entry->mac, mac)) {
+		if (batadv_compare_eth(entry->mac, mac)) {
 			spin_unlock_bh(&bat_priv->vis_list_lock);
 			return 1;
 		}
@@ -381,17 +336,21 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
 
 /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
  * broken.. ). vis hash must be locked outside. is_new is set when the packet
- * is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct bat_priv *bat_priv,
-				   struct vis_packet *vis_packet,
-				   int vis_info_len, int *is_new,
-				   int make_broadcast)
+ * is newer than old entries in the hash.
+ */
+static struct batadv_vis_info *
+batadv_add_packet(struct batadv_priv *bat_priv,
+		  struct batadv_vis_packet *vis_packet, int vis_info_len,
+		  int *is_new, int make_broadcast)
 {
-	struct vis_info *info, *old_info;
-	struct vis_packet *search_packet, *old_packet;
-	struct vis_info search_elem;
-	struct vis_packet *packet;
+	struct batadv_vis_info *info, *old_info;
+	struct batadv_vis_packet *search_packet, *old_packet;
+	struct batadv_vis_info search_elem;
+	struct batadv_vis_packet *packet;
+	struct sk_buff *tmp_skb;
 	int hash_added;
+	size_t len;
+	size_t max_entries;
 
 	*is_new = 0;
 	/* sanity check */
@@ -402,20 +361,23 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 	search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
 	if (!search_elem.skb_packet)
 		return NULL;
-	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
-						     sizeof(*search_packet));
+	len = sizeof(*search_packet);
+	tmp_skb = search_elem.skb_packet;
+	search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
 
 	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-	old_info = vis_hash_find(bat_priv, &search_elem);
+	old_info = batadv_vis_hash_find(bat_priv, &search_elem);
 	kfree_skb(search_elem.skb_packet);
 
 	if (old_info) {
-		old_packet = (struct vis_packet *)old_info->skb_packet->data;
-		if (!seq_after(ntohl(vis_packet->seqno),
-			       ntohl(old_packet->seqno))) {
+		tmp_skb = old_info->skb_packet;
+		old_packet = (struct batadv_vis_packet *)tmp_skb->data;
+		if (!batadv_seq_after(ntohl(vis_packet->seqno),
+				      ntohl(old_packet->seqno))) {
 			if (old_packet->seqno == vis_packet->seqno) {
-				recv_list_add(bat_priv, &old_info->recv_list,
-					      vis_packet->sender_orig);
+				batadv_recv_list_add(bat_priv,
+						     &old_info->recv_list,
+						     vis_packet->sender_orig);
 				return old_info;
 			} else {
 				/* newer packet is already in hash. */
@@ -423,52 +385,53 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 			}
 		}
 		/* remove old entry */
-		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-			    old_info);
-		send_list_del(old_info);
-		kref_put(&old_info->refcount, free_info);
+		batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+				   batadv_vis_info_choose, old_info);
+		batadv_send_list_del(old_info);
+		kref_put(&old_info->refcount, batadv_free_info);
 	}
 
 	info = kmalloc(sizeof(*info), GFP_ATOMIC);
 	if (!info)
 		return NULL;
 
-	info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
-					 ETH_HLEN);
+	len = sizeof(*packet) + vis_info_len;
+	info->skb_packet = dev_alloc_skb(len + ETH_HLEN);
 	if (!info->skb_packet) {
 		kfree(info);
 		return NULL;
 	}
 	skb_reserve(info->skb_packet, ETH_HLEN);
-	packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
-					      + vis_info_len);
+	packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
 
 	kref_init(&info->refcount);
 	INIT_LIST_HEAD(&info->send_list);
 	INIT_LIST_HEAD(&info->recv_list);
 	info->first_seen = jiffies;
 	info->bat_priv = bat_priv;
-	memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
+	memcpy(packet, vis_packet, len);
 
 	/* initialize and add new packet. */
 	*is_new = 1;
 
 	/* Make it a broadcast packet, if required */
 	if (make_broadcast)
-		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
+		memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
 
 	/* repair if entries is longer than packet. */
-	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
-		packet->entries = vis_info_len / sizeof(struct vis_info_entry);
+	max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
+	if (packet->entries > max_entries)
+		packet->entries = max_entries;
 
-	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
+	batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 
 	/* try to add it */
-	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-			      info, &info->hash_entry);
+	hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+				     batadv_vis_info_choose, info,
+				     &info->hash_entry);
 	if (hash_added != 0) {
 		/* did not work (for some reason) */
-		kref_put(&info->refcount, free_info);
+		kref_put(&info->refcount, batadv_free_info);
 		info = NULL;
 	}
 
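
Note the "repair" in the hunk above: batadv_add_packet() never trusts the entry count a received vis packet claims; it clamps it to what the payload length can actually hold. The same guard in standalone C, with an invented entry size standing in for sizeof(struct batadv_vis_info_entry):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 13	/* assumed per-entry size, illustration only */

static uint8_t clamp_entries(uint8_t claimed, size_t payload_len)
{
	size_t max_entries = payload_len / ENTRY_SIZE;

	/* never let the header promise more entries than the buffer holds */
	return claimed > max_entries ? (uint8_t)max_entries : claimed;
}

int main(void)
{
	/* a packet claiming 30 entries but carrying 130 bytes is cut to 10 */
	printf("%u\n", clamp_entries(30, 130));
	return 0;
}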
@@ -476,37 +439,38 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 }
 
 /* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct bat_priv *bat_priv,
-				struct vis_packet *vis_packet,
-				int vis_info_len)
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+				       struct batadv_vis_packet *vis_packet,
+				       int vis_info_len)
 {
-	struct vis_info *info;
+	struct batadv_vis_info *info;
 	int is_new, make_broadcast;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
 
-	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
+	make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
-	info = add_packet(bat_priv, vis_packet, vis_info_len,
-			  &is_new, make_broadcast);
+	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+				 &is_new, make_broadcast);
 	if (!info)
 		goto end;
 
 	/* only if we are server ourselves and packet is newer than the one in
-	 * hash.*/
-	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
-		send_list_add(bat_priv, info);
+	 * hash.
+	 */
+	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
+		batadv_send_list_add(bat_priv, info);
end:
 	spin_unlock_bh(&bat_priv->vis_hash_lock);
 }
 
 /* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct bat_priv *bat_priv,
-				  struct vis_packet *vis_packet,
-				  int vis_info_len)
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+					 struct batadv_vis_packet *vis_packet,
+					 int vis_info_len)
 {
-	struct vis_info *info;
-	struct vis_packet *packet;
+	struct batadv_vis_info *info;
+	struct batadv_vis_packet *packet;
 	int is_new;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
 	int are_target = 0;
@@ -516,28 +480,28 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
 		return;
 
 	/* Are we the target for this VIS packet? */
-	if (vis_server == VIS_TYPE_SERVER_SYNC &&
-	    is_my_mac(vis_packet->target_orig))
+	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
+	    batadv_is_my_mac(vis_packet->target_orig))
 		are_target = 1;
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
-	info = add_packet(bat_priv, vis_packet, vis_info_len,
-			  &is_new, are_target);
+	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+				 &is_new, are_target);
 
 	if (!info)
 		goto end;
 	/* note that outdated packets will be dropped at this point. */
 
-	packet = (struct vis_packet *)info->skb_packet->data;
+	packet = (struct batadv_vis_packet *)info->skb_packet->data;
 
 	/* send only if we're the target server or ... */
 	if (are_target && is_new) {
-		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
-		send_list_add(bat_priv, info);
+		packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
+		batadv_send_list_add(bat_priv, info);
 
 		/* ... we're not the recipient (and thus need to forward). */
-	} else if (!is_my_mac(packet->target_orig)) {
-		send_list_add(bat_priv, info);
+	} else if (!batadv_is_my_mac(packet->target_orig)) {
+		batadv_send_list_add(bat_priv, info);
 	}
 
end:
@@ -547,37 +511,38 @@ end:
 /* Walk the originators and find the VIS server with the best tq. Set the packet
  * address to its address and return the best_tq.
  *
- * Must be called with the originator hash locked */
-static int find_best_vis_server(struct bat_priv *bat_priv,
-				struct vis_info *info)
+ * Must be called with the originator hash locked
+ */
+static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
+				       struct batadv_vis_info *info)
 {
-	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct neigh_node *router;
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
+	struct batadv_neigh_node *router;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct orig_node *orig_node;
-	struct vis_packet *packet;
+	struct batadv_orig_node *orig_node;
+	struct batadv_vis_packet *packet;
 	int best_tq = -1;
 	uint32_t i;
 
-	packet = (struct vis_packet *)info->skb_packet->data;
+	packet = (struct batadv_vis_packet *)info->skb_packet->data;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
-			router = orig_node_get_router(orig_node);
+			router = batadv_orig_node_get_router(orig_node);
 			if (!router)
 				continue;
 
-			if ((orig_node->flags & VIS_SERVER) &&
+			if ((orig_node->flags & BATADV_VIS_SERVER) &&
 			    (router->tq_avg > best_tq)) {
 				best_tq = router->tq_avg;
 				memcpy(packet->target_orig, orig_node->orig,
 				       ETH_ALEN);
 			}
-			neigh_node_free_ref(router);
+			batadv_neigh_node_free_ref(router);
 		}
 		rcu_read_unlock();
 	}
@@ -586,47 +551,52 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
 }
 
 /* Return true if the vis packet is full. */
-static bool vis_packet_full(const struct vis_info *info)
+static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
 {
-	const struct vis_packet *packet;
-	packet = (struct vis_packet *)info->skb_packet->data;
+	const struct batadv_vis_packet *packet;
+	size_t num;
+
+	packet = (struct batadv_vis_packet *)info->skb_packet->data;
+	num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
 
-	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
-	    < packet->entries + 1)
+	if (num < packet->entries + 1)
 		return true;
 	return false;
 }
 
 /* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(struct bat_priv *bat_priv)
+ * returns 0 on success, -1 if no packet could be generated
+ */
+static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
 {
-	struct hashtable_t *hash = bat_priv->orig_hash;
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct orig_node *orig_node;
-	struct neigh_node *router;
-	struct vis_info *info = bat_priv->my_vis_info;
-	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
-	struct vis_info_entry *entry;
-	struct tt_common_entry *tt_common_entry;
+	struct batadv_orig_node *orig_node;
+	struct batadv_neigh_node *router;
+	struct batadv_vis_info *info = bat_priv->my_vis_info;
+	struct batadv_vis_packet *packet;
+	struct batadv_vis_info_entry *entry;
+	struct batadv_tt_common_entry *tt_common_entry;
 	int best_tq = -1;
 	uint32_t i;
 
 	info->first_seen = jiffies;
+	packet = (struct batadv_vis_packet *)info->skb_packet->data;
 	packet->vis_type = atomic_read(&bat_priv->vis_mode);
 
-	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
-	packet->header.ttl = TTL;
+	memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
+	packet->header.ttl = BATADV_TTL;
 	packet->seqno = htonl(ntohl(packet->seqno) + 1);
 	packet->entries = 0;
+	packet->reserved = 0;
 	skb_trim(info->skb_packet, sizeof(*packet));
 
-	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
-		best_tq = find_best_vis_server(bat_priv, info);
+	if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
+		best_tq = batadv_find_best_vis_server(bat_priv, info);
 
 		if (best_tq < 0)
-			return -1;
+			return best_tq;
	}
 
 	for (i = 0; i < hash->size; i++) {
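
One detail worth flagging in the hunk above: the vis sequence number lives big-endian inside the packet, so the increment kept as context (`packet->seqno = htonl(ntohl(packet->seqno) + 1);`) is an ntohl()/htonl() round trip rather than a bare increment. A tiny userspace illustration of the same round trip:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seqno_be = htonl(41);	/* as stored in the packet */

	/* convert to host order, add one, convert back */
	seqno_be = htonl(ntohl(seqno_be) + 1);
	printf("next seqno: %u\n", ntohl(seqno_be));	/* prints 42 */
	return 0;
}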
@@ -634,21 +604,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
634 604
635 rcu_read_lock(); 605 rcu_read_lock();
636 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 606 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
637 router = orig_node_get_router(orig_node); 607 router = batadv_orig_node_get_router(orig_node);
638 if (!router) 608 if (!router)
639 continue; 609 continue;
640 610
641 if (!compare_eth(router->addr, orig_node->orig)) 611 if (!batadv_compare_eth(router->addr, orig_node->orig))
642 goto next; 612 goto next;
643 613
644 if (router->if_incoming->if_status != IF_ACTIVE) 614 if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
645 goto next; 615 goto next;
646 616
647 if (router->tq_avg < 1) 617 if (router->tq_avg < 1)
648 goto next; 618 goto next;
649 619
650 /* fill one entry into buffer. */ 620 /* fill one entry into buffer. */
651 entry = (struct vis_info_entry *) 621 entry = (struct batadv_vis_info_entry *)
652 skb_put(info->skb_packet, sizeof(*entry)); 622 skb_put(info->skb_packet, sizeof(*entry));
653 memcpy(entry->src, 623 memcpy(entry->src,
654 router->if_incoming->net_dev->dev_addr, 624 router->if_incoming->net_dev->dev_addr,
@@ -658,9 +628,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
658 packet->entries++; 628 packet->entries++;
659 629
660next: 630next:
661 neigh_node_free_ref(router); 631 batadv_neigh_node_free_ref(router);
662 632
663 if (vis_packet_full(info)) 633 if (batadv_vis_packet_full(info))
664 goto unlock; 634 goto unlock;
665 } 635 }
666 rcu_read_unlock(); 636 rcu_read_unlock();
@@ -674,7 +644,7 @@ next:
674 rcu_read_lock(); 644 rcu_read_lock();
675 hlist_for_each_entry_rcu(tt_common_entry, node, head, 645 hlist_for_each_entry_rcu(tt_common_entry, node, head,
676 hash_entry) { 646 hash_entry) {
677 entry = (struct vis_info_entry *) 647 entry = (struct batadv_vis_info_entry *)
678 skb_put(info->skb_packet, 648 skb_put(info->skb_packet,
679 sizeof(*entry)); 649 sizeof(*entry));
680 memset(entry->src, 0, ETH_ALEN); 650 memset(entry->src, 0, ETH_ALEN);
@@ -682,7 +652,7 @@ next:
682 entry->quality = 0; /* 0 means TT */ 652 entry->quality = 0; /* 0 means TT */
683 packet->entries++; 653 packet->entries++;
684 654
685 if (vis_packet_full(info)) 655 if (batadv_vis_packet_full(info))
686 goto unlock; 656 goto unlock;
687 } 657 }
688 rcu_read_unlock(); 658 rcu_read_unlock();
@@ -696,14 +666,15 @@ unlock:
696} 666}
697 667
698/* free old vis packets. Must be called with this vis_hash_lock 668/* free old vis packets. Must be called with this vis_hash_lock
699 * held */ 669 * held
700static void purge_vis_packets(struct bat_priv *bat_priv) 670 */
671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
701{ 672{
702 uint32_t i; 673 uint32_t i;
703 struct hashtable_t *hash = bat_priv->vis_hash; 674 struct batadv_hashtable *hash = bat_priv->vis_hash;
704 struct hlist_node *node, *node_tmp; 675 struct hlist_node *node, *node_tmp;
705 struct hlist_head *head; 676 struct hlist_head *head;
706 struct vis_info *info; 677 struct batadv_vis_info *info;
707 678
708 for (i = 0; i < hash->size; i++) { 679 for (i = 0; i < hash->size; i++) {
709 head = &hash->table[i]; 680 head = &hash->table[i];
@@ -714,31 +685,32 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
714 if (info == bat_priv->my_vis_info) 685 if (info == bat_priv->my_vis_info)
715 continue; 686 continue;
716 687
717 if (has_timed_out(info->first_seen, VIS_TIMEOUT)) { 688 if (batadv_has_timed_out(info->first_seen,
689 BATADV_VIS_TIMEOUT)) {
718 hlist_del(node); 690 hlist_del(node);
719 send_list_del(info); 691 batadv_send_list_del(info);
720 kref_put(&info->refcount, free_info); 692 kref_put(&info->refcount, batadv_free_info);
721 } 693 }
722 } 694 }
723 } 695 }
724} 696}
725 697
726static void broadcast_vis_packet(struct bat_priv *bat_priv, 698static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
727 struct vis_info *info) 699 struct batadv_vis_info *info)
728{ 700{
729 struct neigh_node *router; 701 struct batadv_neigh_node *router;
730 struct hashtable_t *hash = bat_priv->orig_hash; 702 struct batadv_hashtable *hash = bat_priv->orig_hash;
731 struct hlist_node *node; 703 struct hlist_node *node;
732 struct hlist_head *head; 704 struct hlist_head *head;
733 struct orig_node *orig_node; 705 struct batadv_orig_node *orig_node;
734 struct vis_packet *packet; 706 struct batadv_vis_packet *packet;
735 struct sk_buff *skb; 707 struct sk_buff *skb;
736 struct hard_iface *hard_iface; 708 struct batadv_hard_iface *hard_iface;
737 uint8_t dstaddr[ETH_ALEN]; 709 uint8_t dstaddr[ETH_ALEN];
738 uint32_t i; 710 uint32_t i;
739 711
740 712
741 packet = (struct vis_packet *)info->skb_packet->data; 713 packet = (struct batadv_vis_packet *)info->skb_packet->data;
742 714
743 /* send to all routers in range. */ 715 /* send to all routers in range. */
744 for (i = 0; i < hash->size; i++) { 716 for (i = 0; i < hash->size; i++) {
@@ -747,18 +719,19 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
747 rcu_read_lock(); 719 rcu_read_lock();
748 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 720 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
749 /* if it's a vis server and reachable, send it. */ 721 /* if it's a vis server and reachable, send it. */
750 if (!(orig_node->flags & VIS_SERVER)) 722 if (!(orig_node->flags & BATADV_VIS_SERVER))
751 continue; 723 continue;
752 724
753 router = orig_node_get_router(orig_node); 725 router = batadv_orig_node_get_router(orig_node);
754 if (!router) 726 if (!router)
755 continue; 727 continue;
756 728
757 /* don't send it if we already received the packet from 729 /* don't send it if we already received the packet from
758 * this node. */ 730 * this node.
759 if (recv_list_is_in(bat_priv, &info->recv_list, 731 */
760 orig_node->orig)) { 732 if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
761 neigh_node_free_ref(router); 733 orig_node->orig)) {
734 batadv_neigh_node_free_ref(router);
762 continue; 735 continue;
763 } 736 }
764 737
@@ -766,57 +739,59 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
766 hard_iface = router->if_incoming; 739 hard_iface = router->if_incoming;
767 memcpy(dstaddr, router->addr, ETH_ALEN); 740 memcpy(dstaddr, router->addr, ETH_ALEN);
768 741
769 neigh_node_free_ref(router); 742 batadv_neigh_node_free_ref(router);
770 743
771 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 744 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
772 if (skb) 745 if (skb)
773 send_skb_packet(skb, hard_iface, dstaddr); 746 batadv_send_skb_packet(skb, hard_iface,
747 dstaddr);
774 748
775 } 749 }
776 rcu_read_unlock(); 750 rcu_read_unlock();
777 } 751 }
778} 752}
779 753
780static void unicast_vis_packet(struct bat_priv *bat_priv, 754static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
781 struct vis_info *info) 755 struct batadv_vis_info *info)
782{ 756{
783 struct orig_node *orig_node; 757 struct batadv_orig_node *orig_node;
784 struct neigh_node *router = NULL; 758 struct batadv_neigh_node *router = NULL;
785 struct sk_buff *skb; 759 struct sk_buff *skb;
786 struct vis_packet *packet; 760 struct batadv_vis_packet *packet;
787 761
788 packet = (struct vis_packet *)info->skb_packet->data; 762 packet = (struct batadv_vis_packet *)info->skb_packet->data;
789 763
790 orig_node = orig_hash_find(bat_priv, packet->target_orig); 764 orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
791 if (!orig_node) 765 if (!orig_node)
792 goto out; 766 goto out;
793 767
794 router = orig_node_get_router(orig_node); 768 router = batadv_orig_node_get_router(orig_node);
795 if (!router) 769 if (!router)
796 goto out; 770 goto out;
797 771
798 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 772 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
799 if (skb) 773 if (skb)
800 send_skb_packet(skb, router->if_incoming, router->addr); 774 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
801 775
802out: 776out:
803 if (router) 777 if (router)
804 neigh_node_free_ref(router); 778 batadv_neigh_node_free_ref(router);
805 if (orig_node) 779 if (orig_node)
806 orig_node_free_ref(orig_node); 780 batadv_orig_node_free_ref(orig_node);
807} 781}
808 782
809/* only send one vis packet. called from send_vis_packets() */ 783/* only send one vis packet. called from batadv_send_vis_packets() */
810static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) 784static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
785 struct batadv_vis_info *info)
811{ 786{
812 struct hard_iface *primary_if; 787 struct batadv_hard_iface *primary_if;
813 struct vis_packet *packet; 788 struct batadv_vis_packet *packet;
814 789
815 primary_if = primary_if_get_selected(bat_priv); 790 primary_if = batadv_primary_if_get_selected(bat_priv);
816 if (!primary_if) 791 if (!primary_if)
817 goto out; 792 goto out;
818 793
819 packet = (struct vis_packet *)info->skb_packet->data; 794 packet = (struct batadv_vis_packet *)info->skb_packet->data;
820 if (packet->header.ttl < 2) { 795 if (packet->header.ttl < 2) {
821 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 796 pr_debug("Error - can't send vis packet: ttl exceeded\n");
822 goto out; 797 goto out;
@@ -826,31 +801,31 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
826 packet->header.ttl--; 801 packet->header.ttl--;
827 802
828 if (is_broadcast_ether_addr(packet->target_orig)) 803 if (is_broadcast_ether_addr(packet->target_orig))
829 broadcast_vis_packet(bat_priv, info); 804 batadv_broadcast_vis_packet(bat_priv, info);
830 else 805 else
831 unicast_vis_packet(bat_priv, info); 806 batadv_unicast_vis_packet(bat_priv, info);
832 packet->header.ttl++; /* restore TTL */ 807 packet->header.ttl++; /* restore TTL */
833 808
834out: 809out:
835 if (primary_if) 810 if (primary_if)
836 hardif_free_ref(primary_if); 811 batadv_hardif_free_ref(primary_if);
837} 812}
838 813
839/* called from timer; send (and maybe generate) vis packet. */ 814/* called from timer; send (and maybe generate) vis packet. */
840static void send_vis_packets(struct work_struct *work) 815static void batadv_send_vis_packets(struct work_struct *work)
841{ 816{
842 struct delayed_work *delayed_work = 817 struct delayed_work *delayed_work =
843 container_of(work, struct delayed_work, work); 818 container_of(work, struct delayed_work, work);
844 struct bat_priv *bat_priv = 819 struct batadv_priv *bat_priv;
845 container_of(delayed_work, struct bat_priv, vis_work); 820 struct batadv_vis_info *info;
846 struct vis_info *info;
847 821
822 bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
848 spin_lock_bh(&bat_priv->vis_hash_lock); 823 spin_lock_bh(&bat_priv->vis_hash_lock);
849 purge_vis_packets(bat_priv); 824 batadv_purge_vis_packets(bat_priv);
850 825
851 if (generate_vis_packet(bat_priv) == 0) { 826 if (batadv_generate_vis_packet(bat_priv) == 0) {
852 /* schedule if generation was successful */ 827 /* schedule if generation was successful */
853 send_list_add(bat_priv, bat_priv->my_vis_info); 828 batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
854 } 829 }
855 830
856 while (!list_empty(&bat_priv->vis_send_list)) { 831 while (!list_empty(&bat_priv->vis_send_list)) {
@@ -860,98 +835,103 @@ static void send_vis_packets(struct work_struct *work)
860 kref_get(&info->refcount); 835 kref_get(&info->refcount);
861 spin_unlock_bh(&bat_priv->vis_hash_lock); 836 spin_unlock_bh(&bat_priv->vis_hash_lock);
862 837
863 send_vis_packet(bat_priv, info); 838 batadv_send_vis_packet(bat_priv, info);
864 839
865 spin_lock_bh(&bat_priv->vis_hash_lock); 840 spin_lock_bh(&bat_priv->vis_hash_lock);
866 send_list_del(info); 841 batadv_send_list_del(info);
867 kref_put(&info->refcount, free_info); 842 kref_put(&info->refcount, batadv_free_info);
868 } 843 }
869 spin_unlock_bh(&bat_priv->vis_hash_lock); 844 spin_unlock_bh(&bat_priv->vis_hash_lock);
870 start_vis_timer(bat_priv); 845 batadv_start_vis_timer(bat_priv);
871} 846}
872 847
 /* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(struct bat_priv *bat_priv)
+ * initialized (e.g. bat0 is initialized, interfaces have been added)
+ */
+int batadv_vis_init(struct batadv_priv *bat_priv)
 {
-	struct vis_packet *packet;
+	struct batadv_vis_packet *packet;
 	int hash_added;
+	unsigned int len;
+	unsigned long first_seen;
+	struct sk_buff *tmp_skb;
 
 	if (bat_priv->vis_hash)
-		return 1;
+		return 0;
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 
-	bat_priv->vis_hash = hash_new(256);
+	bat_priv->vis_hash = batadv_hash_new(256);
 	if (!bat_priv->vis_hash) {
 		pr_err("Can't initialize vis_hash\n");
 		goto err;
 	}
 
-	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+	bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
 	if (!bat_priv->my_vis_info)
 		goto err;
 
-	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
-							  MAX_VIS_PACKET_SIZE +
-							  ETH_HLEN);
+	len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
+	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
 	if (!bat_priv->my_vis_info->skb_packet)
 		goto free_info;
 
 	skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
-	packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
-					      sizeof(*packet));
+	tmp_skb = bat_priv->my_vis_info->skb_packet;
+	packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
 	/* prefill the vis info */
-	bat_priv->my_vis_info->first_seen = jiffies -
-					    msecs_to_jiffies(VIS_INTERVAL);
+	first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
+	bat_priv->my_vis_info->first_seen = first_seen;
 	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
 	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
 	kref_init(&bat_priv->my_vis_info->refcount);
 	bat_priv->my_vis_info->bat_priv = bat_priv;
-	packet->header.version = COMPAT_VERSION;
-	packet->header.packet_type = BAT_VIS;
-	packet->header.ttl = TTL;
+	packet->header.version = BATADV_COMPAT_VERSION;
+	packet->header.packet_type = BATADV_VIS;
+	packet->header.ttl = BATADV_TTL;
 	packet->seqno = 0;
+	packet->reserved = 0;
 	packet->entries = 0;
 
 	INIT_LIST_HEAD(&bat_priv->vis_send_list);
 
-	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-			      bat_priv->my_vis_info,
-			      &bat_priv->my_vis_info->hash_entry);
+	hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+				     batadv_vis_info_choose,
+				     bat_priv->my_vis_info,
+				     &bat_priv->my_vis_info->hash_entry);
 	if (hash_added != 0) {
 		pr_err("Can't add own vis packet into hash\n");
 		/* not in hash, need to remove it manually. */
-		kref_put(&bat_priv->my_vis_info->refcount, free_info);
+		kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
 		goto err;
 	}
 
 	spin_unlock_bh(&bat_priv->vis_hash_lock);
-	start_vis_timer(bat_priv);
-	return 1;
+	batadv_start_vis_timer(bat_priv);
+	return 0;
 
 free_info:
 	kfree(bat_priv->my_vis_info);
 	bat_priv->my_vis_info = NULL;
 err:
 	spin_unlock_bh(&bat_priv->vis_hash_lock);
-	vis_quit(bat_priv);
-	return 0;
+	batadv_vis_quit(bat_priv);
+	return -ENOMEM;
 }
 
 /* Decrease the reference count on a hash item info */
-static void free_info_ref(struct hlist_node *node, void *arg)
+static void batadv_free_info_ref(struct hlist_node *node, void *arg)
 {
-	struct vis_info *info;
+	struct batadv_vis_info *info;
 
-	info = container_of(node, struct vis_info, hash_entry);
-	send_list_del(info);
-	kref_put(&info->refcount, free_info);
+	info = container_of(node, struct batadv_vis_info, hash_entry);
+	batadv_send_list_del(info);
+	kref_put(&info->refcount, batadv_free_info);
 }
 
 /* shutdown vis-server */
-void vis_quit(struct bat_priv *bat_priv)
+void batadv_vis_quit(struct batadv_priv *bat_priv)
 {
 	if (!bat_priv->vis_hash)
 		return;
@@ -960,16 +940,16 @@ void vis_quit(struct bat_priv *bat_priv)
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	/* properly remove, kill timers ... */
-	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
+	batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
 	bat_priv->vis_hash = NULL;
 	bat_priv->my_vis_info = NULL;
 	spin_unlock_bh(&bat_priv->vis_hash_lock);
 }
 
 /* schedule packets for (re)transmission */
-static void start_vis_timer(struct bat_priv *bat_priv)
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
-			   msecs_to_jiffies(VIS_INTERVAL));
+	INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+			   msecs_to_jiffies(BATADV_VIS_INTERVAL));
 }
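
The hunk above also switches batadv_vis_init() from the old 1-on-success/0-on-failure scheme to the usual kernel convention: 0 on success, a negative errno on failure. A minimal sketch of what a caller is now expected to do (the caller shown here is hypothetical, not part of this patch):

	int err = batadv_vis_init(bat_priv);
	if (err < 0)		/* only -ENOMEM can come back at present */
		return err;	/* vis hash or packet buffer allocation failed */
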
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index ee2e46e5347b..84e716ed8963 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -16,23 +15,22 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #ifndef _NET_BATMAN_ADV_VIS_H_
 #define _NET_BATMAN_ADV_VIS_H_
 
-#define VIS_TIMEOUT		200000	/* timeout of vis packets
-					 * in miliseconds */
+/* timeout of vis packets in miliseconds */
+#define BATADV_VIS_TIMEOUT	200000
 
-int vis_seq_print_text(struct seq_file *seq, void *offset);
-void receive_server_sync_packet(struct bat_priv *bat_priv,
-				struct vis_packet *vis_packet,
-				int vis_info_len);
-void receive_client_update_packet(struct bat_priv *bat_priv,
-				  struct vis_packet *vis_packet,
-				  int vis_info_len);
-int vis_init(struct bat_priv *bat_priv);
-void vis_quit(struct bat_priv *bat_priv);
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+				       struct batadv_vis_packet *vis_packet,
+				       int vis_info_len);
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+					 struct batadv_vis_packet *vis_packet,
+					 int vis_info_len);
+int batadv_vis_init(struct batadv_priv *bat_priv);
+void batadv_vis_quit(struct batadv_priv *bat_priv);
 
 #endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f53..fa6d94a4602a 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
-	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+	a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 000000000000..4ff0bf3ba9a5
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
+/*
+   Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+   Copyright (c) 2011,2012 Intel Corp.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 and
+   only version 2 as published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/a2mp.h>
+
+/* A2MP build & send command helper functions */
+static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+{
+	struct a2mp_cmd *cmd;
+	int plen;
+
+	plen = sizeof(*cmd) + len;
+	cmd = kzalloc(plen, GFP_KERNEL);
+	if (!cmd)
+		return NULL;
+
+	cmd->code = code;
+	cmd->ident = ident;
+	cmd->len = cpu_to_le16(len);
+
+	memcpy(cmd->data, data, len);
+
+	return cmd;
+}
+
+static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
+		      void *data)
+{
+	struct l2cap_chan *chan = mgr->a2mp_chan;
+	struct a2mp_cmd *cmd;
+	u16 total_len = len + sizeof(*cmd);
+	struct kvec iv;
+	struct msghdr msg;
+
+	cmd = __a2mp_build(code, ident, len, data);
+	if (!cmd)
+		return;
+
+	iv.iov_base = cmd;
+	iv.iov_len = total_len;
+
+	memset(&msg, 0, sizeof(msg));
+
+	msg.msg_iov = (struct iovec *) &iv;
+	msg.msg_iovlen = 1;
+
+	l2cap_chan_send(chan, &msg, total_len, 0);
+
+	kfree(cmd);
+}
+
+static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
+{
+	cl->id = 0;
+	cl->type = 0;
+	cl->status = 1;
+}
+
+/* hci_dev_list shall be locked */
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+{
+	int i = 0;
+	struct hci_dev *hdev;
+
+	__a2mp_cl_bredr(cl);
+
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		/* Iterate through AMP controllers */
+		if (hdev->id == HCI_BREDR_ID)
+			continue;
+
+		/* Starting from second entry */
+		if (++i >= num_ctrl)
+			return;
+
+		cl[i].id = hdev->id;
+		cl[i].type = hdev->amp_type;
+		cl[i].status = hdev->amp_status;
+	}
+}
+
+/* Processing A2MP messages */
+static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+			    struct a2mp_cmd *hdr)
+{
+	struct a2mp_cmd_rej *rej = (void *) skb->data;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*rej))
+		return -EINVAL;
+
+	BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+
+	skb_pull(skb, sizeof(*rej));
+
+	return 0;
+}
+
+static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+			     struct a2mp_cmd *hdr)
+{
+	struct a2mp_discov_req *req = (void *) skb->data;
+	u16 len = le16_to_cpu(hdr->len);
+	struct a2mp_discov_rsp *rsp;
+	u16 ext_feat;
+	u8 num_ctrl;
+
+	if (len < sizeof(*req))
+		return -EINVAL;
+
+	skb_pull(skb, sizeof(*req));
+
+	ext_feat = le16_to_cpu(req->ext_feat);
+
+	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+
+	/* check that packet is not broken for now */
+	while (ext_feat & A2MP_FEAT_EXT) {
+		if (len < sizeof(ext_feat))
+			return -EINVAL;
+
+		ext_feat = get_unaligned_le16(skb->data);
+		BT_DBG("efm 0x%4.4x", ext_feat);
+		len -= sizeof(ext_feat);
+		skb_pull(skb, sizeof(ext_feat));
+	}
+
+	read_lock(&hci_dev_list_lock);
+
+	num_ctrl = __hci_num_ctrl();
+	len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+	rsp = kmalloc(len, GFP_ATOMIC);
+	if (!rsp) {
+		read_unlock(&hci_dev_list_lock);
+		return -ENOMEM;
+	}
+
+	rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+	rsp->ext_feat = 0;
+
+	__a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+
+	read_unlock(&hci_dev_list_lock);
+
+	a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+
+	kfree(rsp);
+	return 0;
+}
+
+static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+			      struct a2mp_cmd *hdr)
+{
+	struct a2mp_cl *cl = (void *) skb->data;
+
+	while (skb->len >= sizeof(*cl)) {
+		BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+		       cl->status);
+		cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+	}
+
+	/* TODO send A2MP_CHANGE_RSP */
+
+	return 0;
+}
+
+static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+			    struct a2mp_cmd *hdr)
+{
+	struct a2mp_info_req *req = (void *) skb->data;
+	struct a2mp_info_rsp rsp;
+	struct hci_dev *hdev;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*req))
+		return -EINVAL;
+
+	BT_DBG("id %d", req->id);
+
+	rsp.id = req->id;
+	rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+	hdev = hci_dev_get(req->id);
+	if (hdev && hdev->amp_type != HCI_BREDR) {
+		rsp.status = 0;
+		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+	}
+
+	if (hdev)
+		hci_dev_put(hdev);
+
+	a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+	skb_pull(skb, sizeof(*req));
+	return 0;
+}
+
+static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+				struct a2mp_cmd *hdr)
+{
+	struct a2mp_amp_assoc_req *req = (void *) skb->data;
+	struct hci_dev *hdev;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*req))
+		return -EINVAL;
+
+	BT_DBG("id %d", req->id);
+
+	hdev = hci_dev_get(req->id);
+	if (!hdev || hdev->amp_type == HCI_BREDR) {
+		struct a2mp_amp_assoc_rsp rsp;
+		rsp.id = req->id;
+		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+		a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+			  &rsp);
+		goto clean;
+	}
+
+	/* Placeholder for HCI Read AMP Assoc */
+
+clean:
+	if (hdev)
+		hci_dev_put(hdev);
+
+	skb_pull(skb, sizeof(*req));
+	return 0;
+}
+
+static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+				   struct a2mp_cmd *hdr)
+{
+	struct a2mp_physlink_req *req = (void *) skb->data;
+
+	struct a2mp_physlink_rsp rsp;
+	struct hci_dev *hdev;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*req))
+		return -EINVAL;
+
+	BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+	rsp.local_id = req->remote_id;
+	rsp.remote_id = req->local_id;
+
+	hdev = hci_dev_get(req->remote_id);
+	if (!hdev || hdev->amp_type != HCI_AMP) {
+		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+		goto send_rsp;
+	}
+
+	/* TODO process physlink create */
+
+	rsp.status = A2MP_STATUS_SUCCESS;
+
+send_rsp:
+	if (hdev)
+		hci_dev_put(hdev);
+
+	a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
+		  &rsp);
+
+	skb_pull(skb, le16_to_cpu(hdr->len));
+	return 0;
+}
+
+static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+				 struct a2mp_cmd *hdr)
+{
+	struct a2mp_physlink_req *req = (void *) skb->data;
+	struct a2mp_physlink_rsp rsp;
+	struct hci_dev *hdev;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*req))
+		return -EINVAL;
+
+	BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+
+	rsp.local_id = req->remote_id;
+	rsp.remote_id = req->local_id;
+	rsp.status = A2MP_STATUS_SUCCESS;
+
+	hdev = hci_dev_get(req->local_id);
+	if (!hdev) {
+		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+		goto send_rsp;
+	}
+
+	/* TODO Disconnect Phys Link here */
+
+	hci_dev_put(hdev);
+
+send_rsp:
+	a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+	skb_pull(skb, sizeof(*req));
+	return 0;
+}
+
+static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+			       struct a2mp_cmd *hdr)
+{
+	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+
+	skb_pull(skb, le16_to_cpu(hdr->len));
+	return 0;
+}
+
+/* Handle A2MP signalling */
+static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+	struct a2mp_cmd *hdr = (void *) skb->data;
+	struct amp_mgr *mgr = chan->data;
+	int err = 0;
+
+	amp_mgr_get(mgr);
+
+	while (skb->len >= sizeof(*hdr)) {
+		struct a2mp_cmd *hdr = (void *) skb->data;
+		u16 len = le16_to_cpu(hdr->len);
+
+		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+
+		skb_pull(skb, sizeof(*hdr));
+
+		if (len > skb->len || !hdr->ident) {
+			err = -EINVAL;
+			break;
+		}
+
+		mgr->ident = hdr->ident;
+
+		switch (hdr->code) {
+		case A2MP_COMMAND_REJ:
+			a2mp_command_rej(mgr, skb, hdr);
+			break;
+
+		case A2MP_DISCOVER_REQ:
+			err = a2mp_discover_req(mgr, skb, hdr);
+			break;
+
+		case A2MP_CHANGE_NOTIFY:
+			err = a2mp_change_notify(mgr, skb, hdr);
+			break;
+
+		case A2MP_GETINFO_REQ:
+			err = a2mp_getinfo_req(mgr, skb, hdr);
+			break;
+
+		case A2MP_GETAMPASSOC_REQ:
+			err = a2mp_getampassoc_req(mgr, skb, hdr);
+			break;
+
+		case A2MP_CREATEPHYSLINK_REQ:
+			err = a2mp_createphyslink_req(mgr, skb, hdr);
+			break;
+
+		case A2MP_DISCONNPHYSLINK_REQ:
+			err = a2mp_discphyslink_req(mgr, skb, hdr);
+			break;
+
+		case A2MP_CHANGE_RSP:
+		case A2MP_DISCOVER_RSP:
+		case A2MP_GETINFO_RSP:
+		case A2MP_GETAMPASSOC_RSP:
+		case A2MP_CREATEPHYSLINK_RSP:
+		case A2MP_DISCONNPHYSLINK_RSP:
+			err = a2mp_cmd_rsp(mgr, skb, hdr);
+			break;
+
+		default:
+			BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	if (err) {
+		struct a2mp_cmd_rej rej;
+		rej.reason = __constant_cpu_to_le16(0);
+
+		BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+
+		a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+			  &rej);
+	}
+
+	/* Always free skb and return success error code to prevent
+	   from sending L2CAP Disconnect over A2MP channel */
+	kfree_skb(skb);
+
+	amp_mgr_put(mgr);
+
+	return 0;
+}
+
+static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+{
+	l2cap_chan_destroy(chan);
+}
+
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+{
+	struct amp_mgr *mgr = chan->data;
+
+	if (!mgr)
+		return;
+
+	BT_DBG("chan %p state %s", chan, state_to_string(state));
+
+	chan->state = state;
+
+	switch (state) {
+	case BT_CLOSED:
+		if (mgr)
+			amp_mgr_put(mgr);
+		break;
+	}
+}
+
+static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+					      unsigned long len, int nb)
+{
+	return bt_skb_alloc(len, GFP_KERNEL);
+}
+
+static struct l2cap_ops a2mp_chan_ops = {
+	.name = "L2CAP A2MP channel",
+	.recv = a2mp_chan_recv_cb,
+	.close = a2mp_chan_close_cb,
+	.state_change = a2mp_chan_state_change_cb,
+	.alloc_skb = a2mp_chan_alloc_skb_cb,
+
+	/* Not implemented for A2MP */
+	.new_connection = l2cap_chan_no_new_connection,
+	.teardown = l2cap_chan_no_teardown,
+	.ready = l2cap_chan_no_ready,
+};
+
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
+{
+	struct l2cap_chan *chan;
+	int err;
+
+	chan = l2cap_chan_create();
+	if (!chan)
+		return NULL;
+
+	BT_DBG("chan %p", chan);
+
+	chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
+	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+	chan->ops = &a2mp_chan_ops;
+
+	l2cap_chan_set_defaults(chan);
+	chan->remote_max_tx = chan->max_tx;
+	chan->remote_tx_win = chan->tx_win;
+
+	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+	skb_queue_head_init(&chan->tx_q);
+
+	chan->mode = L2CAP_MODE_ERTM;
+
+	err = l2cap_ertm_init(chan);
+	if (err < 0) {
+		l2cap_chan_del(chan, 0);
+		return NULL;
+	}
+
+	chan->conf_state = 0;
+
+	l2cap_chan_add(conn, chan);
+
+	chan->remote_mps = chan->omtu;
+	chan->mps = chan->omtu;
+
+	chan->state = BT_CONNECTED;
+
+	return chan;
+}
+
+/* AMP Manager functions */
+void amp_mgr_get(struct amp_mgr *mgr)
+{
+	BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+	kref_get(&mgr->kref);
+}
+
+static void amp_mgr_destroy(struct kref *kref)
+{
+	struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+
+	BT_DBG("mgr %p", mgr);
+
+	kfree(mgr);
+}
+
+int amp_mgr_put(struct amp_mgr *mgr)
+{
+	BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+	return kref_put(&mgr->kref, &amp_mgr_destroy);
+}
+
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
+{
+	struct amp_mgr *mgr;
+	struct l2cap_chan *chan;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return NULL;
+
+	BT_DBG("conn %p mgr %p", conn, mgr);
+
+	mgr->l2cap_conn = conn;
+
+	chan = a2mp_chan_open(conn);
+	if (!chan) {
+		kfree(mgr);
+		return NULL;
+	}
+
+	mgr->a2mp_chan = chan;
+	chan->data = mgr;
+
+	conn->hcon->amp_mgr = mgr;
+
+	kref_init(&mgr->kref);
+
+	return mgr;
+}
+
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+				       struct sk_buff *skb)
+{
+	struct amp_mgr *mgr;
+
+	mgr = amp_mgr_create(conn);
+	if (!mgr) {
+		BT_ERR("Could not create AMP manager");
+		return NULL;
+	}
+
+	BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+
+	return mgr->a2mp_chan;
+}
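
The a2mp_chan_recv_cb() callback above drains a single skb that may carry several back-to-back A2MP commands, each led by a fixed header (code, ident, little-endian length). A stand-alone sketch of the same framing check on a plain buffer instead of an skb — the a2mp_walk() helper is hypothetical, with the header layout mirroring struct a2mp_cmd as used above:

	struct a2mp_cmd {
		__u8	code;
		__u8	ident;
		__le16	len;
		__u8	data[0];
	} __packed;

	static int a2mp_walk(const u8 *buf, size_t buflen)
	{
		while (buflen >= sizeof(struct a2mp_cmd)) {
			const struct a2mp_cmd *hdr = (const void *) buf;
			u16 len = le16_to_cpu(hdr->len);

			buf += sizeof(*hdr);
			buflen -= sizeof(*hdr);

			/* same sanity checks as the receive callback: a zero
			 * ident or a length running past the end of the
			 * buffer means a malformed frame */
			if (len > buflen || !hdr->ident)
				return -EINVAL;

			/* dispatch on hdr->code would go here */
			buf += len;
			buflen -= len;
		}

		return 0;
	}
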
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc4..f7db5792ec64 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
 /* Bluetooth address family and sockets. */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
 #include <asm/ioctls.h>
-#include <linux/kmod.h>
 
 #include <net/bluetooth/bluetooth.h>
 
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
 	return 0;
 }
 
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+			  poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d656754..4a6620bc1570 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
 */
 
 #include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
 #include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
 #include <linux/file.h>
-
-#include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
 	ETH_ALEN + 2	/* BNEP_COMPRESSED_DST_ONLY */
 };
 
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct net_device *dev = s->dev;
 	struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
 	BNEP_COMPRESSED
 };
 
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d97..98f86f91d47c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
+#include <linux/export.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
 }
 
 #ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	struct ethhdr *eh = (void *) skb->data;
 
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
 
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
 /* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
 		return ETH_P_802_2;
 }
 
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	u16 proto = bnep_net_eth_proto(skb);
 	struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810d..5e5f5b410e0b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
 
 #include "bnep.h"
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed9731..5ad7da217474 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
 
 /* Bluetooth HCI connection handling. */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/export.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/a2mp.h>
 
 static void hci_le_connect(struct hci_conn *conn)
 {
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
 	conn->sec_level = BT_SECURITY_LOW;
 
 	memset(&cp, 0, sizeof(cp));
-	cp.scan_interval = cpu_to_le16(0x0060);
-	cp.scan_window = cpu_to_le16(0x0030);
+	cp.scan_interval = __constant_cpu_to_le16(0x0060);
+	cp.scan_window = __constant_cpu_to_le16(0x0030);
 	bacpy(&cp.peer_addr, &conn->dst);
 	cp.peer_addr_type = conn->dst_type;
-	cp.conn_interval_min = cpu_to_le16(0x0028);
-	cp.conn_interval_max = cpu_to_le16(0x0038);
-	cp.supervision_timeout = cpu_to_le16(0x002a);
-	cp.min_ce_len = cpu_to_le16(0x0000);
-	cp.max_ce_len = cpu_to_le16(0x0000);
+	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
+	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
+	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+	cp.max_ce_len = __constant_cpu_to_le16(0x0000);
 
 	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 }
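
The cpu_to_le16() to __constant_cpu_to_le16() conversions in this hunk only touch values that are compile-time constants: the __constant_ variant lets the byte swap be folded at build time (it is a no-op on little-endian hosts either way), while plain cpu_to_le16() remains for run-time values. Roughly, reusing fields from this file's hunks:

	/* constant argument: folded to a __le16 at compile time */
	cp.scan_window = __constant_cpu_to_le16(0x0030);

	/* variable argument: still converted at run time */
	cp.handle = cpu_to_le16(conn->handle);
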
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
 		cp.pscan_rep_mode = ie->data.pscan_rep_mode;
 		cp.pscan_mode = ie->data.pscan_mode;
 		cp.clock_offset = ie->data.clock_offset |
-				  cpu_to_le16(0x8000);
+				  __constant_cpu_to_le16(0x8000);
 	}
 
 	memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -120,7 +107,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
 {
 	struct hci_cp_create_conn_cancel cp;
 
-	BT_DBG("%p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
@@ -133,7 +120,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
 {
 	struct hci_cp_disconnect cp;
 
-	BT_DBG("%p", conn);
+	BT_DBG("hcon %p", conn);
 
 	conn->state = BT_DISCONN;
 
@@ -147,7 +134,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_add_sco cp;
 
-	BT_DBG("%p", conn);
+	BT_DBG("hcon %p", conn);
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
@@ -165,7 +152,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_setup_sync_conn cp;
 
-	BT_DBG("%p", conn);
+	BT_DBG("hcon %p", conn);
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
 	cp.handle = cpu_to_le16(handle);
 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
-	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
-	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
-	cp.max_latency = cpu_to_le16(0xffff);
+	cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+	cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+	cp.max_latency = __constant_cpu_to_le16(0xffff);
 	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
 	cp.retrans_effort = 0xff;
 
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
 }
 
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
-					u16 latency, u16 to_multiplier)
+			u16 latency, u16 to_multiplier)
 {
 	struct hci_cp_le_conn_update cp;
 	struct hci_dev *hdev = conn->hdev;
@@ -197,20 +184,19 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
 	cp.conn_interval_max = cpu_to_le16(max);
 	cp.conn_latency = cpu_to_le16(latency);
 	cp.supervision_timeout = cpu_to_le16(to_multiplier);
-	cp.min_ce_len = cpu_to_le16(0x0001);
-	cp.max_ce_len = cpu_to_le16(0x0001);
+	cp.min_ce_len = __constant_cpu_to_le16(0x0001);
+	cp.max_ce_len = __constant_cpu_to_le16(0x0001);
 
 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
 }
-EXPORT_SYMBOL(hci_le_conn_update);
 
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 		      __u8 ltk[16])
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_cp_le_start_enc cp;
 
-	BT_DBG("%p", conn);
+	BT_DBG("hcon %p", conn);
 
 	memset(&cp, 0, sizeof(cp));
 
@@ -221,18 +207,17 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 
 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
 }
-EXPORT_SYMBOL(hci_le_start_enc);
 
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
 {
 	struct hci_conn *sco = conn->link;
 
-	BT_DBG("%p", conn);
-
 	if (!sco)
 		return;
 
+	BT_DBG("hcon %p", conn);
+
 	if (!status) {
 		if (lmp_esco_capable(conn->hdev))
 			hci_setup_sync(sco, conn->handle);
@@ -247,10 +232,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
 static void hci_conn_timeout(struct work_struct *work)
 {
 	struct hci_conn *conn = container_of(work, struct hci_conn,
 					     disc_work.work);
 	__u8 reason;
 
-	BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
+	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 
 	if (atomic_read(&conn->refcnt))
 		return;
@@ -281,7 +266,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("conn %p mode %d", conn, conn->mode);
+	BT_DBG("hcon %p mode %d", conn, conn->mode);
 
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
 		struct hci_cp_sniff_subrate cp;
 		cp.handle = cpu_to_le16(conn->handle);
-		cp.max_latency = cpu_to_le16(0);
-		cp.min_remote_timeout = cpu_to_le16(0);
-		cp.min_local_timeout = cpu_to_le16(0);
+		cp.max_latency = __constant_cpu_to_le16(0);
+		cp.min_remote_timeout = __constant_cpu_to_le16(0);
+		cp.min_local_timeout = __constant_cpu_to_le16(0);
 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
 	}
 
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
 		cp.handle = cpu_to_le16(conn->handle);
 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
-		cp.attempt = cpu_to_le16(4);
-		cp.timeout = cpu_to_le16(1);
+		cp.attempt = __constant_cpu_to_le16(4);
+		cp.timeout = __constant_cpu_to_le16(1);
 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
 	}
 }
@@ -316,7 +301,7 @@ static void hci_conn_idle(unsigned long arg)
 {
 	struct hci_conn *conn = (void *) arg;
 
-	BT_DBG("conn %p mode %d", conn, conn->mode);
+	BT_DBG("hcon %p mode %d", conn, conn->mode);
 
 	hci_conn_enter_sniff_mode(conn);
 }
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
 	struct hci_dev *hdev = conn->hdev;
 
 	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
-						&conn->dst);
+		     &conn->dst);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
 	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
-			(unsigned long) conn);
+		    (unsigned long) conn);
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -397,7 +382,7 @@ int hci_conn_del(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
+	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
 	del_timer(&conn->idle_timer);
 
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
 		}
 	}
 
-
 	hci_chan_list_flush(conn);
 
+	if (conn->amp_mgr)
+		amp_mgr_put(conn->amp_mgr);
+
 	hci_conn_hash_del(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
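
The new amp_mgr_put() in hci_conn_del() above ties the AMP manager's lifetime to its parent connection: the manager created in amp_mgr_create() starts with one reference from kref_init(), users take temporary references with amp_mgr_get(), and the final put invokes the release callback shown earlier in this patch. Reduced to its core (a sketch of the pattern, not a new API):

	kref_init(&mgr->kref);			/* refcount = 1, creator's reference */
	kref_get(&mgr->kref);			/* temporary user */
	kref_put(&mgr->kref, amp_mgr_destroy);	/* drop the temporary reference */
	kref_put(&mgr->kref, amp_mgr_destroy);	/* last put: amp_mgr_destroy() runs */
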
@@ -454,7 +441,9 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 	read_lock(&hci_dev_list_lock);
 
 	list_for_each_entry(d, &hci_dev_list, list) {
-		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
+		if (!test_bit(HCI_UP, &d->flags) ||
+		    test_bit(HCI_RAW, &d->flags) ||
+		    d->dev_type != HCI_BREDR)
 			continue;
 
 		/* Simple routing:
@@ -495,6 +484,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 	if (type == LE_LINK) {
 		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 		if (!le) {
+			le = hci_conn_hash_lookup_state(hdev, LE_LINK,
+							BT_CONNECT);
+			if (le)
+				return ERR_PTR(-EBUSY);
+
 			le = hci_conn_add(hdev, LE_LINK, dst);
 			if (!le)
 				return ERR_PTR(-ENOMEM);
@@ -545,7 +539,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 	hci_conn_hold(sco);
 
 	if (acl->state == BT_CONNECTED &&
-			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
 
@@ -560,24 +554,22 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 
 	return sco;
 }
-EXPORT_SYMBOL(hci_connect);
 
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
 		return 0;
 
 	return 1;
 }
-EXPORT_SYMBOL(hci_conn_check_link_mode);
 
 /* Authenticate remote device */
 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (conn->pending_sec_level > sec_level)
 		sec_level = conn->pending_sec_level;
@@ -600,7 +592,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
-							sizeof(cp), &cp);
+			     sizeof(cp), &cp);
 		if (conn->key_type != 0xff)
 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 	}
@@ -611,21 +603,21 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 /* Encrypt the the link */
 static void hci_conn_encrypt(struct hci_conn *conn)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
 		struct hci_cp_set_conn_encrypt cp;
 		cp.handle = cpu_to_le16(conn->handle);
 		cp.encrypt = 0x01;
 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
-								&cp);
+			     &cp);
 	}
 }
 
 /* Enable security */
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	/* For sdp we don't need the link key. */
 	if (sec_level == BT_SECURITY_SDP)
@@ -648,8 +640,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	/* An unauthenticated combination key has sufficient security for
 	   security level 1 and 2. */
 	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
-	    (sec_level == BT_SECURITY_MEDIUM ||
-	     sec_level == BT_SECURITY_LOW))
+	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
 		goto encrypt;
 
 	/* A combination key has always sufficient security for the security
@@ -657,8 +648,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	   is generated using maximum PIN code length (16).
 	   For pre 2.1 units. */
 	if (conn->key_type == HCI_LK_COMBINATION &&
-	    (sec_level != BT_SECURITY_HIGH ||
-	     conn->pin_length == 16))
+	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
 		goto encrypt;
 
 auth:
@@ -680,7 +670,7 @@ EXPORT_SYMBOL(hci_conn_security);
 /* Check secure link requirement */
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (sec_level != BT_SECURITY_HIGH)
 		return 1; /* Accept if non-secure is required */
@@ -695,23 +685,22 @@ EXPORT_SYMBOL(hci_conn_check_secure);
 /* Change link key */
 int hci_conn_change_link_key(struct hci_conn *conn)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
 		struct hci_cp_change_conn_link_key cp;
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
-							sizeof(cp), &cp);
+			     sizeof(cp), &cp);
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL(hci_conn_change_link_key);
 
 /* Switch role */
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 {
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	if (!role && conn->link_mode & HCI_LM_MASTER)
 		return 1;
@@ -732,7 +721,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 {
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("conn %p mode %d", conn, conn->mode);
+	BT_DBG("hcon %p mode %d", conn, conn->mode);
 
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
@@ -752,7 +741,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 timer:
 	if (hdev->idle_timeout > 0)
 		mod_timer(&conn->idle_timer,
-			jiffies + msecs_to_jiffies(hdev->idle_timeout));
+			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
 }
 
 /* Drop all connection on the device */
@@ -802,7 +791,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
 
 int hci_get_conn_list(void __user *arg)
 {
-	register struct hci_conn *c;
+	struct hci_conn *c;
 	struct hci_conn_list_req req, *cl;
 	struct hci_conn_info *ci;
 	struct hci_dev *hdev;
@@ -906,7 +895,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_chan *chan;
 
-	BT_DBG("%s conn %p", hdev->name, conn);
+	BT_DBG("%s hcon %p", hdev->name, conn);
 
 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
 	if (!chan)
@@ -925,7 +914,7 @@ int hci_chan_del(struct hci_chan *chan)
 	struct hci_conn *conn = chan->conn;
 	struct hci_dev *hdev = conn->hdev;
 
-	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
 
 	list_del_rcu(&chan->list);
 
@@ -941,7 +930,7 @@ void hci_chan_list_flush(struct hci_conn *conn)
 {
 	struct hci_chan *chan, *n;
 
-	BT_DBG("conn %p", conn);
+	BT_DBG("hcon %p", conn);
 
 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
 		hci_chan_del(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647b..d4de5db18d5a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,34 +25,14 @@
 
 /* Bluetooth HCI core. */
 
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/rfkill.h>
-#include <linux/timer.h>
-#include <linux/crypto.h>
-#include <net/sock.h>
+#include <linux/export.h>
+#include <linux/idr.h>
 
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/rfkill.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define AUTO_OFF_TIMEOUT 2000
-
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
 static void hci_tx_work(struct work_struct *work);
@@ -65,6 +45,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
 LIST_HEAD(hci_cb_list);
 DEFINE_RWLOCK(hci_cb_list_lock);
 
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
+
 /* ---- HCI notifications ---- */
 
 static void hci_notify(struct hci_dev *hdev, int event)
@@ -76,7 +59,7 @@ static void hci_notify(struct hci_dev *hdev, int event)
 
 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
 {
-	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
+	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
 
 	/* If this is the init phase check if the completed command matches
 	 * the last init command, and if not just return.
@@ -124,8 +107,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
 }
 
 /* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-			 unsigned long opt, __u32 timeout)
+static int __hci_request(struct hci_dev *hdev,
+			 void (*req)(struct hci_dev *hdev, unsigned long opt),
+			 unsigned long opt, __u32 timeout)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	int err = 0;
@@ -166,8 +150,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
 	return err;
 }
 
-static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-			      unsigned long opt, __u32 timeout)
+static int hci_request(struct hci_dev *hdev,
+		       void (*req)(struct hci_dev *hdev, unsigned long opt),
+		       unsigned long opt, __u32 timeout)
 {
 	int ret;
 
@@ -201,12 +186,6 @@ static void bredr_init(struct hci_dev *hdev)
 
 	/* Mandatory initialization */
 
-	/* Reset */
-	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
-		set_bit(HCI_RESET, &hdev->flags);
-		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-	}
-
 	/* Read Local Supported Features */
 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
 
@@ -235,7 +214,7 @@ static void bredr_init(struct hci_dev *hdev)
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
 
 	/* Connection accept timeout ~20 secs */
-	param = cpu_to_le16(0x7d00);
+	param = __constant_cpu_to_le16(0x7d00);
 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
 	bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -247,9 +226,6 @@ static void amp_init(struct hci_dev *hdev)
 {
 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
 
-	/* Reset */
-	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-
 	/* Read Local Version */
 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 
@@ -275,6 +251,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 	}
 	skb_queue_purge(&hdev->driver_init);
 
+	/* Reset */
+	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+		hci_reset_req(hdev, 0);
+
 	switch (hdev->dev_type) {
 	case HCI_BREDR:
 		bredr_init(hdev);
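
With the hunk above, the HCI reset moves out of bredr_init()/amp_init() into the shared hci_init_req() path, and the quirk is renamed from HCI_QUIRK_NO_RESET to HCI_QUIRK_RESET_ON_CLOSE: a controller that sets it skips the reset at open time and is reset when the device is closed instead (see the matching hci_dev_do_close() hunk later in this file). A driver would keep opting in the same way (hypothetical driver snippet):

	/* in the driver's setup path, before hci_register_dev() */
	set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
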
@@ -417,7 +397,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
417 INIT_LIST_HEAD(&cache->resolve); 397 INIT_LIST_HEAD(&cache->resolve);
418} 398}
419 399
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 400struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 bdaddr_t *bdaddr)
421{ 402{
422 struct discovery_state *cache = &hdev->discovery; 403 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e; 404 struct inquiry_entry *e;
@@ -478,7 +459,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
478 459
479 list_for_each_entry(p, &cache->resolve, list) { 460 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING && 461 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi)) 462 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break; 463 break;
483 pos = &p->list; 464 pos = &p->list;
484 } 465 }
@@ -503,7 +484,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
503 *ssp = true; 484 *ssp = true;
504 485
505 if (ie->name_state == NAME_NEEDED && 486 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) { 487 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi; 488 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie); 489 hci_inquiry_cache_update_resolve(hdev, ie);
509 } 490 }
@@ -527,7 +508,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
527 508
528update: 509update:
529 if (name_known && ie->name_state != NAME_KNOWN && 510 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) { 511 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN; 512 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list); 513 list_del(&ie->list);
533 } 514 }
@@ -605,8 +586,7 @@ int hci_inquiry(void __user *arg)
605 586
606 hci_dev_lock(hdev); 587 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 588 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) || 589 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev); 590 inquiry_cache_flush(hdev);
611 do_inquiry = 1; 591 do_inquiry = 1;
612 } 592 }
@@ -620,7 +600,9 @@ int hci_inquiry(void __user *arg)
620 goto done; 600 goto done;
621 } 601 }
622 602
623 /* for unlimited number of responses we will use buffer with 255 entries */ 603 /* for unlimited number of responses we will use buffer with
604 * 255 entries
605 */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 606 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 607
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 608 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +623,7 @@ int hci_inquiry(void __user *arg)
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 623 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir); 624 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 625 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp)) 626 ir.num_rsp))
645 err = -EFAULT; 627 err = -EFAULT;
646 } else 628 } else
647 err = -EFAULT; 629 err = -EFAULT;
@@ -701,12 +683,11 @@ int hci_dev_open(__u16 dev)
701 set_bit(HCI_INIT, &hdev->flags); 683 set_bit(HCI_INIT, &hdev->flags);
702 hdev->init_last_cmd = 0; 684 hdev->init_last_cmd = 0;
703 685
704 ret = __hci_request(hdev, hci_init_req, 0, 686 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
705 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 687
707 if (lmp_host_le_capable(hdev)) 688 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0, 689 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 690 HCI_INIT_TIMEOUT);
710 691
711 clear_bit(HCI_INIT, &hdev->flags); 692 clear_bit(HCI_INIT, &hdev->flags);
712 } 693 }
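
The recurring change through this and the following hunks is dropping the per-call msecs_to_jiffies() wrapper; that only works if the timeout macros themselves are already expressed in jiffies. A sketch of the pattern with an illustrative value (the real definitions live in the Bluetooth headers):

#include <linux/jiffies.h>

/* fold the unit conversion into the constant, once */
#define DEMO_INIT_TIMEOUT	msecs_to_jiffies(10000)	/* value illustrative */

/* callers then pass the constant directly:
 *   __hci_request(hdev, hci_init_req, 0, DEMO_INIT_TIMEOUT);
 */
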
@@ -791,10 +772,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
791 skb_queue_purge(&hdev->cmd_q); 772 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1); 773 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) && 774 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 775 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags); 776 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0, 777 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
797 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags); 778 clear_bit(HCI_INIT, &hdev->flags);
799 } 779 }
800 780
@@ -883,8 +863,7 @@ int hci_dev_reset(__u16 dev)
883 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 863 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
884 864
885 if (!test_bit(HCI_RAW, &hdev->flags)) 865 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0, 866 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
887 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 867
889done: 868done:
890 hci_req_unlock(hdev); 869 hci_req_unlock(hdev);
@@ -924,7 +903,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
924 switch (cmd) { 903 switch (cmd) {
925 case HCISETAUTH: 904 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 905 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 906 HCI_INIT_TIMEOUT);
928 break; 907 break;
929 908
930 case HCISETENCRYPT: 909 case HCISETENCRYPT:
@@ -936,23 +915,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
936 if (!test_bit(HCI_AUTH, &hdev->flags)) { 915 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */ 916 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 917 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 918 HCI_INIT_TIMEOUT);
940 if (err) 919 if (err)
941 break; 920 break;
942 } 921 }
943 922
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 923 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 924 HCI_INIT_TIMEOUT);
946 break; 925 break;
947 926
948 case HCISETSCAN: 927 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 928 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 929 HCI_INIT_TIMEOUT);
951 break; 930 break;
952 931
953 case HCISETLINKPOL: 932 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 933 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 934 HCI_INIT_TIMEOUT);
956 break; 935 break;
957 936
958 case HCISETLINKMODE: 937 case HCISETLINKMODE:
@@ -1102,8 +1081,7 @@ static void hci_power_on(struct work_struct *work)
1102 return; 1081 return;
1103 1082
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1083 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off, 1084 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 1085
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1086 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev); 1087 mgmt_index_added(hdev);
@@ -1112,7 +1090,7 @@ static void hci_power_on(struct work_struct *work)
1112static void hci_power_off(struct work_struct *work) 1090static void hci_power_off(struct work_struct *work)
1113{ 1091{
1114 struct hci_dev *hdev = container_of(work, struct hci_dev, 1092 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work); 1093 power_off.work);
1116 1094
1117 BT_DBG("%s", hdev->name); 1095 BT_DBG("%s", hdev->name);
1118 1096
@@ -1193,7 +1171,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1193} 1171}
1194 1172
1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1173static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type) 1174 u8 key_type, u8 old_key_type)
1197{ 1175{
1198 /* Legacy key */ 1176 /* Legacy key */
1199 if (key_type < 0x03) 1177 if (key_type < 0x03)
@@ -1234,7 +1212,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1234 1212
1235 list_for_each_entry(k, &hdev->long_term_keys, list) { 1213 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv || 1214 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand))) 1215 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue; 1216 continue;
1239 1217
1240 return k; 1218 return k;
@@ -1242,7 +1220,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1242 1220
1243 return NULL; 1221 return NULL;
1244} 1222}
1245EXPORT_SYMBOL(hci_find_ltk);
1246 1223
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1224struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type) 1225 u8 addr_type)
@@ -1251,12 +1228,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 1228
1252 list_for_each_entry(k, &hdev->long_term_keys, list) 1229 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type && 1230 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0) 1231 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k; 1232 return k;
1256 1233
1257 return NULL; 1234 return NULL;
1258} 1235}
1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 1236
1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1237int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1238 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1259,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1283 * combination key for legacy pairing even when there's no 1259 * combination key for legacy pairing even when there's no
1284 * previous key */ 1260 * previous key */
1285 if (type == HCI_LK_CHANGED_COMBINATION && 1261 if (type == HCI_LK_CHANGED_COMBINATION &&
1286 (!conn || conn->remote_auth == 0xff) && 1262 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1287 old_key_type == 0xff) {
1288 type = HCI_LK_COMBINATION; 1263 type = HCI_LK_COMBINATION;
1289 if (conn) 1264 if (conn)
1290 conn->key_type = type; 1265 conn->key_type = type;
1291 } 1266 }
1292 1267
1293 bacpy(&key->bdaddr, bdaddr); 1268 bacpy(&key->bdaddr, bdaddr);
1294 memcpy(key->val, val, 16); 1269 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1295 key->pin_len = pin_len; 1270 key->pin_len = pin_len;
1296 1271
1297 if (type == HCI_LK_CHANGED_COMBINATION) 1272 if (type == HCI_LK_CHANGED_COMBINATION)
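
The memcpy length changes from a magic 16 to HCI_LINK_KEY_SIZE (defined as 16 in net/bluetooth/hci.h), keeping buffer sizes and copy lengths in sync. A trivial sketch of the idea, with our own names:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_LINK_KEY_SIZE	16	/* mirrors HCI_LINK_KEY_SIZE */

struct demo_link_key {
	u8 val[DEMO_LINK_KEY_SIZE];
};

static void demo_store_key(struct demo_link_key *key, const u8 *val)
{
	memcpy(key->val, val, DEMO_LINK_KEY_SIZE);
}
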
@@ -1383,11 +1358,19 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1383} 1358}
1384 1359
1385/* HCI command timer function */ 1360/* HCI command timer function */
1386static void hci_cmd_timer(unsigned long arg) 1361static void hci_cmd_timeout(unsigned long arg)
1387{ 1362{
1388 struct hci_dev *hdev = (void *) arg; 1363 struct hci_dev *hdev = (void *) arg;
1389 1364
1390 BT_ERR("%s command tx timeout", hdev->name); 1365 if (hdev->sent_cmd) {
1366 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1367 u16 opcode = __le16_to_cpu(sent->opcode);
1368
1369 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1370 } else {
1371 BT_ERR("%s command tx timeout", hdev->name);
1372 }
1373
1391 atomic_set(&hdev->cmd_cnt, 1); 1374 atomic_set(&hdev->cmd_cnt, 1);
1392 queue_work(hdev->workqueue, &hdev->cmd_work); 1375 queue_work(hdev->workqueue, &hdev->cmd_work);
1393} 1376}
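
The renamed hci_cmd_timeout() now identifies which command stalled by decoding the opcode out of the saved sent_cmd skb. For context, a sketch of the timer idiom in use here: with the pre-timer_setup() API, the argument rides through the timer as an unsigned long and is cast back in the callback (demo_* names are ours):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct demo_dev {
	struct timer_list cmd_timer;
	/* ... saved command skb, work items, ... */
};

static void demo_cmd_timeout(unsigned long arg)
{
	struct demo_dev *dev = (void *) arg;	/* recover the pointer */

	/* here: decode the saved command header and log its opcode */
	pr_warn("demo dev %p: command tx timeout\n", dev);
}

static void demo_arm_timer(struct demo_dev *dev)
{
	setup_timer(&dev->cmd_timer, demo_cmd_timeout, (unsigned long) dev);
	mod_timer(&dev->cmd_timer, jiffies + msecs_to_jiffies(2000));
}
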
@@ -1540,6 +1523,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1540 1523
1541 memset(&cp, 0, sizeof(cp)); 1524 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1; 1525 cp.enable = 1;
1526 cp.filter_dup = 1;
1543 1527
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1528 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545} 1529}
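
Besides enabling the scan, the updated request now also sets filter_dup, asking the controller to suppress duplicate advertising reports. A sketch of building that command (struct and opcode as in net/bluetooth/hci.h):

#include <linux/string.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>

static void demo_le_scan_on(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;		/* start scanning */
	cp.filter_dup = 1;	/* report each advertiser only once */

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
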
@@ -1684,7 +1668,7 @@ struct hci_dev *hci_alloc_dev(void)
1684 1668
1685 init_waitqueue_head(&hdev->req_wait_q); 1669 init_waitqueue_head(&hdev->req_wait_q);
1686 1670
1687 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev); 1671 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1688 1672
1689 hci_init_sysfs(hdev); 1673 hci_init_sysfs(hdev);
1690 discovery_init(hdev); 1674 discovery_init(hdev);
@@ -1707,41 +1691,39 @@ EXPORT_SYMBOL(hci_free_dev);
1707/* Register HCI device */ 1691/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev) 1692int hci_register_dev(struct hci_dev *hdev)
1709{ 1693{
1710 struct list_head *head, *p;
1711 int id, error; 1694 int id, error;
1712 1695
1713 if (!hdev->open || !hdev->close) 1696 if (!hdev->open || !hdev->close)
1714 return -EINVAL; 1697 return -EINVAL;
1715 1698
1716 write_lock(&hci_dev_list_lock);
1717
1718 /* Do not allow HCI_AMP devices to register at index 0, 1699 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID. 1700 * so the index can be used as the AMP controller ID.
1720 */ 1701 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1702 switch (hdev->dev_type) {
1722 head = &hci_dev_list; 1703 case HCI_BREDR:
1723 1704 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1724 /* Find first available device id */ 1705 break;
1725 list_for_each(p, &hci_dev_list) { 1706 case HCI_AMP:
1726 int nid = list_entry(p, struct hci_dev, list)->id; 1707 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1727 if (nid > id) 1708 break;
1728 break; 1709 default:
1729 if (nid == id) 1710 return -EINVAL;
1730 id++;
1731 head = p;
1732 } 1711 }
1733 1712
1713 if (id < 0)
1714 return id;
1715
1734 sprintf(hdev->name, "hci%d", id); 1716 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id; 1717 hdev->id = id;
1736 1718
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1719 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 1720
1739 list_add(&hdev->list, head); 1721 write_lock(&hci_dev_list_lock);
1740 1722 list_add(&hdev->list, &hci_dev_list);
1741 write_unlock(&hci_dev_list_lock); 1723 write_unlock(&hci_dev_list_lock);
1742 1724
1743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1725 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 WQ_MEM_RECLAIM, 1); 1726 WQ_MEM_RECLAIM, 1);
1745 if (!hdev->workqueue) { 1727 if (!hdev->workqueue) {
1746 error = -ENOMEM; 1728 error = -ENOMEM;
1747 goto err; 1729 goto err;
@@ -1752,7 +1734,8 @@ int hci_register_dev(struct hci_dev *hdev)
1752 goto err_wqueue; 1734 goto err_wqueue;
1753 1735
1754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1736 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1737 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1738 hdev);
1756 if (hdev->rfkill) { 1739 if (hdev->rfkill) {
1757 if (rfkill_register(hdev->rfkill) < 0) { 1740 if (rfkill_register(hdev->rfkill) < 0) {
1758 rfkill_destroy(hdev->rfkill); 1741 rfkill_destroy(hdev->rfkill);
@@ -1760,8 +1743,11 @@ int hci_register_dev(struct hci_dev *hdev)
1760 } 1743 }
1761 } 1744 }
1762 1745
1763 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1764 set_bit(HCI_SETUP, &hdev->dev_flags); 1746 set_bit(HCI_SETUP, &hdev->dev_flags);
1747
1748 if (hdev->dev_type != HCI_AMP)
1749 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1750
1765 schedule_work(&hdev->power_on); 1751 schedule_work(&hdev->power_on);
1766 1752
1767 hci_notify(hdev, HCI_DEV_REG); 1753 hci_notify(hdev, HCI_DEV_REG);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
1772err_wqueue: 1758err_wqueue:
1773 destroy_workqueue(hdev->workqueue); 1759 destroy_workqueue(hdev->workqueue);
1774err: 1760err:
1761 ida_simple_remove(&hci_index_ida, hdev->id);
1775 write_lock(&hci_dev_list_lock); 1762 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list); 1763 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock); 1764 write_unlock(&hci_dev_list_lock);
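
The hand-rolled scan of hci_dev_list for the first free index is replaced by an IDA, which gives the same smallest-free-id semantics with less code and without holding hci_dev_list_lock across the allocation. A minimal sketch of the API (demo names are ours):

#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

/* ida_simple_get(ida, start, end, gfp) returns the smallest free id
 * >= start (end == 0 means no upper bound), or a negative errno.
 */
static int demo_get_id(bool reserve_zero)
{
	/* start at 1 when index 0 is reserved (the HCI_AMP case above) */
	return ida_simple_get(&demo_ida, reserve_zero ? 1 : 0, 0,
			      GFP_KERNEL);
}

static void demo_put_id(int id)
{
	ida_simple_remove(&demo_ida, id);	/* must match every get */
}

Note the matching cleanups in the hunks that follow: the error path gains an ida_simple_remove(), and hci_unregister_dev() saves hdev->id up front so the index is only returned to the allocator after the device is fully torn down.
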
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
1783/* Unregister HCI device */ 1770/* Unregister HCI device */
1784void hci_unregister_dev(struct hci_dev *hdev) 1771void hci_unregister_dev(struct hci_dev *hdev)
1785{ 1772{
1786 int i; 1773 int i, id;
1787 1774
1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1775 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 1776
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 1777 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 1778
1779 id = hdev->id;
1780
1792 write_lock(&hci_dev_list_lock); 1781 write_lock(&hci_dev_list_lock);
1793 list_del(&hdev->list); 1782 list_del(&hdev->list);
1794 write_unlock(&hci_dev_list_lock); 1783 write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1799 kfree_skb(hdev->reassembly[i]); 1788 kfree_skb(hdev->reassembly[i]);
1800 1789
1801 if (!test_bit(HCI_INIT, &hdev->flags) && 1790 if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1791 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 hci_dev_lock(hdev); 1792 hci_dev_lock(hdev);
1804 mgmt_index_removed(hdev); 1793 mgmt_index_removed(hdev);
1805 hci_dev_unlock(hdev); 1794 hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1829 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1830 1819
1831 hci_dev_put(hdev); 1820 hci_dev_put(hdev);
1821
1822 ida_simple_remove(&hci_index_ida, id);
1832} 1823}
1833EXPORT_SYMBOL(hci_unregister_dev); 1824EXPORT_SYMBOL(hci_unregister_dev);
1834 1825
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
1853{ 1844{
1854 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1845 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1846 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 && !test_bit(HCI_INIT, &hdev->flags))) { 1847 && !test_bit(HCI_INIT, &hdev->flags))) {
1857 kfree_skb(skb); 1848 kfree_skb(skb);
1858 return -ENXIO; 1849 return -ENXIO;
1859 } 1850 }
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
1872EXPORT_SYMBOL(hci_recv_frame); 1863EXPORT_SYMBOL(hci_recv_frame);
1873 1864
1874static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1865static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 int count, __u8 index) 1866 int count, __u8 index)
1876{ 1867{
1877 int len = 0; 1868 int len = 0;
1878 int hlen = 0; 1869 int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1881 struct bt_skb_cb *scb; 1872 struct bt_skb_cb *scb;
1882 1873
1883 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1874 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 index >= NUM_REASSEMBLY) 1875 index >= NUM_REASSEMBLY)
1885 return -EILSEQ; 1876 return -EILSEQ;
1886 1877
1887 skb = hdev->reassembly[index]; 1878 skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2023 type = bt_cb(skb)->pkt_type; 2014 type = bt_cb(skb)->pkt_type;
2024 2015
2025 rem = hci_reassembly(hdev, type, data, count, 2016 rem = hci_reassembly(hdev, type, data, count,
2026 STREAM_REASSEMBLY); 2017 STREAM_REASSEMBLY);
2027 if (rem < 0) 2018 if (rem < 0)
2028 return rem; 2019 return rem;
2029 2020
@@ -2096,7 +2087,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2096 struct hci_command_hdr *hdr; 2087 struct hci_command_hdr *hdr;
2097 struct sk_buff *skb; 2088 struct sk_buff *skb;
2098 2089
2099 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); 2090 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2100 2091
2101 skb = bt_skb_alloc(len, GFP_ATOMIC); 2092 skb = bt_skb_alloc(len, GFP_ATOMIC);
2102 if (!skb) { 2093 if (!skb) {
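
The format-string churn in this and the following hci_event.c hunks is cosmetic but deliberate: "%4.4x" zero-pads to exactly four hex digits, so 16-bit opcodes, flags and handles (and one-byte status codes via "%2.2x") align in the logs regardless of value. A small illustration:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_fmt(u16 opcode)	/* e.g. 0x0c52 */
{
	pr_debug("0x%x\n", opcode);	/* "0xc52"  - width varies with value */
	pr_debug("0x%4.4x\n", opcode);	/* "0x0c52" - fixed four digits */
}
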
@@ -2138,7 +2129,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2138 if (hdr->opcode != cpu_to_le16(opcode)) 2129 if (hdr->opcode != cpu_to_le16(opcode))
2139 return NULL; 2130 return NULL;
2140 2131
2141 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2132 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2142 2133
2143 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 2134 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2144} 2135}
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2157} 2148}
2158 2149
2159static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2150static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 struct sk_buff *skb, __u16 flags) 2151 struct sk_buff *skb, __u16 flags)
2161{ 2152{
2162 struct hci_dev *hdev = conn->hdev; 2153 struct hci_dev *hdev = conn->hdev;
2163 struct sk_buff *list; 2154 struct sk_buff *list;
@@ -2208,7 +2199,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2208 struct hci_conn *conn = chan->conn; 2199 struct hci_conn *conn = chan->conn;
2209 struct hci_dev *hdev = conn->hdev; 2200 struct hci_dev *hdev = conn->hdev;
2210 2201
2211 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); 2202 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2212 2203
2213 skb->dev = (void *) hdev; 2204 skb->dev = (void *) hdev;
2214 2205
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 2207
2217 queue_work(hdev->workqueue, &hdev->tx_work); 2208 queue_work(hdev->workqueue, &hdev->tx_work);
2218} 2209}
2219EXPORT_SYMBOL(hci_send_acl);
2220 2210
2221/* Send SCO data */ 2211/* Send SCO data */
2222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2212void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2239 skb_queue_tail(&conn->data_q, skb); 2229 skb_queue_tail(&conn->data_q, skb);
2240 queue_work(hdev->workqueue, &hdev->tx_work); 2230 queue_work(hdev->workqueue, &hdev->tx_work);
2241} 2231}
2242EXPORT_SYMBOL(hci_send_sco);
2243 2232
2244/* ---- HCI TX task (outgoing data) ---- */ 2233/* ---- HCI TX task (outgoing data) ---- */
2245 2234
2246/* HCI Connection scheduler */ 2235/* HCI Connection scheduler */
2247static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2236static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2237 int *quote)
2248{ 2238{
2249 struct hci_conn_hash *h = &hdev->conn_hash; 2239 struct hci_conn_hash *h = &hdev->conn_hash;
2250 struct hci_conn *conn = NULL, *c; 2240 struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2303 return conn; 2293 return conn;
2304} 2294}
2305 2295
2306static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2296static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307{ 2297{
2308 struct hci_conn_hash *h = &hdev->conn_hash; 2298 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *c; 2299 struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2316 list_for_each_entry_rcu(c, &h->list, list) { 2306 list_for_each_entry_rcu(c, &h->list, list) {
2317 if (c->type == type && c->sent) { 2307 if (c->type == type && c->sent) {
2318 BT_ERR("%s killing stalled connection %s", 2308 BT_ERR("%s killing stalled connection %s",
2319 hdev->name, batostr(&c->dst)); 2309 hdev->name, batostr(&c->dst));
2320 hci_acl_disconn(c, 0x13); 2310 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2321 } 2311 }
2322 } 2312 }
2323 2313
2324 rcu_read_unlock(); 2314 rcu_read_unlock();
2325} 2315}
2326 2316
2327static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2317static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 int *quote) 2318 int *quote)
2329{ 2319{
2330 struct hci_conn_hash *h = &hdev->conn_hash; 2320 struct hci_conn_hash *h = &hdev->conn_hash;
2331 struct hci_chan *chan = NULL; 2321 struct hci_chan *chan = NULL;
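
In the hunk above, the raw 0x13 passed to hci_acl_disconn() becomes a named Bluetooth core-spec error code. A few of the values as defined in net/bluetooth/hci.h, abridged and shown for reference:

#define HCI_ERROR_AUTH_FAILURE		0x05
#define HCI_ERROR_REJ_BAD_ADDR		0x0f
#define HCI_ERROR_REMOTE_USER_TERM	0x13	/* used above */
#define HCI_ERROR_LOCAL_HOST_TERM	0x16
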
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2442 skb->priority = HCI_PRIO_MAX - 1; 2432 skb->priority = HCI_PRIO_MAX - 1;
2443 2433
2444 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2434 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 skb->priority); 2435 skb->priority);
2446 } 2436 }
2447 2437
2448 if (hci_conn_num(hdev, type) == num) 2438 if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460} 2450}
2461 2451
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{ 2453{
2464 if (!test_bit(HCI_RAW, &hdev->flags)) { 2454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 /* ACL tx timeout must be longer than maximum 2455 /* ACL tx timeout must be longer than maximum
2466 * link supervision timeout (40.9 seconds) */ 2456 * link supervision timeout (40.9 seconds) */
2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2458 HCI_ACL_TX_TIMEOUT))
2469 hci_link_tx_to(hdev, ACL_LINK); 2459 hci_link_tx_to(hdev, ACL_LINK);
2470 } 2460 }
2471} 2461}
2472 2462
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2463static void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{ 2464{
2475 unsigned int cnt = hdev->acl_cnt; 2465 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan; 2466 struct hci_chan *chan;
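
__check_timeout() relies on jiffies comparisons staying correct across counter wraparound, which is what time_after() provides; a plain '>' would misfire near the wrap point. A minimal sketch:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool demo_tx_stalled(unsigned long last_tx, unsigned long timeout)
{
	/* wrap-safe: correct even if jiffies overflows between samples */
	return time_after(jiffies, last_tx + timeout);
}
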
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2480 __check_timeout(hdev, cnt); 2470 __check_timeout(hdev, cnt);
2481 2471
2482 while (hdev->acl_cnt && 2472 while (hdev->acl_cnt &&
2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 u32 priority = (skb_peek(&chan->data_q))->priority; 2474 u32 priority = (skb_peek(&chan->data_q))->priority;
2485 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 skb->len, skb->priority); 2477 skb->len, skb->priority);
2488 2478
2489 /* Stop if priority has changed */ 2479 /* Stop if priority has changed */
2490 if (skb->priority < priority) 2480 if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508 hci_prio_recalculate(hdev, ACL_LINK); 2498 hci_prio_recalculate(hdev, ACL_LINK);
2509} 2499}
2510 2500
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2501static void hci_sched_acl_blk(struct hci_dev *hdev)
2512{ 2502{
2513 unsigned int cnt = hdev->block_cnt; 2503 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan; 2504 struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2518 __check_timeout(hdev, cnt); 2508 __check_timeout(hdev, cnt);
2519 2509
2520 while (hdev->block_cnt > 0 && 2510 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority; 2512 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks; 2514 int blocks;
2525 2515
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority); 2517 skb->len, skb->priority);
2528 2518
2529 /* Stop if priority has changed */ 2519 /* Stop if priority has changed */
2530 if (skb->priority < priority) 2520 if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2537 return; 2527 return;
2538 2528
2539 hci_conn_enter_active_mode(chan->conn, 2529 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active); 2530 bt_cb(skb)->force_active);
2541 2531
2542 hci_send_frame(skb); 2532 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies; 2533 hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2554 hci_prio_recalculate(hdev, ACL_LINK); 2544 hci_prio_recalculate(hdev, ACL_LINK);
2555} 2545}
2556 2546
2557static inline void hci_sched_acl(struct hci_dev *hdev) 2547static void hci_sched_acl(struct hci_dev *hdev)
2558{ 2548{
2559 BT_DBG("%s", hdev->name); 2549 BT_DBG("%s", hdev->name);
2560 2550
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2573} 2563}
2574 2564
2575/* Schedule SCO */ 2565/* Schedule SCO */
2576static inline void hci_sched_sco(struct hci_dev *hdev) 2566static void hci_sched_sco(struct hci_dev *hdev)
2577{ 2567{
2578 struct hci_conn *conn; 2568 struct hci_conn *conn;
2579 struct sk_buff *skb; 2569 struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2596 } 2586 }
2597} 2587}
2598 2588
2599static inline void hci_sched_esco(struct hci_dev *hdev) 2589static void hci_sched_esco(struct hci_dev *hdev)
2600{ 2590{
2601 struct hci_conn *conn; 2591 struct hci_conn *conn;
2602 struct sk_buff *skb; 2592 struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2607 if (!hci_conn_num(hdev, ESCO_LINK)) 2597 if (!hci_conn_num(hdev, ESCO_LINK))
2608 return; 2598 return;
2609 2599
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len); 2603 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb); 2604 hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2619 } 2610 }
2620} 2611}
2621 2612
2622static inline void hci_sched_le(struct hci_dev *hdev) 2613static void hci_sched_le(struct hci_dev *hdev)
2623{ 2614{
2624 struct hci_chan *chan; 2615 struct hci_chan *chan;
2625 struct sk_buff *skb; 2616 struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2634 /* LE tx timeout must be longer than maximum 2625 /* LE tx timeout must be longer than maximum
2635 * link supervision timeout (40.9 seconds) */ 2626 * link supervision timeout (40.9 seconds) */
2636 if (!hdev->le_cnt && hdev->le_pkts && 2627 if (!hdev->le_cnt && hdev->le_pkts &&
2637 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 hci_link_tx_to(hdev, LE_LINK); 2629 hci_link_tx_to(hdev, LE_LINK);
2639 } 2630 }
2640 2631
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2644 u32 priority = (skb_peek(&chan->data_q))->priority; 2635 u32 priority = (skb_peek(&chan->data_q))->priority;
2645 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 skb->len, skb->priority); 2638 skb->len, skb->priority);
2648 2639
2649 /* Stop if priority has changed */ 2640 /* Stop if priority has changed */
2650 if (skb->priority < priority) 2641 if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
2676 struct sk_buff *skb; 2667 struct sk_buff *skb;
2677 2668
2678 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 hdev->sco_cnt, hdev->le_cnt); 2670 hdev->sco_cnt, hdev->le_cnt);
2680 2671
2681 /* Schedule queues and send stuff to HCI driver */ 2672 /* Schedule queues and send stuff to HCI driver */
2682 2673
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
2696/* ----- HCI RX task (incoming data processing) ----- */ 2687/* ----- HCI RX task (incoming data processing) ----- */
2697 2688
2698/* ACL data packet */ 2689/* ACL data packet */
2699static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2690static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700{ 2691{
2701 struct hci_acl_hdr *hdr = (void *) skb->data; 2692 struct hci_acl_hdr *hdr = (void *) skb->data;
2702 struct hci_conn *conn; 2693 struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2708 flags = hci_flags(handle); 2699 flags = hci_flags(handle);
2709 handle = hci_handle(handle); 2700 handle = hci_handle(handle);
2710 2701
2711 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2702 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2703 handle, flags);
2712 2704
2713 hdev->stat.acl_rx++; 2705 hdev->stat.acl_rx++;
2714 2706
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 return; 2724 return;
2733 } else { 2725 } else {
2734 BT_ERR("%s ACL packet for unknown connection handle %d", 2726 BT_ERR("%s ACL packet for unknown connection handle %d",
2735 hdev->name, handle); 2727 hdev->name, handle);
2736 } 2728 }
2737 2729
2738 kfree_skb(skb); 2730 kfree_skb(skb);
2739} 2731}
2740 2732
2741/* SCO data packet */ 2733/* SCO data packet */
2742static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{ 2735{
2744 struct hci_sco_hdr *hdr = (void *) skb->data; 2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2745 struct hci_conn *conn; 2737 struct hci_conn *conn;
@@ -2749,7 +2741,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2749 2741
2750 handle = __le16_to_cpu(hdr->handle); 2742 handle = __le16_to_cpu(hdr->handle);
2751 2743
2752 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); 2744 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2753 2745
2754 hdev->stat.sco_rx++; 2746 hdev->stat.sco_rx++;
2755 2747
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2763 return; 2755 return;
2764 } else { 2756 } else {
2765 BT_ERR("%s SCO packet for unknown connection handle %d", 2757 BT_ERR("%s SCO packet for unknown connection handle %d",
2766 hdev->name, handle); 2758 hdev->name, handle);
2767 } 2759 }
2768 2760
2769 kfree_skb(skb); 2761 kfree_skb(skb);
@@ -2829,7 +2821,8 @@ static void hci_cmd_work(struct work_struct *work)
2829 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 2821 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2830 struct sk_buff *skb; 2822 struct sk_buff *skb;
2831 2823
2832 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2824 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2825 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2833 2826
2834 /* Send queued commands */ 2827 /* Send queued commands */
2835 if (atomic_read(&hdev->cmd_cnt)) { 2828 if (atomic_read(&hdev->cmd_cnt)) {
@@ -2847,7 +2840,7 @@ static void hci_cmd_work(struct work_struct *work)
2847 del_timer(&hdev->cmd_timer); 2840 del_timer(&hdev->cmd_timer);
2848 else 2841 else
2849 mod_timer(&hdev->cmd_timer, 2842 mod_timer(&hdev->cmd_timer,
2850 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); 2843 jiffies + HCI_CMD_TIMEOUT);
2851 } else { 2844 } else {
2852 skb_queue_head(&hdev->cmd_q, skb); 2845 skb_queue_head(&hdev->cmd_q, skb);
2853 queue_work(hdev->workqueue, &hdev->cmd_work); 2846 queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4eefb7f65cf6..41ff978a33f9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h> 28#include <asm/unaligned.h>
42 29
43#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
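
The include block shrinks to <linux/export.h> plus what the file actually uses; export.h alone is sufficient for EXPORT_SYMBOL and is far lighter than <linux/module.h>. For example:

#include <linux/export.h>

int demo_api(void)
{
	return 0;
}
EXPORT_SYMBOL(demo_api);
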
@@ -49,7 +36,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49{ 36{
50 __u8 status = *((__u8 *) skb->data); 37 __u8 status = *((__u8 *) skb->data);
51 38
52 BT_DBG("%s status 0x%x", hdev->name, status); 39 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 40
54 if (status) { 41 if (status) {
55 hci_dev_lock(hdev); 42 hci_dev_lock(hdev);
@@ -73,7 +60,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{ 60{
74 __u8 status = *((__u8 *) skb->data); 61 __u8 status = *((__u8 *) skb->data);
75 62
76 BT_DBG("%s status 0x%x", hdev->name, status); 63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
77 64
78 if (status) 65 if (status)
79 return; 66 return;
@@ -85,7 +72,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
85{ 72{
86 __u8 status = *((__u8 *) skb->data); 73 __u8 status = *((__u8 *) skb->data);
87 74
88 BT_DBG("%s status 0x%x", hdev->name, status); 75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
89 76
90 if (status) 77 if (status)
91 return; 78 return;
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
95 hci_conn_check_pending(hdev); 82 hci_conn_check_pending(hdev);
96} 83}
97 84
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
99{ 87{
100 BT_DBG("%s", hdev->name); 88 BT_DBG("%s", hdev->name);
101} 89}
@@ -105,7 +93,7 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
105 struct hci_rp_role_discovery *rp = (void *) skb->data; 93 struct hci_rp_role_discovery *rp = (void *) skb->data;
106 struct hci_conn *conn; 94 struct hci_conn *conn;
107 95
108 BT_DBG("%s status 0x%x", hdev->name, rp->status); 96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
109 97
110 if (rp->status) 98 if (rp->status)
111 return; 99 return;
@@ -128,7 +116,7 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128 struct hci_rp_read_link_policy *rp = (void *) skb->data; 116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
129 struct hci_conn *conn; 117 struct hci_conn *conn;
130 118
131 BT_DBG("%s status 0x%x", hdev->name, rp->status); 119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 120
133 if (rp->status) 121 if (rp->status)
134 return; 122 return;
@@ -148,7 +136,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
148 struct hci_conn *conn; 136 struct hci_conn *conn;
149 void *sent; 137 void *sent;
150 138
151 BT_DBG("%s status 0x%x", hdev->name, rp->status); 139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
152 140
153 if (rp->status) 141 if (rp->status)
154 return; 142 return;
@@ -166,11 +154,12 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
166 hci_dev_unlock(hdev); 154 hci_dev_unlock(hdev);
167} 155}
168 156
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
170{ 159{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172 161
173 BT_DBG("%s status 0x%x", hdev->name, rp->status); 162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
174 163
175 if (rp->status) 164 if (rp->status)
176 return; 165 return;
@@ -178,12 +167,13 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
178 hdev->link_policy = __le16_to_cpu(rp->policy); 167 hdev->link_policy = __le16_to_cpu(rp->policy);
179} 168}
180 169
181static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
182{ 172{
183 __u8 status = *((__u8 *) skb->data); 173 __u8 status = *((__u8 *) skb->data);
184 void *sent; 174 void *sent;
185 175
186 BT_DBG("%s status 0x%x", hdev->name, status); 176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 177
188 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
189 if (!sent) 179 if (!sent)
@@ -199,7 +189,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
199{ 189{
200 __u8 status = *((__u8 *) skb->data); 190 __u8 status = *((__u8 *) skb->data);
201 191
202 BT_DBG("%s status 0x%x", hdev->name, status); 192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
203 193
204 clear_bit(HCI_RESET, &hdev->flags); 194 clear_bit(HCI_RESET, &hdev->flags);
205 195
@@ -217,7 +207,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
217 __u8 status = *((__u8 *) skb->data); 207 __u8 status = *((__u8 *) skb->data);
218 void *sent; 208 void *sent;
219 209
220 BT_DBG("%s status 0x%x", hdev->name, status); 210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221 211
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (!sent) 213 if (!sent)
@@ -239,7 +229,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239{ 229{
240 struct hci_rp_read_local_name *rp = (void *) skb->data; 230 struct hci_rp_read_local_name *rp = (void *) skb->data;
241 231
242 BT_DBG("%s status 0x%x", hdev->name, rp->status); 232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243 233
244 if (rp->status) 234 if (rp->status)
245 return; 235 return;
@@ -253,7 +243,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
253 __u8 status = *((__u8 *) skb->data); 243 __u8 status = *((__u8 *) skb->data);
254 void *sent; 244 void *sent;
255 245
256 BT_DBG("%s status 0x%x", hdev->name, status); 246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
257 247
258 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
259 if (!sent) 249 if (!sent)
@@ -279,7 +269,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
279 __u8 status = *((__u8 *) skb->data); 269 __u8 status = *((__u8 *) skb->data);
280 void *sent; 270 void *sent;
281 271
282 BT_DBG("%s status 0x%x", hdev->name, status); 272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 273
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent) 275 if (!sent)
@@ -303,7 +293,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
303 int old_pscan, old_iscan; 293 int old_pscan, old_iscan;
304 void *sent; 294 void *sent;
305 295
306 BT_DBG("%s status 0x%x", hdev->name, status); 296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
307 297
308 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 298 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
309 if (!sent) 299 if (!sent)
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
329 if (hdev->discov_timeout > 0) { 319 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to); 322 to);
333 } 323 }
334 } else if (old_iscan) 324 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0); 325 mgmt_discoverable(hdev, 0);
@@ -350,7 +340,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350{ 340{
351 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
352 342
353 BT_DBG("%s status 0x%x", hdev->name, rp->status); 343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
354 344
355 if (rp->status) 345 if (rp->status)
356 return; 346 return;
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 memcpy(hdev->dev_class, rp->dev_class, 3); 348 memcpy(hdev->dev_class, rp->dev_class, 3);
359 349
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362} 352}
363 353
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 354static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -366,7 +356,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
366 __u8 status = *((__u8 *) skb->data); 356 __u8 status = *((__u8 *) skb->data);
367 void *sent; 357 void *sent;
368 358
369 BT_DBG("%s status 0x%x", hdev->name, status); 359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 360
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 361 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
372 if (!sent) 362 if (!sent)
@@ -388,7 +378,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 378 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting; 379 __u16 setting;
390 380
391 BT_DBG("%s status 0x%x", hdev->name, rp->status); 381 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
392 382
393 if (rp->status) 383 if (rp->status)
394 return; 384 return;
@@ -400,19 +390,20 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
400 390
401 hdev->voice_setting = setting; 391 hdev->voice_setting = setting;
402 392
403 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 393 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
404 394
405 if (hdev->notify) 395 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407} 397}
408 398
409static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
410{ 401{
411 __u8 status = *((__u8 *) skb->data); 402 __u8 status = *((__u8 *) skb->data);
412 __u16 setting; 403 __u16 setting;
413 void *sent; 404 void *sent;
414 405
415 BT_DBG("%s status 0x%x", hdev->name, status); 406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
416 407
417 if (status) 408 if (status)
418 return; 409 return;
@@ -428,7 +419,7 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
428 419
429 hdev->voice_setting = setting; 420 hdev->voice_setting = setting;
430 421
431 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 422 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
432 423
433 if (hdev->notify) 424 if (hdev->notify)
434 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 425 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
@@ -438,7 +429,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
438{ 429{
439 __u8 status = *((__u8 *) skb->data); 430 __u8 status = *((__u8 *) skb->data);
440 431
441 BT_DBG("%s status 0x%x", hdev->name, status); 432 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442 433
443 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); 434 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
444} 435}
@@ -448,7 +439,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448 __u8 status = *((__u8 *) skb->data); 439 __u8 status = *((__u8 *) skb->data);
449 void *sent; 440 void *sent;
450 441
451 BT_DBG("%s status 0x%x", hdev->name, status); 442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
452 443
453 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
454 if (!sent) 445 if (!sent)
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
473 return 1; 464 return 1;
474 465
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757) 467 hdev->lmp_subver == 0x0757)
477 return 1; 468 return 1;
478 469
479 if (hdev->manufacturer == 15) { 470 if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
486 } 477 }
487 478
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805) 480 hdev->lmp_subver == 0x1805)
490 return 1; 481 return 1;
491 482
492 return 0; 483 return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568 559
569 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 560 if (lmp_ssp_capable(hdev)) {
570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
571 u8 mode = 0x01; 562 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
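
Open-coded feature-bit tests such as hdev->features[6] & LMP_SIMPLE_PAIR give way to the lmp_*_capable() predicates from hci_core.h. The equivalent open-coded form, for reference (demo name ours):

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>

static bool demo_ssp_capable(struct hci_dev *hdev)
{
	return (hdev->features[6] & LMP_SIMPLE_PAIR) != 0;
}
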
@@ -606,7 +597,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606{ 597{
607 struct hci_rp_read_local_version *rp = (void *) skb->data; 598 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 599
609 BT_DBG("%s status 0x%x", hdev->name, rp->status); 600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610 601
611 if (rp->status) 602 if (rp->status)
612 goto done; 603 goto done;
@@ -617,9 +608,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
617 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 608 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619 610
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 611 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
621 hdev->manufacturer, 612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
622 hdev->hci_ver, hdev->hci_rev);
623 613
624 if (test_bit(HCI_INIT, &hdev->flags)) 614 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev); 615 hci_setup(hdev);
@@ -646,11 +636,12 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647} 637}
648 638
649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 639static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
650{ 641{
651 struct hci_rp_read_local_commands *rp = (void *) skb->data; 642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
652 643
653 BT_DBG("%s status 0x%x", hdev->name, rp->status); 644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
654 645
655 if (rp->status) 646 if (rp->status)
656 goto done; 647 goto done;
@@ -664,11 +655,12 @@ done:
664 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
665} 656}
666 657
667static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 658static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
668{ 660{
669 struct hci_rp_read_local_features *rp = (void *) skb->data; 661 struct hci_rp_read_local_features *rp = (void *) skb->data;
670 662
671 BT_DBG("%s status 0x%x", hdev->name, rp->status); 663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
672 664
673 if (rp->status) 665 if (rp->status)
674 return; 666 return;
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714 706
715 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
716 hdev->features[0], hdev->features[1], 708 hdev->features[0], hdev->features[1],
717 hdev->features[2], hdev->features[3], 709 hdev->features[2], hdev->features[3],
718 hdev->features[4], hdev->features[5], 710 hdev->features[4], hdev->features[5],
719 hdev->features[6], hdev->features[7]); 711 hdev->features[6], hdev->features[7]);
720} 712}
721 713
722static void hci_set_le_support(struct hci_dev *hdev) 714static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,11 +728,11 @@ static void hci_set_le_support(struct hci_dev *hdev)
736} 728}
737 729
738static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 730static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
739 struct sk_buff *skb) 731 struct sk_buff *skb)
740{ 732{
741 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
742 734
743 BT_DBG("%s status 0x%x", hdev->name, rp->status); 735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
744 736
745 if (rp->status) 737 if (rp->status)
746 goto done; 738 goto done;
@@ -762,11 +754,11 @@ done:
762} 754}
763 755
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 756static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb) 757 struct sk_buff *skb)
766{ 758{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768 760
769 BT_DBG("%s status 0x%x", hdev->name, rp->status); 761 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
770 762
771 if (rp->status) 763 if (rp->status)
772 return; 764 return;
@@ -780,7 +772,7 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
780{ 772{
781 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 773 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
782 774
783 BT_DBG("%s status 0x%x", hdev->name, rp->status); 775 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
784 776
785 if (rp->status) 777 if (rp->status)
786 return; 778 return;
@@ -798,16 +790,15 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
798 hdev->acl_cnt = hdev->acl_pkts; 790 hdev->acl_cnt = hdev->acl_pkts;
799 hdev->sco_cnt = hdev->sco_pkts; 791 hdev->sco_cnt = hdev->sco_pkts;
800 792
801 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
802 hdev->acl_mtu, hdev->acl_pkts, 794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
803 hdev->sco_mtu, hdev->sco_pkts);
804} 795}
805 796
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 797static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
807{ 798{
808 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
809 800
810 BT_DBG("%s status 0x%x", hdev->name, rp->status); 801 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811 802
812 if (!rp->status) 803 if (!rp->status)
813 bacpy(&hdev->bdaddr, &rp->bdaddr); 804 bacpy(&hdev->bdaddr, &rp->bdaddr);
@@ -816,11 +807,11 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
816} 807}
817 808
818static void hci_cc_read_data_block_size(struct hci_dev *hdev, 809static void hci_cc_read_data_block_size(struct hci_dev *hdev,
819 struct sk_buff *skb) 810 struct sk_buff *skb)
820{ 811{
821 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
822 813
823 BT_DBG("%s status 0x%x", hdev->name, rp->status); 814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
824 815
825 if (rp->status) 816 if (rp->status)
826 return; 817 return;
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 hdev->block_cnt = hdev->num_blocks; 823 hdev->block_cnt = hdev->num_blocks;
833 824
834 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
835 hdev->block_cnt, hdev->block_len); 826 hdev->block_cnt, hdev->block_len);
836 827
837 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); 828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
838} 829}
@@ -841,17 +832,17 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
841{ 832{
842 __u8 status = *((__u8 *) skb->data); 833 __u8 status = *((__u8 *) skb->data);
843 834
844 BT_DBG("%s status 0x%x", hdev->name, status); 835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
845 836
846 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
847} 838}
848 839
849static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 840static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
850 struct sk_buff *skb) 841 struct sk_buff *skb)
851{ 842{
852 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
853 844
854 BT_DBG("%s status 0x%x", hdev->name, rp->status); 845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 846
856 if (rp->status) 847 if (rp->status)
857 return; 848 return;
@@ -871,11 +862,11 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
871} 862}
872 863
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 864static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb) 865 struct sk_buff *skb)
875{ 866{
876 __u8 status = *((__u8 *) skb->data); 867 __u8 status = *((__u8 *) skb->data);
877 868
878 BT_DBG("%s status 0x%x", hdev->name, status); 869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
879 870
880 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status); 871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
881} 872}
@@ -884,27 +875,27 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
884{ 875{
885 __u8 status = *((__u8 *) skb->data); 876 __u8 status = *((__u8 *) skb->data);
886 877
887 BT_DBG("%s status 0x%x", hdev->name, status); 878 BT_DBG("%s status 0x%2.2x", hdev->name, status);
888 879
889 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status); 880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
890} 881}
891 882
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 883static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb) 884 struct sk_buff *skb)
894{ 885{
895 __u8 status = *((__u8 *) skb->data); 886 __u8 status = *((__u8 *) skb->data);
896 887
897 BT_DBG("%s status 0x%x", hdev->name, status); 888 BT_DBG("%s status 0x%2.2x", hdev->name, status);
898 889
899 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status); 890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
900} 891}
901 892
902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 893static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
903 struct sk_buff *skb) 894 struct sk_buff *skb)
904{ 895{
905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906 897
907 BT_DBG("%s status 0x%x", hdev->name, rp->status); 898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908 899
909 if (!rp->status) 900 if (!rp->status)
910 hdev->inq_tx_power = rp->tx_power; 901 hdev->inq_tx_power = rp->tx_power;
@@ -916,7 +907,7 @@ static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
916{ 907{
917 __u8 status = *((__u8 *) skb->data); 908 __u8 status = *((__u8 *) skb->data);
918 909
919 BT_DBG("%s status 0x%x", hdev->name, status); 910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
920 911
921 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status); 912 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
922} 913}
@@ -927,7 +918,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
927 struct hci_cp_pin_code_reply *cp; 918 struct hci_cp_pin_code_reply *cp;
928 struct hci_conn *conn; 919 struct hci_conn *conn;
929 920
930 BT_DBG("%s status 0x%x", hdev->name, rp->status); 921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
931 922
932 hci_dev_lock(hdev); 923 hci_dev_lock(hdev);
933 924
@@ -953,13 +944,13 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
953{ 944{
954 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 945 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
955 946
956 BT_DBG("%s status 0x%x", hdev->name, rp->status); 947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 948
958 hci_dev_lock(hdev); 949 hci_dev_lock(hdev);
959 950
960 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
962 rp->status); 953 rp->status);
963 954
964 hci_dev_unlock(hdev); 955 hci_dev_unlock(hdev);
965} 956}
@@ -969,7 +960,7 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
969{ 960{
970 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data; 961 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
971 962
972 BT_DBG("%s status 0x%x", hdev->name, rp->status); 963 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973 964
974 if (rp->status) 965 if (rp->status)
975 return; 966 return;
@@ -988,7 +979,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
988{ 979{
989 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
990 981
991 BT_DBG("%s status 0x%x", hdev->name, rp->status); 982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
992 983
993 hci_dev_lock(hdev); 984 hci_dev_lock(hdev);
994 985
@@ -1000,11 +991,11 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1000} 991}
1001 992
1002static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 993static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1003 struct sk_buff *skb) 994 struct sk_buff *skb)
1004{ 995{
1005 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1006 997
1007 BT_DBG("%s status 0x%x", hdev->name, rp->status); 998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1008 999
1009 hci_dev_lock(hdev); 1000 hci_dev_lock(hdev);
1010 1001
@@ -1019,7 +1010,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1019{ 1010{
1020 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021 1012
1022 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023 1014
1024 hci_dev_lock(hdev); 1015 hci_dev_lock(hdev);
1025 1016
@@ -1031,11 +1022,11 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1031} 1022}
1032 1023
1033static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 1024static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1034 struct sk_buff *skb) 1025 struct sk_buff *skb)
1035{ 1026{
1036 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1037 1028
1038 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1039 1030
1040 hci_dev_lock(hdev); 1031 hci_dev_lock(hdev);
1041 1032
@@ -1047,11 +1038,11 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1047} 1038}
1048 1039
1049static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, 1040static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1050 struct sk_buff *skb) 1041 struct sk_buff *skb)
1051{ 1042{
1052 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1053 1044
1054 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1045 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1055 1046
1056 hci_dev_lock(hdev); 1047 hci_dev_lock(hdev);
1057 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, 1048 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
@@ -1063,7 +1054,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1063{ 1054{
1064 __u8 status = *((__u8 *) skb->data); 1055 __u8 status = *((__u8 *) skb->data);
1065 1056
1066 BT_DBG("%s status 0x%x", hdev->name, status); 1057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1067 1058
1068 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status); 1059 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1069 1060
@@ -1076,12 +1067,12 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1076} 1067}
1077 1068
1078static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1069static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1079 struct sk_buff *skb) 1070 struct sk_buff *skb)
1080{ 1071{
1081 struct hci_cp_le_set_scan_enable *cp; 1072 struct hci_cp_le_set_scan_enable *cp;
1082 __u8 status = *((__u8 *) skb->data); 1073 __u8 status = *((__u8 *) skb->data);
1083 1074
1084 BT_DBG("%s status 0x%x", hdev->name, status); 1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085 1076
1086 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1087 if (!cp) 1078 if (!cp)
@@ -1136,7 +1127,7 @@ static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1136{ 1127{
1137 struct hci_rp_le_ltk_reply *rp = (void *) skb->data; 1128 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1138 1129
1139 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1140 1131
1141 if (rp->status) 1132 if (rp->status)
1142 return; 1133 return;
@@ -1148,7 +1139,7 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1148{ 1139{
1149 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data; 1140 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1150 1141
1151 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1152 1143
1153 if (rp->status) 1144 if (rp->status)
1154 return; 1145 return;
@@ -1156,13 +1147,13 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1156 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1157} 1148}
1158 1149
1159static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1150static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1160 struct sk_buff *skb) 1151 struct sk_buff *skb)
1161{ 1152{
1162 struct hci_cp_write_le_host_supported *sent; 1153 struct hci_cp_write_le_host_supported *sent;
1163 __u8 status = *((__u8 *) skb->data); 1154 __u8 status = *((__u8 *) skb->data);
1164 1155
1165 BT_DBG("%s status 0x%x", hdev->name, status); 1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166 1157
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1168 if (!sent) 1159 if (!sent)
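Alongside the format-string cleanup, handlers such as hci_cc_write_le_host_supported() above drop the explicit "inline" from their "static inline" definitions. For bodies this large the keyword mostly fights the optimizer; plain "static" leaves the decision to the compiler's own heuristics. A toy illustration of the two spellings (behavior is identical, only the inlining hint differs):

#include <stdio.h>

/* Before: the keyword asks the compiler to inline even a large body. */
static inline void handler_hinted(unsigned char status)
{
	printf("status 0x%2.2x\n", status);
}

/* After: the compiler inlines (or not) per call site on its own. */
static void handler_plain(unsigned char status)
{
	printf("status 0x%2.2x\n", status);
}

int main(void)
{
	handler_hinted(0x0c);
	handler_plain(0x0c);
	return 0;
}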
@@ -1176,15 +1167,15 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1176 } 1167 }
1177 1168
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1179 !test_bit(HCI_INIT, &hdev->flags)) 1170 !test_bit(HCI_INIT, &hdev->flags))
1180 mgmt_le_enable_complete(hdev, sent->le, status); 1171 mgmt_le_enable_complete(hdev, sent->le, status);
1181 1172
1182 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); 1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1183} 1174}
1184 1175
1185static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1176static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1186{ 1177{
1187 BT_DBG("%s status 0x%x", hdev->name, status); 1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1188 1179
1189 if (status) { 1180 if (status) {
1190 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1181 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1203,12 +1194,12 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1203 hci_dev_unlock(hdev); 1194 hci_dev_unlock(hdev);
1204} 1195}
1205 1196
1206static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1197static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1207{ 1198{
1208 struct hci_cp_create_conn *cp; 1199 struct hci_cp_create_conn *cp;
1209 struct hci_conn *conn; 1200 struct hci_conn *conn;
1210 1201
1211 BT_DBG("%s status 0x%x", hdev->name, status); 1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1212 1203
1213 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 1204 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1214 if (!cp) 1205 if (!cp)
@@ -1218,7 +1209,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1218 1209
1219 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1220 1211
1221 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); 1212 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1222 1213
1223 if (status) { 1214 if (status) {
1224 if (conn && conn->state == BT_CONNECT) { 1215 if (conn && conn->state == BT_CONNECT) {
@@ -1249,7 +1240,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1249 struct hci_conn *acl, *sco; 1240 struct hci_conn *acl, *sco;
1250 __u16 handle; 1241 __u16 handle;
1251 1242
1252 BT_DBG("%s status 0x%x", hdev->name, status); 1243 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 1244
1254 if (!status) 1245 if (!status)
1255 return; 1246 return;
@@ -1260,7 +1251,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1260 1251
1261 handle = __le16_to_cpu(cp->handle); 1252 handle = __le16_to_cpu(cp->handle);
1262 1253
1263 BT_DBG("%s handle %d", hdev->name, handle); 1254 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1264 1255
1265 hci_dev_lock(hdev); 1256 hci_dev_lock(hdev);
1266 1257
@@ -1283,7 +1274,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1283 struct hci_cp_auth_requested *cp; 1274 struct hci_cp_auth_requested *cp;
1284 struct hci_conn *conn; 1275 struct hci_conn *conn;
1285 1276
1286 BT_DBG("%s status 0x%x", hdev->name, status); 1277 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1287 1278
1288 if (!status) 1279 if (!status)
1289 return; 1280 return;
@@ -1310,7 +1301,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1310 struct hci_cp_set_conn_encrypt *cp; 1301 struct hci_cp_set_conn_encrypt *cp;
1311 struct hci_conn *conn; 1302 struct hci_conn *conn;
1312 1303
1313 BT_DBG("%s status 0x%x", hdev->name, status); 1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1314 1305
1315 if (!status) 1306 if (!status)
1316 return; 1307 return;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1333} 1324}
1334 1325
1335static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1326static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1336 struct hci_conn *conn) 1327 struct hci_conn *conn)
1337{ 1328{
1338 if (conn->state != BT_CONFIG || !conn->out) 1329 if (conn->state != BT_CONFIG || !conn->out)
1339 return 0; 1330 return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1343 1334
1344 /* Only request authentication for SSP connections or non-SSP 1335 /* Only request authentication for SSP connections or non-SSP
1345 * devices with sec_level HIGH or if MITM protection is requested */ 1336 * devices with sec_level HIGH or if MITM protection is requested */
1346 if (!hci_conn_ssp_enabled(conn) && 1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1347 conn->pending_sec_level != BT_SECURITY_HIGH && 1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1348 !(conn->auth_type & 0x01))
1349 return 0; 1339 return 0;
1350 1340
1351 return 1; 1341 return 1;
1352} 1342}
1353 1343
1354static inline int hci_resolve_name(struct hci_dev *hdev, 1344static int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e) 1345 struct inquiry_entry *e)
1356{ 1346{
1357 struct hci_cp_remote_name_req cp; 1347 struct hci_cp_remote_name_req cp;
@@ -1423,7 +1413,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1423 struct hci_cp_remote_name_req *cp; 1413 struct hci_cp_remote_name_req *cp;
1424 struct hci_conn *conn; 1414 struct hci_conn *conn;
1425 1415
1426 BT_DBG("%s status 0x%x", hdev->name, status); 1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1427 1417
1428 /* If successful wait for the name req complete event before 1418 /* If successful wait for the name req complete event before
1429 * checking for the need to do authentication */ 1419 * checking for the need to do authentication */
@@ -1462,7 +1452,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1462 struct hci_cp_read_remote_features *cp; 1452 struct hci_cp_read_remote_features *cp;
1463 struct hci_conn *conn; 1453 struct hci_conn *conn;
1464 1454
1465 BT_DBG("%s status 0x%x", hdev->name, status); 1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1466 1456
1467 if (!status) 1457 if (!status)
1468 return; 1458 return;
@@ -1489,7 +1479,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1489 struct hci_cp_read_remote_ext_features *cp; 1479 struct hci_cp_read_remote_ext_features *cp;
1490 struct hci_conn *conn; 1480 struct hci_conn *conn;
1491 1481
1492 BT_DBG("%s status 0x%x", hdev->name, status); 1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1493 1483
1494 if (!status) 1484 if (!status)
1495 return; 1485 return;
@@ -1517,7 +1507,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1517 struct hci_conn *acl, *sco; 1507 struct hci_conn *acl, *sco;
1518 __u16 handle; 1508 __u16 handle;
1519 1509
1520 BT_DBG("%s status 0x%x", hdev->name, status); 1510 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1521 1511
1522 if (!status) 1512 if (!status)
1523 return; 1513 return;
@@ -1528,7 +1518,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1528 1518
1529 handle = __le16_to_cpu(cp->handle); 1519 handle = __le16_to_cpu(cp->handle);
1530 1520
1531 BT_DBG("%s handle %d", hdev->name, handle); 1521 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1532 1522
1533 hci_dev_lock(hdev); 1523 hci_dev_lock(hdev);
1534 1524
@@ -1551,7 +1541,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1551 struct hci_cp_sniff_mode *cp; 1541 struct hci_cp_sniff_mode *cp;
1552 struct hci_conn *conn; 1542 struct hci_conn *conn;
1553 1543
1554 BT_DBG("%s status 0x%x", hdev->name, status); 1544 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555 1545
1556 if (!status) 1546 if (!status)
1557 return; 1547 return;
@@ -1578,7 +1568,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1578 struct hci_cp_exit_sniff_mode *cp; 1568 struct hci_cp_exit_sniff_mode *cp;
1579 struct hci_conn *conn; 1569 struct hci_conn *conn;
1580 1570
1581 BT_DBG("%s status 0x%x", hdev->name, status); 1571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 1572
1583 if (!status) 1573 if (!status)
1584 return; 1574 return;
@@ -1627,7 +1617,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1627 struct hci_cp_le_create_conn *cp; 1617 struct hci_cp_le_create_conn *cp;
1628 struct hci_conn *conn; 1618 struct hci_conn *conn;
1629 1619
1630 BT_DBG("%s status 0x%x", hdev->name, status); 1620 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1631 1621
1632 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 1622 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1633 if (!cp) 1623 if (!cp)
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1628 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1639 1629
1640 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1641 conn); 1631 conn);
1642 1632
1643 if (status) { 1633 if (status) {
1644 if (conn && conn->state == BT_CONNECT) { 1634 if (conn && conn->state == BT_CONNECT) {
@@ -1665,16 +1655,16 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1665 1655
1666static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 1656static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1667{ 1657{
1668 BT_DBG("%s status 0x%x", hdev->name, status); 1658 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669} 1659}
1670 1660
1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1661static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1672{ 1662{
1673 __u8 status = *((__u8 *) skb->data); 1663 __u8 status = *((__u8 *) skb->data);
1674 struct discovery_state *discov = &hdev->discovery; 1664 struct discovery_state *discov = &hdev->discovery;
1675 struct inquiry_entry *e; 1665 struct inquiry_entry *e;
1676 1666
1677 BT_DBG("%s status %d", hdev->name, status); 1667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1678 1668
1679 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1669 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1680 1670
@@ -1708,7 +1698,7 @@ unlock:
1708 hci_dev_unlock(hdev); 1698 hci_dev_unlock(hdev);
1709} 1699}
1710 1700
1711static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1701static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1712{ 1702{
1713 struct inquiry_data data; 1703 struct inquiry_data data;
1714 struct inquiry_info *info = (void *) (skb->data + 1); 1704 struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1745 hci_dev_unlock(hdev); 1735 hci_dev_unlock(hdev);
1746} 1736}
1747 1737
1748static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1738static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749{ 1739{
1750 struct hci_ev_conn_complete *ev = (void *) skb->data; 1740 struct hci_ev_conn_complete *ev = (void *) skb->data;
1751 struct hci_conn *conn; 1741 struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
1823 hci_conn_check_pending(hdev); 1813 hci_conn_check_pending(hdev);
1824} 1814}
1825 1815
1826static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1816static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1827{ 1817{
1828 struct hci_ev_conn_request *ev = (void *) skb->data; 1818 struct hci_ev_conn_request *ev = (void *) skb->data;
1829 int mask = hdev->link_mode; 1819 int mask = hdev->link_mode;
1830 1820
1831 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, 1821 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1832 batostr(&ev->bdaddr), ev->link_type); 1822 ev->link_type);
1833 1823
1834 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1824 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1835 1825
1836 if ((mask & HCI_LM_ACCEPT) && 1826 if ((mask & HCI_LM_ACCEPT) &&
1837 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1827 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1838 /* Connection accepted */ 1828 /* Connection accepted */
1839 struct inquiry_entry *ie; 1829 struct inquiry_entry *ie;
1840 struct hci_conn *conn; 1830 struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1845 if (ie) 1835 if (ie)
1846 memcpy(ie->data.dev_class, ev->dev_class, 3); 1836 memcpy(ie->data.dev_class, ev->dev_class, 3);
1847 1837
1848 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1838 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1839 &ev->bdaddr);
1849 if (!conn) { 1840 if (!conn) {
1850 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1841 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1851 if (!conn) { 1842 if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1878 bacpy(&cp.bdaddr, &ev->bdaddr); 1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1879 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1880 1871
1881 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1882 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1883 cp.max_latency = cpu_to_le16(0xffff); 1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1884 cp.content_format = cpu_to_le16(hdev->voice_setting); 1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1885 cp.retrans_effort = 0xff; 1876 cp.retrans_effort = 0xff;
1886 1877
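The SCO accept path above also switches its literal bandwidth and latency values from cpu_to_le16/32() to the __constant_cpu_to_le16/32() variants, which are intended for compile-time constants and fold to constants in the object code. A hedged user-space analogue — the macro name below is invented for the sketch; only the idea of a constant-foldable endianness conversion carries over:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __constant_cpu_to_le16(): identity on little-endian
 * hosts, a constant byte swap on big-endian ones - either way it can
 * initialize static data because it folds at compile time. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define const_cpu_to_le16(x) \
	((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define const_cpu_to_le16(x) ((uint16_t)(x))
#endif

static const uint16_t max_latency = const_cpu_to_le16(0xffff);

int main(void)
{
	printf("max_latency 0x%4.4x\n", max_latency);
	return 0;
}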
@@ -1897,12 +1888,12 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1897 } 1888 }
1898} 1889}
1899 1890
1900static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1891static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901{ 1892{
1902 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1903 struct hci_conn *conn; 1894 struct hci_conn *conn;
1904 1895
1905 BT_DBG("%s status %d", hdev->name, ev->status); 1896 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1906 1897
1907 hci_dev_lock(hdev); 1898 hci_dev_lock(hdev);
1908 1899
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1914 conn->state = BT_CLOSED; 1905 conn->state = BT_CLOSED;
1915 1906
1916 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1917 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1918 if (ev->status != 0) 1909 if (ev->status != 0)
1919 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1920 conn->dst_type, ev->status); 1911 conn->dst_type, ev->status);
1921 else 1912 else
1922 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 1913 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1923 conn->dst_type); 1914 conn->dst_type);
@@ -1934,12 +1925,12 @@ unlock:
1934 hci_dev_unlock(hdev); 1925 hci_dev_unlock(hdev);
1935} 1926}
1936 1927
1937static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1928static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938{ 1929{
1939 struct hci_ev_auth_complete *ev = (void *) skb->data; 1930 struct hci_ev_auth_complete *ev = (void *) skb->data;
1940 struct hci_conn *conn; 1931 struct hci_conn *conn;
1941 1932
1942 BT_DBG("%s status %d", hdev->name, ev->status); 1933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1943 1934
1944 hci_dev_lock(hdev); 1935 hci_dev_lock(hdev);
1945 1936
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1949 1940
1950 if (!ev->status) { 1941 if (!ev->status) {
1951 if (!hci_conn_ssp_enabled(conn) && 1942 if (!hci_conn_ssp_enabled(conn) &&
1952 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 1943 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1953 BT_INFO("re-auth of legacy device is not possible."); 1944 BT_INFO("re-auth of legacy device is not possible.");
1954 } else { 1945 } else {
1955 conn->link_mode |= HCI_LM_AUTH; 1946 conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1969 cp.handle = ev->handle; 1960 cp.handle = ev->handle;
1970 cp.encrypt = 0x01; 1961 cp.encrypt = 0x01;
1971 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1962 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1972 &cp); 1963 &cp);
1973 } else { 1964 } else {
1974 conn->state = BT_CONNECTED; 1965 conn->state = BT_CONNECTED;
1975 hci_proto_connect_cfm(conn, ev->status); 1966 hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1989 cp.handle = ev->handle; 1980 cp.handle = ev->handle;
1990 cp.encrypt = 0x01; 1981 cp.encrypt = 0x01;
1991 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1982 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1992 &cp); 1983 &cp);
1993 } else { 1984 } else {
1994 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 1985 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1995 hci_encrypt_cfm(conn, ev->status, 0x00); 1986 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
2000 hci_dev_unlock(hdev); 1991 hci_dev_unlock(hdev);
2001} 1992}
2002 1993
2003static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1994static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2004{ 1995{
2005 struct hci_ev_remote_name *ev = (void *) skb->data; 1996 struct hci_ev_remote_name *ev = (void *) skb->data;
2006 struct hci_conn *conn; 1997 struct hci_conn *conn;
@@ -2039,12 +2030,12 @@ unlock:
2039 hci_dev_unlock(hdev); 2030 hci_dev_unlock(hdev);
2040} 2031}
2041 2032
2042static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2033static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043{ 2034{
2044 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2035 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2045 struct hci_conn *conn; 2036 struct hci_conn *conn;
2046 2037
2047 BT_DBG("%s status %d", hdev->name, ev->status); 2038 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2048 2039
2049 hci_dev_lock(hdev); 2040 hci_dev_lock(hdev);
2050 2041
@@ -2082,12 +2073,13 @@ unlock:
2082 hci_dev_unlock(hdev); 2073 hci_dev_unlock(hdev);
2083} 2074}
2084 2075
2085static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2076static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2077 struct sk_buff *skb)
2086{ 2078{
2087 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2079 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2088 struct hci_conn *conn; 2080 struct hci_conn *conn;
2089 2081
2090 BT_DBG("%s status %d", hdev->name, ev->status); 2082 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2091 2083
2092 hci_dev_lock(hdev); 2084 hci_dev_lock(hdev);
2093 2085
@@ -2104,12 +2096,13 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
2104 hci_dev_unlock(hdev); 2096 hci_dev_unlock(hdev);
2105} 2097}
2106 2098
2107static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2099static void hci_remote_features_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2108{ 2101{
2109 struct hci_ev_remote_features *ev = (void *) skb->data; 2102 struct hci_ev_remote_features *ev = (void *) skb->data;
2110 struct hci_conn *conn; 2103 struct hci_conn *conn;
2111 2104
2112 BT_DBG("%s status %d", hdev->name, ev->status); 2105 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2113 2106
2114 hci_dev_lock(hdev); 2107 hci_dev_lock(hdev);
2115 2108
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
2128 cp.handle = ev->handle; 2121 cp.handle = ev->handle;
2129 cp.page = 0x01; 2122 cp.page = 0x01;
2130 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 2123 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2131 sizeof(cp), &cp); 2124 sizeof(cp), &cp);
2132 goto unlock; 2125 goto unlock;
2133 } 2126 }
2134 2127
@@ -2153,17 +2146,18 @@ unlock:
2153 hci_dev_unlock(hdev); 2146 hci_dev_unlock(hdev);
2154} 2147}
2155 2148
2156static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) 2149static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2157{ 2150{
2158 BT_DBG("%s", hdev->name); 2151 BT_DBG("%s", hdev->name);
2159} 2152}
2160 2153
2161static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2154static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2155 struct sk_buff *skb)
2162{ 2156{
2163 BT_DBG("%s", hdev->name); 2157 BT_DBG("%s", hdev->name);
2164} 2158}
2165 2159
2166static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2160static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2167{ 2161{
2168 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2162 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2169 __u16 opcode; 2163 __u16 opcode;
@@ -2370,7 +2364,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2370 break; 2364 break;
2371 2365
2372 default: 2366 default:
2373 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2367 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2374 break; 2368 break;
2375 } 2369 }
2376 2370
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2384 } 2378 }
2385} 2379}
2386 2380
2387static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2381static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388{ 2382{
2389 struct hci_ev_cmd_status *ev = (void *) skb->data; 2383 struct hci_ev_cmd_status *ev = (void *) skb->data;
2390 __u16 opcode; 2384 __u16 opcode;
@@ -2451,7 +2445,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2451 break; 2445 break;
2452 2446
2453 default: 2447 default:
2454 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2448 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2455 break; 2449 break;
2456 } 2450 }
2457 2451
@@ -2465,12 +2459,12 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2465 } 2459 }
2466} 2460}
2467 2461
2468static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2462static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469{ 2463{
2470 struct hci_ev_role_change *ev = (void *) skb->data; 2464 struct hci_ev_role_change *ev = (void *) skb->data;
2471 struct hci_conn *conn; 2465 struct hci_conn *conn;
2472 2466
2473 BT_DBG("%s status %d", hdev->name, ev->status); 2467 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2474 2468
2475 hci_dev_lock(hdev); 2469 hci_dev_lock(hdev);
2476 2470
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2491 hci_dev_unlock(hdev); 2485 hci_dev_unlock(hdev);
2492} 2486}
2493 2487
2494static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2488static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{ 2489{
2496 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2490 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2497 int i; 2491 int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2502 } 2496 }
2503 2497
2504 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2498 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2505 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 2499 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2506 BT_DBG("%s bad parameters", hdev->name); 2500 BT_DBG("%s bad parameters", hdev->name);
2507 return; 2501 return;
2508 } 2502 }
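Both completed-packets handlers guard their per-handle loop with the two-part length check shown above: the buffer must hold the fixed event header, and then the header plus num_hndl records. A self-contained sketch of that validation (struct layouts mirror the kernel's hci_ev_num_comp_pkts wire format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct comp_pkts_info {
	uint16_t handle;
	uint16_t count;
} __attribute__((packed));

struct ev_num_comp_pkts {
	uint8_t num_hndl;
	struct comp_pkts_info handles[];
} __attribute__((packed));

/* Reject events too short for the header, or for the number of
 * per-handle records the header claims - the same shape as the check
 * in hci_num_comp_pkts_evt()/hci_num_comp_blocks_evt(). */
static int event_len_ok(const uint8_t *buf, size_t len)
{
	const struct ev_num_comp_pkts *ev = (const void *)buf;

	if (len < sizeof(*ev))
		return 0;
	return len >= sizeof(*ev) + ev->num_hndl * sizeof(ev->handles[0]);
}

int main(void)
{
	const uint8_t good[] = { 0x01, 0x2a, 0x00, 0x03, 0x00 };

	printf("intact %d, truncated %d\n",
	       event_len_ok(good, sizeof(good)),
	       event_len_ok(good, sizeof(good) - 1));
	return 0;
}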
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2557 queue_work(hdev->workqueue, &hdev->tx_work); 2551 queue_work(hdev->workqueue, &hdev->tx_work);
2558} 2552}
2559 2553
2560static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, 2554static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2561 struct sk_buff *skb)
2562{ 2555{
2563 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2556 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2564 int i; 2557 int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2569 } 2562 }
2570 2563
2571 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2564 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2572 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 2565 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2573 BT_DBG("%s bad parameters", hdev->name); 2566 BT_DBG("%s bad parameters", hdev->name);
2574 return; 2567 return;
2575 } 2568 }
2576 2569
2577 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 2570 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2578 ev->num_hndl); 2571 ev->num_hndl);
2579 2572
2580 for (i = 0; i < ev->num_hndl; i++) { 2573 for (i = 0; i < ev->num_hndl; i++) {
2581 struct hci_comp_blocks_info *info = &ev->handles[i]; 2574 struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,12 +2600,12 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2607 queue_work(hdev->workqueue, &hdev->tx_work); 2600 queue_work(hdev->workqueue, &hdev->tx_work);
2608} 2601}
2609 2602
2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2603static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611{ 2604{
2612 struct hci_ev_mode_change *ev = (void *) skb->data; 2605 struct hci_ev_mode_change *ev = (void *) skb->data;
2613 struct hci_conn *conn; 2606 struct hci_conn *conn;
2614 2607
2615 BT_DBG("%s status %d", hdev->name, ev->status); 2608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2616 2609
2617 hci_dev_lock(hdev); 2610 hci_dev_lock(hdev);
2618 2611
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2621 conn->mode = ev->mode; 2614 conn->mode = ev->mode;
2622 conn->interval = __le16_to_cpu(ev->interval); 2615 conn->interval = __le16_to_cpu(ev->interval);
2623 2616
2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { 2617 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2618 &conn->flags)) {
2625 if (conn->mode == HCI_CM_ACTIVE) 2619 if (conn->mode == HCI_CM_ACTIVE)
2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2620 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627 else 2621 else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2635 hci_dev_unlock(hdev); 2629 hci_dev_unlock(hdev);
2636} 2630}
2637 2631
2638static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2632static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639{ 2633{
2640 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2634 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2641 struct hci_conn *conn; 2635 struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2656 2650
2657 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 2651 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2658 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2652 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(ev->bdaddr), &ev->bdaddr); 2653 sizeof(ev->bdaddr), &ev->bdaddr);
2660 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 2654 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2661 u8 secure; 2655 u8 secure;
2662 2656
@@ -2672,7 +2666,7 @@ unlock:
2672 hci_dev_unlock(hdev); 2666 hci_dev_unlock(hdev);
2673} 2667}
2674 2668
2675static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2669static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2676{ 2670{
2677 struct hci_ev_link_key_req *ev = (void *) skb->data; 2671 struct hci_ev_link_key_req *ev = (void *) skb->data;
2678 struct hci_cp_link_key_reply cp; 2672 struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2689 key = hci_find_link_key(hdev, &ev->bdaddr); 2683 key = hci_find_link_key(hdev, &ev->bdaddr);
2690 if (!key) { 2684 if (!key) {
2691 BT_DBG("%s link key not found for %s", hdev->name, 2685 BT_DBG("%s link key not found for %s", hdev->name,
2692 batostr(&ev->bdaddr)); 2686 batostr(&ev->bdaddr));
2693 goto not_found; 2687 goto not_found;
2694 } 2688 }
2695 2689
2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2690 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2697 batostr(&ev->bdaddr)); 2691 batostr(&ev->bdaddr));
2698 2692
2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 2693 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2700 key->type == HCI_LK_DEBUG_COMBINATION) { 2694 key->type == HCI_LK_DEBUG_COMBINATION) {
2701 BT_DBG("%s ignoring debug key", hdev->name); 2695 BT_DBG("%s ignoring debug key", hdev->name);
2702 goto not_found; 2696 goto not_found;
2703 } 2697 }
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2705 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2699 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2706 if (conn) { 2700 if (conn) {
2707 if (key->type == HCI_LK_UNAUTH_COMBINATION && 2701 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2708 conn->auth_type != 0xff && 2702 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2709 (conn->auth_type & 0x01)) {
2710 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2703 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2711 goto not_found; 2704 goto not_found;
2712 } 2705 }
2713 2706
2714 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2707 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2715 conn->pending_sec_level == BT_SECURITY_HIGH) { 2708 conn->pending_sec_level == BT_SECURITY_HIGH) {
2716 BT_DBG("%s ignoring key unauthenticated for high \ 2709 BT_DBG("%s ignoring key unauthenticated for high security",
2717 security", hdev->name); 2710 hdev->name);
2718 goto not_found; 2711 goto not_found;
2719 } 2712 }
2720 2713
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2723 } 2716 }
2724 2717
2725 bacpy(&cp.bdaddr, &ev->bdaddr); 2718 bacpy(&cp.bdaddr, &ev->bdaddr);
2726 memcpy(cp.link_key, key->val, 16); 2719 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2727 2720
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2721 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2729 2722
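In the reply path just above, the magic number 16 becomes HCI_LINK_KEY_SIZE, tying the memcpy() length to the protocol definition rather than a bare literal. A small sketch of the same pattern — the constant's value matches the 16-byte Bluetooth link key, while the struct here is purely illustrative:

#include <stdio.h>
#include <string.h>

#define HCI_LINK_KEY_SIZE 16	/* Bluetooth link keys are 128 bits */

struct link_key_reply {
	unsigned char link_key[HCI_LINK_KEY_SIZE];
};

int main(void)
{
	const unsigned char val[HCI_LINK_KEY_SIZE] = { 0xde, 0xad };
	struct link_key_reply cp;

	memcpy(cp.link_key, val, HCI_LINK_KEY_SIZE);
	printf("copied %zu bytes, first 0x%2.2x\n",
	       sizeof(cp.link_key), cp.link_key[0]);
	return 0;
}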
@@ -2736,7 +2729,7 @@ not_found:
2736 hci_dev_unlock(hdev); 2729 hci_dev_unlock(hdev);
2737} 2730}
2738 2731
2739static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2732static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2740{ 2733{
2741 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2734 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2742 struct hci_conn *conn; 2735 struct hci_conn *conn;
@@ -2760,17 +2753,17 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2760 2753
2761 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) 2754 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2762 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2755 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2763 ev->key_type, pin_len); 2756 ev->key_type, pin_len);
2764 2757
2765 hci_dev_unlock(hdev); 2758 hci_dev_unlock(hdev);
2766} 2759}
2767 2760
2768static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 2761static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769{ 2762{
2770 struct hci_ev_clock_offset *ev = (void *) skb->data; 2763 struct hci_ev_clock_offset *ev = (void *) skb->data;
2771 struct hci_conn *conn; 2764 struct hci_conn *conn;
2772 2765
2773 BT_DBG("%s status %d", hdev->name, ev->status); 2766 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2774 2767
2775 hci_dev_lock(hdev); 2768 hci_dev_lock(hdev);
2776 2769
@@ -2788,12 +2781,12 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
2788 hci_dev_unlock(hdev); 2781 hci_dev_unlock(hdev);
2789} 2782}
2790 2783
2791static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2784static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792{ 2785{
2793 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 2786 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2794 struct hci_conn *conn; 2787 struct hci_conn *conn;
2795 2788
2796 BT_DBG("%s status %d", hdev->name, ev->status); 2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2797 2790
2798 hci_dev_lock(hdev); 2791 hci_dev_lock(hdev);
2799 2792
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
2804 hci_dev_unlock(hdev); 2797 hci_dev_unlock(hdev);
2805} 2798}
2806 2799
2807static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2800static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{ 2801{
2809 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2810 struct inquiry_entry *ie; 2803 struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
2822 hci_dev_unlock(hdev); 2815 hci_dev_unlock(hdev);
2823} 2816}
2824 2817
2825static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 2818static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2826{ 2820{
2827 struct inquiry_data data; 2821 struct inquiry_data data;
2828 int num_rsp = *((__u8 *) skb->data); 2822 int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2881 hci_dev_unlock(hdev); 2875 hci_dev_unlock(hdev);
2882} 2876}
2883 2877
2884static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2878static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2879 struct sk_buff *skb)
2885{ 2880{
2886 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2881 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2887 struct hci_conn *conn; 2882 struct hci_conn *conn;
@@ -2929,12 +2924,13 @@ unlock:
2929 hci_dev_unlock(hdev); 2924 hci_dev_unlock(hdev);
2930} 2925}
2931 2926
2932static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2927static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2928 struct sk_buff *skb)
2933{ 2929{
2934 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2930 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2935 struct hci_conn *conn; 2931 struct hci_conn *conn;
2936 2932
2937 BT_DBG("%s status %d", hdev->name, ev->status); 2933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2938 2934
2939 hci_dev_lock(hdev); 2935 hci_dev_lock(hdev);
2940 2936
@@ -2984,19 +2980,20 @@ unlock:
2984 hci_dev_unlock(hdev); 2980 hci_dev_unlock(hdev);
2985} 2981}
2986 2982
2987static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) 2983static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2988{ 2984{
2989 BT_DBG("%s", hdev->name); 2985 BT_DBG("%s", hdev->name);
2990} 2986}
2991 2987
2992static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2988static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2993{ 2989{
2994 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2990 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2995 2991
2996 BT_DBG("%s status %d", hdev->name, ev->status); 2992 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2997} 2993}
2998 2994
2999static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2995static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2996 struct sk_buff *skb)
3000{ 2997{
3001 struct inquiry_data data; 2998 struct inquiry_data data;
3002 struct extended_inquiry_info *info = (void *) (skb->data + 1); 2999 struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3043,7 +3040,51 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
3043 hci_dev_unlock(hdev); 3040 hci_dev_unlock(hdev);
3044} 3041}
3045 3042
3046static inline u8 hci_get_auth_req(struct hci_conn *conn) 3043static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3044 struct sk_buff *skb)
3045{
3046 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3047 struct hci_conn *conn;
3048
3049 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3050 __le16_to_cpu(ev->handle));
3051
3052 hci_dev_lock(hdev);
3053
3054 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3055 if (!conn)
3056 goto unlock;
3057
3058 if (!ev->status)
3059 conn->sec_level = conn->pending_sec_level;
3060
3061 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3062
3063 if (ev->status && conn->state == BT_CONNECTED) {
3064 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3065 hci_conn_put(conn);
3066 goto unlock;
3067 }
3068
3069 if (conn->state == BT_CONFIG) {
3070 if (!ev->status)
3071 conn->state = BT_CONNECTED;
3072
3073 hci_proto_connect_cfm(conn, ev->status);
3074 hci_conn_put(conn);
3075 } else {
3076 hci_auth_cfm(conn, ev->status);
3077
3078 hci_conn_hold(conn);
3079 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3080 hci_conn_put(conn);
3081 }
3082
3083unlock:
3084 hci_dev_unlock(hdev);
3085}
3086
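hci_key_refresh_complete_evt() above is new in this merge; together with the HCI_EV_KEY_REFRESH_COMPLETE case added to hci_event_packet() further down, it handles the Encryption Key Refresh Complete event. The payload is just a status byte plus a little-endian connection handle. A sketch of parsing it — the struct layout mirrors the kernel's hci_ev_key_refresh_complete, and the example assumes a little-endian host instead of using a proper le16-to-cpu helper:

#include <stdint.h>
#include <stdio.h>

struct ev_key_refresh_complete {
	uint8_t  status;	/* 0x00 on success */
	uint16_t handle;	/* little-endian on the wire */
} __attribute__((packed));

int main(void)
{
	const uint8_t buf[] = { 0x00, 0x2a, 0x00 };	/* status 0, handle 0x002a */
	const struct ev_key_refresh_complete *ev = (const void *)buf;

	printf("status 0x%2.2x handle 0x%4.4x\n", ev->status, ev->handle);
	return 0;
}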
3087static u8 hci_get_auth_req(struct hci_conn *conn)
3047{ 3088{
3048 /* If remote requests dedicated bonding follow that lead */ 3089 /* If remote requests dedicated bonding follow that lead */
3049 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { 3090 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3062,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
3062 return conn->auth_type; 3103 return conn->auth_type;
3063} 3104}
3064 3105
3065static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3106static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3066{ 3107{
3067 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3108 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3068 struct hci_conn *conn; 3109 struct hci_conn *conn;
@@ -3081,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3081 goto unlock; 3122 goto unlock;
3082 3123
3083 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3124 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3084 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3125 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3085 struct hci_cp_io_capability_reply cp; 3126 struct hci_cp_io_capability_reply cp;
3086 3127
3087 bacpy(&cp.bdaddr, &ev->bdaddr); 3128 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3092,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3092 conn->auth_type = hci_get_auth_req(conn); 3133 conn->auth_type = hci_get_auth_req(conn);
3093 cp.authentication = conn->auth_type; 3134 cp.authentication = conn->auth_type;
3094 3135
3095 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && 3136 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3096 hci_find_remote_oob_data(hdev, &conn->dst)) 3137 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3097 cp.oob_data = 0x01; 3138 cp.oob_data = 0x01;
3098 else 3139 else
3099 cp.oob_data = 0x00; 3140 cp.oob_data = 0x00;
3100 3141
3101 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3142 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3102 sizeof(cp), &cp); 3143 sizeof(cp), &cp);
3103 } else { 3144 } else {
3104 struct hci_cp_io_capability_neg_reply cp; 3145 struct hci_cp_io_capability_neg_reply cp;
3105 3146
@@ -3107,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3107 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3148 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3108 3149
3109 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3150 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3110 sizeof(cp), &cp); 3151 sizeof(cp), &cp);
3111 } 3152 }
3112 3153
3113unlock: 3154unlock:
3114 hci_dev_unlock(hdev); 3155 hci_dev_unlock(hdev);
3115} 3156}
3116 3157
3117static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3158static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3118{ 3159{
3119 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3160 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3120 struct hci_conn *conn; 3161 struct hci_conn *conn;
@@ -3136,8 +3177,8 @@ unlock:
3136 hci_dev_unlock(hdev); 3177 hci_dev_unlock(hdev);
3137} 3178}
3138 3179
3139static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, 3180static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3140 struct sk_buff *skb) 3181 struct sk_buff *skb)
3141{ 3182{
3142 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3183 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3143 int loc_mitm, rem_mitm, confirm_hint = 0; 3184 int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3165,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3165 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { 3206 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3166 BT_DBG("Rejecting request: remote device can't provide MITM"); 3207 BT_DBG("Rejecting request: remote device can't provide MITM");
3167 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3208 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3168 sizeof(ev->bdaddr), &ev->bdaddr); 3209 sizeof(ev->bdaddr), &ev->bdaddr);
3169 goto unlock; 3210 goto unlock;
3170 } 3211 }
3171 3212
3172 /* If no side requires MITM protection; auto-accept */ 3213 /* If no side requires MITM protection; auto-accept */
3173 if ((!loc_mitm || conn->remote_cap == 0x03) && 3214 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3174 (!rem_mitm || conn->io_capability == 0x03)) { 3215 (!rem_mitm || conn->io_capability == 0x03)) {
3175 3216
3176 /* If we're not the initiators request authorization to 3217 /* If we're not the initiators request authorization to
3177 * proceed from user space (mgmt_user_confirm with 3218 * proceed from user space (mgmt_user_confirm with
@@ -3183,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3183 } 3224 }
3184 3225
3185 BT_DBG("Auto-accept of user confirmation with %ums delay", 3226 BT_DBG("Auto-accept of user confirmation with %ums delay",
3186 hdev->auto_accept_delay); 3227 hdev->auto_accept_delay);
3187 3228
3188 if (hdev->auto_accept_delay > 0) { 3229 if (hdev->auto_accept_delay > 0) {
3189 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3230 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3192,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3192 } 3233 }
3193 3234
3194 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3235 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3195 sizeof(ev->bdaddr), &ev->bdaddr); 3236 sizeof(ev->bdaddr), &ev->bdaddr);
3196 goto unlock; 3237 goto unlock;
3197 } 3238 }
3198 3239
@@ -3204,8 +3245,8 @@ unlock:
3204 hci_dev_unlock(hdev); 3245 hci_dev_unlock(hdev);
3205} 3246}
3206 3247
3207static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, 3248static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3208 struct sk_buff *skb) 3249 struct sk_buff *skb)
3209{ 3250{
3210 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3251 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3211 3252
@@ -3219,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3219 hci_dev_unlock(hdev); 3260 hci_dev_unlock(hdev);
3220} 3261}
3221 3262
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3247,7 +3289,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
 	struct inquiry_entry *ie;
@@ -3263,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+					    struct sk_buff *skb)
 {
 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
 	struct oob_data *data;
@@ -3285,28 +3328,41 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
 
 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
 			     &cp);
 	} else {
 		struct hci_cp_remote_oob_data_neg_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
 			     &cp);
 	}
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
 
-	BT_DBG("%s status %d", hdev->name, ev->status);
+	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
 
 	hci_dev_lock(hdev);
 
+	if (ev->status) {
+		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		if (!conn)
+			goto unlock;
+
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, ev->status);
+		hci_proto_connect_cfm(conn, ev->status);
+		conn->state = BT_CLOSED;
+		hci_conn_del(conn);
+		goto unlock;
+	}
+
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
 	if (!conn) {
 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3319,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 		conn->dst_type = ev->bdaddr_type;
 	}
 
-	if (ev->status) {
-		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
-						conn->dst_type, ev->status);
-		hci_proto_connect_cfm(conn, ev->status);
-		conn->state = BT_CLOSED;
-		hci_conn_del(conn);
-		goto unlock;
-	}
-
 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
 				      conn->dst_type, 0, NULL, 0, NULL);
@@ -3345,8 +3392,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	u8 num_reports = skb->data[0];
 	void *ptr = &skb->data[1];
@@ -3367,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
 	struct hci_cp_le_ltk_reply cp;
@@ -3376,7 +3421,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
 	struct hci_conn *conn;
 	struct smp_ltk *ltk;
 
-	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
+	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
 
 	hci_dev_lock(hdev);
 
@@ -3411,7 +3456,7 @@ not_found:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
 
@@ -3559,6 +3604,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_extended_inquiry_result_evt(hdev, skb);
 		break;
 
+	case HCI_EV_KEY_REFRESH_COMPLETE:
+		hci_key_refresh_complete_evt(hdev, skb);
+		break;
+
 	case HCI_EV_IO_CAPA_REQUEST:
 		hci_io_capa_request_evt(hdev, skb);
 		break;
@@ -3596,7 +3645,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		break;
 
 	default:
-		BT_DBG("%s event 0x%x", hdev->name, event);
+		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
 	}
 
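The most substantive hci_event.c change above is in hci_le_conn_complete_evt(): a failed LE connection is now handled up front, by looking up the connection that is actually pending (by state) and tearing it down, instead of first adding a connection object for the reported address and failing it afterwards. Presumably this avoids creating state for a peer that never connected. A minimal standalone sketch of that control flow, with toy types standing in for the kernel's hci_conn and its hash-lookup helpers (everything here is a simplified placeholder, not kernel API):

#include <stdio.h>

#define BT_CONNECT 1                      /* toy state value */

struct conn {                             /* stand-in for hci_conn */
	int state;
	int in_use;
};

static struct conn pending = { .state = BT_CONNECT, .in_use = 1 };

/* Toy analogue of hci_conn_hash_lookup_state(): find the connection
 * that is waiting in BT_CONNECT. */
static struct conn *lookup_pending_by_state(void)
{
	if (pending.in_use && pending.state == BT_CONNECT)
		return &pending;
	return NULL;
}

static void handle_le_conn_complete(int status)
{
	struct conn *c;

	if (status) {
		/* Failure first: report and free only the connection that
		 * was actually waiting; never allocate new state. */
		c = lookup_pending_by_state();
		if (!c)
			return;
		printf("connect failed, status 0x%2.2x\n", status);
		c->in_use = 0;            /* toy hci_conn_del() */
		return;
	}

	/* Success path: look up or create the connection object here. */
	printf("connected\n");
}

int main(void)
{
	handle_le_conn_complete(0x3e);    /* simulated failure status */
	handle_le_conn_complete(0);       /* simulated success */
	return 0;
}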
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426a..a7f04de03d79 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
 
 /* Bluetooth HCI sockets. */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 		flt = &hci_pi(sk)->filter;
 
 		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
-			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
+			      &flt->type_mask))
 			continue;
 
 		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
-			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 
 			if (!hci_test_bit(evt, &flt->event_mask))
 				continue;
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 		struct hci_mon_hdr *hdr;
 
 		/* Create a private copy with headroom */
-		skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+		skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
+				       GFP_ATOMIC);
 		if (!skb_copy)
 			continue;
 
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 }
 
 /* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+				unsigned long arg)
 {
 	struct hci_dev *hdev = hci_pi(sk)->hdev;
 
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
 	}
 }
 
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+			  unsigned long arg)
 {
 	struct sock *sk = sock->sk;
 	void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
 	}
 }
 
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+			 int addr_len)
 {
 	struct sockaddr_hci haddr;
 	struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
 	return err;
 }
 
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+			    int *addr_len, int peer)
 {
 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
 	struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
 	return 0;
 }
 
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+			  struct sk_buff *skb)
 {
 	__u32 mask = hci_pi(sk)->cmsg_mask;
 
 	if (mask & HCI_CMSG_DIR) {
 		int incoming = bt_cb(skb)->incoming;
-		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+			 &incoming);
 	}
 
 	if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
 }
 
 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-				struct msghdr *msg, size_t len, int flags)
+			    struct msghdr *msg, size_t len, int flags)
 {
 	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 			u16 ocf = hci_opcode_ocf(opcode);
 
 			if (((ogf > HCI_SFLT_MAX_OGF) ||
-			     !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
-					!capable(CAP_NET_RAW)) {
+			     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+					   &hci_sec_filter.ocf_mask[ogf])) &&
+			    !capable(CAP_NET_RAW)) {
 				err = -EPERM;
 				goto drop;
 			}
@@ -891,7 +882,8 @@ drop:
 	goto done;
 }
 
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, unsigned int len)
 {
 	struct hci_ufilter uf = { .opcode = 0 };
 	struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
 	return err;
 }
 
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int __user *optlen)
 {
 	struct hci_ufilter uf;
 	struct sock *sk = sock->sk;
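hci_send_to_sock() above delivers a packet to a raw HCI socket only when the socket's filter has both the packet-type bit and, for events, the per-event bit set. Those are the same masks userspace installs with setsockopt(SOL_HCI, HCI_FILTER, ...). A self-contained sketch of installing such a filter; the constants are inlined from the Bluetooth HCI ABI rather than taken from BlueZ's <bluetooth/hci.h>, and the struct mirrors the kernel's filter layout under that assumption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define AF_BT            31   /* AF_BLUETOOTH */
#define BTPROTO_HCI       1
#define SOL_HCI           0
#define HCI_FILTER        2
#define HCI_EVENT_PKT  0x04
#define EVT_CMD_COMPLETE 0x0e

struct hci_filter {               /* mirrors the kernel's filter layout */
	uint32_t type_mask;
	uint32_t event_mask[2];
	uint16_t opcode;
};

int main(void)
{
	struct hci_filter flt;
	int sk = socket(AF_BT, SOCK_RAW, BTPROTO_HCI);

	if (sk < 0) {
		perror("socket");  /* needs CAP_NET_RAW and Bluetooth support */
		return 1;
	}

	memset(&flt, 0, sizeof(flt));
	flt.type_mask = 1 << HCI_EVENT_PKT;            /* events only */
	flt.event_mask[EVT_CMD_COMPLETE >> 5] |=
		1 << (EVT_CMD_COMPLETE & 31);          /* one event bit */

	if (setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
		perror("setsockopt");

	close(sk);
	return 0;
}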
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eafa..a20e61c3653d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
 /* Bluetooth HCI driver model support. */
 
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <linux/module.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
 	}
 }
 
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 	return sprintf(buf, "%s\n", link_typetostr(conn->type));
 }
 
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 	return sprintf(buf, "%s\n", batostr(&conn->dst));
 }
 
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_features(struct device *dev,
+				  struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 
 	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 		       conn->features[0], conn->features[1],
 		       conn->features[2], conn->features[3],
 		       conn->features[4], conn->features[5],
 		       conn->features[6], conn->features[7]);
 }
 
 #define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
 	}
 }
 
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_bus(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
 }
 
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+			 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+			 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
 	return sprintf(buf, "%s\n", name);
 }
 
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_class(struct device *dev,
+			  struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "0x%.2x%.2x%.2x\n",
-				hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+	return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+		       hdev->dev_class[1], hdev->dev_class[0]);
 }
 
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
 }
 
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev,
+			     struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 
 	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 		       hdev->features[0], hdev->features[1],
 		       hdev->features[2], hdev->features[3],
 		       hdev->features[4], hdev->features[5],
 		       hdev->features[6], hdev->features[7]);
 }
 
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->manufacturer);
 }
 
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_ver);
 }
 
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_rev);
 }
 
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->idle_timeout);
 }
 
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
 	return count;
 }
 
-static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_max_interval(struct device *dev,
+				       struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->sniff_max_interval);
 }
 
-static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_max_interval(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
 	return count;
 }
 
-static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_min_interval(struct device *dev,
+				       struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->sniff_min_interval);
 }
 
-static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_min_interval(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
 static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
 
 static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
-				show_idle_timeout, store_idle_timeout);
+		   show_idle_timeout, store_idle_timeout);
 static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
-				show_sniff_max_interval, store_sniff_max_interval);
+		   show_sniff_max_interval, store_sniff_max_interval);
 static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
-				show_sniff_min_interval, store_sniff_min_interval);
+		   show_sniff_min_interval, store_sniff_min_interval);
 
 static struct attribute *bt_host_attrs[] = {
 	&dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
 	memcpy(&data5, &uuid[14], 2);
 
 	seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
-		   ntohl(data0), ntohs(data1), ntohs(data2),
-		   ntohs(data3), ntohl(data4), ntohs(data5));
+		   ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
+		   ntohl(data4), ntohs(data5));
 }
 
 static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
-				auto_accept_delay_set, "%llu\n");
+			auto_accept_delay_set, "%llu\n");
 
 void hci_init_sysfs(struct hci_dev *hdev)
 {
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
 		return 0;
 
 	debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
-						hdev, &inquiry_cache_fops);
+			    hdev, &inquiry_cache_fops);
 
 	debugfs_create_file("blacklist", 0444, hdev->debugfs,
-						hdev, &blacklist_fops);
+			    hdev, &blacklist_fops);
 
 	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
 
 	debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
-						&auto_accept_delay_fops);
+			    &auto_accept_delay_fops);
 	return 0;
 }
 
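All of the hci_sysfs.c churn above is the standard sysfs show/store attribute pattern being re-wrapped to kernel line-length style. For reference, a minimal sketch of one such attribute pair outside the Bluetooth tree; the name and backing variable are invented for illustration, and the attribute would still need to be registered on a real struct device (e.g. via device_create_file() or an attribute group):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static unsigned int demo_timeout;	/* toy state behind the attribute */

static ssize_t show_demo_timeout(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", demo_timeout);
}

static ssize_t store_demo_timeout(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	unsigned int val;

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	demo_timeout = val;
	return count;
}

/* Expands to a struct device_attribute named dev_attr_demo_timeout. */
static DEVICE_ATTR(demo_timeout, S_IRUGO | S_IWUSR,
		   show_demo_timeout, store_demo_timeout);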
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 4deaca78e91e..9332bc7aa851 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && INPUT && HID_SUPPORT
+	depends on BT && INPUT
 	select HID
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b394..ccd985da6518 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
 */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
 #include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
 #include <linux/hidraw.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
 }
 
 static int __hidp_send_ctrl_message(struct hidp_session *session,
-			unsigned char hdr, unsigned char *data, int size)
+				    unsigned char hdr, unsigned char *data,
+				    int size)
 {
 	struct sk_buff *skb;
 
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 	return 0;
 }
 
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
+static int hidp_send_ctrl_message(struct hidp_session *session,
 			unsigned char hdr, unsigned char *data, int size)
 {
 	int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
 	mod_timer(&session->timer, jiffies + HZ * session->idle_to);
 }
 
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
 {
 	if (session->idle_to > 0)
 		del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1f..18b3f6892a36 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
 
 #include "hidp.h"
 
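The l2cap_core.c diff that follows is the largest block: it reworks ERTM (Enhanced Retransmission Mode) transmission around an explicit state machine. Timers feed events such as L2CAP_EV_RETRANS_TO into l2cap_tx(), l2cap_ertm_send() transmits from tx_q while the remote window has room, and l2cap_ertm_resend() pops sequence numbers off retrans_list and re-sends the matching frames. A toy, self-contained sketch of just the window bookkeeping follows; the types, window size, and sequence arithmetic are simplified stand-ins, not the kernel's:

#include <stdio.h>

#define TX_WIN    4          /* toy remote window */
#define SEQ_MOD  64          /* toy sequence space */

struct tx_state {
	unsigned next_tx_seq;    /* next new frame gets this txseq */
	unsigned expected_ack;   /* oldest unacked txseq */
	unsigned unacked;        /* frames in flight */
};

/* Send new frames while the peer's window has room. */
static void tx_send(struct tx_state *s, int frames)
{
	while (frames-- > 0 && s->unacked < TX_WIN) {
		printf("send txseq %u\n", s->next_tx_seq);
		s->next_tx_seq = (s->next_tx_seq + 1) % SEQ_MOD;
		s->unacked++;
	}
}

/* Peer acknowledged everything up to (but not including) reqseq. */
static void tx_ack(struct tx_state *s, unsigned reqseq)
{
	while (s->unacked && s->expected_ack != reqseq) {
		printf("ack  txseq %u\n", s->expected_ack);
		s->expected_ack = (s->expected_ack + 1) % SEQ_MOD;
		s->unacked--;
	}
}

int main(void)
{
	struct tx_state s = { 0, 0, 0 };

	tx_send(&s, 6);   /* only 4 go out: window is full */
	tx_ack(&s, 2);    /* frames 0 and 1 acknowledged */
	tx_send(&s, 2);   /* window has room again */
	return 0;
}

The clone-versus-copy detail in l2cap_ertm_resend() below follows from the diff's own comment: cloned sk_buffs share data and are treated as read-only, so a frame whose control field must be rewritten for retransmission needs skb_copy() when it is still cloned.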
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24f144b72a96..a8964db04bfb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
 
 #include <linux/module.h>
 
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
 #include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
+#include <net/bluetooth/a2mp.h>
 
 bool disable_ertm;
 
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
 				   struct l2cap_chan *chan, int err);
 
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+		     struct sk_buff_head *skbs, u8 event);
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
 	       state_to_string(state));
 
 	chan->state = state;
-	chan->ops->state_change(chan->data, state);
+	chan->ops->state_change(chan, state);
 }
 
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
 	release_sock(sk);
 }
 
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
+	if (!delayed_work_pending(&chan->monitor_timer) &&
+	    chan->retrans_timeout) {
+		l2cap_set_timer(chan, &chan->retrans_timer,
+				msecs_to_jiffies(chan->retrans_timeout));
+	}
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+	__clear_retrans_timer(chan);
+	if (chan->monitor_timeout) {
+		l2cap_set_timer(chan, &chan->monitor_timer,
+				msecs_to_jiffies(chan->monitor_timeout));
+	}
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+					       u16 seq)
+{
+	struct sk_buff *skb;
+
+	skb_queue_walk(head, skb) {
+		if (bt_cb(skb)->control.txseq == seq)
+			return skb;
+	}
+
+	return NULL;
+}
+
 /* ---- L2CAP sequence number lists ---- */
 
 /* For ERTM, ordered lists of sequence numbers must be tracked for
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
 
 	l2cap_chan_unlock(chan);
 
-	chan->ops->close(chan->data);
+	chan->ops->close(chan);
 	mutex_unlock(&conn->chan_lock);
 
 	l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
 
 	atomic_set(&chan->refcnt, 1);
 
+	/* This flag is cleared in l2cap_chan_ready() */
+	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
 	BT_DBG("chan %p", chan);
 
 	return chan;
@@ -412,6 +431,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
 	chan->sec_level = BT_SECURITY_LOW;
 
 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -430,7 +450,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 	case L2CAP_CHAN_CONN_ORIENTED:
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
-			chan->omtu = L2CAP_LE_DEFAULT_MTU;
+			chan->omtu = L2CAP_DEFAULT_MTU;
 			chan->scid = L2CAP_CID_LE_DATA;
 			chan->dcid = L2CAP_CID_LE_DATA;
 		} else {
@@ -447,6 +467,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 		chan->omtu = L2CAP_DEFAULT_MTU;
 		break;
 
+	case L2CAP_CHAN_CONN_FIX_A2MP:
+		chan->scid = L2CAP_CID_A2MP;
+		chan->dcid = L2CAP_CID_A2MP;
+		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+		break;
+
 	default:
 		/* Raw socket can send/recv signalling messages only */
 		chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +493,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 	list_add(&chan->list, &conn->chan_l);
 }
 
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	mutex_lock(&conn->chan_lock);
 	__l2cap_chan_add(conn, chan);
 	mutex_unlock(&conn->chan_lock);
 }
 
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
-	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *parent = bt_sk(sk)->parent;
 
 	__clear_chan_timer(chan);
 
@@ -490,34 +515,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 		l2cap_chan_put(chan);
 
 		chan->conn = NULL;
-		hci_conn_put(conn->hcon);
-	}
-
-	lock_sock(sk);
-
-	__l2cap_state_change(chan, BT_CLOSED);
-	sock_set_flag(sk, SOCK_ZAPPED);
 
-	if (err)
-		__l2cap_chan_set_err(chan, err);
+		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+			hci_conn_put(conn->hcon);
+	}
 
-	if (parent) {
-		bt_accept_unlink(sk);
-		parent->sk_data_ready(parent, 0);
-	} else
-		sk->sk_state_change(sk);
+	if (chan->ops->teardown)
+		chan->ops->teardown(chan, err);
 
-	release_sock(sk);
-
-	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
-			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
+	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
 		return;
 
-	skb_queue_purge(&chan->tx_q);
-
-	if (chan->mode == L2CAP_MODE_ERTM) {
-		struct srej_list *l, *tmp;
+	switch(chan->mode) {
+	case L2CAP_MODE_BASIC:
+		break;
 
+	case L2CAP_MODE_ERTM:
 		__clear_retrans_timer(chan);
 		__clear_monitor_timer(chan);
 		__clear_ack_timer(chan);
@@ -526,30 +539,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 
 		l2cap_seq_list_free(&chan->srej_list);
 		l2cap_seq_list_free(&chan->retrans_list);
-		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
-			list_del(&l->list);
-			kfree(l);
-		}
-	}
-}
-
-static void l2cap_chan_cleanup_listen(struct sock *parent)
-{
-	struct sock *sk;
-
-	BT_DBG("parent %p", parent);
-
-	/* Close not yet accepted channels */
-	while ((sk = bt_accept_dequeue(parent, NULL))) {
-		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
-		l2cap_chan_lock(chan);
-		__clear_chan_timer(chan);
-		l2cap_chan_close(chan, ECONNRESET);
-		l2cap_chan_unlock(chan);
+		/* fall through */
 
-		chan->ops->close(chan->data);
+	case L2CAP_MODE_STREAMING:
+		skb_queue_purge(&chan->tx_q);
+		break;
 	}
+
+	return;
 }
 
 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +560,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 
 	switch (chan->state) {
 	case BT_LISTEN:
-		lock_sock(sk);
-		l2cap_chan_cleanup_listen(sk);
-
-		__l2cap_state_change(chan, BT_CLOSED);
-		sock_set_flag(sk, SOCK_ZAPPED);
-		release_sock(sk);
+		if (chan->ops->teardown)
+			chan->ops->teardown(chan, 0);
 		break;
 
 	case BT_CONNECTED:
@@ -595,7 +589,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 			rsp.scid = cpu_to_le16(chan->dcid);
 			rsp.dcid = cpu_to_le16(chan->scid);
 			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
 				       sizeof(rsp), &rsp);
 		}
@@ -609,9 +603,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 		break;
 
 	default:
-		lock_sock(sk);
-		sock_set_flag(sk, SOCK_ZAPPED);
-		release_sock(sk);
+		if (chan->ops->teardown)
+			chan->ops->teardown(chan, 0);
 		break;
 	}
 }
@@ -627,7 +620,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 		default:
 			return HCI_AT_NO_BONDING;
 		}
-	} else if (chan->psm == cpu_to_le16(0x0001)) {
+	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
 		if (chan->sec_level == BT_SECURITY_LOW)
 			chan->sec_level = BT_SECURITY_SDP;
 
@@ -773,9 +766,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
 		__unpack_extended_control(get_unaligned_le32(skb->data),
 					  &bt_cb(skb)->control);
+		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
 	} else {
 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
 					  &bt_cb(skb)->control);
+		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
 	}
 }
 
@@ -830,66 +825,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
830 } 825 }
831} 826}
832 827
833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 828static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
834{ 829{
835 struct sk_buff *skb;
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
838 int count, hlen;
839
840 if (chan->state != BT_CONNECTED)
841 return;
842
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE; 831 return L2CAP_EXT_HDR_SIZE;
845 else 832 else
846 hlen = L2CAP_ENH_HDR_SIZE; 833 return L2CAP_ENH_HDR_SIZE;
834}
835
836static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
837 u32 control)
838{
839 struct sk_buff *skb;
840 struct l2cap_hdr *lh;
841 int hlen = __ertm_hdr_size(chan);
847 842
848 if (chan->fcs == L2CAP_FCS_CRC16) 843 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE; 844 hlen += L2CAP_FCS_SIZE;
850 845
851 BT_DBG("chan %p, control 0x%8.8x", chan, control); 846 skb = bt_skb_alloc(hlen, GFP_KERNEL);
852 847
853 count = min_t(unsigned int, conn->mtu, hlen);
854
855 control |= __set_sframe(chan);
856
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
859
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
862
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
864 if (!skb) 848 if (!skb)
865 return; 849 return ERR_PTR(-ENOMEM);
866 850
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 852 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid); 853 lh->cid = cpu_to_le16(chan->dcid);
870 854
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 855 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
856 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
857 else
858 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
872 859
873 if (chan->fcs == L2CAP_FCS_CRC16) { 860 if (chan->fcs == L2CAP_FCS_CRC16) {
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); 861 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); 862 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
876 } 863 }
877 864
878 skb->priority = HCI_PRIO_MAX; 865 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb); 866 return skb;
880} 867}
881 868
882static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) 869static void l2cap_send_sframe(struct l2cap_chan *chan,
870 struct l2cap_ctrl *control)
883{ 871{
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 872 struct sk_buff *skb;
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 873 u32 control_field;
874
875 BT_DBG("chan %p, control %p", chan, control);
876
877 if (!control->sframe)
878 return;
879
880 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
881 !control->poll)
882 control->final = 1;
883
884 if (control->super == L2CAP_SUPER_RR)
885 clear_bit(CONN_RNR_SENT, &chan->conn_state);
886 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state); 887 set_bit(CONN_RNR_SENT, &chan->conn_state);
887 } else
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
889 888
890 control |= __set_reqseq(chan, chan->buffer_seq); 889 if (control->super != L2CAP_SUPER_SREJ) {
890 chan->last_acked_seq = control->reqseq;
891 __clear_ack_timer(chan);
892 }
893
894 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
895 control->final, control->poll, control->super);
896
897 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
898 control_field = __pack_extended_control(control);
899 else
900 control_field = __pack_enhanced_control(control);
901
902 skb = l2cap_create_sframe_pdu(chan, control_field);
903 if (!IS_ERR(skb))
904 l2cap_do_send(chan, skb);
905}
906
907static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
908{
909 struct l2cap_ctrl control;
910
911 BT_DBG("chan %p, poll %d", chan, poll);
891 912
892 l2cap_send_sframe(chan, control); 913 memset(&control, 0, sizeof(control));
914 control.sframe = 1;
915 control.poll = poll;
916
917 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
918 control.super = L2CAP_SUPER_RNR;
919 else
920 control.super = L2CAP_SUPER_RR;
921
922 control.reqseq = chan->buffer_seq;
923 l2cap_send_sframe(chan, &control);
893} 924}
894 925
895static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 926static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
@@ -914,25 +945,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
914 945
915static void l2cap_chan_ready(struct l2cap_chan *chan) 946static void l2cap_chan_ready(struct l2cap_chan *chan)
916{ 947{
917 struct sock *sk = chan->sk; 948 /* This clears all conf flags, including CONF_NOT_COMPLETE */
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0; 949 chan->conf_state = 0;
927 __clear_chan_timer(chan); 950 __clear_chan_timer(chan);
928 951
929 __l2cap_state_change(chan, BT_CONNECTED); 952 chan->state = BT_CONNECTED;
930 sk->sk_state_change(sk);
931 953
932 if (parent) 954 chan->ops->ready(chan);
933 parent->sk_data_ready(parent, 0);
934
935 release_sock(sk);
936} 955}
937 956
938static void l2cap_do_start(struct l2cap_chan *chan) 957static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +972,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
953 l2cap_send_conn_req(chan); 972 l2cap_send_conn_req(chan);
954 } else { 973 } else {
955 struct l2cap_info_req req; 974 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 975 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
957 976
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn); 978 conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1014,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
995 __clear_ack_timer(chan); 1014 __clear_ack_timer(chan);
996 } 1015 }
997 1016
1017 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1018 __l2cap_state_change(chan, BT_DISCONN);
1019 return;
1020 }
1021
998 req.dcid = cpu_to_le16(chan->dcid); 1022 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid); 1023 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1024 l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1077,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1053 if (test_bit(BT_SK_DEFER_SETUP, 1077 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) { 1078 &bt_sk(sk)->flags)) {
1055 struct sock *parent = bt_sk(sk)->parent; 1079 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1080 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1081 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1058 if (parent) 1082 if (parent)
1059 parent->sk_data_ready(parent, 0); 1083 parent->sk_data_ready(parent, 0);
1060 1084
1061 } else { 1085 } else {
1062 __l2cap_state_change(chan, BT_CONFIG); 1086 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 1087 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 1088 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1065 } 1089 }
1066 release_sock(sk); 1090 release_sock(sk);
1067 } else { 1091 } else {
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1092 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1093 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1070 } 1094 }
1071 1095
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 1096 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1174,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1150 1174
1151 lock_sock(parent); 1175 lock_sock(parent);
1152 1176
1153 /* Check for backlog size */ 1177 chan = pchan->ops->new_connection(pchan);
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1156 goto clean;
1157 }
1158
1159 chan = pchan->ops->new_connection(pchan->data);
1160 if (!chan) 1178 if (!chan)
1161 goto clean; 1179 goto clean;
1162 1180
@@ -1171,10 +1189,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1171 1189
1172 l2cap_chan_add(conn, chan); 1190 l2cap_chan_add(conn, chan);
1173 1191
1174 __set_chan_timer(chan, sk->sk_sndtimeo); 1192 l2cap_chan_ready(chan);
1175
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1178 1193
1179clean: 1194clean:
1180 release_sock(parent); 1195 release_sock(parent);
@@ -1198,6 +1213,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
1198 1213
1199 l2cap_chan_lock(chan); 1214 l2cap_chan_lock(chan);
1200 1215
1216 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1217 l2cap_chan_unlock(chan);
1218 continue;
1219 }
1220
1201 if (conn->hcon->type == LE_LINK) { 1221 if (conn->hcon->type == LE_LINK) {
1202 if (smp_conn_security(conn, chan->sec_level)) 1222 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan); 1223 l2cap_chan_ready(chan);
@@ -1270,7 +1290,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1270 1290
1271 l2cap_chan_unlock(chan); 1291 l2cap_chan_unlock(chan);
1272 1292
1273 chan->ops->close(chan->data); 1293 chan->ops->close(chan);
1274 l2cap_chan_put(chan); 1294 l2cap_chan_put(chan);
1275 } 1295 }
1276 1296
@@ -1295,7 +1315,12 @@ static void security_timeout(struct work_struct *work)
1295 struct l2cap_conn *conn = container_of(work, struct l2cap_conn, 1315 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1296 security_timer.work); 1316 security_timer.work);
1297 1317
1298 l2cap_conn_del(conn->hcon, ETIMEDOUT); 1318 BT_DBG("conn %p", conn);
1319
1320 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1321 smp_chan_destroy(conn);
1322 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1323 }
1299} 1324}
1300 1325
1301static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 1326static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -1439,21 +1464,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1439 goto done; 1464 goto done;
1440 } 1465 }
1441 1466
1442 lock_sock(sk); 1467 switch (chan->state) {
1443
1444 switch (sk->sk_state) {
1445 case BT_CONNECT: 1468 case BT_CONNECT:
1446 case BT_CONNECT2: 1469 case BT_CONNECT2:
1447 case BT_CONFIG: 1470 case BT_CONFIG:
1448 /* Already connecting */ 1471 /* Already connecting */
1449 err = 0; 1472 err = 0;
1450 release_sock(sk);
1451 goto done; 1473 goto done;
1452 1474
1453 case BT_CONNECTED: 1475 case BT_CONNECTED:
1454 /* Already connected */ 1476 /* Already connected */
1455 err = -EISCONN; 1477 err = -EISCONN;
1456 release_sock(sk);
1457 goto done; 1478 goto done;
1458 1479
1459 case BT_OPEN: 1480 case BT_OPEN:
@@ -1463,13 +1484,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1463 1484
1464 default: 1485 default:
1465 err = -EBADFD; 1486 err = -EBADFD;
1466 release_sock(sk);
1467 goto done; 1487 goto done;
1468 } 1488 }
1469 1489
1470 /* Set destination address and psm */ 1490 /* Set destination address and psm */
1491 lock_sock(sk);
1471 bacpy(&bt_sk(sk)->dst, dst); 1492 bacpy(&bt_sk(sk)->dst, dst);
1472
1473 release_sock(sk); 1493 release_sock(sk);
1474 1494
1475 chan->psm = psm; 1495 chan->psm = psm;
@@ -1571,23 +1591,20 @@ int __l2cap_wait_ack(struct sock *sk)
1571static void l2cap_monitor_timeout(struct work_struct *work) 1591static void l2cap_monitor_timeout(struct work_struct *work)
1572{ 1592{
1573 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1593 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1574 monitor_timer.work); 1594 monitor_timer.work);
1575 1595
1576 BT_DBG("chan %p", chan); 1596 BT_DBG("chan %p", chan);
1577 1597
1578 l2cap_chan_lock(chan); 1598 l2cap_chan_lock(chan);
1579 1599
1580 if (chan->retry_count >= chan->remote_max_tx) { 1600 if (!chan->conn) {
1581 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1582 l2cap_chan_unlock(chan); 1601 l2cap_chan_unlock(chan);
1583 l2cap_chan_put(chan); 1602 l2cap_chan_put(chan);
1584 return; 1603 return;
1585 } 1604 }
1586 1605
1587 chan->retry_count++; 1606 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1588 __set_monitor_timer(chan);
1589 1607
1590 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1591 l2cap_chan_unlock(chan); 1608 l2cap_chan_unlock(chan);
1592 l2cap_chan_put(chan); 1609 l2cap_chan_put(chan);
1593} 1610}
@@ -1595,234 +1612,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1595static void l2cap_retrans_timeout(struct work_struct *work) 1612static void l2cap_retrans_timeout(struct work_struct *work)
1596{ 1613{
1597 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1614 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1598 retrans_timer.work); 1615 retrans_timer.work);
1599 1616
1600 BT_DBG("chan %p", chan); 1617 BT_DBG("chan %p", chan);
1601 1618
1602 l2cap_chan_lock(chan); 1619 l2cap_chan_lock(chan);
1603 1620
1604 chan->retry_count = 1; 1621 if (!chan->conn) {
1605 __set_monitor_timer(chan); 1622 l2cap_chan_unlock(chan);
1606 1623 l2cap_chan_put(chan);
1607 set_bit(CONN_WAIT_F, &chan->conn_state); 1624 return;
1608 1625 }
1609 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1610 1626
1627 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1611 l2cap_chan_unlock(chan); 1628 l2cap_chan_unlock(chan);
1612 l2cap_chan_put(chan); 1629 l2cap_chan_put(chan);
1613} 1630}
1614 1631
1615static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1632static void l2cap_streaming_send(struct l2cap_chan *chan,
1633 struct sk_buff_head *skbs)
1616{ 1634{
1617 struct sk_buff *skb; 1635 struct sk_buff *skb;
1636 struct l2cap_ctrl *control;
1618 1637
1619 while ((skb = skb_peek(&chan->tx_q)) && 1638 BT_DBG("chan %p, skbs %p", chan, skbs);
1620 chan->unacked_frames) {
1621 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1622 break;
1623 1639
1624 skb = skb_dequeue(&chan->tx_q); 1640 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1625 kfree_skb(skb);
1626 1641
1627 chan->unacked_frames--; 1642 while (!skb_queue_empty(&chan->tx_q)) {
1628 }
1629 1643
1630 if (!chan->unacked_frames) 1644 skb = skb_dequeue(&chan->tx_q);
1631 __clear_retrans_timer(chan);
1632}
1633 1645
1634static void l2cap_streaming_send(struct l2cap_chan *chan) 1646 bt_cb(skb)->control.retries = 1;
1635{ 1647 control = &bt_cb(skb)->control;
1636 struct sk_buff *skb; 1648
1637 u32 control; 1649 control->reqseq = 0;
1638 u16 fcs; 1650 control->txseq = chan->next_tx_seq;
1639 1651
1640 while ((skb = skb_dequeue(&chan->tx_q))) { 1652 __pack_control(chan, control, skb);
1641 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1642 control |= __set_txseq(chan, chan->next_tx_seq);
1643 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1644 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1645 1653
1646 if (chan->fcs == L2CAP_FCS_CRC16) { 1654 if (chan->fcs == L2CAP_FCS_CRC16) {
1647 fcs = crc16(0, (u8 *)skb->data, 1655 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1648 skb->len - L2CAP_FCS_SIZE); 1656 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1649 put_unaligned_le16(fcs,
1650 skb->data + skb->len - L2CAP_FCS_SIZE);
1651 } 1657 }
1652 1658
1653 l2cap_do_send(chan, skb); 1659 l2cap_do_send(chan, skb);
1654 1660
1661 BT_DBG("Sent txseq %u", control->txseq);
1662
1655 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1663 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1664 chan->frames_sent++;
1656 } 1665 }
1657} 1666}
1658 1667
1659static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) 1668static int l2cap_ertm_send(struct l2cap_chan *chan)
1660{ 1669{
1661 struct sk_buff *skb, *tx_skb; 1670 struct sk_buff *skb, *tx_skb;
1662 u16 fcs; 1671 struct l2cap_ctrl *control;
1663 u32 control; 1672 int sent = 0;
1664 1673
1665 skb = skb_peek(&chan->tx_q); 1674 BT_DBG("chan %p", chan);
1666 if (!skb)
1667 return;
1668 1675
1669 while (bt_cb(skb)->control.txseq != tx_seq) { 1676 if (chan->state != BT_CONNECTED)
1670 if (skb_queue_is_last(&chan->tx_q, skb)) 1677 return -ENOTCONN;
1671 return;
1672 1678
1673 skb = skb_queue_next(&chan->tx_q, skb); 1679 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1674 } 1680 return 0;
1675 1681
1676 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1682 while (chan->tx_send_head &&
1677 chan->remote_max_tx) { 1683 chan->unacked_frames < chan->remote_tx_win &&
1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1684 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1679 return;
1680 }
1681 1685
1682 tx_skb = skb_clone(skb, GFP_ATOMIC); 1686 skb = chan->tx_send_head;
1683 bt_cb(skb)->control.retries++;
1684 1687
1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1688 bt_cb(skb)->control.retries = 1;
1686 control &= __get_sar_mask(chan); 1689 control = &bt_cb(skb)->control;
1687 1690
1688 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1691 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1689 control |= __set_ctrl_final(chan); 1692 control->final = 1;
1690 1693
1691 control |= __set_reqseq(chan, chan->buffer_seq); 1694 control->reqseq = chan->buffer_seq;
1692 control |= __set_txseq(chan, tx_seq); 1695 chan->last_acked_seq = chan->buffer_seq;
1696 control->txseq = chan->next_tx_seq;
1693 1697
1694 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1698 __pack_control(chan, control, skb);
1695 1699
1696 if (chan->fcs == L2CAP_FCS_CRC16) { 1700 if (chan->fcs == L2CAP_FCS_CRC16) {
1697 fcs = crc16(0, (u8 *)tx_skb->data, 1701 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1698 tx_skb->len - L2CAP_FCS_SIZE); 1702 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1699 put_unaligned_le16(fcs, 1703 }
1700 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); 1704
1705 /* Clone after data has been modified. Data is assumed to be
1706 read-only (for locking purposes) on cloned sk_buffs.
1707 */
1708 tx_skb = skb_clone(skb, GFP_KERNEL);
1709
1710 if (!tx_skb)
1711 break;
1712
1713 __set_retrans_timer(chan);
1714
1715 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1716 chan->unacked_frames++;
1717 chan->frames_sent++;
1718 sent++;
1719
1720 if (skb_queue_is_last(&chan->tx_q, skb))
1721 chan->tx_send_head = NULL;
1722 else
1723 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1724
1725 l2cap_do_send(chan, tx_skb);
1726 BT_DBG("Sent txseq %u", control->txseq);
1701 } 1727 }
1702 1728
1703 l2cap_do_send(chan, tx_skb); 1729 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1730 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1731
1732 return sent;
1704} 1733}
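
The rewritten l2cap_ertm_send() above gates transmission on three conditions: frames are queued at tx_send_head, the peer's receive window still has room (unacked_frames < remote_tx_win), and the channel is in the XMIT state. A minimal standalone sketch of that window gate follows; the 6-bit sequence space and the window of 4 are assumptions for illustration, not values from this diff.

    #include <stdint.h>
    #include <stdio.h>

    #define SEQ_MOD        64  /* assumed 6-bit sequence space */
    #define REMOTE_TX_WIN   4  /* assumed remote receive window */

    static uint16_t next_seq(uint16_t seq)
    {
        return (seq + 1) % SEQ_MOD;
    }

    int main(void)
    {
        uint16_t next_tx_seq = 62;  /* about to wrap around */
        unsigned int unacked = 0;
        int queued = 6;             /* frames waiting in tx_q */

        /* Mirror of the send loop's gate: stop when the window fills. */
        while (queued > 0 && unacked < REMOTE_TX_WIN) {
            printf("send txseq %u\n", next_tx_seq);
            next_tx_seq = next_seq(next_tx_seq);
            unacked++;
            queued--;
        }
        printf("stopped: %d still queued, %u unacked\n", queued, unacked);
        return 0;
    }

With these numbers the sketch sends txseq 62, 63, 0, 1 and then stops with two frames still queued, which is exactly the state the retransmission timer and incoming acks later resolve.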
1705 1734
1706static int l2cap_ertm_send(struct l2cap_chan *chan) 1735static void l2cap_ertm_resend(struct l2cap_chan *chan)
1707{ 1736{
1708 struct sk_buff *skb, *tx_skb; 1737 struct l2cap_ctrl control;
1709 u16 fcs; 1738 struct sk_buff *skb;
1710 u32 control; 1739 struct sk_buff *tx_skb;
1711 int nsent = 0; 1740 u16 seq;
1712 1741
1713 if (chan->state != BT_CONNECTED) 1742 BT_DBG("chan %p", chan);
1714 return -ENOTCONN;
1715 1743
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1744 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1717 return 0; 1745 return;
1718 1746
1719 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1747 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1748 seq = l2cap_seq_list_pop(&chan->retrans_list);
1720 1749
1721 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1750 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1722 chan->remote_max_tx) { 1751 if (!skb) {
1723 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1752 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1724 break; 1753 seq);
1754 continue;
1725 } 1755 }
1726 1756
1727 tx_skb = skb_clone(skb, GFP_ATOMIC);
1728
1729 bt_cb(skb)->control.retries++; 1757 bt_cb(skb)->control.retries++;
1758 control = bt_cb(skb)->control;
1730 1759
1731 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1760 if (chan->max_tx != 0 &&
1732 control &= __get_sar_mask(chan); 1761 bt_cb(skb)->control.retries > chan->max_tx) {
1762 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1764 l2cap_seq_list_clear(&chan->retrans_list);
1765 break;
1766 }
1733 1767
1768 control.reqseq = chan->buffer_seq;
1734 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1769 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1735 control |= __set_ctrl_final(chan); 1770 control.final = 1;
1771 else
1772 control.final = 0;
1736 1773
1737 control |= __set_reqseq(chan, chan->buffer_seq); 1774 if (skb_cloned(skb)) {
1738 control |= __set_txseq(chan, chan->next_tx_seq); 1775 /* Cloned sk_buffs are read-only, so we need a
1739 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1776 * writeable copy
1777 */
1778 tx_skb = skb_copy(skb, GFP_ATOMIC);
1779 } else {
1780 tx_skb = skb_clone(skb, GFP_ATOMIC);
1781 }
1782
1783 if (!tx_skb) {
1784 l2cap_seq_list_clear(&chan->retrans_list);
1785 break;
1786 }
1740 1787
1741 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1788 /* Update skb contents */
1789 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1790 put_unaligned_le32(__pack_extended_control(&control),
1791 tx_skb->data + L2CAP_HDR_SIZE);
1792 } else {
1793 put_unaligned_le16(__pack_enhanced_control(&control),
1794 tx_skb->data + L2CAP_HDR_SIZE);
1795 }
1742 1796
1743 if (chan->fcs == L2CAP_FCS_CRC16) { 1797 if (chan->fcs == L2CAP_FCS_CRC16) {
1744 fcs = crc16(0, (u8 *)skb->data, 1798 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1745 tx_skb->len - L2CAP_FCS_SIZE); 1799 put_unaligned_le16(fcs, skb_put(tx_skb,
1746 put_unaligned_le16(fcs, skb->data + 1800 L2CAP_FCS_SIZE));
1747 tx_skb->len - L2CAP_FCS_SIZE);
1748 } 1801 }
1749 1802
1750 l2cap_do_send(chan, tx_skb); 1803 l2cap_do_send(chan, tx_skb);
1751 1804
1752 __set_retrans_timer(chan); 1805 BT_DBG("Resent txseq %d", control.txseq);
1753
1754 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1755
1756 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1757
1758 if (bt_cb(skb)->control.retries == 1) {
1759 chan->unacked_frames++;
1760
1761 if (!nsent++)
1762 __clear_ack_timer(chan);
1763 }
1764
1765 chan->frames_sent++;
1766 1806
1767 if (skb_queue_is_last(&chan->tx_q, skb)) 1807 chan->last_acked_seq = chan->buffer_seq;
1768 chan->tx_send_head = NULL;
1769 else
1770 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1771 } 1808 }
1772
1773 return nsent;
1774} 1809}
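
l2cap_ertm_resend() above rewrites the control field of a stored frame before retransmitting it, so a shared (cloned) sk_buff must first be deep-copied with skb_copy(), while an unshared one can be modified and cheaply cloned. A userspace analogue of that decision, with illustrative types rather than the kernel's skb API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        int shared;        /* stands in for skb_cloned() */
        char data[8];      /* stands in for header + payload */
    };

    /* Return a buffer whose header bytes may be rewritten in place. */
    static struct buf *writable_copy(struct buf *b)
    {
        struct buf *c;

        if (!b->shared)
            return b;                /* unshared: safe to modify in place */

        c = malloc(sizeof(*c));      /* shared: deep copy, like skb_copy() */
        if (!c)
            return NULL;
        memcpy(c, b, sizeof(*c));
        c->shared = 0;
        return c;
    }

    int main(void)
    {
        struct buf orig = { .shared = 1, .data = "frame" };
        struct buf *tx = writable_copy(&orig);

        if (!tx)
            return 1;
        tx->data[0] = 'F';           /* rewrite the "control field" safely */
        printf("orig %s, tx %s\n", orig.data, tx->data);
        if (tx != &orig)
            free(tx);
        return 0;
    }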
1775 1810
1776static int l2cap_retransmit_frames(struct l2cap_chan *chan) 1811static void l2cap_retransmit(struct l2cap_chan *chan,
1812 struct l2cap_ctrl *control)
1777{ 1813{
1778 int ret; 1814 BT_DBG("chan %p, control %p", chan, control);
1779
1780 if (!skb_queue_empty(&chan->tx_q))
1781 chan->tx_send_head = chan->tx_q.next;
1782 1815
1783 chan->next_tx_seq = chan->expected_ack_seq; 1816 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1784 ret = l2cap_ertm_send(chan); 1817 l2cap_ertm_resend(chan);
1785 return ret;
1786} 1818}
1787 1819
1788static void __l2cap_send_ack(struct l2cap_chan *chan) 1820static void l2cap_retransmit_all(struct l2cap_chan *chan,
1821 struct l2cap_ctrl *control)
1789{ 1822{
1790 u32 control = 0; 1823 struct sk_buff *skb;
1791 1824
1792 control |= __set_reqseq(chan, chan->buffer_seq); 1825 BT_DBG("chan %p, control %p", chan, control);
1793 1826
1794 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1827 if (control->poll)
1795 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 1828 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1796 set_bit(CONN_RNR_SENT, &chan->conn_state); 1829
1797 l2cap_send_sframe(chan, control); 1830 l2cap_seq_list_clear(&chan->retrans_list);
1798 return;
1799 }
1800 1831
1801 if (l2cap_ertm_send(chan) > 0) 1832 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1802 return; 1833 return;
1803 1834
1804 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 1835 if (chan->unacked_frames) {
1805 l2cap_send_sframe(chan, control); 1836 skb_queue_walk(&chan->tx_q, skb) {
1837 if (bt_cb(skb)->control.txseq == control->reqseq ||
1838 skb == chan->tx_send_head)
1839 break;
1840 }
1841
1842 skb_queue_walk_from(&chan->tx_q, skb) {
1843 if (skb == chan->tx_send_head)
1844 break;
1845
1846 l2cap_seq_list_append(&chan->retrans_list,
1847 bt_cb(skb)->control.txseq);
1848 }
1849
1850 l2cap_ertm_resend(chan);
1851 }
1806} 1852}
1807 1853
1808static void l2cap_send_ack(struct l2cap_chan *chan) 1854static void l2cap_send_ack(struct l2cap_chan *chan)
1809{ 1855{
1810 __clear_ack_timer(chan); 1856 struct l2cap_ctrl control;
1811 __l2cap_send_ack(chan); 1857 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1812} 1858 chan->last_acked_seq);
1859 int threshold;
1813 1860
1814static void l2cap_send_srejtail(struct l2cap_chan *chan) 1861 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1815{ 1862 chan, chan->last_acked_seq, chan->buffer_seq);
1816 struct srej_list *tail;
1817 u32 control;
1818 1863
1819 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 1864 memset(&control, 0, sizeof(control));
1820 control |= __set_ctrl_final(chan); 1865 control.sframe = 1;
1821 1866
1822 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1823 control |= __set_reqseq(chan, tail->tx_seq); 1868 chan->rx_state == L2CAP_RX_STATE_RECV) {
1869 __clear_ack_timer(chan);
1870 control.super = L2CAP_SUPER_RNR;
1871 control.reqseq = chan->buffer_seq;
1872 l2cap_send_sframe(chan, &control);
1873 } else {
1874 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1875 l2cap_ertm_send(chan);
1876 /* If any i-frames were sent, they included an ack */
1877 if (chan->buffer_seq == chan->last_acked_seq)
1878 frames_to_ack = 0;
1879 }
1824 1880
1825 l2cap_send_sframe(chan, control); 1881 /* Ack now if the window is 3/4ths full.
1882 * Calculate without mul or div
1883 */
1884 threshold = chan->ack_win;
1885 threshold += threshold << 1;
1886 threshold >>= 2;
1887
1888 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1889 threshold);
1890
1891 if (frames_to_ack >= threshold) {
1892 __clear_ack_timer(chan);
1893 control.super = L2CAP_SUPER_RR;
1894 control.reqseq = chan->buffer_seq;
1895 l2cap_send_sframe(chan, &control);
1896 frames_to_ack = 0;
1897 }
1898
1899 if (frames_to_ack)
1900 __set_ack_timer(chan);
1901 }
1826} 1902}
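
The new l2cap_send_ack() above acknowledges once the window is three-quarters full, computing the threshold as (w + (w << 1)) >> 2 to avoid a multiply and a divide. A quick standalone check that the shift form matches 3*w/4 for a range of window sizes:

    #include <stdio.h>

    int main(void)
    {
        int ack_win;

        for (ack_win = 1; ack_win <= 64; ack_win *= 2) {
            int threshold = ack_win;

            threshold += threshold << 1;  /* threshold = 3 * ack_win */
            threshold >>= 2;              /* threshold = (3 * ack_win) / 4 */
            printf("ack_win %2d -> threshold %2d (exact %2d)\n",
                   ack_win, threshold, ack_win * 3 / 4);
        }
        return 0;
    }

Both columns agree for every window size, since t + (t << 1) is exactly 3t and the right shift by two is an integer division by four.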
1827 1903
1828static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, 1904static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1871,15 +1947,15 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1871} 1947}
1872 1948
1873static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, 1949static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1874 struct msghdr *msg, size_t len, 1950 struct msghdr *msg, size_t len,
1875 u32 priority) 1951 u32 priority)
1876{ 1952{
1877 struct l2cap_conn *conn = chan->conn; 1953 struct l2cap_conn *conn = chan->conn;
1878 struct sk_buff *skb; 1954 struct sk_buff *skb;
1879 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 1955 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1880 struct l2cap_hdr *lh; 1956 struct l2cap_hdr *lh;
1881 1957
1882 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority); 1958 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1883 1959
1884 count = min_t(unsigned int, (conn->mtu - hlen), len); 1960 count = min_t(unsigned int, (conn->mtu - hlen), len);
1885 1961
@@ -1905,15 +1981,15 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1905} 1981}
1906 1982
1907static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, 1983static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1908 struct msghdr *msg, size_t len, 1984 struct msghdr *msg, size_t len,
1909 u32 priority) 1985 u32 priority)
1910{ 1986{
1911 struct l2cap_conn *conn = chan->conn; 1987 struct l2cap_conn *conn = chan->conn;
1912 struct sk_buff *skb; 1988 struct sk_buff *skb;
1913 int err, count; 1989 int err, count;
1914 struct l2cap_hdr *lh; 1990 struct l2cap_hdr *lh;
1915 1991
1916 BT_DBG("chan %p len %d", chan, (int)len); 1992 BT_DBG("chan %p len %zu", chan, len);
1917 1993
1918 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); 1994 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1919 1995
@@ -1938,23 +2014,20 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1938} 2014}
1939 2015
1940static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 2016static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1941 struct msghdr *msg, size_t len, 2017 struct msghdr *msg, size_t len,
1942 u16 sdulen) 2018 u16 sdulen)
1943{ 2019{
1944 struct l2cap_conn *conn = chan->conn; 2020 struct l2cap_conn *conn = chan->conn;
1945 struct sk_buff *skb; 2021 struct sk_buff *skb;
1946 int err, count, hlen; 2022 int err, count, hlen;
1947 struct l2cap_hdr *lh; 2023 struct l2cap_hdr *lh;
1948 2024
1949 BT_DBG("chan %p len %d", chan, (int)len); 2025 BT_DBG("chan %p len %zu", chan, len);
1950 2026
1951 if (!conn) 2027 if (!conn)
1952 return ERR_PTR(-ENOTCONN); 2028 return ERR_PTR(-ENOTCONN);
1953 2029
1954 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2030 hlen = __ertm_hdr_size(chan);
1955 hlen = L2CAP_EXT_HDR_SIZE;
1956 else
1957 hlen = L2CAP_ENH_HDR_SIZE;
1958 2031
1959 if (sdulen) 2032 if (sdulen)
1960 hlen += L2CAP_SDULEN_SIZE; 2033 hlen += L2CAP_SDULEN_SIZE;
@@ -1974,7 +2047,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1974 lh->cid = cpu_to_le16(chan->dcid); 2047 lh->cid = cpu_to_le16(chan->dcid);
1975 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2048 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1976 2049
1977 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); 2050 /* Control header is populated later */
2051 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2052 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2053 else
2054 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1978 2055
1979 if (sdulen) 2056 if (sdulen)
1980 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2057 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1985,9 +2062,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1985 return ERR_PTR(err); 2062 return ERR_PTR(err);
1986 } 2063 }
1987 2064
1988 if (chan->fcs == L2CAP_FCS_CRC16) 2065 bt_cb(skb)->control.fcs = chan->fcs;
1989 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1990
1991 bt_cb(skb)->control.retries = 0; 2066 bt_cb(skb)->control.retries = 0;
1992 return skb; 2067 return skb;
1993} 2068}
@@ -1999,10 +2074,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
1999 struct sk_buff *skb; 2074 struct sk_buff *skb;
2000 u16 sdu_len; 2075 u16 sdu_len;
2001 size_t pdu_len; 2076 size_t pdu_len;
2002 int err = 0;
2003 u8 sar; 2077 u8 sar;
2004 2078
2005 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); 2079 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2006 2080
2007 /* It is critical that ERTM PDUs fit in a single HCI fragment, 2081 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2008 * so fragmented skbs are not used. The HCI layer's handling 2082 * so fragmented skbs are not used. The HCI layer's handling
@@ -2015,7 +2089,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2015 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2089 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2016 2090
2017 /* Adjust for largest possible L2CAP overhead. */ 2091 /* Adjust for largest possible L2CAP overhead. */
2018 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; 2092 if (chan->fcs)
2093 pdu_len -= L2CAP_FCS_SIZE;
2094
2095 pdu_len -= __ertm_hdr_size(chan);
2019 2096
2020 /* Remote device may have requested smaller PDUs */ 2097 /* Remote device may have requested smaller PDUs */
2021 pdu_len = min_t(size_t, pdu_len, chan->remote_mps); 2098 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2055,7 +2132,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2055 } 2132 }
2056 } 2133 }
2057 2134
2058 return err; 2135 return 0;
2059} 2136}
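
The hunk above sizes each ERTM PDU in l2cap_segment_sdu() by capping the HCI payload budget at L2CAP_BREDR_MAX_PAYLOAD, reserving FCS space only when FCS is in use, subtracting the ERTM header, and finally honouring the remote MPS. A worked example with assumed link values; the MTU, MPS, and the 6-byte enhanced header (4-byte basic header plus 16-bit control field) are illustrative:

    #include <stdio.h>

    #define L2CAP_BREDR_MAX_PAYLOAD 1019
    #define L2CAP_FCS_SIZE             2
    #define ENH_HDR_SIZE               6  /* assumed: basic hdr + 16-bit ctrl */

    int main(void)
    {
        size_t conn_mtu = 672;    /* assumed HCI fragment budget */
        size_t remote_mps = 498;  /* assumed remote MPS */
        int fcs_enabled = 1;

        size_t pdu_len = conn_mtu;
        if (pdu_len > L2CAP_BREDR_MAX_PAYLOAD)
            pdu_len = L2CAP_BREDR_MAX_PAYLOAD;
        if (fcs_enabled)
            pdu_len -= L2CAP_FCS_SIZE;    /* reserve FCS only if in use */
        pdu_len -= ENH_HDR_SIZE;          /* reserve ERTM header */
        if (pdu_len > remote_mps)
            pdu_len = remote_mps;         /* remote may want smaller PDUs */

        printf("payload per PDU: %zu bytes\n", pdu_len);
        return 0;
    }

Here 672 - 2 - 6 = 664 bytes survive the overhead, but the remote MPS of 498 is the binding constraint.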
2060 2137
2061int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2138int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2117,17 +2194,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2117 if (err) 2194 if (err)
2118 break; 2195 break;
2119 2196
2120 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2121 chan->tx_send_head = seg_queue.next;
2122 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2123
2124 if (chan->mode == L2CAP_MODE_ERTM) 2197 if (chan->mode == L2CAP_MODE_ERTM)
2125 err = l2cap_ertm_send(chan); 2198 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2126 else 2199 else
2127 l2cap_streaming_send(chan); 2200 l2cap_streaming_send(chan, &seg_queue);
2128 2201
2129 if (err >= 0) 2202 err = len;
2130 err = len;
2131 2203
2132 /* If the skbs were not queued for sending, they'll still be in 2204 /* If the skbs were not queued for sending, they'll still be in
2133 * seg_queue and need to be purged. 2205 * seg_queue and need to be purged.
@@ -2143,6 +2215,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2143 return err; 2215 return err;
2144} 2216}
2145 2217
2218static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2219{
2220 struct l2cap_ctrl control;
2221 u16 seq;
2222
2223 BT_DBG("chan %p, txseq %u", chan, txseq);
2224
2225 memset(&control, 0, sizeof(control));
2226 control.sframe = 1;
2227 control.super = L2CAP_SUPER_SREJ;
2228
2229 for (seq = chan->expected_tx_seq; seq != txseq;
2230 seq = __next_seq(chan, seq)) {
2231 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2232 control.reqseq = seq;
2233 l2cap_send_sframe(chan, &control);
2234 l2cap_seq_list_append(&chan->srej_list, seq);
2235 }
2236 }
2237
2238 chan->expected_tx_seq = __next_seq(chan, txseq);
2239}
2240
2241static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2242{
2243 struct l2cap_ctrl control;
2244
2245 BT_DBG("chan %p", chan);
2246
2247 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2248 return;
2249
2250 memset(&control, 0, sizeof(control));
2251 control.sframe = 1;
2252 control.super = L2CAP_SUPER_SREJ;
2253 control.reqseq = chan->srej_list.tail;
2254 l2cap_send_sframe(chan, &control);
2255}
2256
2257static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2258{
2259 struct l2cap_ctrl control;
2260 u16 initial_head;
2261 u16 seq;
2262
2263 BT_DBG("chan %p, txseq %u", chan, txseq);
2264
2265 memset(&control, 0, sizeof(control));
2266 control.sframe = 1;
2267 control.super = L2CAP_SUPER_SREJ;
2268
2269 /* Capture initial list head to allow only one pass through the list. */
2270 initial_head = chan->srej_list.head;
2271
2272 do {
2273 seq = l2cap_seq_list_pop(&chan->srej_list);
2274 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2275 break;
2276
2277 control.reqseq = seq;
2278 l2cap_send_sframe(chan, &control);
2279 l2cap_seq_list_append(&chan->srej_list, seq);
2280 } while (chan->srej_list.head != initial_head);
2281}
2282
2283static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2284{
2285 struct sk_buff *acked_skb;
2286 u16 ackseq;
2287
2288 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2289
2290 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2291 return;
2292
2293 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2294 chan->expected_ack_seq, chan->unacked_frames);
2295
2296 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2297 ackseq = __next_seq(chan, ackseq)) {
2298
2299 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2300 if (acked_skb) {
2301 skb_unlink(acked_skb, &chan->tx_q);
2302 kfree_skb(acked_skb);
2303 chan->unacked_frames--;
2304 }
2305 }
2306
2307 chan->expected_ack_seq = reqseq;
2308
2309 if (chan->unacked_frames == 0)
2310 __clear_retrans_timer(chan);
2311
2312 BT_DBG("unacked_frames %u", chan->unacked_frames);
2313}
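
l2cap_process_reqseq() above walks the modular sequence space from expected_ack_seq up to (but excluding) reqseq, releasing each acknowledged frame even when the walk crosses a wrap of the sequence numbers. A standalone sketch of that walk with an assumed 64-value modulus:

    #include <stdint.h>
    #include <stdio.h>

    #define SEQ_MOD 64  /* assumed sequence modulus */

    static uint16_t next_seq(uint16_t s)
    {
        return (s + 1) % SEQ_MOD;
    }

    int main(void)
    {
        uint16_t expected_ack_seq = 61, reqseq = 2;
        uint16_t s;

        /* Everything before reqseq is now acknowledged, across the wrap. */
        for (s = expected_ack_seq; s != reqseq; s = next_seq(s))
            printf("acked txseq %u\n", s);  /* prints 61 62 63 0 1 */
        return 0;
    }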
2314
2315static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2316{
2317 BT_DBG("chan %p", chan);
2318
2319 chan->expected_tx_seq = chan->buffer_seq;
2320 l2cap_seq_list_clear(&chan->srej_list);
2321 skb_queue_purge(&chan->srej_q);
2322 chan->rx_state = L2CAP_RX_STATE_RECV;
2323}
2324
2325static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2326 struct l2cap_ctrl *control,
2327 struct sk_buff_head *skbs, u8 event)
2328{
2329 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2330 event);
2331
2332 switch (event) {
2333 case L2CAP_EV_DATA_REQUEST:
2334 if (chan->tx_send_head == NULL)
2335 chan->tx_send_head = skb_peek(skbs);
2336
2337 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2338 l2cap_ertm_send(chan);
2339 break;
2340 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2341 BT_DBG("Enter LOCAL_BUSY");
2342 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2343
2344 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2345 /* The SREJ_SENT state must be aborted if we are to
2346 * enter the LOCAL_BUSY state.
2347 */
2348 l2cap_abort_rx_srej_sent(chan);
2349 }
2350
2351 l2cap_send_ack(chan);
2352
2353 break;
2354 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2355 BT_DBG("Exit LOCAL_BUSY");
2356 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2357
2358 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2359 struct l2cap_ctrl local_control;
2360
2361 memset(&local_control, 0, sizeof(local_control));
2362 local_control.sframe = 1;
2363 local_control.super = L2CAP_SUPER_RR;
2364 local_control.poll = 1;
2365 local_control.reqseq = chan->buffer_seq;
2366 l2cap_send_sframe(chan, &local_control);
2367
2368 chan->retry_count = 1;
2369 __set_monitor_timer(chan);
2370 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2371 }
2372 break;
2373 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2374 l2cap_process_reqseq(chan, control->reqseq);
2375 break;
2376 case L2CAP_EV_EXPLICIT_POLL:
2377 l2cap_send_rr_or_rnr(chan, 1);
2378 chan->retry_count = 1;
2379 __set_monitor_timer(chan);
2380 __clear_ack_timer(chan);
2381 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2382 break;
2383 case L2CAP_EV_RETRANS_TO:
2384 l2cap_send_rr_or_rnr(chan, 1);
2385 chan->retry_count = 1;
2386 __set_monitor_timer(chan);
2387 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2388 break;
2389 case L2CAP_EV_RECV_FBIT:
2390 /* Nothing to process */
2391 break;
2392 default:
2393 break;
2394 }
2395}
2396
2397static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2398 struct l2cap_ctrl *control,
2399 struct sk_buff_head *skbs, u8 event)
2400{
2401 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2402 event);
2403
2404 switch (event) {
2405 case L2CAP_EV_DATA_REQUEST:
2406 if (chan->tx_send_head == NULL)
2407 chan->tx_send_head = skb_peek(skbs);
2408 /* Queue data, but don't send. */
2409 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2410 break;
2411 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2412 BT_DBG("Enter LOCAL_BUSY");
2413 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2414
2415 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2416 /* The SREJ_SENT state must be aborted if we are to
2417 * enter the LOCAL_BUSY state.
2418 */
2419 l2cap_abort_rx_srej_sent(chan);
2420 }
2421
2422 l2cap_send_ack(chan);
2423
2424 break;
2425 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2426 BT_DBG("Exit LOCAL_BUSY");
2427 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2428
2429 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2430 struct l2cap_ctrl local_control;
2431 memset(&local_control, 0, sizeof(local_control));
2432 local_control.sframe = 1;
2433 local_control.super = L2CAP_SUPER_RR;
2434 local_control.poll = 1;
2435 local_control.reqseq = chan->buffer_seq;
2436 l2cap_send_sframe(chan, &local_control);
2437
2438 chan->retry_count = 1;
2439 __set_monitor_timer(chan);
2440 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2441 }
2442 break;
2443 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2444 l2cap_process_reqseq(chan, control->reqseq);
2445
2446 /* Fall through */
2447
2448 case L2CAP_EV_RECV_FBIT:
2449 if (control && control->final) {
2450 __clear_monitor_timer(chan);
2451 if (chan->unacked_frames > 0)
2452 __set_retrans_timer(chan);
2453 chan->retry_count = 0;
2454 chan->tx_state = L2CAP_TX_STATE_XMIT;
2455 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2456 }
2457 break;
2458 case L2CAP_EV_EXPLICIT_POLL:
2459 /* Ignore */
2460 break;
2461 case L2CAP_EV_MONITOR_TO:
2462 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2463 l2cap_send_rr_or_rnr(chan, 1);
2464 __set_monitor_timer(chan);
2465 chan->retry_count++;
2466 } else {
2467 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2468 }
2469 break;
2470 default:
2471 break;
2472 }
2473}
2474
2475static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2476 struct sk_buff_head *skbs, u8 event)
2477{
2478 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2479 chan, control, skbs, event, chan->tx_state);
2480
2481 switch (chan->tx_state) {
2482 case L2CAP_TX_STATE_XMIT:
2483 l2cap_tx_state_xmit(chan, control, skbs, event);
2484 break;
2485 case L2CAP_TX_STATE_WAIT_F:
2486 l2cap_tx_state_wait_f(chan, control, skbs, event);
2487 break;
2488 default:
2489 /* Ignore event */
2490 break;
2491 }
2492}
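
l2cap_tx() above routes every event to the handler for the current TX state; WAIT_F queues new data without sending it and returns to XMIT when the final bit arrives. A reduced two-state model of that dispatch (a sketch, not kernel code):

    #include <stdio.h>

    enum tx_state { TX_XMIT, TX_WAIT_F };
    enum tx_event { EV_DATA_REQUEST, EV_EXPLICIT_POLL, EV_RECV_FBIT };

    static enum tx_state state = TX_XMIT;

    static void tx_event(enum tx_event ev)
    {
        switch (state) {
        case TX_XMIT:
            if (ev == EV_EXPLICIT_POLL)
                state = TX_WAIT_F;          /* poll sent, await F-bit */
            else if (ev == EV_DATA_REQUEST)
                printf("send queued frames\n");
            break;
        case TX_WAIT_F:
            if (ev == EV_RECV_FBIT)
                state = TX_XMIT;            /* F-bit seen, resume sending */
            else if (ev == EV_DATA_REQUEST)
                printf("queue only, don't send\n");
            break;
        }
    }

    int main(void)
    {
        tx_event(EV_DATA_REQUEST);   /* sends */
        tx_event(EV_EXPLICIT_POLL);  /* enter WAIT_F */
        tx_event(EV_DATA_REQUEST);   /* queued only */
        tx_event(EV_RECV_FBIT);      /* back to XMIT */
        return 0;
    }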
2493
2494static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2495 struct l2cap_ctrl *control)
2496{
2497 BT_DBG("chan %p, control %p", chan, control);
2498 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2499}
2500
2501static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2502 struct l2cap_ctrl *control)
2503{
2504 BT_DBG("chan %p, control %p", chan, control);
2505 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2506}
2507
2146/* Copy frame to all raw sockets on that connection */ 2508/* Copy frame to all raw sockets on that connection */
2147static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 2509static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2148{ 2510{
@@ -2165,7 +2527,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2165 if (!nskb) 2527 if (!nskb)
2166 continue; 2528 continue;
2167 2529
2168 if (chan->ops->recv(chan->data, nskb)) 2530 if (chan->ops->recv(chan, nskb))
2169 kfree_skb(nskb); 2531 kfree_skb(nskb);
2170 } 2532 }
2171 2533
@@ -2173,16 +2535,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2173} 2535}
2174 2536
2175/* ---- L2CAP signalling commands ---- */ 2537/* ---- L2CAP signalling commands ---- */
2176static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 2538static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2177 u8 code, u8 ident, u16 dlen, void *data) 2539 u8 ident, u16 dlen, void *data)
2178{ 2540{
2179 struct sk_buff *skb, **frag; 2541 struct sk_buff *skb, **frag;
2180 struct l2cap_cmd_hdr *cmd; 2542 struct l2cap_cmd_hdr *cmd;
2181 struct l2cap_hdr *lh; 2543 struct l2cap_hdr *lh;
2182 int len, count; 2544 int len, count;
2183 2545
2184 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", 2546 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2185 conn, code, ident, dlen); 2547 conn, code, ident, dlen);
2186 2548
2187 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2549 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2188 count = min_t(unsigned int, conn->mtu, len); 2550 count = min_t(unsigned int, conn->mtu, len);
@@ -2195,9 +2557,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2195 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 2557 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2196 2558
2197 if (conn->hcon->type == LE_LINK) 2559 if (conn->hcon->type == LE_LINK)
2198 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 2560 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2199 else 2561 else
2200 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 2562 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2201 2563
2202 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 2564 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2203 cmd->code = code; 2565 cmd->code = code;
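
Many hunks in this diff replace cpu_to_le16() with __constant_cpu_to_le16() for compile-time constants; that variant folds the conversion into the constant itself and can appear in static initializers, and on little-endian hosts it is an identity. The userspace macro below illustrates only the big-endian-host case and is not the kernel's definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Compile-time 16-bit byte swap; usable in a static initializer,
     * unlike a function call. */
    #define CONST_SWAP16(x) ((uint16_t)((((x) & 0x00ffU) << 8) | \
                                        (((x) & 0xff00U) >> 8)))

    static const uint16_t wire_value = CONST_SWAP16(0x0001);

    int main(void)
    {
        printf("0x%04x\n", wire_value);  /* prints 0x0100 */
        return 0;
    }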
@@ -2265,7 +2627,7 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
2265 break; 2627 break;
2266 } 2628 }
2267 2629
2268 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val); 2630 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2269 return len; 2631 return len;
2270} 2632}
2271 2633
@@ -2273,7 +2635,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2273{ 2635{
2274 struct l2cap_conf_opt *opt = *ptr; 2636 struct l2cap_conf_opt *opt = *ptr;
2275 2637
2276 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val); 2638 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2277 2639
2278 opt->type = type; 2640 opt->type = type;
2279 opt->len = len; 2641 opt->len = len;
@@ -2309,8 +2671,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2309 efs.stype = chan->local_stype; 2671 efs.stype = chan->local_stype;
2310 efs.msdu = cpu_to_le16(chan->local_msdu); 2672 efs.msdu = cpu_to_le16(chan->local_msdu);
2311 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 2673 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2312 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 2674 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2313 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 2675 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2314 break; 2676 break;
2315 2677
2316 case L2CAP_MODE_STREAMING: 2678 case L2CAP_MODE_STREAMING:
@@ -2333,20 +2695,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2333static void l2cap_ack_timeout(struct work_struct *work) 2695static void l2cap_ack_timeout(struct work_struct *work)
2334{ 2696{
2335 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 2697 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2336 ack_timer.work); 2698 ack_timer.work);
2699 u16 frames_to_ack;
2337 2700
2338 BT_DBG("chan %p", chan); 2701 BT_DBG("chan %p", chan);
2339 2702
2340 l2cap_chan_lock(chan); 2703 l2cap_chan_lock(chan);
2341 2704
2342 __l2cap_send_ack(chan); 2705 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2706 chan->last_acked_seq);
2343 2707
2344 l2cap_chan_unlock(chan); 2708 if (frames_to_ack)
2709 l2cap_send_rr_or_rnr(chan, 0);
2345 2710
2711 l2cap_chan_unlock(chan);
2346 l2cap_chan_put(chan); 2712 l2cap_chan_put(chan);
2347} 2713}
2348 2714
2349static inline int l2cap_ertm_init(struct l2cap_chan *chan) 2715int l2cap_ertm_init(struct l2cap_chan *chan)
2350{ 2716{
2351 int err; 2717 int err;
2352 2718
@@ -2355,7 +2721,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2355 chan->expected_ack_seq = 0; 2721 chan->expected_ack_seq = 0;
2356 chan->unacked_frames = 0; 2722 chan->unacked_frames = 0;
2357 chan->buffer_seq = 0; 2723 chan->buffer_seq = 0;
2358 chan->num_acked = 0;
2359 chan->frames_sent = 0; 2724 chan->frames_sent = 0;
2360 chan->last_acked_seq = 0; 2725 chan->last_acked_seq = 0;
2361 chan->sdu = NULL; 2726 chan->sdu = NULL;
@@ -2376,12 +2741,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2376 2741
2377 skb_queue_head_init(&chan->srej_q); 2742 skb_queue_head_init(&chan->srej_q);
2378 2743
2379 INIT_LIST_HEAD(&chan->srej_l);
2380 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); 2744 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 if (err < 0) 2745 if (err < 0)
2382 return err; 2746 return err;
2383 2747
2384 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); 2748 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2749 if (err < 0)
2750 l2cap_seq_list_free(&chan->srej_list);
2751
2752 return err;
2385} 2753}
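
The reworked l2cap_ertm_init() above now unwinds its first allocation when the second l2cap_seq_list_init() fails, so the SREJ list no longer leaks on that error path. A userspace sketch of the same init/unwind pattern; names and sizes are illustrative:

    #include <errno.h>
    #include <stdlib.h>

    struct seq_list { unsigned short *seq; };

    static int seq_list_init(struct seq_list *l, size_t n)
    {
        l->seq = calloc(n, sizeof(*l->seq));
        return l->seq ? 0 : -ENOMEM;
    }

    static void seq_list_free(struct seq_list *l)
    {
        free(l->seq);
        l->seq = NULL;
    }

    static int ertm_init(struct seq_list *srej, struct seq_list *retrans)
    {
        int err = seq_list_init(srej, 64);
        if (err < 0)
            return err;

        err = seq_list_init(retrans, 64);
        if (err < 0)
            seq_list_free(srej);  /* unwind the first init on failure */

        return err;
    }

    int main(void)
    {
        struct seq_list srej, retrans;

        if (ertm_init(&srej, &retrans) < 0)
            return 1;
        seq_list_free(&retrans);
        seq_list_free(&srej);
        return 0;
    }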
2386 2754
2387static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2755static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2419,6 +2787,7 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2419 L2CAP_DEFAULT_TX_WINDOW); 2787 L2CAP_DEFAULT_TX_WINDOW);
2420 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 2788 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2421 } 2789 }
2790 chan->ack_win = chan->tx_win;
2422} 2791}
2423 2792
2424static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) 2793static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
@@ -2507,6 +2876,7 @@ done:
2507 break; 2876 break;
2508 2877
2509 case L2CAP_MODE_STREAMING: 2878 case L2CAP_MODE_STREAMING:
2879 l2cap_txwin_setup(chan);
2510 rfc.mode = L2CAP_MODE_STREAMING; 2880 rfc.mode = L2CAP_MODE_STREAMING;
2511 rfc.txwin_size = 0; 2881 rfc.txwin_size = 0;
2512 rfc.max_transmit = 0; 2882 rfc.max_transmit = 0;
@@ -2537,7 +2907,7 @@ done:
2537 } 2907 }
2538 2908
2539 req->dcid = cpu_to_le16(chan->dcid); 2909 req->dcid = cpu_to_le16(chan->dcid);
2540 req->flags = cpu_to_le16(0); 2910 req->flags = __constant_cpu_to_le16(0);
2541 2911
2542 return ptr - data; 2912 return ptr - data;
2543} 2913}
@@ -2757,7 +3127,7 @@ done:
2757 } 3127 }
2758 rsp->scid = cpu_to_le16(chan->dcid); 3128 rsp->scid = cpu_to_le16(chan->dcid);
2759 rsp->result = cpu_to_le16(result); 3129 rsp->result = cpu_to_le16(result);
2760 rsp->flags = cpu_to_le16(0x0000); 3130 rsp->flags = __constant_cpu_to_le16(0);
2761 3131
2762 return ptr - data; 3132 return ptr - data;
2763} 3133}
@@ -2807,10 +3177,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2807 break; 3177 break;
2808 3178
2809 case L2CAP_CONF_EWS: 3179 case L2CAP_CONF_EWS:
2810 chan->tx_win = min_t(u16, val, 3180 chan->ack_win = min_t(u16, val, chan->ack_win);
2811 L2CAP_DEFAULT_EXT_WINDOW);
2812 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2813 chan->tx_win); 3182 chan->tx_win);
2814 break; 3183 break;
2815 3184
2816 case L2CAP_CONF_EFS: 3185 case L2CAP_CONF_EFS:
@@ -2839,6 +3208,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2839 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3208 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2840 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3209 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2841 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3210 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3211 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3212 chan->ack_win = min_t(u16, chan->ack_win,
3213 rfc.txwin_size);
2842 3214
2843 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3215 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2844 chan->local_msdu = le16_to_cpu(efs.msdu); 3216 chan->local_msdu = le16_to_cpu(efs.msdu);
@@ -2856,7 +3228,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2856 } 3228 }
2857 3229
2858 req->dcid = cpu_to_le16(chan->dcid); 3230 req->dcid = cpu_to_le16(chan->dcid);
2859 req->flags = cpu_to_le16(0x0000); 3231 req->flags = __constant_cpu_to_le16(0);
2860 3232
2861 return ptr - data; 3233 return ptr - data;
2862} 3234}
@@ -2883,8 +3255,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2883 3255
2884 rsp.scid = cpu_to_le16(chan->dcid); 3256 rsp.scid = cpu_to_le16(chan->dcid);
2885 rsp.dcid = cpu_to_le16(chan->scid); 3257 rsp.dcid = cpu_to_le16(chan->scid);
2886 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3258 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
2887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3259 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
2888 l2cap_send_cmd(conn, chan->ident, 3260 l2cap_send_cmd(conn, chan->ident,
2889 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3261 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2890 3262
@@ -2900,7 +3272,17 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2900{ 3272{
2901 int type, olen; 3273 int type, olen;
2902 unsigned long val; 3274 unsigned long val;
2903 struct l2cap_conf_rfc rfc; 3275 /* Use sane default values in case a misbehaving remote device
3276 * did not send an RFC or extended window size option.
3277 */
3278 u16 txwin_ext = chan->ack_win;
3279 struct l2cap_conf_rfc rfc = {
3280 .mode = chan->mode,
3281 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3282 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3283 .max_pdu_size = cpu_to_le16(chan->imtu),
3284 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3285 };
2904 3286
2905 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); 3287 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2906 3288
@@ -2914,26 +3296,23 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2914 case L2CAP_CONF_RFC: 3296 case L2CAP_CONF_RFC:
2915 if (olen == sizeof(rfc)) 3297 if (olen == sizeof(rfc))
2916 memcpy(&rfc, (void *)val, olen); 3298 memcpy(&rfc, (void *)val, olen);
2917 goto done; 3299 break;
3300 case L2CAP_CONF_EWS:
3301 txwin_ext = val;
3302 break;
2918 } 3303 }
2919 } 3304 }
2920 3305
2921 /* Use sane default values in case a misbehaving remote device
2922 * did not send an RFC option.
2923 */
2924 rfc.mode = chan->mode;
2925 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2926 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2927 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2928
2929 BT_ERR("Expected RFC option was not found, using defaults");
2930
2931done:
2932 switch (rfc.mode) { 3306 switch (rfc.mode) {
2933 case L2CAP_MODE_ERTM: 3307 case L2CAP_MODE_ERTM:
2934 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3308 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2935 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3309 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2936 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3310 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3311 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3312 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3313 else
3314 chan->ack_win = min_t(u16, chan->ack_win,
3315 rfc.txwin_size);
2937 break; 3316 break;
2938 case L2CAP_MODE_STREAMING: 3317 case L2CAP_MODE_STREAMING:
2939 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3318 chan->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2986,7 +3365,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2986 lock_sock(parent); 3365 lock_sock(parent);
2987 3366
2988 /* Check if the ACL is secure enough (if not SDP) */ 3367 /* Check if the ACL is secure enough (if not SDP) */
2989 if (psm != cpu_to_le16(0x0001) && 3368 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
2990 !hci_conn_check_link_mode(conn->hcon)) { 3369 !hci_conn_check_link_mode(conn->hcon)) {
2991 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3370 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2992 result = L2CAP_CR_SEC_BLOCK; 3371 result = L2CAP_CR_SEC_BLOCK;
@@ -2995,25 +3374,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2995 3374
2996 result = L2CAP_CR_NO_MEM; 3375 result = L2CAP_CR_NO_MEM;
2997 3376
 2998 /* Check for backlog size */ 3377 /* Check if we already have a channel with that dcid */
2999 if (sk_acceptq_is_full(parent)) { 3378 if (__l2cap_get_chan_by_dcid(conn, scid))
3000 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3001 goto response; 3379 goto response;
3002 }
3003 3380
3004 chan = pchan->ops->new_connection(pchan->data); 3381 chan = pchan->ops->new_connection(pchan);
3005 if (!chan) 3382 if (!chan)
3006 goto response; 3383 goto response;
3007 3384
3008 sk = chan->sk; 3385 sk = chan->sk;
3009 3386
3010 /* Check if we already have channel with that dcid */
3011 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3012 sock_set_flag(sk, SOCK_ZAPPED);
3013 chan->ops->close(chan->data);
3014 goto response;
3015 }
3016
3017 hci_conn_hold(conn->hcon); 3387 hci_conn_hold(conn->hcon);
3018 3388
3019 bacpy(&bt_sk(sk)->src, conn->src); 3389 bacpy(&bt_sk(sk)->src, conn->src);
@@ -3067,7 +3437,7 @@ sendresp:
3067 3437
3068 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3438 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3069 struct l2cap_info_req info; 3439 struct l2cap_info_req info;
3070 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3440 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3071 3441
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3442 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3073 conn->info_ident = l2cap_get_ident(conn); 3443 conn->info_ident = l2cap_get_ident(conn);
@@ -3189,7 +3559,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3189 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3559 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3190 struct l2cap_cmd_rej_cid rej; 3560 struct l2cap_cmd_rej_cid rej;
3191 3561
3192 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 3562 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3193 rej.scid = cpu_to_le16(chan->scid); 3563 rej.scid = cpu_to_le16(chan->scid);
3194 rej.dcid = cpu_to_le16(chan->dcid); 3564 rej.dcid = cpu_to_le16(chan->dcid);
3195 3565
@@ -3211,11 +3581,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3211 memcpy(chan->conf_req + chan->conf_len, req->data, len); 3581 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3212 chan->conf_len += len; 3582 chan->conf_len += len;
3213 3583
3214 if (flags & 0x0001) { 3584 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3215 /* Incomplete config. Send empty response. */ 3585 /* Incomplete config. Send empty response. */
3216 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3586 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3217 l2cap_build_conf_rsp(chan, rsp, 3587 l2cap_build_conf_rsp(chan, rsp,
3218 L2CAP_CONF_SUCCESS, 0x0001), rsp); 3588 L2CAP_CONF_SUCCESS, flags), rsp);
3219 goto unlock; 3589 goto unlock;
3220 } 3590 }
3221 3591
@@ -3238,8 +3608,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3238 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 3608 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3239 set_default_fcs(chan); 3609 set_default_fcs(chan);
3240 3610
3241 l2cap_state_change(chan, BT_CONNECTED);
3242
3243 if (chan->mode == L2CAP_MODE_ERTM || 3611 if (chan->mode == L2CAP_MODE_ERTM ||
3244 chan->mode == L2CAP_MODE_STREAMING) 3612 chan->mode == L2CAP_MODE_STREAMING)
3245 err = l2cap_ertm_init(chan); 3613 err = l2cap_ertm_init(chan);
@@ -3271,7 +3639,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3271 3639
3272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3640 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3273 l2cap_build_conf_rsp(chan, rsp, 3641 l2cap_build_conf_rsp(chan, rsp,
3274 L2CAP_CONF_SUCCESS, 0x0000), rsp); 3642 L2CAP_CONF_SUCCESS, flags), rsp);
3275 } 3643 }
3276 3644
3277unlock: 3645unlock:
@@ -3362,7 +3730,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3362 goto done; 3730 goto done;
3363 } 3731 }
3364 3732
3365 if (flags & 0x01) 3733 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3366 goto done; 3734 goto done;
3367 3735
3368 set_bit(CONF_INPUT_DONE, &chan->conf_state); 3736 set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3370,7 +3738,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3370 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 3738 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3371 set_default_fcs(chan); 3739 set_default_fcs(chan);
3372 3740
3373 l2cap_state_change(chan, BT_CONNECTED);
3374 if (chan->mode == L2CAP_MODE_ERTM || 3741 if (chan->mode == L2CAP_MODE_ERTM ||
3375 chan->mode == L2CAP_MODE_STREAMING) 3742 chan->mode == L2CAP_MODE_STREAMING)
3376 err = l2cap_ertm_init(chan); 3743 err = l2cap_ertm_init(chan);
@@ -3424,7 +3791,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3424 3791
3425 l2cap_chan_unlock(chan); 3792 l2cap_chan_unlock(chan);
3426 3793
3427 chan->ops->close(chan->data); 3794 chan->ops->close(chan);
3428 l2cap_chan_put(chan); 3795 l2cap_chan_put(chan);
3429 3796
3430 mutex_unlock(&conn->chan_lock); 3797 mutex_unlock(&conn->chan_lock);
@@ -3458,7 +3825,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3458 3825
3459 l2cap_chan_unlock(chan); 3826 l2cap_chan_unlock(chan);
3460 3827
3461 chan->ops->close(chan->data); 3828 chan->ops->close(chan);
3462 l2cap_chan_put(chan); 3829 l2cap_chan_put(chan);
3463 3830
3464 mutex_unlock(&conn->chan_lock); 3831 mutex_unlock(&conn->chan_lock);
@@ -3479,8 +3846,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3479 u8 buf[8]; 3846 u8 buf[8];
3480 u32 feat_mask = l2cap_feat_mask; 3847 u32 feat_mask = l2cap_feat_mask;
3481 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3848 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3482 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3849 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3483 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3850 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3484 if (!disable_ertm) 3851 if (!disable_ertm)
3485 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3852 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3486 | L2CAP_FEAT_FCS; 3853 | L2CAP_FEAT_FCS;
@@ -3500,15 +3867,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3500 else 3867 else
3501 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3868 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3502 3869
3503 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3870 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3504 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3871 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3505 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3872 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3506 l2cap_send_cmd(conn, cmd->ident, 3873 l2cap_send_cmd(conn, cmd->ident,
3507 L2CAP_INFO_RSP, sizeof(buf), buf); 3874 L2CAP_INFO_RSP, sizeof(buf), buf);
3508 } else { 3875 } else {
3509 struct l2cap_info_rsp rsp; 3876 struct l2cap_info_rsp rsp;
3510 rsp.type = cpu_to_le16(type); 3877 rsp.type = cpu_to_le16(type);
3511 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 3878 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3512 l2cap_send_cmd(conn, cmd->ident, 3879 l2cap_send_cmd(conn, cmd->ident,
3513 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3880 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3514 } 3881 }
@@ -3548,7 +3915,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3548 3915
3549 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3916 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3550 struct l2cap_info_req req; 3917 struct l2cap_info_req req;
3551 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3918 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3552 3919
3553 conn->info_ident = l2cap_get_ident(conn); 3920 conn->info_ident = l2cap_get_ident(conn);
3554 3921
@@ -3591,7 +3958,7 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3591 psm = le16_to_cpu(req->psm); 3958 psm = le16_to_cpu(req->psm);
3592 scid = le16_to_cpu(req->scid); 3959 scid = le16_to_cpu(req->scid);
3593 3960
3594 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); 3961 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3595 3962
3596 /* Placeholder: Always reject */ 3963 /* Placeholder: Always reject */
3597 rsp.dcid = 0; 3964 rsp.dcid = 0;
@@ -3614,11 +3981,11 @@ static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3614} 3981}
3615 3982
3616static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, 3983static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3617 u16 icid, u16 result) 3984 u16 icid, u16 result)
3618{ 3985{
3619 struct l2cap_move_chan_rsp rsp; 3986 struct l2cap_move_chan_rsp rsp;
3620 3987
3621 BT_DBG("icid %d, result %d", icid, result); 3988 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3622 3989
3623 rsp.icid = cpu_to_le16(icid); 3990 rsp.icid = cpu_to_le16(icid);
3624 rsp.result = cpu_to_le16(result); 3991 rsp.result = cpu_to_le16(result);
@@ -3627,12 +3994,13 @@ static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3627} 3994}
3628 3995
3629static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, 3996static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3630 struct l2cap_chan *chan, u16 icid, u16 result) 3997 struct l2cap_chan *chan,
3998 u16 icid, u16 result)
3631{ 3999{
3632 struct l2cap_move_chan_cfm cfm; 4000 struct l2cap_move_chan_cfm cfm;
3633 u8 ident; 4001 u8 ident;
3634 4002
3635 BT_DBG("icid %d, result %d", icid, result); 4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3636 4004
3637 ident = l2cap_get_ident(conn); 4005 ident = l2cap_get_ident(conn);
3638 if (chan) 4006 if (chan)
@@ -3645,18 +4013,19 @@ static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3645} 4013}
3646 4014
3647static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, 4015static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3648 u16 icid) 4016 u16 icid)
3649{ 4017{
3650 struct l2cap_move_chan_cfm_rsp rsp; 4018 struct l2cap_move_chan_cfm_rsp rsp;
3651 4019
3652 BT_DBG("icid %d", icid); 4020 BT_DBG("icid 0x%4.4x", icid);
3653 4021
3654 rsp.icid = cpu_to_le16(icid); 4022 rsp.icid = cpu_to_le16(icid);
3655 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); 4023 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3656} 4024}
3657 4025
3658static inline int l2cap_move_channel_req(struct l2cap_conn *conn, 4026static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3659 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4027 struct l2cap_cmd_hdr *cmd,
4028 u16 cmd_len, void *data)
3660{ 4029{
3661 struct l2cap_move_chan_req *req = data; 4030 struct l2cap_move_chan_req *req = data;
3662 u16 icid = 0; 4031 u16 icid = 0;
@@ -3667,7 +4036,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3667 4036
3668 icid = le16_to_cpu(req->icid); 4037 icid = le16_to_cpu(req->icid);
3669 4038
3670 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); 4039 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
3671 4040
3672 if (!enable_hs) 4041 if (!enable_hs)
3673 return -EINVAL; 4042 return -EINVAL;
@@ -3679,7 +4048,8 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3679} 4048}
3680 4049
3681static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, 4050static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3682 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4051 struct l2cap_cmd_hdr *cmd,
4052 u16 cmd_len, void *data)
3683{ 4053{
3684 struct l2cap_move_chan_rsp *rsp = data; 4054 struct l2cap_move_chan_rsp *rsp = data;
3685 u16 icid, result; 4055 u16 icid, result;
@@ -3690,7 +4060,7 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3690 icid = le16_to_cpu(rsp->icid); 4060 icid = le16_to_cpu(rsp->icid);
3691 result = le16_to_cpu(rsp->result); 4061 result = le16_to_cpu(rsp->result);
3692 4062
3693 BT_DBG("icid %d, result %d", icid, result); 4063 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3694 4064
3695 /* Placeholder: Always unconfirmed */ 4065 /* Placeholder: Always unconfirmed */
3696 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); 4066 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
@@ -3699,7 +4069,8 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3699} 4069}
3700 4070
3701static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, 4071static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3702 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4072 struct l2cap_cmd_hdr *cmd,
4073 u16 cmd_len, void *data)
3703{ 4074{
3704 struct l2cap_move_chan_cfm *cfm = data; 4075 struct l2cap_move_chan_cfm *cfm = data;
3705 u16 icid, result; 4076 u16 icid, result;
@@ -3710,7 +4081,7 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3710 icid = le16_to_cpu(cfm->icid); 4081 icid = le16_to_cpu(cfm->icid);
3711 result = le16_to_cpu(cfm->result); 4082 result = le16_to_cpu(cfm->result);
3712 4083
3713 BT_DBG("icid %d, result %d", icid, result); 4084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3714 4085
3715 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 4086 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3716 4087
@@ -3718,7 +4089,8 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3718} 4089}
3719 4090
3720static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, 4091static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3721 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4092 struct l2cap_cmd_hdr *cmd,
4093 u16 cmd_len, void *data)
3722{ 4094{
3723 struct l2cap_move_chan_cfm_rsp *rsp = data; 4095 struct l2cap_move_chan_cfm_rsp *rsp = data;
3724 u16 icid; 4096 u16 icid;
@@ -3728,7 +4100,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3728 4100
3729 icid = le16_to_cpu(rsp->icid); 4101 icid = le16_to_cpu(rsp->icid);
3730 4102
3731 BT_DBG("icid %d", icid); 4103 BT_DBG("icid 0x%4.4x", icid);
3732 4104
3733 return 0; 4105 return 0;
3734} 4106}
@@ -3783,9 +4155,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3783 4155
3784 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 4156 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3785 if (err) 4157 if (err)
3786 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 4158 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3787 else 4159 else
3788 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 4160 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3789 4161
3790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 4162 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3791 sizeof(rsp), &rsp); 4163 sizeof(rsp), &rsp);
@@ -3933,7 +4305,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3933 BT_ERR("Wrong link type (%d)", err); 4305 BT_ERR("Wrong link type (%d)", err);
3934 4306
3935 /* FIXME: Map err to a valid reason */ 4307 /* FIXME: Map err to a valid reason */
3936 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 4308 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3937 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4309 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3938 } 4310 }
3939 4311
@@ -3965,65 +4337,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3965 return 0; 4337 return 0;
3966} 4338}
3967 4339
3968static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 4340static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3969{ 4341{
3970 u32 control = 0; 4342 struct l2cap_ctrl control;
3971 4343
3972 chan->frames_sent = 0; 4344 BT_DBG("chan %p", chan);
3973 4345
3974 control |= __set_reqseq(chan, chan->buffer_seq); 4346 memset(&control, 0, sizeof(control));
4347 control.sframe = 1;
4348 control.final = 1;
4349 control.reqseq = chan->buffer_seq;
4350 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3975 4351
3976 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4352 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3977 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 4353 control.super = L2CAP_SUPER_RNR;
3978 l2cap_send_sframe(chan, control); 4354 l2cap_send_sframe(chan, &control);
3979 set_bit(CONN_RNR_SENT, &chan->conn_state);
3980 } 4355 }
3981 4356
3982 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 4357 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3983 l2cap_retransmit_frames(chan); 4358 chan->unacked_frames > 0)
4359 __set_retrans_timer(chan);
3984 4360
4361 /* Send pending iframes */
3985 l2cap_ertm_send(chan); 4362 l2cap_ertm_send(chan);
3986 4363
3987 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 4364 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3988 chan->frames_sent == 0) { 4365 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
3989 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 4366 /* F-bit wasn't sent in an s-frame or i-frame yet, so
3990 l2cap_send_sframe(chan, control); 4367 * send it now.
3991 } 4368 */
3992} 4369 control.super = L2CAP_SUPER_RR;
3993 4370 l2cap_send_sframe(chan, &control);
3994static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3995{
3996 struct sk_buff *next_skb;
3997 int tx_seq_offset, next_tx_seq_offset;
3998
3999 bt_cb(skb)->control.txseq = tx_seq;
4000 bt_cb(skb)->control.sar = sar;
4001
4002 next_skb = skb_peek(&chan->srej_q);
4003
4004 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4005
4006 while (next_skb) {
4007 if (bt_cb(next_skb)->control.txseq == tx_seq)
4008 return -EINVAL;
4009
4010 next_tx_seq_offset = __seq_offset(chan,
4011 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4012
4013 if (next_tx_seq_offset > tx_seq_offset) {
4014 __skb_queue_before(&chan->srej_q, next_skb, skb);
4015 return 0;
4016 }
4017
4018 if (skb_queue_is_last(&chan->srej_q, next_skb))
4019 next_skb = NULL;
4020 else
4021 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4022 } 4371 }
4023
4024 __skb_queue_tail(&chan->srej_q, skb);
4025
4026 return 0;
4027} 4372}
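
The ordered insert into srej_q removed above is no longer needed: judging by the l2cap_ertm_seq_in_queue() calls elsewhere in this diff, the rewritten code stores buffered frames unordered and looks them up by txseq when one is wanted. A sketch of such a lookup over illustrative list types:

    #include <stddef.h>
    #include <stdio.h>

    struct frame {
        unsigned short txseq;
        struct frame *next;
    };

    /* Linear scan for the frame carrying a given txseq, if any. */
    static struct frame *seq_in_queue(struct frame *head, unsigned short seq)
    {
        for (; head; head = head->next)
            if (head->txseq == seq)
                return head;
        return NULL;
    }

    int main(void)
    {
        struct frame f3 = { 3, NULL }, f1 = { 1, &f3 }, f0 = { 0, &f1 };

        printf("seq 3 %sfound\n", seq_in_queue(&f0, 3) ? "" : "not ");
        printf("seq 2 %sfound\n", seq_in_queue(&f0, 2) ? "" : "not ");
        return 0;
    }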
4028 4373
4029static void append_skb_frag(struct sk_buff *skb, 4374static void append_skb_frag(struct sk_buff *skb,
@@ -4045,16 +4390,17 @@ static void append_skb_frag(struct sk_buff *skb,
4045 skb->truesize += new_frag->truesize; 4390 skb->truesize += new_frag->truesize;
4046} 4391}
4047 4392
4048static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 4393static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4394 struct l2cap_ctrl *control)
4049{ 4395{
4050 int err = -EINVAL; 4396 int err = -EINVAL;
4051 4397
4052 switch (__get_ctrl_sar(chan, control)) { 4398 switch (control->sar) {
4053 case L2CAP_SAR_UNSEGMENTED: 4399 case L2CAP_SAR_UNSEGMENTED:
4054 if (chan->sdu) 4400 if (chan->sdu)
4055 break; 4401 break;
4056 4402
4057 err = chan->ops->recv(chan->data, skb); 4403 err = chan->ops->recv(chan, skb);
4058 break; 4404 break;
4059 4405
4060 case L2CAP_SAR_START: 4406 case L2CAP_SAR_START:
@@ -4104,7 +4450,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4104 if (chan->sdu->len != chan->sdu_len) 4450 if (chan->sdu->len != chan->sdu_len)
4105 break; 4451 break;
4106 4452
4107 err = chan->ops->recv(chan->data, chan->sdu); 4453 err = chan->ops->recv(chan, chan->sdu);
4108 4454
4109 if (!err) { 4455 if (!err) {
4110 /* Reassembly complete */ 4456 /* Reassembly complete */
@@ -4126,448 +4472,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4126 return err; 4472 return err;
4127} 4473}
4128 4474
4129static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4475void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4130{ 4476{
4131 BT_DBG("chan %p, Enter local busy", chan); 4477 u8 event;
4132 4478
4133 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4479 if (chan->mode != L2CAP_MODE_ERTM)
4134 l2cap_seq_list_clear(&chan->srej_list); 4480 return;
4135 4481
4136 __set_ack_timer(chan); 4482 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4483 l2cap_tx(chan, NULL, NULL, event);
4137} 4484}
4138 4485
4139static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4486static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4140{ 4487{
4141 u32 control; 4488 int err = 0;
4142 4489 /* Pass sequential frames to l2cap_reassemble_sdu()
4143 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4490 * until a gap is encountered.
4144 goto done; 4491 */
4145 4492
4146 control = __set_reqseq(chan, chan->buffer_seq); 4493 BT_DBG("chan %p", chan);
4147 control |= __set_ctrl_poll(chan);
4148 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4149 l2cap_send_sframe(chan, control);
4150 chan->retry_count = 1;
4151 4494
4152 __clear_retrans_timer(chan); 4495 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4153 __set_monitor_timer(chan); 4496 struct sk_buff *skb;
4497 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4498 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4154 4499
4155 set_bit(CONN_WAIT_F, &chan->conn_state); 4500 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4156 4501
4157done: 4502 if (!skb)
4158 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4503 break;
4159 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4160 4504
4161 BT_DBG("chan %p, Exit local busy", chan); 4505 skb_unlink(skb, &chan->srej_q);
4162} 4506 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4507 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4508 if (err)
4509 break;
4510 }
4163 4511
4164void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 4512 if (skb_queue_empty(&chan->srej_q)) {
4165{ 4513 chan->rx_state = L2CAP_RX_STATE_RECV;
4166 if (chan->mode == L2CAP_MODE_ERTM) { 4514 l2cap_send_ack(chan);
4167 if (busy)
4168 l2cap_ertm_enter_local_busy(chan);
4169 else
4170 l2cap_ertm_exit_local_busy(chan);
4171 } 4515 }
4516
4517 return err;
4172} 4518}
4173 4519
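l2cap_rx_queued_iframes() above is the delivery half of selective repeat: frames that were parked on srej_q while a hole existed are handed up strictly in sequence, and the loop stops at the first number still missing. The same idea as a standalone sketch over a plain array, with __next_seq() reduced to a modular increment (all names below are illustrative, not from the patch):

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MOD 64                 /* ERTM ext. control uses 2^14; 64 keeps the demo small */

static bool buffered[SEQ_MOD];     /* stand-in for frames waiting on srej_q */

/* Deliver in-sequence frames starting at *buffer_seq; stop at the first gap. */
static void drain_in_order(unsigned *buffer_seq)
{
        while (buffered[*buffer_seq]) {
                printf("deliver seq %u\n", *buffer_seq);
                buffered[*buffer_seq] = false;
                *buffer_seq = (*buffer_seq + 1) % SEQ_MOD;      /* __next_seq() */
        }
}

int main(void)
{
        unsigned buffer_seq = 62;

        buffered[62] = buffered[63] = buffered[0] = true;       /* 62, 63, 0 arrived */
        buffered[2] = true;                     /* parked behind missing seq 1 */
        drain_in_order(&buffer_seq);            /* delivers 62, 63, 0; stops at the gap */
        return 0;
}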
4174static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 4520static void l2cap_handle_srej(struct l2cap_chan *chan,
4521 struct l2cap_ctrl *control)
4175{ 4522{
4176 struct sk_buff *skb; 4523 struct sk_buff *skb;
4177 u32 control;
4178 4524
4179 while ((skb = skb_peek(&chan->srej_q)) && 4525 BT_DBG("chan %p, control %p", chan, control);
4180 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4181 int err;
4182 4526
4183 if (bt_cb(skb)->control.txseq != tx_seq) 4527 if (control->reqseq == chan->next_tx_seq) {
4184 break; 4528 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4530 return;
4531 }
4185 4532
4186 skb = skb_dequeue(&chan->srej_q); 4533 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4187 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4188 err = l2cap_reassemble_sdu(chan, skb, control);
4189 4534
4190 if (err < 0) { 4535 if (skb == NULL) {
4191 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4536 BT_DBG("Seq %d not available for retransmission",
4192 break; 4537 control->reqseq);
4193 } 4538 return;
4539 }
4194 4540
4195 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 4541 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4196 tx_seq = __next_seq(chan, tx_seq); 4542 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4544 return;
4197 } 4545 }
4198}
4199 4546
4200static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4547 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4201{
4202 struct srej_list *l, *tmp;
4203 u32 control;
4204 4548
4205 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 4549 if (control->poll) {
4206 if (l->tx_seq == tx_seq) { 4550 l2cap_pass_to_tx(chan, control);
4207 list_del(&l->list); 4551
4208 kfree(l); 4552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4209 return; 4553 l2cap_retransmit(chan, control);
4554 l2cap_ertm_send(chan);
4555
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4559 }
4560 } else {
4561 l2cap_pass_to_tx_fbit(chan, control);
4562
4563 if (control->final) {
4564 if (chan->srej_save_reqseq != control->reqseq ||
4565 !test_and_clear_bit(CONN_SREJ_ACT,
4566 &chan->conn_state))
4567 l2cap_retransmit(chan, control);
4568 } else {
4569 l2cap_retransmit(chan, control);
4570 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4571 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4572 chan->srej_save_reqseq = control->reqseq;
4573 }
4210 } 4574 }
4211 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4212 control |= __set_reqseq(chan, l->tx_seq);
4213 l2cap_send_sframe(chan, control);
4214 list_del(&l->list);
4215 list_add_tail(&l->list, &chan->srej_l);
4216 } 4575 }
4217} 4576}
4218 4577
4219static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4578static void l2cap_handle_rej(struct l2cap_chan *chan,
4579 struct l2cap_ctrl *control)
4220{ 4580{
4221 struct srej_list *new; 4581 struct sk_buff *skb;
4222 u32 control;
4223
4224 while (tx_seq != chan->expected_tx_seq) {
4225 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4226 control |= __set_reqseq(chan, chan->expected_tx_seq);
4227 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4228 l2cap_send_sframe(chan, control);
4229 4582
4230 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4583 BT_DBG("chan %p, control %p", chan, control);
4231 if (!new)
4232 return -ENOMEM;
4233 4584
4234 new->tx_seq = chan->expected_tx_seq; 4585 if (control->reqseq == chan->next_tx_seq) {
4586 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4587 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4588 return;
4589 }
4235 4590
4236 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4591 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4237 4592
4238 list_add_tail(&new->list, &chan->srej_l); 4593 if (chan->max_tx && skb &&
4594 bt_cb(skb)->control.retries >= chan->max_tx) {
4595 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4597 return;
4239 } 4598 }
4240 4599
4241 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4242 4601
4243 return 0; 4602 l2cap_pass_to_tx(chan, control);
4603
4604 if (control->final) {
4605 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4606 l2cap_retransmit_all(chan, control);
4607 } else {
4608 l2cap_retransmit_all(chan, control);
4609 l2cap_ertm_send(chan);
4610 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4611 set_bit(CONN_REJ_ACT, &chan->conn_state);
4612 }
4244} 4613}
4245 4614
4246static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4615static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4247{ 4616{
4248 u16 tx_seq = __get_txseq(chan, rx_control); 4617 BT_DBG("chan %p, txseq %d", chan, txseq);
4249 u16 req_seq = __get_reqseq(chan, rx_control);
4250 u8 sar = __get_ctrl_sar(chan, rx_control);
4251 int tx_seq_offset, expected_tx_seq_offset;
4252 int num_to_ack = (chan->tx_win/6) + 1;
4253 int err = 0;
4254 4618
4255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 4619 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4256 tx_seq, rx_control); 4620 chan->expected_tx_seq);
4257 4621
4258 if (__is_ctrl_final(chan, rx_control) && 4622 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4259 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4623 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4260 __clear_monitor_timer(chan); 4624 chan->tx_win) {
4261 if (chan->unacked_frames > 0) 4625 /* See notes below regarding "double poll" and
4262 __set_retrans_timer(chan); 4626 * invalid packets.
4263 clear_bit(CONN_WAIT_F, &chan->conn_state); 4627 */
4264 } 4628 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4629 BT_DBG("Invalid/Ignore - after SREJ");
4630 return L2CAP_TXSEQ_INVALID_IGNORE;
4631 } else {
4632 BT_DBG("Invalid - in window after SREJ sent");
4633 return L2CAP_TXSEQ_INVALID;
4634 }
4635 }
4265 4636
4266 chan->expected_ack_seq = req_seq; 4637 if (chan->srej_list.head == txseq) {
4267 l2cap_drop_acked_frames(chan); 4638 BT_DBG("Expected SREJ");
4639 return L2CAP_TXSEQ_EXPECTED_SREJ;
4640 }
4268 4641
4269 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4642 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4643 BT_DBG("Duplicate SREJ - txseq already stored");
4644 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4645 }
4270 4646
4271 /* invalid tx_seq */ 4647 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4272 if (tx_seq_offset >= chan->tx_win) { 4648 BT_DBG("Unexpected SREJ - not requested");
4273 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4649 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4274 goto drop; 4650 }
4275 } 4651 }
4276 4652
4277 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4653 if (chan->expected_tx_seq == txseq) {
4278 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4654 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4279 l2cap_send_ack(chan); 4655 chan->tx_win) {
4280 goto drop; 4656 BT_DBG("Invalid - txseq outside tx window");
4657 return L2CAP_TXSEQ_INVALID;
4658 } else {
4659 BT_DBG("Expected");
4660 return L2CAP_TXSEQ_EXPECTED;
4661 }
4281 } 4662 }
4282 4663
4283 if (tx_seq == chan->expected_tx_seq) 4664 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4284 goto expected; 4665 __seq_offset(chan, chan->expected_tx_seq,
 4666 chan->last_acked_seq)) {
4667 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4668 return L2CAP_TXSEQ_DUPLICATE;
4669 }
4670
4671 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4672 /* A source of invalid packets is a "double poll" condition,
4673 * where delays cause us to send multiple poll packets. If
4674 * the remote stack receives and processes both polls,
4675 * sequence numbers can wrap around in such a way that a
4676 * resent frame has a sequence number that looks like new data
4677 * with a sequence gap. This would trigger an erroneous SREJ
4678 * request.
4679 *
4680 * Fortunately, this is impossible with a tx window that's
4681 * less than half of the maximum sequence number, which allows
4682 * invalid frames to be safely ignored.
4683 *
4684 * With tx window sizes greater than half of the tx window
4685 * maximum, the frame is invalid and cannot be ignored. This
4686 * causes a disconnect.
4687 */
4688
4689 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4690 BT_DBG("Invalid/Ignore - txseq outside tx window");
4691 return L2CAP_TXSEQ_INVALID_IGNORE;
4692 } else {
4693 BT_DBG("Invalid - txseq outside tx window");
4694 return L2CAP_TXSEQ_INVALID;
4695 }
4696 } else {
4697 BT_DBG("Unexpected - txseq indicates missing frames");
4698 return L2CAP_TXSEQ_UNEXPECTED;
4699 }
4700}
4285 4701
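A worked example helps with the "double poll" reasoning above. __seq_offset() measures the modular distance forward from last_acked_seq; valid new data sits at offsets below tx_win, while an already-acked frame from the previous window wraps around to an offset of at least SEQ_MOD - tx_win. With tx_win at most half the sequence space those two ranges cannot overlap, so a stale frame is provably old and can be dropped; with a larger window the ranges collide and the only safe reaction is a disconnect. A sketch with a 16-value sequence space (helpers and values are illustrative):

#include <stdio.h>

#define SEQ_MOD 16

/* Distance from seq2 forward to seq1, modulo the sequence space. */
static unsigned seq_offset(unsigned seq1, unsigned seq2)
{
        return (seq1 - seq2 + SEQ_MOD) % SEQ_MOD;
}

int main(void)
{
        unsigned last_acked_seq = 14, tx_win = 8;       /* tx_win == SEQ_MOD / 2 */

        /* 14 -> 15 -> 0 -> 1: new data at offset 3, inside the window. */
        printf("fresh: offset %u < tx_win %u\n",
               seq_offset(1, last_acked_seq), tx_win);

        /* Frame 6 belongs to the previous window: offset 8 >= tx_win,
         * so it is recognisably stale and safely ignorable. */
        printf("stale: offset %u >= tx_win %u\n",
               seq_offset(6, last_acked_seq), tx_win);
        return 0;
}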
4286 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4702static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4287 struct srej_list *first; 4703 struct l2cap_ctrl *control,
4704 struct sk_buff *skb, u8 event)
4705{
4706 int err = 0;
4707 bool skb_in_use = 0;
4288 4708
4289 first = list_first_entry(&chan->srej_l, 4709 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4290 struct srej_list, list); 4710 event);
4291 if (tx_seq == first->tx_seq) {
4292 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4293 l2cap_check_srej_gap(chan, tx_seq);
4294 4711
4295 list_del(&first->list); 4712 switch (event) {
4296 kfree(first); 4713 case L2CAP_EV_RECV_IFRAME:
4714 switch (l2cap_classify_txseq(chan, control->txseq)) {
4715 case L2CAP_TXSEQ_EXPECTED:
4716 l2cap_pass_to_tx(chan, control);
4297 4717
4298 if (list_empty(&chan->srej_l)) { 4718 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4299 chan->buffer_seq = chan->buffer_seq_srej; 4719 BT_DBG("Busy, discarding expected seq %d",
4300 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 4720 control->txseq);
4301 l2cap_send_ack(chan); 4721 break;
4302 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4303 } 4722 }
4304 } else {
4305 struct srej_list *l;
4306 4723
4307 /* duplicated tx_seq */ 4724 chan->expected_tx_seq = __next_seq(chan,
4308 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 4725 control->txseq);
4309 goto drop; 4726
4727 chan->buffer_seq = chan->expected_tx_seq;
4728 skb_in_use = 1;
4310 4729
4311 list_for_each_entry(l, &chan->srej_l, list) { 4730 err = l2cap_reassemble_sdu(chan, skb, control);
4312 if (l->tx_seq == tx_seq) { 4731 if (err)
4313 l2cap_resend_srejframe(chan, tx_seq); 4732 break;
4314 return 0; 4733
4734 if (control->final) {
4735 if (!test_and_clear_bit(CONN_REJ_ACT,
4736 &chan->conn_state)) {
4737 control->final = 0;
4738 l2cap_retransmit_all(chan, control);
4739 l2cap_ertm_send(chan);
4315 } 4740 }
4316 } 4741 }
4317 4742
4318 err = l2cap_send_srejframe(chan, tx_seq); 4743 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4319 if (err < 0) { 4744 l2cap_send_ack(chan);
4320 l2cap_send_disconn_req(chan->conn, chan, -err); 4745 break;
4321 return err; 4746 case L2CAP_TXSEQ_UNEXPECTED:
4747 l2cap_pass_to_tx(chan, control);
4748
4749 /* Can't issue SREJ frames in the local busy state.
4750 * Drop this frame, it will be seen as missing
4751 * when local busy is exited.
4752 */
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding unexpected seq %d",
4755 control->txseq);
4756 break;
4322 } 4757 }
4323 }
4324 } else {
4325 expected_tx_seq_offset = __seq_offset(chan,
4326 chan->expected_tx_seq, chan->buffer_seq);
4327
4328 /* duplicated tx_seq */
4329 if (tx_seq_offset < expected_tx_seq_offset)
4330 goto drop;
4331 4758
4332 set_bit(CONN_SREJ_SENT, &chan->conn_state); 4759 /* There was a gap in the sequence, so an SREJ
4760 * must be sent for each missing frame. The
4761 * current frame is stored for later use.
4762 */
4763 skb_queue_tail(&chan->srej_q, skb);
4764 skb_in_use = 1;
4765 BT_DBG("Queued %p (queue len %d)", skb,
4766 skb_queue_len(&chan->srej_q));
4333 4767
4334 BT_DBG("chan %p, Enter SREJ", chan); 4768 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4769 l2cap_seq_list_clear(&chan->srej_list);
4770 l2cap_send_srej(chan, control->txseq);
4335 4771
4336 INIT_LIST_HEAD(&chan->srej_l); 4772 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4337 chan->buffer_seq_srej = chan->buffer_seq; 4773 break;
4774 case L2CAP_TXSEQ_DUPLICATE:
4775 l2cap_pass_to_tx(chan, control);
4776 break;
4777 case L2CAP_TXSEQ_INVALID_IGNORE:
4778 break;
4779 case L2CAP_TXSEQ_INVALID:
4780 default:
4781 l2cap_send_disconn_req(chan->conn, chan,
4782 ECONNRESET);
4783 break;
4784 }
4785 break;
4786 case L2CAP_EV_RECV_RR:
4787 l2cap_pass_to_tx(chan, control);
4788 if (control->final) {
4789 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4338 4790
4339 __skb_queue_head_init(&chan->srej_q); 4791 if (!test_and_clear_bit(CONN_REJ_ACT,
4340 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4792 &chan->conn_state)) {
4793 control->final = 0;
4794 l2cap_retransmit_all(chan, control);
4795 }
4341 4796
4342 /* Set P-bit only if there are some I-frames to ack. */ 4797 l2cap_ertm_send(chan);
4343 if (__clear_ack_timer(chan)) 4798 } else if (control->poll) {
4344 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4799 l2cap_send_i_or_rr_or_rnr(chan);
4800 } else {
4801 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4802 &chan->conn_state) &&
4803 chan->unacked_frames)
4804 __set_retrans_timer(chan);
4345 4805
4346 err = l2cap_send_srejframe(chan, tx_seq); 4806 l2cap_ertm_send(chan);
4347 if (err < 0) {
4348 l2cap_send_disconn_req(chan->conn, chan, -err);
4349 return err;
4350 } 4807 }
4808 break;
4809 case L2CAP_EV_RECV_RNR:
4810 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4811 l2cap_pass_to_tx(chan, control);
4812 if (control && control->poll) {
4813 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4814 l2cap_send_rr_or_rnr(chan, 0);
4815 }
4816 __clear_retrans_timer(chan);
4817 l2cap_seq_list_clear(&chan->retrans_list);
4818 break;
4819 case L2CAP_EV_RECV_REJ:
4820 l2cap_handle_rej(chan, control);
4821 break;
4822 case L2CAP_EV_RECV_SREJ:
4823 l2cap_handle_srej(chan, control);
4824 break;
4825 default:
4826 break;
4351 } 4827 }
4352 return 0;
4353
4354expected:
4355 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4356
4357 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4358 bt_cb(skb)->control.txseq = tx_seq;
4359 bt_cb(skb)->control.sar = sar;
4360 __skb_queue_tail(&chan->srej_q, skb);
4361 return 0;
4362 }
4363
4364 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4365 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4366 4828
4367 if (err < 0) { 4829 if (skb && !skb_in_use) {
4368 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4830 BT_DBG("Freeing %p", skb);
4369 return err; 4831 kfree_skb(skb);
4370 } 4832 }
4371 4833
4372 if (__is_ctrl_final(chan, rx_control)) { 4834 return err;
4373 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4835}
4374 l2cap_retransmit_frames(chan);
4375 }
4376 4836
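The L2CAP_TXSEQ_UNEXPECTED branch above is where SREJ recovery begins: every sequence number between the expected frame and the one that actually arrived is selectively rejected, and the early frame itself is parked on srej_q. A toy version of that gap walk (in the patch, l2cap_send_srej() also records each requested seq on srej_list; printing stands in for that bookkeeping):

#include <stdio.h>

#define SEQ_MOD 64

/* SREJ every frame in the gap, then park the out-of-order arrival. */
static void request_missing(unsigned expected, unsigned received)
{
        unsigned seq;

        for (seq = expected; seq != received; seq = (seq + 1) % SEQ_MOD)
                printf("send SREJ for %u\n", seq);
        printf("park frame %u on srej_q\n", received);
}

int main(void)
{
        request_missing(62, 1);         /* expecting 62, got 1: SREJ 62, 63, 0 */
        return 0;
}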
4837static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4838 struct l2cap_ctrl *control,
4839 struct sk_buff *skb, u8 event)
4840{
4841 int err = 0;
4842 u16 txseq = control->txseq;
4843 bool skb_in_use = 0;
4844
4845 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4846 event);
4847
4848 switch (event) {
4849 case L2CAP_EV_RECV_IFRAME:
4850 switch (l2cap_classify_txseq(chan, txseq)) {
4851 case L2CAP_TXSEQ_EXPECTED:
4852 /* Keep frame for reassembly later */
4853 l2cap_pass_to_tx(chan, control);
4854 skb_queue_tail(&chan->srej_q, skb);
4855 skb_in_use = 1;
4856 BT_DBG("Queued %p (queue len %d)", skb,
4857 skb_queue_len(&chan->srej_q));
4858
4859 chan->expected_tx_seq = __next_seq(chan, txseq);
4860 break;
4861 case L2CAP_TXSEQ_EXPECTED_SREJ:
4862 l2cap_seq_list_pop(&chan->srej_list);
4377 4863
4378 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 4864 l2cap_pass_to_tx(chan, control);
4379 if (chan->num_acked == num_to_ack - 1) 4865 skb_queue_tail(&chan->srej_q, skb);
4380 l2cap_send_ack(chan); 4866 skb_in_use = 1;
4381 else 4867 BT_DBG("Queued %p (queue len %d)", skb,
4382 __set_ack_timer(chan); 4868 skb_queue_len(&chan->srej_q));
4383 4869
4384 return 0; 4870 err = l2cap_rx_queued_iframes(chan);
4871 if (err)
4872 break;
4385 4873
4386drop: 4874 break;
4387 kfree_skb(skb); 4875 case L2CAP_TXSEQ_UNEXPECTED:
4388 return 0; 4876 /* Got a frame that can't be reassembled yet.
4389} 4877 * Save it for later, and send SREJs to cover
4878 * the missing frames.
4879 */
4880 skb_queue_tail(&chan->srej_q, skb);
4881 skb_in_use = 1;
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4884
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej(chan, control->txseq);
4887 break;
4888 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4889 /* This frame was requested with an SREJ, but
4890 * some expected retransmitted frames are
4891 * missing. Request retransmission of missing
4892 * SREJ'd frames.
4893 */
4894 skb_queue_tail(&chan->srej_q, skb);
4895 skb_in_use = 1;
4896 BT_DBG("Queued %p (queue len %d)", skb,
4897 skb_queue_len(&chan->srej_q));
4898
4899 l2cap_pass_to_tx(chan, control);
4900 l2cap_send_srej_list(chan, control->txseq);
4901 break;
4902 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4903 /* We've already queued this frame. Drop this copy. */
4904 l2cap_pass_to_tx(chan, control);
4905 break;
4906 case L2CAP_TXSEQ_DUPLICATE:
4907 /* Expecting a later sequence number, so this frame
4908 * was already received. Ignore it completely.
4909 */
4910 break;
4911 case L2CAP_TXSEQ_INVALID_IGNORE:
4912 break;
4913 case L2CAP_TXSEQ_INVALID:
4914 default:
4915 l2cap_send_disconn_req(chan->conn, chan,
4916 ECONNRESET);
4917 break;
4918 }
4919 break;
4920 case L2CAP_EV_RECV_RR:
4921 l2cap_pass_to_tx(chan, control);
4922 if (control->final) {
4923 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4390 4924
4391static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 4925 if (!test_and_clear_bit(CONN_REJ_ACT,
4392{ 4926 &chan->conn_state)) {
4393 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 4927 control->final = 0;
4394 __get_reqseq(chan, rx_control), rx_control); 4928 l2cap_retransmit_all(chan, control);
4929 }
4395 4930
4396 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 4931 l2cap_ertm_send(chan);
4397 l2cap_drop_acked_frames(chan); 4932 } else if (control->poll) {
4933 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4934 &chan->conn_state) &&
4935 chan->unacked_frames) {
4936 __set_retrans_timer(chan);
4937 }
4398 4938
4399 if (__is_ctrl_poll(chan, rx_control)) { 4939 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4400 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4940 l2cap_send_srej_tail(chan);
4401 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4941 } else {
4402 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4942 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4403 (chan->unacked_frames > 0)) 4943 &chan->conn_state) &&
4944 chan->unacked_frames)
4404 __set_retrans_timer(chan); 4945 __set_retrans_timer(chan);
4405 4946
4406 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4947 l2cap_send_ack(chan);
4407 l2cap_send_srejtail(chan); 4948 }
4949 break;
4950 case L2CAP_EV_RECV_RNR:
4951 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4952 l2cap_pass_to_tx(chan, control);
4953 if (control->poll) {
4954 l2cap_send_srej_tail(chan);
4408 } else { 4955 } else {
4409 l2cap_send_i_or_rr_or_rnr(chan); 4956 struct l2cap_ctrl rr_control;
4957 memset(&rr_control, 0, sizeof(rr_control));
4958 rr_control.sframe = 1;
4959 rr_control.super = L2CAP_SUPER_RR;
4960 rr_control.reqseq = chan->buffer_seq;
4961 l2cap_send_sframe(chan, &rr_control);
4410 } 4962 }
4411 4963
4412 } else if (__is_ctrl_final(chan, rx_control)) { 4964 break;
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4965 case L2CAP_EV_RECV_REJ:
4414 4966 l2cap_handle_rej(chan, control);
4415 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4967 break;
4416 l2cap_retransmit_frames(chan); 4968 case L2CAP_EV_RECV_SREJ:
4417 4969 l2cap_handle_srej(chan, control);
4418 } else { 4970 break;
4419 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4971 }
4420 (chan->unacked_frames > 0))
4421 __set_retrans_timer(chan);
4422 4972
4423 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4973 if (skb && !skb_in_use) {
4424 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4974 BT_DBG("Freeing %p", skb);
4425 l2cap_send_ack(chan); 4975 kfree_skb(skb);
4426 else
4427 l2cap_ertm_send(chan);
4428 } 4976 }
4977
4978 return err;
4429} 4979}
4430 4980
4431static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4981static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4432{ 4982{
4433 u16 tx_seq = __get_reqseq(chan, rx_control); 4983 /* Make sure reqseq is for a packet that has been sent but not acked */
4434 4984 u16 unacked;
4435 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4436
4437 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4438
4439 chan->expected_ack_seq = tx_seq;
4440 l2cap_drop_acked_frames(chan);
4441
4442 if (__is_ctrl_final(chan, rx_control)) {
4443 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4444 l2cap_retransmit_frames(chan);
4445 } else {
4446 l2cap_retransmit_frames(chan);
4447 4985
4448 if (test_bit(CONN_WAIT_F, &chan->conn_state)) 4986 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4449 set_bit(CONN_REJ_ACT, &chan->conn_state); 4987 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4450 }
4451} 4988}
4452static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4453{
4454 u16 tx_seq = __get_reqseq(chan, rx_control);
4455
4456 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4457 4989
4458 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4990static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4459 4991 struct sk_buff *skb, u8 event)
4460 if (__is_ctrl_poll(chan, rx_control)) { 4992{
4461 chan->expected_ack_seq = tx_seq; 4993 int err = 0;
4462 l2cap_drop_acked_frames(chan);
4463 4994
4464 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4995 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4465 l2cap_retransmit_one_frame(chan, tx_seq); 4996 control, skb, event, chan->rx_state);
4466 4997
4467 l2cap_ertm_send(chan); 4998 if (__valid_reqseq(chan, control->reqseq)) {
4468 4999 switch (chan->rx_state) {
4469 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 5000 case L2CAP_RX_STATE_RECV:
4470 chan->srej_save_reqseq = tx_seq; 5001 err = l2cap_rx_state_recv(chan, control, skb, event);
4471 set_bit(CONN_SREJ_ACT, &chan->conn_state); 5002 break;
5003 case L2CAP_RX_STATE_SREJ_SENT:
5004 err = l2cap_rx_state_srej_sent(chan, control, skb,
5005 event);
5006 break;
5007 default:
5008 /* shut it down */
5009 break;
4472 } 5010 }
4473 } else if (__is_ctrl_final(chan, rx_control)) {
4474 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4475 chan->srej_save_reqseq == tx_seq)
4476 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4477 else
4478 l2cap_retransmit_one_frame(chan, tx_seq);
4479 } else { 5011 } else {
4480 l2cap_retransmit_one_frame(chan, tx_seq); 5012 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4481 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 5013 control->reqseq, chan->next_tx_seq,
4482 chan->srej_save_reqseq = tx_seq; 5014 chan->expected_ack_seq);
4483 set_bit(CONN_SREJ_ACT, &chan->conn_state); 5015 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4484 }
4485 } 5016 }
5017
5018 return err;
4486} 5019}
4487 5020
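__valid_reqseq() and the l2cap_rx() dispatcher above gate every incoming acknowledgment: reqseq may only ack frames that have been sent but not yet acked, i.e. it must lie between expected_ack_seq and next_tx_seq in modular terms, and anything else tears the connection down. A self-contained restatement of the check (constants and names are illustrative):

#include <assert.h>
#include <stdbool.h>

#define SEQ_MOD 64

static unsigned seq_offset(unsigned seq1, unsigned seq2)
{
        return (seq1 - seq2 + SEQ_MOD) % SEQ_MOD;
}

/* Mirror of __valid_reqseq(): reqseq must point inside the
 * sent-but-unacked range [expected_ack_seq, next_tx_seq]. */
static bool valid_reqseq(unsigned next_tx_seq, unsigned expected_ack_seq,
                         unsigned reqseq)
{
        unsigned unacked = seq_offset(next_tx_seq, expected_ack_seq);

        return seq_offset(next_tx_seq, reqseq) <= unacked;
}

int main(void)
{
        /* Sent 60..63 then 0, 1 (next_tx_seq = 2); nothing acked yet. */
        assert(valid_reqseq(2, 60, 0));         /* acks 60..63: fine */
        assert(valid_reqseq(2, 60, 2));         /* acks everything sent: fine */
        assert(!valid_reqseq(2, 60, 5));        /* acks unsent data: bogus */
        return 0;
}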
4488static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 5021static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5022 struct sk_buff *skb)
4489{ 5023{
4490 u16 tx_seq = __get_reqseq(chan, rx_control); 5024 int err = 0;
4491 5025
4492 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 5026 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5027 chan->rx_state);
4493 5028
4494 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5029 if (l2cap_classify_txseq(chan, control->txseq) ==
4495 chan->expected_ack_seq = tx_seq; 5030 L2CAP_TXSEQ_EXPECTED) {
4496 l2cap_drop_acked_frames(chan); 5031 l2cap_pass_to_tx(chan, control);
4497 5032
4498 if (__is_ctrl_poll(chan, rx_control)) 5033 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4499 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5034 __next_seq(chan, chan->buffer_seq));
4500 5035
4501 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 5036 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4502 __clear_retrans_timer(chan);
4503 if (__is_ctrl_poll(chan, rx_control))
4504 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4505 return;
4506 }
4507 5037
4508 if (__is_ctrl_poll(chan, rx_control)) { 5038 l2cap_reassemble_sdu(chan, skb, control);
4509 l2cap_send_srejtail(chan);
4510 } else { 5039 } else {
4511 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 5040 if (chan->sdu) {
4512 l2cap_send_sframe(chan, rx_control); 5041 kfree_skb(chan->sdu);
4513 } 5042 chan->sdu = NULL;
4514} 5043 }
4515 5044 chan->sdu_last_frag = NULL;
4516static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 5045 chan->sdu_len = 0;
4517{
4518 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4519 5046
4520 if (__is_ctrl_final(chan, rx_control) && 5047 if (skb) {
4521 test_bit(CONN_WAIT_F, &chan->conn_state)) { 5048 BT_DBG("Freeing %p", skb);
4522 __clear_monitor_timer(chan); 5049 kfree_skb(skb);
4523 if (chan->unacked_frames > 0) 5050 }
4524 __set_retrans_timer(chan);
4525 clear_bit(CONN_WAIT_F, &chan->conn_state);
4526 } 5051 }
4527 5052
4528 switch (__get_ctrl_super(chan, rx_control)) { 5053 chan->last_acked_seq = control->txseq;
4529 case L2CAP_SUPER_RR: 5054 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4530 l2cap_data_channel_rrframe(chan, rx_control);
4531 break;
4532
4533 case L2CAP_SUPER_REJ:
4534 l2cap_data_channel_rejframe(chan, rx_control);
4535 break;
4536
4537 case L2CAP_SUPER_SREJ:
4538 l2cap_data_channel_srejframe(chan, rx_control);
4539 break;
4540
4541 case L2CAP_SUPER_RNR:
4542 l2cap_data_channel_rnrframe(chan, rx_control);
4543 break;
4544 }
4545 5055
4546 kfree_skb(skb); 5056 return err;
4547 return 0;
4548} 5057}
4549 5058
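l2cap_stream_rx() above codifies streaming mode's contract: nothing is ever retransmitted, so on a sequence gap the partially assembled SDU is thrown away and the receive counters simply resync to the frame that did arrive. In outline (a paraphrase of the hunk with illustrative helper names, not additional patch code):

        if (classify_txseq(txseq) == TXSEQ_EXPECTED) {
                reassemble_sdu(skb);            /* normal in-order path */
        } else {
                kfree_skb(chan->sdu);           /* gap: discard the partial SDU */
                chan->sdu = NULL;
                kfree_skb(skb);                 /* and the stray frame itself */
        }
        chan->last_acked_seq = txseq;           /* resync unconditionally */
        chan->expected_tx_seq = next_seq(txseq);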
4550static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 5059static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4551{ 5060{
4552 u32 control; 5061 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4553 u16 req_seq; 5062 u16 len;
4554 int len, next_tx_seq_offset, req_seq_offset; 5063 u8 event;
4555 5064
4556 __unpack_control(chan, skb); 5065 __unpack_control(chan, skb);
4557 5066
4558 control = __get_control(chan, skb->data);
4559 skb_pull(skb, __ctrl_size(chan));
4560 len = skb->len; 5067 len = skb->len;
4561 5068
4562 /* 5069 /*
4563 * We can just drop the corrupted I-frame here. 5070 * We can just drop the corrupted I-frame here.
4564 * Receiver will miss it and start proper recovery 5071 * Receiver will miss it and start proper recovery
4565 * procedures and ask retransmission. 5072 * procedures and ask for retransmission.
4566 */ 5073 */
4567 if (l2cap_check_fcs(chan, skb)) 5074 if (l2cap_check_fcs(chan, skb))
4568 goto drop; 5075 goto drop;
4569 5076
4570 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 5077 if (!control->sframe && control->sar == L2CAP_SAR_START)
4571 len -= L2CAP_SDULEN_SIZE; 5078 len -= L2CAP_SDULEN_SIZE;
4572 5079
4573 if (chan->fcs == L2CAP_FCS_CRC16) 5080 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4578,34 +5085,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4578 goto drop; 5085 goto drop;
4579 } 5086 }
4580 5087
4581 req_seq = __get_reqseq(chan, control); 5088 if (!control->sframe) {
4582 5089 int err;
4583 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4584
4585 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4586 chan->expected_ack_seq);
4587 5090
4588 /* check for invalid req-seq */ 5091 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4589 if (req_seq_offset > next_tx_seq_offset) { 5092 control->sar, control->reqseq, control->final,
4590 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5093 control->txseq);
4591 goto drop;
4592 }
4593 5094
4594 if (!__is_sframe(chan, control)) { 5095 /* Validate F-bit - F=0 always valid, F=1 only
4595 if (len < 0) { 5096 * valid in TX WAIT_F
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5097 */
5098 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4597 goto drop; 5099 goto drop;
5100
5101 if (chan->mode != L2CAP_MODE_STREAMING) {
5102 event = L2CAP_EV_RECV_IFRAME;
5103 err = l2cap_rx(chan, control, skb, event);
5104 } else {
5105 err = l2cap_stream_rx(chan, control, skb);
4598 } 5106 }
4599 5107
4600 l2cap_data_channel_iframe(chan, control, skb); 5108 if (err)
5109 l2cap_send_disconn_req(chan->conn, chan,
5110 ECONNRESET);
4601 } else { 5111 } else {
5112 const u8 rx_func_to_event[4] = {
5113 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5114 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5115 };
5116
5117 /* Only I-frames are expected in streaming mode */
5118 if (chan->mode == L2CAP_MODE_STREAMING)
5119 goto drop;
5120
5121 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5122 control->reqseq, control->final, control->poll,
5123 control->super);
5124
4602 if (len != 0) { 5125 if (len != 0) {
4603 BT_ERR("%d", len); 5126 BT_ERR("%d", len);
4604 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5127 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4605 goto drop; 5128 goto drop;
4606 } 5129 }
4607 5130
4608 l2cap_data_channel_sframe(chan, control, skb); 5131 /* Validate F and P bits */
5132 if (control->final && (control->poll ||
5133 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5134 goto drop;
5135
5136 event = rx_func_to_event[control->super];
5137 if (l2cap_rx(chan, control, skb, event))
5138 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4609 } 5139 }
4610 5140
4611 return 0; 5141 return 0;
@@ -4615,19 +5145,27 @@ drop:
4615 return 0; 5145 return 0;
4616} 5146}
4617 5147
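The rx_func_to_event[] table in l2cap_data_rcv() works because the two S-bits of a supervisory frame enumerate RR, REJ, RNR and SREJ as 0 through 3, so control->super can index the event list directly instead of going through a switch. A compact standalone illustration (the ordering matches the initializer in the hunk above; treat the concrete enum values as an assumption beyond that):

#include <stdio.h>

enum super { SUPER_RR, SUPER_REJ, SUPER_RNR, SUPER_SREJ };      /* 2-bit S field */
enum event { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ };

static const enum event rx_func_to_event[4] = {
        EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ
};

int main(void)
{
        enum super s = SUPER_RNR;

        printf("super %d -> event %d\n", s, rx_func_to_event[s]);       /* 2 -> 2 */
        return 0;
}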
4618static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 5148static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5149 struct sk_buff *skb)
4619{ 5150{
4620 struct l2cap_chan *chan; 5151 struct l2cap_chan *chan;
4621 u32 control;
4622 u16 tx_seq;
4623 int len;
4624 5152
4625 chan = l2cap_get_chan_by_scid(conn, cid); 5153 chan = l2cap_get_chan_by_scid(conn, cid);
4626 if (!chan) { 5154 if (!chan) {
4627 BT_DBG("unknown cid 0x%4.4x", cid); 5155 if (cid == L2CAP_CID_A2MP) {
4628 /* Drop packet and return */ 5156 chan = a2mp_channel_create(conn, skb);
4629 kfree_skb(skb); 5157 if (!chan) {
4630 return 0; 5158 kfree_skb(skb);
5159 return;
5160 }
5161
5162 l2cap_chan_lock(chan);
5163 } else {
5164 BT_DBG("unknown cid 0x%4.4x", cid);
5165 /* Drop packet and return */
5166 kfree_skb(skb);
5167 return;
5168 }
4631 } 5169 }
4632 5170
4633 BT_DBG("chan %p, len %d", chan, skb->len); 5171 BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4645,49 +5183,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4645 if (chan->imtu < skb->len) 5183 if (chan->imtu < skb->len)
4646 goto drop; 5184 goto drop;
4647 5185
4648 if (!chan->ops->recv(chan->data, skb)) 5186 if (!chan->ops->recv(chan, skb))
4649 goto done; 5187 goto done;
4650 break; 5188 break;
4651 5189
4652 case L2CAP_MODE_ERTM: 5190 case L2CAP_MODE_ERTM:
4653 l2cap_ertm_data_rcv(chan, skb);
4654
4655 goto done;
4656
4657 case L2CAP_MODE_STREAMING: 5191 case L2CAP_MODE_STREAMING:
4658 control = __get_control(chan, skb->data); 5192 l2cap_data_rcv(chan, skb);
4659 skb_pull(skb, __ctrl_size(chan));
4660 len = skb->len;
4661
4662 if (l2cap_check_fcs(chan, skb))
4663 goto drop;
4664
4665 if (__is_sar_start(chan, control))
4666 len -= L2CAP_SDULEN_SIZE;
4667
4668 if (chan->fcs == L2CAP_FCS_CRC16)
4669 len -= L2CAP_FCS_SIZE;
4670
4671 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4672 goto drop;
4673
4674 tx_seq = __get_txseq(chan, control);
4675
4676 if (chan->expected_tx_seq != tx_seq) {
4677 /* Frame(s) missing - must discard partial SDU */
4678 kfree_skb(chan->sdu);
4679 chan->sdu = NULL;
4680 chan->sdu_last_frag = NULL;
4681 chan->sdu_len = 0;
4682
4683 /* TODO: Notify userland of missing data */
4684 }
4685
4686 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4687
4688 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4689 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4690
4691 goto done; 5193 goto done;
4692 5194
4693 default: 5195 default:
@@ -4700,11 +5202,10 @@ drop:
4700 5202
4701done: 5203done:
4702 l2cap_chan_unlock(chan); 5204 l2cap_chan_unlock(chan);
4703
4704 return 0;
4705} 5205}
4706 5206
4707static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 5207static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5208 struct sk_buff *skb)
4708{ 5209{
4709 struct l2cap_chan *chan; 5210 struct l2cap_chan *chan;
4710 5211
@@ -4720,17 +5221,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4720 if (chan->imtu < skb->len) 5221 if (chan->imtu < skb->len)
4721 goto drop; 5222 goto drop;
4722 5223
4723 if (!chan->ops->recv(chan->data, skb)) 5224 if (!chan->ops->recv(chan, skb))
4724 return 0; 5225 return;
4725 5226
4726drop: 5227drop:
4727 kfree_skb(skb); 5228 kfree_skb(skb);
4728
4729 return 0;
4730} 5229}
4731 5230
4732static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, 5231static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4733 struct sk_buff *skb) 5232 struct sk_buff *skb)
4734{ 5233{
4735 struct l2cap_chan *chan; 5234 struct l2cap_chan *chan;
4736 5235
@@ -4746,13 +5245,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4746 if (chan->imtu < skb->len) 5245 if (chan->imtu < skb->len)
4747 goto drop; 5246 goto drop;
4748 5247
4749 if (!chan->ops->recv(chan->data, skb)) 5248 if (!chan->ops->recv(chan, skb))
4750 return 0; 5249 return;
4751 5250
4752drop: 5251drop:
4753 kfree_skb(skb); 5252 kfree_skb(skb);
4754
4755 return 0;
4756} 5253}
4757 5254
4758static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) 5255static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4780,7 +5277,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4780 5277
4781 case L2CAP_CID_CONN_LESS: 5278 case L2CAP_CID_CONN_LESS:
4782 psm = get_unaligned((__le16 *) skb->data); 5279 psm = get_unaligned((__le16 *) skb->data);
4783 skb_pull(skb, 2); 5280 skb_pull(skb, L2CAP_PSMLEN_SIZE);
4784 l2cap_conless_channel(conn, psm, skb); 5281 l2cap_conless_channel(conn, psm, skb);
4785 break; 5282 break;
4786 5283
@@ -4891,7 +5388,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4891 if (!conn) 5388 if (!conn)
4892 return 0; 5389 return 0;
4893 5390
4894 BT_DBG("conn %p", conn); 5391 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
4895 5392
4896 if (hcon->type == LE_LINK) { 5393 if (hcon->type == LE_LINK) {
4897 if (!status && encrypt) 5394 if (!status && encrypt)
@@ -4904,7 +5401,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4904 list_for_each_entry(chan, &conn->chan_l, list) { 5401 list_for_each_entry(chan, &conn->chan_l, list) {
4905 l2cap_chan_lock(chan); 5402 l2cap_chan_lock(chan);
4906 5403
4907 BT_DBG("chan->scid %d", chan->scid); 5404 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5405 state_to_string(chan->state));
4908 5406
4909 if (chan->scid == L2CAP_CID_LE_DATA) { 5407 if (chan->scid == L2CAP_CID_LE_DATA) {
4910 if (!status && encrypt) { 5408 if (!status && encrypt) {
@@ -4974,6 +5472,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4974 rsp.status = cpu_to_le16(stat); 5472 rsp.status = cpu_to_le16(stat);
4975 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 5473 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4976 sizeof(rsp), &rsp); 5474 sizeof(rsp), &rsp);
5475
5476 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5477 res == L2CAP_CR_SUCCESS) {
5478 char buf[128];
5479 set_bit(CONF_REQ_SENT, &chan->conf_state);
5480 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5481 L2CAP_CONF_REQ,
5482 l2cap_build_conf_req(chan, buf),
5483 buf);
5484 chan->num_conf_req++;
5485 }
4977 } 5486 }
4978 5487
4979 l2cap_chan_unlock(chan); 5488 l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d48..a4bb27e8427e 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
27 27
28/* Bluetooth L2CAP sockets. */ 28/* Bluetooth L2CAP sockets. */
29 29
30#include <linux/security.h>
31#include <linux/export.h> 30#include <linux/export.h>
32 31
33#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
89 if (err < 0) 88 if (err < 0)
90 goto done; 89 goto done;
91 90
92 if (__le16_to_cpu(la.l2_psm) == 0x0001 || 91 if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
93 __le16_to_cpu(la.l2_psm) == 0x0003) 92 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
94 chan->sec_level = BT_SECURITY_SDP; 93 chan->sec_level = BT_SECURITY_SDP;
95 94
96 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 95 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
446 return err; 445 return err;
447} 446}
448 447
448static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
449{
450 switch (chan->scid) {
451 case L2CAP_CID_LE_DATA:
452 if (mtu < L2CAP_LE_MIN_MTU)
453 return false;
454 break;
455
456 default:
457 if (mtu < L2CAP_DEFAULT_MIN_MTU)
458 return false;
459 }
460
461 return true;
462}
463
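l2cap_valid_mtu() above turns an implicit assumption into an explicit floor check: an LE data channel must accept at least the LE minimum MTU, and every other channel at least the BR/EDR default minimum. A standalone sketch using the customary constants (23 for LE, 48 for BR/EDR, CID 0x0004 for LE data; these values are quoted from the Bluetooth spec and kernel headers from memory, so verify before relying on them):

#include <assert.h>
#include <stdbool.h>

#define CID_LE_DATA     0x0004  /* fixed channel for LE data */
#define LE_MIN_MTU      23
#define DEFAULT_MIN_MTU 48

static bool valid_mtu(unsigned short scid, unsigned short mtu)
{
        if (scid == CID_LE_DATA)
                return mtu >= LE_MIN_MTU;
        return mtu >= DEFAULT_MIN_MTU;  /* BR/EDR and everything else */
}

int main(void)
{
        assert(valid_mtu(CID_LE_DATA, 23));     /* LE floor is inclusive */
        assert(!valid_mtu(0x0040, 40));         /* dynamic BR/EDR CID, below 48 */
        return 0;
}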
449static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 464static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
450{ 465{
451 struct sock *sk = sock->sk; 466 struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
484 break; 499 break;
485 } 500 }
486 501
502 if (!l2cap_valid_mtu(chan, opts.imtu)) {
503 err = -EINVAL;
504 break;
505 }
506
487 chan->mode = opts.mode; 507 chan->mode = opts.mode;
488 switch (chan->mode) { 508 switch (chan->mode) {
489 case L2CAP_MODE_BASIC: 509 case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
873 return err; 893 return err;
874} 894}
875 895
876static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) 896static void l2cap_sock_cleanup_listen(struct sock *parent)
877{ 897{
878 struct sock *sk, *parent = data; 898 struct sock *sk;
899
900 BT_DBG("parent %p", parent);
901
902 /* Close not yet accepted channels */
903 while ((sk = bt_accept_dequeue(parent, NULL))) {
904 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
905
906 l2cap_chan_lock(chan);
907 __clear_chan_timer(chan);
908 l2cap_chan_close(chan, ECONNRESET);
909 l2cap_chan_unlock(chan);
910
911 l2cap_sock_kill(sk);
912 }
913}
914
915static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
916{
917 struct sock *sk, *parent = chan->data;
918
919 /* Check for backlog size */
920 if (sk_acceptq_is_full(parent)) {
921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
922 return NULL;
923 }
879 924
880 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
881 GFP_ATOMIC); 926 GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
889 return l2cap_pi(sk)->chan; 934 return l2cap_pi(sk)->chan;
890} 935}
891 936
892static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) 937static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
893{ 938{
894 int err; 939 int err;
895 struct sock *sk = data; 940 struct sock *sk = chan->data;
896 struct l2cap_pinfo *pi = l2cap_pi(sk); 941 struct l2cap_pinfo *pi = l2cap_pi(sk);
897 942
898 lock_sock(sk); 943 lock_sock(sk);
@@ -925,16 +970,57 @@ done:
925 return err; 970 return err;
926} 971}
927 972
928static void l2cap_sock_close_cb(void *data) 973static void l2cap_sock_close_cb(struct l2cap_chan *chan)
929{ 974{
930 struct sock *sk = data; 975 struct sock *sk = chan->data;
931 976
932 l2cap_sock_kill(sk); 977 l2cap_sock_kill(sk);
933} 978}
934 979
935static void l2cap_sock_state_change_cb(void *data, int state) 980static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
936{ 981{
937 struct sock *sk = data; 982 struct sock *sk = chan->data;
983 struct sock *parent;
984
985 lock_sock(sk);
986
987 parent = bt_sk(sk)->parent;
988
989 sock_set_flag(sk, SOCK_ZAPPED);
990
991 switch (chan->state) {
992 case BT_OPEN:
993 case BT_BOUND:
994 case BT_CLOSED:
995 break;
996 case BT_LISTEN:
997 l2cap_sock_cleanup_listen(sk);
998 sk->sk_state = BT_CLOSED;
999 chan->state = BT_CLOSED;
1000
1001 break;
1002 default:
1003 sk->sk_state = BT_CLOSED;
1004 chan->state = BT_CLOSED;
1005
1006 sk->sk_err = err;
1007
1008 if (parent) {
1009 bt_accept_unlink(sk);
1010 parent->sk_data_ready(parent, 0);
1011 } else {
1012 sk->sk_state_change(sk);
1013 }
1014
1015 break;
1016 }
1017
1018 release_sock(sk);
1019}
1020
1021static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
1022{
1023 struct sock *sk = chan->data;
938 1024
939 sk->sk_state = state; 1025 sk->sk_state = state;
940} 1026}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
955 return skb; 1041 return skb;
956} 1042}
957 1043
1044static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1045{
1046 struct sock *sk = chan->data;
1047 struct sock *parent;
1048
1049 lock_sock(sk);
1050
1051 parent = bt_sk(sk)->parent;
1052
1053 BT_DBG("sk %p, parent %p", sk, parent);
1054
1055 sk->sk_state = BT_CONNECTED;
1056 sk->sk_state_change(sk);
1057
1058 if (parent)
1059 parent->sk_data_ready(parent, 0);
1060
1061 release_sock(sk);
1062}
1063
958static struct l2cap_ops l2cap_chan_ops = { 1064static struct l2cap_ops l2cap_chan_ops = {
959 .name = "L2CAP Socket Interface", 1065 .name = "L2CAP Socket Interface",
960 .new_connection = l2cap_sock_new_connection_cb, 1066 .new_connection = l2cap_sock_new_connection_cb,
961 .recv = l2cap_sock_recv_cb, 1067 .recv = l2cap_sock_recv_cb,
962 .close = l2cap_sock_close_cb, 1068 .close = l2cap_sock_close_cb,
1069 .teardown = l2cap_sock_teardown_cb,
963 .state_change = l2cap_sock_state_change_cb, 1070 .state_change = l2cap_sock_state_change_cb,
1071 .ready = l2cap_sock_ready_cb,
964 .alloc_skb = l2cap_sock_alloc_skb_cb, 1072 .alloc_skb = l2cap_sock_alloc_skb_cb,
965}; 1073};
966 1074
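The ops table above also records the other theme of the l2cap_sock.c changes: every callback now receives the struct l2cap_chan itself instead of an opaque void *data, and the owning socket is recovered from chan->data, which is set when the channel is allocated. The shape of a converted callback, restating l2cap_sock_close_cb() from the hunk above rather than introducing new API:

        static void l2cap_sock_close_cb(struct l2cap_chan *chan)
        {
                struct sock *sk = chan->data;   /* points back at the socket */

                l2cap_sock_kill(sk);
        }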
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f36..e1c97527e16c 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt 27#define pr_fmt(fmt) "Bluetooth: " fmt
28 28
29#include <linux/module.h> 29#include <linux/export.h>
30
31#include <linux/kernel.h>
32#include <linux/stddef.h>
33#include <linux/string.h>
34#include <asm/errno.h>
35 30
36#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
37 32
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 25d220776079..ad6613d17ca6 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
24 24
25/* Bluetooth HCI Management interface */ 25/* Bluetooth HCI Management interface */
26 26
27#include <linux/kernel.h>
28#include <linux/uaccess.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <asm/unaligned.h> 28#include <asm/unaligned.h>
31 29
@@ -212,7 +210,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
212 210
213 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 211 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
214 212
215 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); 213 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
216 if (!skb) 214 if (!skb)
217 return -ENOMEM; 215 return -ENOMEM;
218 216
@@ -243,7 +241,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
243 241
244 BT_DBG("sock %p", sk); 242 BT_DBG("sock %p", sk);
245 243
246 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC); 244 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
247 if (!skb) 245 if (!skb)
248 return -ENOMEM; 246 return -ENOMEM;
249 247
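The GFP_ATOMIC to GFP_KERNEL conversions running through this file all rest on one observation: mgmt commands and events execute in process context, where sleeping is permitted, so allocations may wait for reclaim instead of drawing on the scarce atomic reserves. The annotated pattern (same call as in the hunks above):

        /* Process context: GFP_KERNEL may sleep and reclaim memory, so it
         * fails far less often under pressure than GFP_ATOMIC would. */
        skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;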
@@ -689,14 +687,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
689{ 687{
690 struct pending_cmd *cmd; 688 struct pending_cmd *cmd;
691 689
692 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 690 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
693 if (!cmd) 691 if (!cmd)
694 return NULL; 692 return NULL;
695 693
696 cmd->opcode = opcode; 694 cmd->opcode = opcode;
697 cmd->index = hdev->id; 695 cmd->index = hdev->id;
698 696
699 cmd->param = kmalloc(len, GFP_ATOMIC); 697 cmd->param = kmalloc(len, GFP_KERNEL);
700 if (!cmd->param) { 698 if (!cmd->param) {
701 kfree(cmd); 699 kfree(cmd);
702 return NULL; 700 return NULL;
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
714} 712}
715 713
716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 714static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
717 void (*cb)(struct pending_cmd *cmd, void *data), 715 void (*cb)(struct pending_cmd *cmd,
716 void *data),
718 void *data) 717 void *data)
719{ 718{
720 struct list_head *p, *n; 719 struct list_head *p, *n;
@@ -813,7 +812,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
813 struct sk_buff *skb; 812 struct sk_buff *skb;
814 struct mgmt_hdr *hdr; 813 struct mgmt_hdr *hdr;
815 814
816 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 815 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
817 if (!skb) 816 if (!skb)
818 return -ENOMEM; 817 return -ENOMEM;
819 818
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
871 } 870 }
872 871
873 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 872 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
874 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 873 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
876 MGMT_STATUS_BUSY); 875 MGMT_STATUS_BUSY);
877 goto failed; 876 goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
978 } 977 }
979 978
980 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 979 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
981 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 980 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 981 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
983 MGMT_STATUS_BUSY); 982 MGMT_STATUS_BUSY);
984 goto failed; 983 goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1001 scan = 0; 1000 scan = 0;
1002 1001
1003 if (test_bit(HCI_ISCAN, &hdev->flags) && 1002 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1004 hdev->discov_timeout > 0) 1003 hdev->discov_timeout > 0)
1005 cancel_delayed_work(&hdev->discov_off); 1004 cancel_delayed_work(&hdev->discov_off);
1006 } 1005 }
1007 1006
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1056 bool changed = false; 1055 bool changed = false;
1057 1056
1058 if (!!cp->val != test_bit(HCI_LINK_SECURITY, 1057 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1059 &hdev->dev_flags)) { 1058 &hdev->dev_flags)) {
1060 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 1059 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1061 changed = true; 1060 changed = true;
1062 } 1061 }
@@ -1269,7 +1268,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1269 goto failed; 1268 goto failed;
1270 } 1269 }
1271 1270
1272 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 1271 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1273 if (!uuid) { 1272 if (!uuid) {
1274 err = -ENOMEM; 1273 err = -ENOMEM;
1275 goto failed; 1274 goto failed;
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
1317} 1316}
1318 1317
1319static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, 1318static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1320 u16 len) 1319 u16 len)
1321{ 1320{
1322 struct mgmt_cp_remove_uuid *cp = data; 1321 struct mgmt_cp_remove_uuid *cp = data;
1323 struct pending_cmd *cmd; 1322 struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
1442} 1441}
1443 1442
1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, 1443static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1445 u16 len) 1444 u16 len)
1446{ 1445{
1447 struct mgmt_cp_load_link_keys *cp = data; 1446 struct mgmt_cp_load_link_keys *cp = data;
1448 u16 key_count, expected_len; 1447 u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1454 sizeof(struct mgmt_link_key_info); 1453 sizeof(struct mgmt_link_key_info);
1455 if (expected_len != len) { 1454 if (expected_len != len) {
1456 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1455 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1457 len, expected_len); 1456 len, expected_len);
1458 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 1457 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1459 MGMT_STATUS_INVALID_PARAMS); 1458 MGMT_STATUS_INVALID_PARAMS);
1460 } 1459 }
1461 1460
1462 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1461 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1463 key_count); 1462 key_count);
1464 1463
1465 hci_dev_lock(hdev); 1464 hci_dev_lock(hdev);
1466 1465
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1535 if (cp->disconnect) { 1534 if (cp->disconnect) {
1536 if (cp->addr.type == BDADDR_BREDR) 1535 if (cp->addr.type == BDADDR_BREDR)
1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 1536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1538 &cp->addr.bdaddr); 1537 &cp->addr.bdaddr);
1539 else 1538 else
1540 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, 1539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1541 &cp->addr.bdaddr); 1540 &cp->addr.bdaddr);
1542 } else { 1541 } else {
1543 conn = NULL; 1542 conn = NULL;
1544 } 1543 }
@@ -1594,11 +1593,12 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1594 } 1593 }
1595 1594
1596 if (cp->addr.type == BDADDR_BREDR) 1595 if (cp->addr.type == BDADDR_BREDR)
1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 1596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1597 &cp->addr.bdaddr);
1598 else 1598 else
1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1600 1600
1601 if (!conn) { 1601 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1602 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1602 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1603 MGMT_STATUS_NOT_CONNECTED); 1603 MGMT_STATUS_NOT_CONNECTED);
1604 goto failed; 1604 goto failed;
@@ -1611,7 +1611,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1611 } 1611 }
1612 1612
1613 dc.handle = cpu_to_le16(conn->handle); 1613 dc.handle = cpu_to_le16(conn->handle);
1614 dc.reason = 0x13; /* Remote User Terminated Connection */ 1614 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1615 1615
1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1617 if (err < 0) 1617 if (err < 0)
@@ -1667,7 +1667,7 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1667 } 1667 }
1668 1668
1669 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 1669 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1670 rp = kmalloc(rp_len, GFP_ATOMIC); 1670 rp = kmalloc(rp_len, GFP_KERNEL);
1671 if (!rp) { 1671 if (!rp) {
1672 err = -ENOMEM; 1672 err = -ENOMEM;
1673 goto unlock; 1673 goto unlock;
@@ -1778,29 +1778,6 @@ failed:
1778 return err; 1778 return err;
1779} 1779}
1780 1780
1781static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1782 void *data, u16 len)
1783{
1784 struct mgmt_cp_pin_code_neg_reply *cp = data;
1785 int err;
1786
1787 BT_DBG("");
1788
1789 hci_dev_lock(hdev);
1790
1791 if (!hdev_is_powered(hdev)) {
1792 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
1793 MGMT_STATUS_NOT_POWERED);
1794 goto failed;
1795 }
1796
1797 err = send_pin_code_neg_reply(sk, hdev, cp);
1798
1799failed:
1800 hci_dev_unlock(hdev);
1801 return err;
1802}
1803
1804static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, 1781static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1805 u16 len) 1782 u16 len)
1806{ 1783{
@@ -1813,7 +1790,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1813 hdev->io_capability = cp->io_capability; 1790 hdev->io_capability = cp->io_capability;
1814 1791
1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1792 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1816 hdev->io_capability); 1793 hdev->io_capability);
1817 1794
1818 hci_dev_unlock(hdev); 1795 hci_dev_unlock(hdev);
1819 1796
@@ -1821,7 +1798,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1821 0); 1798 0);
1822} 1799}
1823 1800
1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1801static struct pending_cmd *find_pairing(struct hci_conn *conn)
1825{ 1802{
1826 struct hci_dev *hdev = conn->hdev; 1803 struct hci_dev *hdev = conn->hdev;
1827 struct pending_cmd *cmd; 1804 struct pending_cmd *cmd;
@@ -1873,6 +1850,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1873 pairing_complete(cmd, mgmt_status(status)); 1850 pairing_complete(cmd, mgmt_status(status));
1874} 1851}
1875 1852
1853static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
1854{
1855 struct pending_cmd *cmd;
1856
1857 BT_DBG("status %u", status);
1858
1859 if (!status)
1860 return;
1861
1862 cmd = find_pairing(conn);
1863 if (!cmd)
1864 BT_DBG("Unable to find a pending command");
1865 else
1866 pairing_complete(cmd, mgmt_status(status));
1867}
1868
1876static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, 1869static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1877 u16 len) 1870 u16 len)
1878{ 1871{
@@ -1911,8 +1904,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	rp.addr.type = cp->addr.type;
 
 	if (IS_ERR(conn)) {
+		int status;
+
+		if (PTR_ERR(conn) == -EBUSY)
+			status = MGMT_STATUS_BUSY;
+		else
+			status = MGMT_STATUS_CONNECT_FAILED;
+
 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-				   MGMT_STATUS_CONNECT_FAILED, &rp,
+				   status, &rp,
 				   sizeof(rp));
 		goto unlock;
 	}
@@ -1934,6 +1934,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1934 /* For LE, just connecting isn't a proof that the pairing finished */ 1934 /* For LE, just connecting isn't a proof that the pairing finished */
1935 if (cp->addr.type == BDADDR_BREDR) 1935 if (cp->addr.type == BDADDR_BREDR)
1936 conn->connect_cfm_cb = pairing_complete_cb; 1936 conn->connect_cfm_cb = pairing_complete_cb;
1937 else
1938 conn->connect_cfm_cb = le_connect_complete_cb;
1937 1939
1938 conn->security_cfm_cb = pairing_complete_cb; 1940 conn->security_cfm_cb = pairing_complete_cb;
1939 conn->disconn_cfm_cb = pairing_complete_cb; 1941 conn->disconn_cfm_cb = pairing_complete_cb;
@@ -1941,7 +1943,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1941 cmd->user_data = conn; 1943 cmd->user_data = conn;
1942 1944
1943 if (conn->state == BT_CONNECTED && 1945 if (conn->state == BT_CONNECTED &&
1944 hci_conn_security(conn, sec_level, auth_type)) 1946 hci_conn_security(conn, sec_level, auth_type))
1945 pairing_complete(cmd, 0); 1947 pairing_complete(cmd, 0);
1946 1948
1947 err = 0; 1949 err = 0;
@@ -2058,6 +2060,18 @@ done:
2058 return err; 2060 return err;
2059} 2061}
2060 2062
2063static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2064 void *data, u16 len)
2065{
2066 struct mgmt_cp_pin_code_neg_reply *cp = data;
2067
2068 BT_DBG("");
2069
2070 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2071 MGMT_OP_PIN_CODE_NEG_REPLY,
2072 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2073}
2074
2061static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, 2075static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2062 u16 len) 2076 u16 len)
2063{ 2077{
@@ -2238,7 +2252,7 @@ unlock:
2238} 2252}
2239 2253
2240static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, 2254static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2241 void *data, u16 len) 2255 void *data, u16 len)
2242{ 2256{
2243 struct mgmt_cp_remove_remote_oob_data *cp = data; 2257 struct mgmt_cp_remove_remote_oob_data *cp = data;
2244 u8 status; 2258 u8 status;
@@ -2407,7 +2421,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2407 2421
2408 case DISCOVERY_RESOLVING: 2422 case DISCOVERY_RESOLVING:
2409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 2423 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2410 NAME_PENDING); 2424 NAME_PENDING);
2411 if (!e) { 2425 if (!e) {
2412 mgmt_pending_remove(cmd); 2426 mgmt_pending_remove(cmd);
2413 err = cmd_complete(sk, hdev->id, 2427 err = cmd_complete(sk, hdev->id,
@@ -2582,8 +2596,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2582 if (cp->val) { 2596 if (cp->val) {
2583 type = PAGE_SCAN_TYPE_INTERLACED; 2597 type = PAGE_SCAN_TYPE_INTERLACED;
2584 2598
2585 /* 22.5 msec page scan interval */ 2599 /* 160 msec page scan interval */
2586 acp.interval = __constant_cpu_to_le16(0x0024); 2600 acp.interval = __constant_cpu_to_le16(0x0100);
2587 } else { 2601 } else {
2588 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 2602 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2589 2603
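
Note: the page scan interval values in the hunk above are in HCI baseband slots of 0.625 ms, so the old 0x0024 is 36 slots = 22.5 ms and the new 0x0100 is 256 slots = 160 ms, matching the updated comment. A one-line conversion helper, purely illustrative:

static inline unsigned int hci_slots_to_usecs(u16 slots)
{
	return slots * 625;	/* one baseband slot is 0.625 ms = 625 us */
}
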
@@ -2629,7 +2643,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2629 sizeof(struct mgmt_ltk_info); 2643 sizeof(struct mgmt_ltk_info);
2630 if (expected_len != len) { 2644 if (expected_len != len) {
2631 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2645 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2632 len, expected_len); 2646 len, expected_len);
2633 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2647 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2634 EINVAL); 2648 EINVAL);
2635 } 2649 }
@@ -2754,7 +2768,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2754 } 2768 }
2755 2769
2756 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 2770 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2757 mgmt_handlers[opcode].func == NULL) { 2771 mgmt_handlers[opcode].func == NULL) {
2758 BT_DBG("Unknown op %u", opcode); 2772 BT_DBG("Unknown op %u", opcode);
2759 err = cmd_status(sk, index, opcode, 2773 err = cmd_status(sk, index, opcode,
2760 MGMT_STATUS_UNKNOWN_COMMAND); 2774 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2762,7 +2776,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2762 } 2776 }
2763 2777
2764 if ((hdev && opcode < MGMT_OP_READ_INFO) || 2778 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2765 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 2779 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2766 err = cmd_status(sk, index, opcode, 2780 err = cmd_status(sk, index, opcode,
2767 MGMT_STATUS_INVALID_INDEX); 2781 MGMT_STATUS_INVALID_INDEX);
2768 goto done; 2782 goto done;
@@ -2771,7 +2785,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2771 handler = &mgmt_handlers[opcode]; 2785 handler = &mgmt_handlers[opcode];
2772 2786
2773 if ((handler->var_len && len < handler->data_len) || 2787 if ((handler->var_len && len < handler->data_len) ||
2774 (!handler->var_len && len != handler->data_len)) { 2788 (!handler->var_len && len != handler->data_len)) {
2775 err = cmd_status(sk, index, opcode, 2789 err = cmd_status(sk, index, opcode,
2776 MGMT_STATUS_INVALID_PARAMS); 2790 MGMT_STATUS_INVALID_PARAMS);
2777 goto done; 2791 goto done;
@@ -2955,7 +2969,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2955 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2969 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2956 ev.key.addr.type = BDADDR_BREDR; 2970 ev.key.addr.type = BDADDR_BREDR;
2957 ev.key.type = key->type; 2971 ev.key.type = key->type;
2958 memcpy(ev.key.val, key->val, 16); 2972 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
2959 ev.key.pin_len = key->pin_len; 2973 ev.key.pin_len = key->pin_len;
2960 2974
2961 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2975 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3090,7 +3104,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3090 mgmt_pending_remove(cmd); 3104 mgmt_pending_remove(cmd);
3091 3105
3092 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 3106 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3093 hdev); 3107 hdev);
3094 return err; 3108 return err;
3095} 3109}
3096 3110
@@ -3180,7 +3194,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3180} 3194}
3181 3195
3182int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3196int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3183 u8 link_type, u8 addr_type) 3197 u8 link_type, u8 addr_type)
3184{ 3198{
3185 struct mgmt_ev_user_passkey_request ev; 3199 struct mgmt_ev_user_passkey_request ev;
3186 3200
@@ -3194,8 +3208,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3194} 3208}
3195 3209
3196static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3210static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3197 u8 link_type, u8 addr_type, u8 status, 3211 u8 link_type, u8 addr_type, u8 status,
3198 u8 opcode) 3212 u8 opcode)
3199{ 3213{
3200 struct pending_cmd *cmd; 3214 struct pending_cmd *cmd;
3201 struct mgmt_rp_user_confirm_reply rp; 3215 struct mgmt_rp_user_confirm_reply rp;
@@ -3226,7 +3240,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3226 u8 link_type, u8 addr_type, u8 status) 3240 u8 link_type, u8 addr_type, u8 status)
3227{ 3241{
3228 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3229 status, MGMT_OP_USER_CONFIRM_NEG_REPLY); 3243 status,
3244 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3230} 3245}
3231 3246
3232int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3247int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3240,7 +3255,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 link_type, u8 addr_type, u8 status) 3255 u8 link_type, u8 addr_type, u8 status)
3241{ 3256{
3242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3257 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3243 status, MGMT_OP_USER_PASSKEY_NEG_REPLY); 3258 status,
3259 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3244} 3260}
3245 3261
3246int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3262int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -3519,9 +3535,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3519 ev->addr.type = link_to_bdaddr(link_type, addr_type); 3535 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3520 ev->rssi = rssi; 3536 ev->rssi = rssi;
3521 if (cfm_name) 3537 if (cfm_name)
3522 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME; 3538 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3523 if (!ssp) 3539 if (!ssp)
3524 ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING; 3540 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3525 3541
3526 if (eir_len > 0) 3542 if (eir_len > 0)
3527 memcpy(ev->eir, eir, eir_len); 3543 memcpy(ev->eir, eir, eir_len);
@@ -3531,7 +3547,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3531 dev_class, 3); 3547 dev_class, 3);
3532 3548
3533 ev->eir_len = cpu_to_le16(eir_len); 3549 ev->eir_len = cpu_to_le16(eir_len);
3534
3535 ev_size = sizeof(*ev) + eir_len; 3550 ev_size = sizeof(*ev) + eir_len;
3536 3551
3537 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); 3552 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
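
Note: the mgmt_device_found() hunk above stops poking the first byte of a flags array and instead ORs host-order bits through cpu_to_le32() into a typed __le32 field; the wire format stays the same, but the endianness handling becomes explicit and type-checkable. A minimal sketch of the idiom, with illustrative names and flag values:

#include <linux/types.h>

#define EXAMPLE_CONFIRM_NAME	0x01	/* illustrative, not the mgmt values */
#define EXAMPLE_LEGACY_PAIRING	0x02

struct example_event {
	__le32 flags;		/* little-endian on the wire */
};

/* cpu_to_le32() byte-swaps on big-endian hosts and is a no-op on
 * little-endian ones; pairing it with a __le32 field lets sparse
 * flag endianness mistakes at build time. */
static void example_set_flags(struct example_event *ev, u32 bits)
{
	ev->flags |= cpu_to_le32(bits);
}
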
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e7..c75107ef8920 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/device.h>
36#include <linux/debugfs.h> 29#include <linux/debugfs.h>
37#include <linux/seq_file.h>
38#include <linux/net.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 30#include <linux/kthread.h>
41#include <linux/slab.h>
42
43#include <net/sock.h>
44#include <linux/uaccess.h>
45#include <asm/unaligned.h> 31#include <asm/unaligned.h>
46 32
47#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
115#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 101#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
116#define __get_rpn_parity(line) (((line) >> 3) & 0x7) 102#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
117 103
118static inline void rfcomm_schedule(void) 104static void rfcomm_schedule(void)
119{ 105{
120 if (!rfcomm_thread) 106 if (!rfcomm_thread)
121 return; 107 return;
122 wake_up_process(rfcomm_thread); 108 wake_up_process(rfcomm_thread);
123} 109}
124 110
125static inline void rfcomm_session_put(struct rfcomm_session *s) 111static void rfcomm_session_put(struct rfcomm_session *s)
126{ 112{
127 if (atomic_dec_and_test(&s->refcnt)) 113 if (atomic_dec_and_test(&s->refcnt))
128 rfcomm_session_del(s); 114 rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
227 return err; 213 return err;
228} 214}
229 215
230static inline int rfcomm_check_security(struct rfcomm_dlc *d) 216static int rfcomm_check_security(struct rfcomm_dlc *d)
231{ 217{
232 struct sock *sk = d->session->sock->sk; 218 struct sock *sk = d->session->sock->sk;
233 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 219 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
1750/* Send data queued for the DLC. 1736/* Send data queued for the DLC.
1751 * Return number of frames left in the queue. 1737 * Return number of frames left in the queue.
1752 */ 1738 */
1753static inline int rfcomm_process_tx(struct rfcomm_dlc *d) 1739static int rfcomm_process_tx(struct rfcomm_dlc *d)
1754{ 1740{
1755 struct sk_buff *skb; 1741 struct sk_buff *skb;
1756 int err; 1742 int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
1798 return skb_queue_len(&d->tx_queue); 1784 return skb_queue_len(&d->tx_queue);
1799} 1785}
1800 1786
1801static inline void rfcomm_process_dlcs(struct rfcomm_session *s) 1787static void rfcomm_process_dlcs(struct rfcomm_session *s)
1802{ 1788{
1803 struct rfcomm_dlc *d; 1789 struct rfcomm_dlc *d;
1804 struct list_head *p, *n; 1790 struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1858 } 1844 }
1859} 1845}
1860 1846
1861static inline void rfcomm_process_rx(struct rfcomm_session *s) 1847static void rfcomm_process_rx(struct rfcomm_session *s)
1862{ 1848{
1863 struct socket *sock = s->sock; 1849 struct socket *sock = s->sock;
1864 struct sock *sk = sock->sk; 1850 struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1883 } 1869 }
1884} 1870}
1885 1871
1886static inline void rfcomm_accept_connection(struct rfcomm_session *s) 1872static void rfcomm_accept_connection(struct rfcomm_session *s)
1887{ 1873{
1888 struct socket *sock = s->sock, *nsock; 1874 struct socket *sock = s->sock, *nsock;
1889 int err; 1875 int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1917 sock_release(nsock); 1903 sock_release(nsock);
1918} 1904}
1919 1905
1920static inline void rfcomm_check_connection(struct rfcomm_session *s) 1906static void rfcomm_check_connection(struct rfcomm_session *s)
1921{ 1907{
1922 struct sock *sk = s->sock->sk; 1908 struct sock *sk = s->sock->sk;
1923 1909
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1941 } 1927 }
1942} 1928}
1943 1929
1944static inline void rfcomm_process_sessions(void) 1930static void rfcomm_process_sessions(void)
1945{ 1931{
1946 struct list_head *p, *n; 1932 struct list_head *p, *n;
1947 1933
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb864..7e1e59645c05 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29
30#include <linux/types.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/debugfs.h> 29#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45#include <linux/security.h>
46#include <net/sock.h>
47
48#include <linux/uaccess.h>
49 30
50#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14aee..cb960773c002 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
31#include <linux/tty_driver.h> 31#include <linux/tty_driver.h>
32#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
33 33
34#include <linux/capability.h>
35#include <linux/slab.h>
36#include <linux/skbuff.h>
37#include <linux/workqueue.h>
38
39#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
41#include <net/bluetooth/rfcomm.h> 36#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
132 return NULL; 127 return NULL;
133} 128}
134 129
135static inline struct rfcomm_dev *rfcomm_dev_get(int id) 130static struct rfcomm_dev *rfcomm_dev_get(int id)
136{ 131{
137 struct rfcomm_dev *dev; 132 struct rfcomm_dev *dev;
138 133
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
345 tty_port_put(&dev->port); 340 tty_port_put(&dev->port);
346} 341}
347 342
348static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) 343static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
349{ 344{
350 tty_port_get(&dev->port); 345 tty_port_get(&dev->port);
351 atomic_add(skb->truesize, &dev->wmem_alloc); 346 atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a7..40bbe25dcff7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
25/* Bluetooth SCO sockets. */ 25/* Bluetooth SCO sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <linux/socket.h>
39#include <linux/skbuff.h>
40#include <linux/device.h>
41#include <linux/debugfs.h> 28#include <linux/debugfs.h>
42#include <linux/seq_file.h> 29#include <linux/seq_file.h>
43#include <linux/list.h>
44#include <linux/security.h>
45#include <net/sock.h>
46
47#include <linux/uaccess.h>
48 30
49#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
123 return conn; 105 return conn;
124} 106}
125 107
126static inline struct sock *sco_chan_get(struct sco_conn *conn) 108static struct sock *sco_chan_get(struct sco_conn *conn)
127{ 109{
128 struct sock *sk = NULL; 110 struct sock *sk = NULL;
129 sco_conn_lock(conn); 111 sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
 	return 0;
 }
 
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+			struct sock *parent)
 {
 	int err = 0;
 
@@ -228,7 +211,7 @@ done:
228 return err; 211 return err;
229} 212}
230 213
231static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) 214static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
232{ 215{
233 struct sco_conn *conn = sco_pi(sk)->conn; 216 struct sco_conn *conn = sco_pi(sk)->conn;
234 struct sk_buff *skb; 217 struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
254 return len; 237 return len;
255} 238}
256 239
257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 240static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
258{ 241{
259 struct sock *sk = sco_chan_get(conn); 242 struct sock *sk = sco_chan_get(conn);
260 243
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
523 goto done; 506 goto done;
524 507
525 err = bt_sock_wait_state(sk, BT_CONNECTED, 508 err = bt_sock_wait_state(sk, BT_CONNECTED,
526 sock_sndtimeo(sk, flags & O_NONBLOCK)); 509 sock_sndtimeo(sk, flags & O_NONBLOCK));
527 510
528done: 511done:
529 release_sock(sk); 512 release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
788 771
789 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 772 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
790 err = bt_sock_wait_state(sk, BT_CLOSED, 773 err = bt_sock_wait_state(sk, BT_CLOSED,
791 sk->sk_lingertime); 774 sk->sk_lingertime);
792 } 775 }
793 release_sock(sk); 776 release_sock(sk);
794 return err; 777 return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
878 bh_lock_sock(parent); 861 bh_lock_sock(parent);
879 862
880 sk = sco_sock_alloc(sock_net(parent), NULL, 863 sk = sco_sock_alloc(sock_net(parent), NULL,
881 BTPROTO_SCO, GFP_ATOMIC); 864 BTPROTO_SCO, GFP_ATOMIC);
882 if (!sk) { 865 if (!sk) {
883 bh_unlock_sock(parent); 866 bh_unlock_sock(parent);
884 goto done; 867 goto done;
@@ -907,7 +890,7 @@ done:
907/* ----- SCO interface with lower layer (HCI) ----- */ 890/* ----- SCO interface with lower layer (HCI) ----- */
908int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 891int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
909{ 892{
910 register struct sock *sk; 893 struct sock *sk;
911 struct hlist_node *node; 894 struct hlist_node *node;
912 int lm = 0; 895 int lm = 0;
913 896
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
920 continue; 903 continue;
921 904
922 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || 905 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
923 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 906 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
924 lm |= HCI_LM_ACCEPT; 907 lm |= HCI_LM_ACCEPT;
925 break; 908 break;
926 } 909 }
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
981 964
982 sk_for_each(sk, node, &sco_sk_list.head) { 965 sk_for_each(sk, node, &sco_sk_list.head) {
983 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), 966 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
984 batostr(&bt_sk(sk)->dst), sk->sk_state); 967 batostr(&bt_sk(sk)->dst), sk->sk_state);
985 } 968 }
986 969
987 read_unlock(&sco_sk_list.lock); 970 read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
1044 } 1027 }
1045 1028
1046 if (bt_debugfs) { 1029 if (bt_debugfs) {
1047 sco_debugfs = debugfs_create_file("sco", 0444, 1030 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1048 bt_debugfs, NULL, &sco_debugfs_fops); 1031 NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs) 1032 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file"); 1033 BT_ERR("Failed to create SCO debug file");
1051 } 1034 }
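
Note: besides the de-inlining and the removal of the long-obsolete `register` storage class, the sco_init() hunk keeps the common debugfs pattern: the debug file is optional, so a creation failure is logged rather than failing module init. A sketch with an illustrative name:

#include <linux/debugfs.h>
#include <linux/printk.h>

static struct dentry *example_debugfs;

/* Create an optional read-only debug file under an existing parent;
 * callers treat failure as non-fatal. */
static void example_debugfs_init(struct dentry *parent,
				 const struct file_operations *fops)
{
	example_debugfs = debugfs_create_file("example", 0444, parent,
					      NULL, fops);
	if (!example_debugfs)
		pr_err("Failed to create example debug file\n");
}
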
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6fc7c4708f3e..16ef0dc85a0a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/crypto.h>
24#include <linux/scatterlist.h>
25#include <crypto/b128ops.h>
26
23#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h> 28#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h> 29#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/mgmt.h> 30#include <net/bluetooth/mgmt.h>
27#include <net/bluetooth/smp.h> 31#include <net/bluetooth/smp.h>
28#include <linux/crypto.h>
29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h>
31 32
32#define SMP_TIMEOUT msecs_to_jiffies(30000) 33#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 34
@@ -648,7 +649,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
648 649
649 auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; 650 auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
650 651
651 ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); 652 ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
652 if (ret) 653 if (ret)
653 return SMP_UNSPECIFIED; 654 return SMP_UNSPECIFIED;
654 655
@@ -703,7 +704,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
703 return 0; 704 return 0;
704} 705}
705 706
706static u8 smp_ltk_encrypt(struct l2cap_conn *conn) 707static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
707{ 708{
708 struct smp_ltk *key; 709 struct smp_ltk *key;
709 struct hci_conn *hcon = conn->hcon; 710 struct hci_conn *hcon = conn->hcon;
@@ -712,6 +713,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
712 if (!key) 713 if (!key)
713 return 0; 714 return 0;
714 715
716 if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
717 return 0;
718
715 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) 719 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
716 return 1; 720 return 1;
717 721
@@ -732,7 +736,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
732 736
733 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 737 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
734 738
735 if (smp_ltk_encrypt(conn)) 739 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
736 return 0; 740 return 0;
737 741
738 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 742 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +775,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
771 return 1; 775 return 1;
772 776
773 if (hcon->link_mode & HCI_LM_MASTER) 777 if (hcon->link_mode & HCI_LM_MASTER)
774 if (smp_ltk_encrypt(conn)) 778 if (smp_ltk_encrypt(conn, sec_level))
775 goto done; 779 goto done;
776 780
777 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 781 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
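
Note: two behavioural fixes sit in the smp.c hunks above: tk_request() now receives the initiator's and responder's IO capabilities in that order, and smp_ltk_encrypt() refuses to satisfy a security request above medium level with an unauthenticated (no-MITM) stored key, forcing re-pairing instead. A sketch of the key policy with simplified stand-in types:

#include <linux/types.h>

enum example_sec_level { EX_SEC_LOW, EX_SEC_MEDIUM, EX_SEC_HIGH };

struct example_ltk {
	bool authenticated;	/* key was created with MITM protection */
};

static bool example_ltk_usable(const struct example_ltk *key,
			       enum example_sec_level level)
{
	if (!key)
		return false;
	/* An unauthenticated key cannot be upgraded to high security. */
	if (level > EX_SEC_MEDIUM && !key->authenticated)
		return false;
	return true;
}
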
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 929e48aed444..333484537600 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -127,9 +127,9 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
127 const struct br_cpu_netstats *bstats 127 const struct br_cpu_netstats *bstats
128 = per_cpu_ptr(br->stats, cpu); 128 = per_cpu_ptr(br->stats, cpu);
129 do { 129 do {
130 start = u64_stats_fetch_begin(&bstats->syncp); 130 start = u64_stats_fetch_begin_bh(&bstats->syncp);
131 memcpy(&tmp, bstats, sizeof(tmp)); 131 memcpy(&tmp, bstats, sizeof(tmp));
132 } while (u64_stats_fetch_retry(&bstats->syncp, start)); 132 } while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
133 sum.tx_bytes += tmp.tx_bytes; 133 sum.tx_bytes += tmp.tx_bytes;
134 sum.tx_packets += tmp.tx_packets; 134 sum.tx_packets += tmp.tx_packets;
135 sum.rx_bytes += tmp.rx_bytes; 135 sum.rx_bytes += tmp.rx_bytes;
@@ -246,10 +246,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
 	if (!np)
 		goto out;
 
-	np->dev = p->dev;
-	strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
-
-	err = __netpoll_setup(np);
+	err = __netpoll_setup(np, p->dev);
 	if (err) {
 		kfree(np);
 		goto out;
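
Note: br_get_stats64() switches to the _bh variants of the u64 stats helpers because the writers update these counters from bottom-half context; the reader's seqcount retry loop is otherwise unchanged. A sketch with hypothetical per-CPU counters:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_cpu_stats {
	u64 tx_bytes;
	u64 tx_packets;
	struct u64_stats_sync syncp;
};

/* Retry until a snapshot is read without racing a concurrent
 * bottom-half writer. */
static void example_stats_read(const struct example_cpu_stats *s,
			       u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*bytes = s->tx_bytes;
		*packets = s->tx_packets;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}
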
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0a942fbccc9a..e1144e1617be 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
240 return -ENOMEM; 240 return -ENOMEM;
241 241
242 dev_net_set(dev, net); 242 dev_net_set(dev, net);
243 dev->rtnl_link_ops = &br_link_ops;
243 244
244 res = register_netdev(dev); 245 res = register_netdev(dev);
245 if (res) 246 if (res)
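
Note: the single line added to br_add_bridge() has to precede register_netdev(); presumably the point is that a bridge created via the ioctl path gets the same rtnl_link_ops as one created over netlink, so it is reported and managed as kind "bridge" from the moment of registration. Schematic ordering, with placeholder objects:

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

extern struct rtnl_link_ops example_link_ops;	/* placeholder */

static int example_register(struct net *net, struct net_device *dev)
{
	dev_net_set(dev, net);
	dev->rtnl_link_ops = &example_link_ops;	/* set before... */

	return register_netdev(dev);		/* ...registration notifies */
}
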
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b66581208cb2..241743417f49 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -540,10 +540,11 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 
 	if (mdb->size >= max) {
 		max *= 2;
-		if (unlikely(max >= br->hash_max)) {
-			br_warn(br, "Multicast hash table maximum "
-				"reached, disabling snooping: %s, %d\n",
-				port ? port->dev->name : br->dev->name, max);
+		if (unlikely(max > br->hash_max)) {
+			br_warn(br, "Multicast hash table maximum of %d "
+				"reached, disabling snooping: %s\n",
+				br->hash_max,
+				port ? port->dev->name : br->dev->name);
 			err = -E2BIG;
 disable:
 			br->multicast_disabled = 1;
@@ -1160,7 +1161,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1160 goto out; 1161 goto out;
1161 } 1162 }
1162 mld = (struct mld_msg *) icmp6_hdr(skb); 1163 mld = (struct mld_msg *) icmp6_hdr(skb);
1163 max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); 1164 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1164 if (max_delay) 1165 if (max_delay)
1165 group = &mld->mld_mca; 1166 group = &mld->mld_mca;
1166 } else if (skb->len >= sizeof(*mld2q)) { 1167 } else if (skb->len >= sizeof(*mld2q)) {
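
Note: the mld_maxdelay fix above swaps htons() for ntohs(): the field is read from a received packet, so it is big-endian and must be converted to host order before arithmetic. The two functions are identical on big-endian machines, which is how such bugs stay invisible there. Minimal sketch:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* A __be16 taken off the wire goes through ntohs(); on little-endian
 * CPUs applying htons() instead yields a byte-swapped value. */
static unsigned long example_max_delay(__be16 wire_msecs)
{
	return msecs_to_jiffies(ntohs(wire_msecs));
}
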
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e41456bd3cc6..68e8f364bbf8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,7 +111,13 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 	 pppoe_proto(skb) == htons(PPP_IPV6) && \
 	 brnf_filter_pppoe_tagged)
 
-static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
+			     struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void fake_redirect(struct dst_entry *dst, struct sock *sk,
+			  struct sk_buff *skb)
 {
 }
 
@@ -120,7 +126,9 @@ static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
 	return NULL;
 }
 
-static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
+					   struct sk_buff *skb,
+					   const void *daddr)
 {
 	return NULL;
 }
@@ -134,6 +142,7 @@ static struct dst_ops fake_dst_ops = {
134 .family = AF_INET, 142 .family = AF_INET,
135 .protocol = cpu_to_be16(ETH_P_IP), 143 .protocol = cpu_to_be16(ETH_P_IP),
136 .update_pmtu = fake_update_pmtu, 144 .update_pmtu = fake_update_pmtu,
145 .redirect = fake_redirect,
137 .cow_metrics = fake_cow_metrics, 146 .cow_metrics = fake_cow_metrics,
138 .neigh_lookup = fake_neigh_lookup, 147 .neigh_lookup = fake_neigh_lookup,
139 .mtu = fake_mtu, 148 .mtu = fake_mtu,
@@ -373,19 +382,29 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 	if (!skb->dev)
 		goto free_skb;
 	dst = skb_dst(skb);
-	neigh = dst_get_neighbour_noref(dst);
-	if (neigh->hh.hh_len) {
-		neigh_hh_bridge(&neigh->hh, skb);
-		skb->dev = nf_bridge->physindev;
-		return br_handle_frame_finish(skb);
-	} else {
-		/* the neighbour function below overwrites the complete
-		 * MAC header, so we save the Ethernet source address and
-		 * protocol number. */
-		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
-		/* tell br_dev_xmit to continue with forwarding */
-		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
-		return neigh->output(neigh, skb);
+	neigh = dst_neigh_lookup_skb(dst, skb);
+	if (neigh) {
+		int ret;
+
+		if (neigh->hh.hh_len) {
+			neigh_hh_bridge(&neigh->hh, skb);
+			skb->dev = nf_bridge->physindev;
+			ret = br_handle_frame_finish(skb);
+		} else {
+			/* the neighbour function below overwrites the complete
+			 * MAC header, so we save the Ethernet source address and
+			 * protocol number.
+			 */
+			skb_copy_from_linear_data_offset(skb,
+							 -(ETH_HLEN-ETH_ALEN),
+							 skb->nf_bridge->data,
+							 ETH_HLEN-ETH_ALEN);
+			/* tell br_dev_xmit to continue with forwarding */
+			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+			ret = neigh->output(neigh, skb);
+		}
+		neigh_release(neigh);
+		return ret;
 	}
 free_skb:
 	kfree_skb(skb);
@@ -764,9 +783,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
764 return NF_DROP; 783 return NF_DROP;
765 784
766 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) 785 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
767 pf = PF_INET; 786 pf = NFPROTO_IPV4;
768 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) 787 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
769 pf = PF_INET6; 788 pf = NFPROTO_IPV6;
770 else 789 else
771 return NF_ACCEPT; 790 return NF_ACCEPT;
772 791
@@ -778,13 +797,13 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
778 nf_bridge->mask |= BRNF_PKT_TYPE; 797 nf_bridge->mask |= BRNF_PKT_TYPE;
779 } 798 }
780 799
781 if (pf == PF_INET && br_parse_ip_options(skb)) 800 if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
782 return NF_DROP; 801 return NF_DROP;
783 802
784 /* The physdev module checks on this */ 803 /* The physdev module checks on this */
785 nf_bridge->mask |= BRNF_BRIDGED; 804 nf_bridge->mask |= BRNF_BRIDGED;
786 nf_bridge->physoutdev = skb->dev; 805 nf_bridge->physoutdev = skb->dev;
787 if (pf == PF_INET) 806 if (pf == NFPROTO_IPV4)
788 skb->protocol = htons(ETH_P_IP); 807 skb->protocol = htons(ETH_P_IP);
789 else 808 else
790 skb->protocol = htons(ETH_P_IPV6); 809 skb->protocol = htons(ETH_P_IPV6);
@@ -871,9 +890,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
871 return NF_DROP; 890 return NF_DROP;
872 891
873 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) 892 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
874 pf = PF_INET; 893 pf = NFPROTO_IPV4;
875 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) 894 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
876 pf = PF_INET6; 895 pf = NFPROTO_IPV6;
877 else 896 else
878 return NF_ACCEPT; 897 return NF_ACCEPT;
879 898
@@ -886,7 +905,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
886 905
887 nf_bridge_pull_encap_header(skb); 906 nf_bridge_pull_encap_header(skb);
888 nf_bridge_save_header(skb); 907 nf_bridge_save_header(skb);
889 if (pf == PF_INET) 908 if (pf == NFPROTO_IPV4)
890 skb->protocol = htons(ETH_P_IP); 909 skb->protocol = htons(ETH_P_IP);
891 else 910 else
892 skb->protocol = htons(ETH_P_IPV6); 911 skb->protocol = htons(ETH_P_IPV6);
@@ -919,49 +938,49 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
919 { 938 {
920 .hook = br_nf_pre_routing, 939 .hook = br_nf_pre_routing,
921 .owner = THIS_MODULE, 940 .owner = THIS_MODULE,
922 .pf = PF_BRIDGE, 941 .pf = NFPROTO_BRIDGE,
923 .hooknum = NF_BR_PRE_ROUTING, 942 .hooknum = NF_BR_PRE_ROUTING,
924 .priority = NF_BR_PRI_BRNF, 943 .priority = NF_BR_PRI_BRNF,
925 }, 944 },
926 { 945 {
927 .hook = br_nf_local_in, 946 .hook = br_nf_local_in,
928 .owner = THIS_MODULE, 947 .owner = THIS_MODULE,
929 .pf = PF_BRIDGE, 948 .pf = NFPROTO_BRIDGE,
930 .hooknum = NF_BR_LOCAL_IN, 949 .hooknum = NF_BR_LOCAL_IN,
931 .priority = NF_BR_PRI_BRNF, 950 .priority = NF_BR_PRI_BRNF,
932 }, 951 },
933 { 952 {
934 .hook = br_nf_forward_ip, 953 .hook = br_nf_forward_ip,
935 .owner = THIS_MODULE, 954 .owner = THIS_MODULE,
936 .pf = PF_BRIDGE, 955 .pf = NFPROTO_BRIDGE,
937 .hooknum = NF_BR_FORWARD, 956 .hooknum = NF_BR_FORWARD,
938 .priority = NF_BR_PRI_BRNF - 1, 957 .priority = NF_BR_PRI_BRNF - 1,
939 }, 958 },
940 { 959 {
941 .hook = br_nf_forward_arp, 960 .hook = br_nf_forward_arp,
942 .owner = THIS_MODULE, 961 .owner = THIS_MODULE,
943 .pf = PF_BRIDGE, 962 .pf = NFPROTO_BRIDGE,
944 .hooknum = NF_BR_FORWARD, 963 .hooknum = NF_BR_FORWARD,
945 .priority = NF_BR_PRI_BRNF, 964 .priority = NF_BR_PRI_BRNF,
946 }, 965 },
947 { 966 {
948 .hook = br_nf_post_routing, 967 .hook = br_nf_post_routing,
949 .owner = THIS_MODULE, 968 .owner = THIS_MODULE,
950 .pf = PF_BRIDGE, 969 .pf = NFPROTO_BRIDGE,
951 .hooknum = NF_BR_POST_ROUTING, 970 .hooknum = NF_BR_POST_ROUTING,
952 .priority = NF_BR_PRI_LAST, 971 .priority = NF_BR_PRI_LAST,
953 }, 972 },
954 { 973 {
955 .hook = ip_sabotage_in, 974 .hook = ip_sabotage_in,
956 .owner = THIS_MODULE, 975 .owner = THIS_MODULE,
957 .pf = PF_INET, 976 .pf = NFPROTO_IPV4,
958 .hooknum = NF_INET_PRE_ROUTING, 977 .hooknum = NF_INET_PRE_ROUTING,
959 .priority = NF_IP_PRI_FIRST, 978 .priority = NF_IP_PRI_FIRST,
960 }, 979 },
961 { 980 {
962 .hook = ip_sabotage_in, 981 .hook = ip_sabotage_in,
963 .owner = THIS_MODULE, 982 .owner = THIS_MODULE,
964 .pf = PF_INET6, 983 .pf = NFPROTO_IPV6,
965 .hooknum = NF_INET_PRE_ROUTING, 984 .hooknum = NF_INET_PRE_ROUTING,
966 .priority = NF_IP6_PRI_FIRST, 985 .priority = NF_IP6_PRI_FIRST,
967 }, 986 },
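
Note: the br_nf_pre_routing_finish_bridge() rewrite above replaces a non-refcounted neighbour peek with dst_neigh_lookup_skb(), which returns a held reference (or NULL), so both output paths now funnel through a single exit that calls neigh_release(). A condensed sketch of the pattern, with a hypothetical deliver() callback standing in for the fast and slow paths:

#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int example_xmit_via_neigh(struct dst_entry *dst, struct sk_buff *skb,
				  int (*deliver)(struct neighbour *,
						 struct sk_buff *))
{
	struct neighbour *neigh = dst_neigh_lookup_skb(dst, skb);
	int ret;

	if (!neigh) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	ret = deliver(neigh, skb);	/* consumes skb */
	neigh_release(neigh);		/* pair with the lookup */
	return ret;
}
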
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2080485515f1..fe41260fbf38 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -208,7 +208,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
208 return 0; 208 return 0;
209} 209}
210 210
211static struct rtnl_link_ops br_link_ops __read_mostly = { 211struct rtnl_link_ops br_link_ops __read_mostly = {
212 .kind = "bridge", 212 .kind = "bridge",
213 .priv_size = sizeof(struct net_bridge), 213 .priv_size = sizeof(struct net_bridge),
214 .setup = br_dev_setup, 214 .setup = br_dev_setup,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1a8ad4fb9a6b..a768b2408edf 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -549,6 +549,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
549#endif 549#endif
550 550
551/* br_netlink.c */ 551/* br_netlink.c */
552extern struct rtnl_link_ops br_link_ops;
552extern int br_netlink_init(void); 553extern int br_netlink_init(void);
553extern void br_netlink_fini(void); 554extern void br_netlink_fini(void);
554extern void br_ifinfo_notify(int event, struct net_bridge_port *port); 555extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 6229b62749e8..13b36bdc76a7 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -27,7 +27,7 @@ struct brport_attribute {
27}; 27};
28 28
29#define BRPORT_ATTR(_name,_mode,_show,_store) \ 29#define BRPORT_ATTR(_name,_mode,_show,_store) \
30struct brport_attribute brport_attr_##_name = { \ 30const struct brport_attribute brport_attr_##_name = { \
31 .attr = {.name = __stringify(_name), \ 31 .attr = {.name = __stringify(_name), \
32 .mode = _mode }, \ 32 .mode = _mode }, \
33 .show = _show, \ 33 .show = _show, \
@@ -164,7 +164,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
164 store_multicast_router); 164 store_multicast_router);
165#endif 165#endif
166 166
167static struct brport_attribute *brport_attrs[] = { 167static const struct brport_attribute *brport_attrs[] = {
168 &brport_attr_path_cost, 168 &brport_attr_path_cost,
169 &brport_attr_priority, 169 &brport_attr_priority,
170 &brport_attr_port_id, 170 &brport_attr_port_id,
@@ -241,7 +241,7 @@ const struct sysfs_ops brport_sysfs_ops = {
241int br_sysfs_addif(struct net_bridge_port *p) 241int br_sysfs_addif(struct net_bridge_port *p)
242{ 242{
243 struct net_bridge *br = p->br; 243 struct net_bridge *br = p->br;
244 struct brport_attribute **a; 244 const struct brport_attribute **a;
245 int err; 245 int err;
246 246
247 err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj, 247 err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 5449294bdd5e..19063473c71f 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -145,19 +145,24 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 
 	if (!ub->skb) {
 		if (!(ub->skb = ulog_alloc_skb(size)))
-			goto alloc_failure;
+			goto unlock;
 	} else if (size > skb_tailroom(ub->skb)) {
 		ulog_send(group);
 
 		if (!(ub->skb = ulog_alloc_skb(size)))
-			goto alloc_failure;
+			goto unlock;
 	}
 
-	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
-			size - NLMSG_ALIGN(sizeof(*nlh)));
+	nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
+			size - NLMSG_ALIGN(sizeof(*nlh)), 0);
+	if (!nlh) {
+		kfree_skb(ub->skb);
+		ub->skb = NULL;
+		goto unlock;
+	}
 	ub->qlen++;
 
-	pm = NLMSG_DATA(nlh);
+	pm = nlmsg_data(nlh);
 
 	/* Fill in the ulog data */
 	pm->version = EBT_ULOG_VERSION;
@@ -209,14 +214,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 
 unlock:
 	spin_unlock_bh(lock);
-
-	return;
-
-nlmsg_failure:
-	pr_debug("error during NLMSG_PUT. This should "
-		 "not happen, please report to author.\n");
-alloc_failure:
-	goto unlock;
 }
 
 /* this function is registered with the netfilter core */
@@ -285,6 +282,9 @@ static int __init ebt_ulog_init(void)
 {
 	int ret;
 	int i;
+	struct netlink_kernel_cfg cfg = {
+		.groups	= EBT_ULOG_MAXNLGROUPS,
+	};
 
 	if (nlbufsiz >= 128*1024) {
 		pr_warning("Netlink buffer has to be <= 128kB,"
@@ -299,8 +299,7 @@ static int __init ebt_ulog_init(void)
 	}
 
 	ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
-					  EBT_ULOG_MAXNLGROUPS, NULL, NULL,
-					  THIS_MODULE);
+					  THIS_MODULE, &cfg);
 	if (!ebtulognl)
 		ret = -ENOMEM;
 	else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
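
Note: two API shifts drive the ebt_ulog changes above: nlmsg_put() returns NULL on insufficient tailroom instead of jumping to a caller-supplied nlmsg_failure label the way the old NLMSG_PUT() macro did, and the multicast group count now travels in struct netlink_kernel_cfg rather than as a netlink_kernel_create() argument. A sketch of the new append pattern, names illustrative:

#include <linux/errno.h>
#include <net/netlink.h>

static int example_append(struct sk_buff *skb, u32 seq, int payload)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, seq, 0, payload, 0);
	if (!nlh)
		return -EMSGSIZE;	/* caller resets or frees the skb */
	/* ... fill nlmsg_data(nlh) ... */
	return 0;
}
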
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index aa6f716524fd..1ae1d9cb278d 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -4,8 +4,7 @@
4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * 6 *
7 * Borrowed heavily from file: pn_dev.c. Thanks to 7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
8 * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * and Sakari Ailus <sakari.ailus@nokia.com> 8 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */ 9 */
11 10
@@ -91,11 +90,8 @@ static int caifd_refcnt_read(struct caif_device_entry *e)
91/* Allocate new CAIF device. */ 90/* Allocate new CAIF device. */
92static struct caif_device_entry *caif_device_alloc(struct net_device *dev) 91static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
93{ 92{
94 struct caif_device_entry_list *caifdevs;
95 struct caif_device_entry *caifd; 93 struct caif_device_entry *caifd;
96 94
97 caifdevs = caif_device_list(dev_net(dev));
98
99 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 95 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
100 if (!caifd) 96 if (!caifd)
101 return NULL; 97 return NULL;
@@ -132,6 +128,11 @@ void caif_flow_cb(struct sk_buff *skb)
132 128
133 rcu_read_lock(); 129 rcu_read_lock();
134 caifd = caif_get(skb->dev); 130 caifd = caif_get(skb->dev);
131
132 WARN_ON(caifd == NULL);
133 if (caifd == NULL)
134 return;
135
135 caifd_hold(caifd); 136 caifd_hold(caifd);
136 rcu_read_unlock(); 137 rcu_read_unlock();
137 138
@@ -562,9 +563,9 @@ static int __init caif_device_init(void)
562 563
563static void __exit caif_device_exit(void) 564static void __exit caif_device_exit(void)
564{ 565{
565 unregister_pernet_subsys(&caif_net_ops);
566 unregister_netdevice_notifier(&caif_device_notifier); 566 unregister_netdevice_notifier(&caif_device_notifier);
567 dev_remove_pack(&caif_packet_type); 567 dev_remove_pack(&caif_packet_type);
568 unregister_pernet_subsys(&caif_net_ops);
568} 569}
569 570
570module_init(caif_device_init); 571module_init(caif_device_init);
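
Note: caif_device_exit() now tears down in the reverse order of caif_device_init(): the pernet subsystem registered first is unregistered last, after the notifier and packet handler that may still dereference per-net state. Schematic version with placeholders (error handling elided for brevity):

#include <linux/netdevice.h>
#include <net/net_namespace.h>

static struct pernet_operations example_net_ops;	/* placeholders */
static struct notifier_block example_notifier;
static struct packet_type example_packet_type;

static int __init example_init(void)
{
	register_pernet_subsys(&example_net_ops);		/* 1 */
	register_netdevice_notifier(&example_notifier);		/* 2 */
	dev_add_pack(&example_packet_type);			/* 3 */
	return 0;
}

static void __exit example_exit(void)
{
	dev_remove_pack(&example_packet_type);			/* 3 */
	unregister_netdevice_notifier(&example_notifier);	/* 2 */
	unregister_pernet_subsys(&example_net_ops);		/* 1 */
}
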
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index fb8944355264..095259f83902 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -141,7 +141,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
141 err = sk_filter(sk, skb); 141 err = sk_filter(sk, skb);
142 if (err) 142 if (err)
143 return err; 143 return err;
144 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { 144 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
145 set_rx_flow_off(cf_sk); 145 set_rx_flow_off(cf_sk);
146 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); 146 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
147 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 147 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
@@ -220,6 +220,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
220 cfsk_hold, cfsk_put); 220 cfsk_hold, cfsk_put);
221 cf_sk->sk.sk_state = CAIF_CONNECTED; 221 cf_sk->sk.sk_state = CAIF_CONNECTED;
222 set_tx_flow_on(cf_sk); 222 set_tx_flow_on(cf_sk);
223 cf_sk->sk.sk_shutdown = 0;
223 cf_sk->sk.sk_state_change(&cf_sk->sk); 224 cf_sk->sk.sk_state_change(&cf_sk->sk);
224 break; 225 break;
225 226
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 047cd0eec022..44f270fc2d06 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -175,15 +175,17 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
 
 void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
 {
+	struct cfpkt *pkt;
 	struct cfctrl *cfctrl = container_obj(layer);
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
 	struct cflayer *dn = cfctrl->serv.layer.dn;
-	if (!pkt)
-		return;
+
 	if (!dn) {
 		pr_debug("not able to send enum request\n");
 		return;
 	}
+	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	if (!pkt)
+		return;
 	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
 	init_info(cfpkt_info(pkt), cfctrl);
 	cfpkt_info(pkt)->dev_info->id = physlinkid;
@@ -302,18 +304,17 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
 			struct cflayer *client)
 {
 	int ret;
+	struct cfpkt *pkt;
 	struct cfctrl *cfctrl = container_obj(layer);
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
 	struct cflayer *dn = cfctrl->serv.layer.dn;
 
-	if (!pkt)
-		return -ENOMEM;
-
 	if (!dn) {
 		pr_debug("not able to send link-down request\n");
 		return -ENODEV;
 	}
-
+	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	if (!pkt)
+		return -ENOMEM;
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
 	cfpkt_addbdy(pkt, channelid);
 	init_info(cfpkt_info(pkt), cfctrl);
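
Note: both cfctrl hunks above reorder the functions so that the cheap precondition (a lower layer exists) is checked before cfpkt_create(); previously the early return on a missing layer leaked the freshly allocated packet. The general shape, with illustrative allocation:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_layer;			/* opaque lower layer, illustrative */

/* Check the cheap precondition first, allocate last: then no error
 * path needs to free anything. */
static int example_send_request(struct example_layer *dn)
{
	void *pkt;

	if (!dn)
		return -ENODEV;

	pkt = kzalloc(64, GFP_ATOMIC);
	if (!pkt)
		return -ENOMEM;

	/* ... build and transmit, then free or hand off pkt ... */
	kfree(pkt);
	return 0;
}
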
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 0ce2ad0696da..821022a7214f 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -41,6 +41,7 @@
41 */ 41 */
42 42
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/stddef.h>
44#include <linux/init.h> 45#include <linux/init.h>
45#include <linux/kmod.h> 46#include <linux/kmod.h>
46#include <linux/slab.h> 47#include <linux/slab.h>
@@ -220,30 +221,46 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
  *  -ENOBUFS on full driver queue (see net_xmit_errno())
  *  -ENOMEM when local loopback failed at calling skb_clone()
  *  -EPERM when trying to send on a non-CAN interface
+ *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
  *  -EINVAL when the skb->data does not contain a valid CAN frame
  */
 int can_send(struct sk_buff *skb, int loop)
 {
 	struct sk_buff *newskb = NULL;
-	struct can_frame *cf = (struct can_frame *)skb->data;
-	int err;
+	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+	int err = -EINVAL;
+
+	if (skb->len == CAN_MTU) {
+		skb->protocol = htons(ETH_P_CAN);
+		if (unlikely(cfd->len > CAN_MAX_DLEN))
+			goto inval_skb;
+	} else if (skb->len == CANFD_MTU) {
+		skb->protocol = htons(ETH_P_CANFD);
+		if (unlikely(cfd->len > CANFD_MAX_DLEN))
+			goto inval_skb;
+	} else
+		goto inval_skb;
 
-	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
-		kfree_skb(skb);
-		return -EINVAL;
+	/*
+	 * Make sure the CAN frame can pass the selected CAN netdevice.
+	 * As structs can_frame and canfd_frame are similar, we can provide
+	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
+	 */
+	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
+		err = -EMSGSIZE;
+		goto inval_skb;
 	}
 
-	if (skb->dev->type != ARPHRD_CAN) {
-		kfree_skb(skb);
-		return -EPERM;
+	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
+		err = -EPERM;
+		goto inval_skb;
 	}
 
-	if (!(skb->dev->flags & IFF_UP)) {
-		kfree_skb(skb);
-		return -ENETDOWN;
+	if (unlikely(!(skb->dev->flags & IFF_UP))) {
+		err = -ENETDOWN;
+		goto inval_skb;
 	}
 
-	skb->protocol = htons(ETH_P_CAN);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 
@@ -300,6 +317,10 @@ int can_send(struct sk_buff *skb, int loop)
 	can_stats.tx_frames_delta++;
 
 	return 0;
+
+inval_skb:
+	kfree_skb(skb);
+	return err;
 }
 EXPORT_SYMBOL(can_send);
 
@@ -334,8 +355,8 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
334 * relevant bits for the filter. 355 * relevant bits for the filter.
335 * 356 *
336 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 357 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
337 * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames 358 * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
338 * there is a special filterlist and a special rx path filter handling. 359 * frames there is a special filterlist and a special rx path filter handling.
339 * 360 *
340 * Return: 361 * Return:
341 * Pointer to optimal filterlist for the given can_id/mask pair. 362 * Pointer to optimal filterlist for the given can_id/mask pair.
@@ -347,7 +368,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
347{ 368{
348 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ 369 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
349 370
350 /* filter for error frames in extra filterlist */ 371 /* filter for error message frames in extra filterlist */
351 if (*mask & CAN_ERR_FLAG) { 372 if (*mask & CAN_ERR_FLAG) {
352 /* clear CAN_ERR_FLAG in filter entry */ 373 /* clear CAN_ERR_FLAG in filter entry */
353 *mask &= CAN_ERR_MASK; 374 *mask &= CAN_ERR_MASK;
@@ -408,7 +429,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
408 * <received_can_id> & mask == can_id & mask 429 * <received_can_id> & mask == can_id & mask
409 * 430 *
410 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 431 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
411 * filter for error frames (CAN_ERR_FLAG bit set in mask). 432 * filter for error message frames (CAN_ERR_FLAG bit set in mask).
412 * 433 *
413 * The provided pointer to the sk_buff is guaranteed to be valid as long as 434 * The provided pointer to the sk_buff is guaranteed to be valid as long as
414 * the callback function is running. The callback function must *not* free 435 * the callback function is running. The callback function must *not* free
@@ -578,7 +599,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
578 return 0; 599 return 0;
579 600
580 if (can_id & CAN_ERR_FLAG) { 601 if (can_id & CAN_ERR_FLAG) {
581 /* check for error frame entries only */ 602 /* check for error message frame entries only */
582 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { 603 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
583 if (can_id & r->mask) { 604 if (can_id & r->mask) {
584 deliver(skb, r); 605 deliver(skb, r);
@@ -632,24 +653,11 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
632 return matches; 653 return matches;
633} 654}
634 655
635static int can_rcv(struct sk_buff *skb, struct net_device *dev, 656static void can_receive(struct sk_buff *skb, struct net_device *dev)
636 struct packet_type *pt, struct net_device *orig_dev)
637{ 657{
638 struct dev_rcv_lists *d; 658 struct dev_rcv_lists *d;
639 struct can_frame *cf = (struct can_frame *)skb->data;
640 int matches; 659 int matches;
641 660
642 if (!net_eq(dev_net(dev), &init_net))
643 goto drop;
644
645 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
646 skb->len != sizeof(struct can_frame) ||
647 cf->can_dlc > 8,
648 "PF_CAN: dropped non conform skbuf: "
649 "dev type %d, len %d, can_dlc %d\n",
650 dev->type, skb->len, cf->can_dlc))
651 goto drop;
652
653 /* update statistics */ 661 /* update statistics */
654 can_stats.rx_frames++; 662 can_stats.rx_frames++;
655 can_stats.rx_frames_delta++; 663 can_stats.rx_frames_delta++;
@@ -673,7 +681,49 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
673 can_stats.matches++; 681 can_stats.matches++;
674 can_stats.matches_delta++; 682 can_stats.matches_delta++;
675 } 683 }
684}
676 685
686static int can_rcv(struct sk_buff *skb, struct net_device *dev,
687 struct packet_type *pt, struct net_device *orig_dev)
688{
689 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
690
691 if (unlikely(!net_eq(dev_net(dev), &init_net)))
692 goto drop;
693
694 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
695 skb->len != CAN_MTU ||
696 cfd->len > CAN_MAX_DLEN,
697 "PF_CAN: dropped non conform CAN skbuf: "
698 "dev type %d, len %d, datalen %d\n",
699 dev->type, skb->len, cfd->len))
700 goto drop;
701
702 can_receive(skb, dev);
703 return NET_RX_SUCCESS;
704
705drop:
706 kfree_skb(skb);
707 return NET_RX_DROP;
708}
709
710static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
711 struct packet_type *pt, struct net_device *orig_dev)
712{
713 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
714
715 if (unlikely(!net_eq(dev_net(dev), &init_net)))
716 goto drop;
717
718 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
719 skb->len != CANFD_MTU ||
720 cfd->len > CANFD_MAX_DLEN,
721 "PF_CAN: dropped non conform CAN FD skbuf: "
722 "dev type %d, len %d, datalen %d\n",
723 dev->type, skb->len, cfd->len))
724 goto drop;
725
726 can_receive(skb, dev);
677 return NET_RX_SUCCESS; 727 return NET_RX_SUCCESS;
678 728
679drop: 729drop:
@@ -807,10 +857,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
807 857
808static struct packet_type can_packet __read_mostly = { 858static struct packet_type can_packet __read_mostly = {
809 .type = cpu_to_be16(ETH_P_CAN), 859 .type = cpu_to_be16(ETH_P_CAN),
810 .dev = NULL,
811 .func = can_rcv, 860 .func = can_rcv,
812}; 861};
813 862
863static struct packet_type canfd_packet __read_mostly = {
864 .type = cpu_to_be16(ETH_P_CANFD),
865 .func = canfd_rcv,
866};
867
814static const struct net_proto_family can_family_ops = { 868static const struct net_proto_family can_family_ops = {
815 .family = PF_CAN, 869 .family = PF_CAN,
816 .create = can_create, 870 .create = can_create,
@@ -824,6 +878,12 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
824 878
825static __init int can_init(void) 879static __init int can_init(void)
826{ 880{
881 /* check for correct padding to be able to use the structs similarly */
882 BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
883 offsetof(struct canfd_frame, len) ||
884 offsetof(struct can_frame, data) !=
885 offsetof(struct canfd_frame, data));
886
827 printk(banner); 887 printk(banner);
828 888
829 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list)); 889 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
@@ -846,6 +906,7 @@ static __init int can_init(void)
846 sock_register(&can_family_ops); 906 sock_register(&can_family_ops);
847 register_netdevice_notifier(&can_netdev_notifier); 907 register_netdevice_notifier(&can_netdev_notifier);
848 dev_add_pack(&can_packet); 908 dev_add_pack(&can_packet);
909 dev_add_pack(&canfd_packet);
849 910
850 return 0; 911 return 0;
851} 912}
@@ -860,6 +921,7 @@ static __exit void can_exit(void)
860 can_remove_proc(); 921 can_remove_proc();
861 922
862 /* protocol unregister */ 923 /* protocol unregister */
924 dev_remove_pack(&canfd_packet);
863 dev_remove_pack(&can_packet); 925 dev_remove_pack(&can_packet);
864 unregister_netdevice_notifier(&can_netdev_notifier); 926 unregister_netdevice_notifier(&can_netdev_notifier);
865 sock_unregister(PF_CAN); 927 sock_unregister(PF_CAN);
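The af_can.c changes above split the old can_rcv() into a shared can_receive() plus two thin entry points: can_rcv() for classic frames (skb->len must equal CAN_MTU) and canfd_rcv() for ETH_P_CANFD packets (skb->len must equal CANFD_MTU), each bounds-checking the payload length for its frame type. The BUILD_BUG_ON() added to can_init() is what makes the shared path safe: struct can_frame and struct canfd_frame must keep their length field and data[] at identical offsets. A minimal userspace sketch of that invariant, with the struct layouts copied in (mirroring <linux/can.h>, GCC-style alignment attribute as in the kernel headers) so it compiles standalone:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t canid_t;

    struct can_frame {              /* classic CAN: up to 8 data bytes */
        canid_t can_id;
        uint8_t can_dlc;
        uint8_t data[8] __attribute__((aligned(8)));
    };

    struct canfd_frame {            /* CAN FD: up to 64 data bytes */
        canid_t can_id;
        uint8_t len;
        uint8_t flags;
        uint8_t res0;
        uint8_t res1;
        uint8_t data[64] __attribute__((aligned(8)));
    };

    int main(void)
    {
        /* the same invariant can_init() asserts with BUILD_BUG_ON() */
        assert(offsetof(struct can_frame, can_dlc) ==
               offsetof(struct canfd_frame, len));
        assert(offsetof(struct can_frame, data) ==
               offsetof(struct canfd_frame, data));
        printf("length field at %zu, data[] at %zu in both structs\n",
               offsetof(struct canfd_frame, len),
               offsetof(struct canfd_frame, data));
        return 0;
    }

If the layouts ever drifted apart, the kernel build would fail at the BUILD_BUG_ON() rather than miscomputing filter matches at runtime.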
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fd882dbadad3..1dccb4c33894 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -104,6 +104,9 @@ struct s_pstats {
104 unsigned long rcv_entries_max; 104 unsigned long rcv_entries_max;
105}; 105};
106 106
107/* receive filters subscribed for 'all' CAN devices */
108extern struct dev_rcv_lists can_rx_alldev_list;
109
107/* function prototypes for the CAN networklayer procfs (proc.c) */ 110/* function prototypes for the CAN networklayer procfs (proc.c) */
108extern void can_init_proc(void); 111extern void can_init_proc(void);
109extern void can_remove_proc(void); 112extern void can_remove_proc(void);
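Promoting the can_rx_alldev_list declaration into af_can.h (with the matching removal of the private extern from proc.c below) leaves one authoritative declaration that every includer, and the defining file itself, is checked against. The pattern in miniature (illustrative types, not the real struct):

    #include <stdio.h>

    struct dev_rcv_lists { int entries; };

    /* in the shared header: one authoritative declaration */
    extern struct dev_rcv_lists can_rx_alldev_list;

    /* in the defining file, which also includes the header, so the
     * compiler verifies declaration and definition agree */
    struct dev_rcv_lists can_rx_alldev_list = { 0 };

    int main(void)
    {
        printf("%d\n", can_rx_alldev_list.entries);
        return 0;
    }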
diff --git a/net/can/gw.c b/net/can/gw.c
index b41acf25668f..b54d5e695b03 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -444,11 +444,14 @@ static int cgw_notifier(struct notifier_block *nb,
444 return NOTIFY_DONE; 444 return NOTIFY_DONE;
445} 445}
446 446
447static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj) 447static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
448 u32 pid, u32 seq, int flags)
448{ 449{
449 struct cgw_frame_mod mb; 450 struct cgw_frame_mod mb;
450 struct rtcanmsg *rtcan; 451 struct rtcanmsg *rtcan;
451 struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0); 452 struct nlmsghdr *nlh;
453
454 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
452 if (!nlh) 455 if (!nlh)
453 return -EMSGSIZE; 456 return -EMSGSIZE;
454 457
@@ -462,15 +465,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
462 if (gwj->handled_frames) { 465 if (gwj->handled_frames) {
463 if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0) 466 if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
464 goto cancel; 467 goto cancel;
465 else
466 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
467 } 468 }
468 469
469 if (gwj->dropped_frames) { 470 if (gwj->dropped_frames) {
470 if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0) 471 if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
471 goto cancel; 472 goto cancel;
472 else
473 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
474 } 473 }
475 474
476 /* check non default settings of attributes */ 475 /* check non default settings of attributes */
@@ -480,8 +479,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
480 mb.modtype = gwj->mod.modtype.and; 479 mb.modtype = gwj->mod.modtype.and;
481 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) 480 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
482 goto cancel; 481 goto cancel;
483 else
484 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
485 } 482 }
486 483
487 if (gwj->mod.modtype.or) { 484 if (gwj->mod.modtype.or) {
@@ -489,8 +486,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
489 mb.modtype = gwj->mod.modtype.or; 486 mb.modtype = gwj->mod.modtype.or;
490 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) 487 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
491 goto cancel; 488 goto cancel;
492 else
493 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
494 } 489 }
495 490
496 if (gwj->mod.modtype.xor) { 491 if (gwj->mod.modtype.xor) {
@@ -498,8 +493,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
498 mb.modtype = gwj->mod.modtype.xor; 493 mb.modtype = gwj->mod.modtype.xor;
499 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) 494 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
500 goto cancel; 495 goto cancel;
501 else
502 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
503 } 496 }
504 497
505 if (gwj->mod.modtype.set) { 498 if (gwj->mod.modtype.set) {
@@ -507,26 +500,18 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
507 mb.modtype = gwj->mod.modtype.set; 500 mb.modtype = gwj->mod.modtype.set;
508 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) 501 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
509 goto cancel; 502 goto cancel;
510 else
511 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
512 } 503 }
513 504
514 if (gwj->mod.csumfunc.crc8) { 505 if (gwj->mod.csumfunc.crc8) {
515 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, 506 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
516 &gwj->mod.csum.crc8) < 0) 507 &gwj->mod.csum.crc8) < 0)
517 goto cancel; 508 goto cancel;
518 else
519 nlh->nlmsg_len += NLA_HDRLEN + \
520 NLA_ALIGN(CGW_CS_CRC8_LEN);
521 } 509 }
522 510
523 if (gwj->mod.csumfunc.xor) { 511 if (gwj->mod.csumfunc.xor) {
524 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, 512 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
525 &gwj->mod.csum.xor) < 0) 513 &gwj->mod.csum.xor) < 0)
526 goto cancel; 514 goto cancel;
527 else
528 nlh->nlmsg_len += NLA_HDRLEN + \
529 NLA_ALIGN(CGW_CS_XOR_LEN);
530 } 515 }
531 516
532 if (gwj->gwtype == CGW_TYPE_CAN_CAN) { 517 if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
@@ -535,23 +520,16 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
535 if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter), 520 if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
536 &gwj->ccgw.filter) < 0) 521 &gwj->ccgw.filter) < 0)
537 goto cancel; 522 goto cancel;
538 else
539 nlh->nlmsg_len += NLA_HDRLEN +
540 NLA_ALIGN(sizeof(struct can_filter));
541 } 523 }
542 524
543 if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0) 525 if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
544 goto cancel; 526 goto cancel;
545 else
546 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
547 527
548 if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0) 528 if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
549 goto cancel; 529 goto cancel;
550 else
551 nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
552 } 530 }
553 531
554 return skb->len; 532 return nlmsg_end(skb, nlh);
555 533
556cancel: 534cancel:
557 nlmsg_cancel(skb, nlh); 535 nlmsg_cancel(skb, nlh);
@@ -571,7 +549,8 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
571 if (idx < s_idx) 549 if (idx < s_idx)
572 goto cont; 550 goto cont;
573 551
574 if (cgw_put_job(skb, gwj) < 0) 552 if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
553 cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
575 break; 554 break;
576cont: 555cont:
577 idx++; 556 idx++;
@@ -583,6 +562,18 @@ cont:
583 return skb->len; 562 return skb->len;
584} 563}
585 564
565static const struct nla_policy cgw_policy[CGW_MAX+1] = {
566 [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) },
567 [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) },
568 [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) },
569 [CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) },
570 [CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) },
571 [CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) },
572 [CGW_SRC_IF] = { .type = NLA_U32 },
573 [CGW_DST_IF] = { .type = NLA_U32 },
574 [CGW_FILTER] = { .len = sizeof(struct can_filter) },
575};
576
586/* check for common and gwtype specific attributes */ 577/* check for common and gwtype specific attributes */
587static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, 578static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
588 u8 gwtype, void *gwtypeattr) 579 u8 gwtype, void *gwtypeattr)
@@ -595,14 +586,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
595 /* initialize modification & checksum data space */ 586 /* initialize modification & checksum data space */
596 memset(mod, 0, sizeof(*mod)); 587 memset(mod, 0, sizeof(*mod));
597 588
598 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL); 589 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
590 cgw_policy);
599 if (err < 0) 591 if (err < 0)
600 return err; 592 return err;
601 593
602 /* check for AND/OR/XOR/SET modifications */ 594 /* check for AND/OR/XOR/SET modifications */
603 595
604 if (tb[CGW_MOD_AND] && 596 if (tb[CGW_MOD_AND]) {
605 nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
606 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); 597 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
607 598
608 canframecpy(&mod->modframe.and, &mb.cf); 599 canframecpy(&mod->modframe.and, &mb.cf);
@@ -618,8 +609,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
618 mod->modfunc[modidx++] = mod_and_data; 609 mod->modfunc[modidx++] = mod_and_data;
619 } 610 }
620 611
621 if (tb[CGW_MOD_OR] && 612 if (tb[CGW_MOD_OR]) {
622 nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
623 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); 613 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
624 614
625 canframecpy(&mod->modframe.or, &mb.cf); 615 canframecpy(&mod->modframe.or, &mb.cf);
@@ -635,8 +625,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
635 mod->modfunc[modidx++] = mod_or_data; 625 mod->modfunc[modidx++] = mod_or_data;
636 } 626 }
637 627
638 if (tb[CGW_MOD_XOR] && 628 if (tb[CGW_MOD_XOR]) {
639 nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
640 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); 629 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
641 630
642 canframecpy(&mod->modframe.xor, &mb.cf); 631 canframecpy(&mod->modframe.xor, &mb.cf);
@@ -652,8 +641,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
652 mod->modfunc[modidx++] = mod_xor_data; 641 mod->modfunc[modidx++] = mod_xor_data;
653 } 642 }
654 643
655 if (tb[CGW_MOD_SET] && 644 if (tb[CGW_MOD_SET]) {
656 nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
657 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); 645 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
658 646
659 canframecpy(&mod->modframe.set, &mb.cf); 647 canframecpy(&mod->modframe.set, &mb.cf);
@@ -672,11 +660,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
672 /* check for checksum operations after CAN frame modifications */ 660 /* check for checksum operations after CAN frame modifications */
673 if (modidx) { 661 if (modidx) {
674 662
675 if (tb[CGW_CS_CRC8] && 663 if (tb[CGW_CS_CRC8]) {
676 nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) { 664 struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
677
678 struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
679 nla_data(tb[CGW_CS_CRC8]);
680 665
681 err = cgw_chk_csum_parms(c->from_idx, c->to_idx, 666 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
682 c->result_idx); 667 c->result_idx);
@@ -699,11 +684,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
699 mod->csumfunc.crc8 = cgw_csum_crc8_neg; 684 mod->csumfunc.crc8 = cgw_csum_crc8_neg;
700 } 685 }
701 686
702 if (tb[CGW_CS_XOR] && 687 if (tb[CGW_CS_XOR]) {
703 nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) { 688 struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
704
705 struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
706 nla_data(tb[CGW_CS_XOR]);
707 689
708 err = cgw_chk_csum_parms(c->from_idx, c->to_idx, 690 err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
709 c->result_idx); 691 c->result_idx);
@@ -735,8 +717,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
735 memset(ccgw, 0, sizeof(*ccgw)); 717 memset(ccgw, 0, sizeof(*ccgw));
736 718
737 /* check for can_filter in attributes */ 719 /* check for can_filter in attributes */
738 if (tb[CGW_FILTER] && 720 if (tb[CGW_FILTER])
739 nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
740 nla_memcpy(&ccgw->filter, tb[CGW_FILTER], 721 nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
741 sizeof(struct can_filter)); 722 sizeof(struct can_filter));
742 723
@@ -746,13 +727,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
746 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF]) 727 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
747 return err; 728 return err;
748 729
749 if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32)) 730 ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
750 nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF], 731 ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
751 sizeof(u32));
752
753 if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
754 nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
755 sizeof(u32));
756 732
757 /* both indices set to 0 for flushing all routing entries */ 733 /* both indices set to 0 for flushing all routing entries */
758 if (!ccgw->src_idx && !ccgw->dst_idx) 734 if (!ccgw->src_idx && !ccgw->dst_idx)
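The gw.c rework above replaces every open-coded nla_len() check with a declarative struct nla_policy handed to nlmsg_parse(), so malformed attributes are rejected once, centrally, before any use site runs; likewise cgw_put_job() stops hand-maintaining nlmsg_len after each nla_put() and lets nlmsg_end() compute the final length. A self-contained sketch of the table-driven validation idea (the enum, struct and function names here are invented for illustration and are not the kernel netlink API):

    #include <stdio.h>
    #include <string.h>

    enum { ATTR_SRC_IF, ATTR_DST_IF, ATTR_FILTER, ATTR_MAX };

    struct attr   { int type; size_t len; const void *data; };
    struct policy { size_t exact_len; };    /* 0 = no length constraint */

    static const struct policy policy[ATTR_MAX] = {
        [ATTR_SRC_IF] = { .exact_len = sizeof(unsigned int) },
        [ATTR_DST_IF] = { .exact_len = sizeof(unsigned int) },
        [ATTR_FILTER] = { .exact_len = 8 },  /* e.g. a filter struct */
    };

    /* validate a stream of attributes against the table once */
    static int parse(const struct attr *attrs, int n,
                     const struct attr *tb[ATTR_MAX])
    {
        memset(tb, 0, sizeof(*tb) * ATTR_MAX);
        for (int i = 0; i < n; i++) {
            int t = attrs[i].type;
            if (t < 0 || t >= ATTR_MAX)
                continue;                    /* ignore unknown types */
            if (policy[t].exact_len && attrs[i].len != policy[t].exact_len)
                return -1;                   /* malformed attribute */
            tb[t] = &attrs[i];
        }
        return 0;
    }

    int main(void)
    {
        unsigned int ifidx = 3;
        struct attr in[] = { { ATTR_SRC_IF, sizeof(ifidx), &ifidx } };
        const struct attr *tb[ATTR_MAX];

        printf("parse: %s\n", parse(in, 1, tb) ? "rejected" : "ok");
        return 0;
    }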
diff --git a/net/can/proc.c b/net/can/proc.c
index ba873c36d2fd..3b6dd3180492 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -83,9 +83,6 @@ static const char rx_list_name[][8] = {
83 [RX_EFF] = "rx_eff", 83 [RX_EFF] = "rx_eff",
84}; 84};
85 85
86/* receive filters subscribed for 'all' CAN devices */
87extern struct dev_rcv_lists can_rx_alldev_list;
88
89/* 86/*
90 * af_can statistics stuff 87 * af_can statistics stuff
91 */ 88 */
diff --git a/net/can/raw.c b/net/can/raw.c
index cde1b4a20f75..3e9c89356a93 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -82,6 +82,7 @@ struct raw_sock {
82 struct notifier_block notifier; 82 struct notifier_block notifier;
83 int loopback; 83 int loopback;
84 int recv_own_msgs; 84 int recv_own_msgs;
85 int fd_frames;
85 int count; /* number of active filters */ 86 int count; /* number of active filters */
86 struct can_filter dfilter; /* default/single filter */ 87 struct can_filter dfilter; /* default/single filter */
87 struct can_filter *filter; /* pointer to filter(s) */ 88 struct can_filter *filter; /* pointer to filter(s) */
@@ -119,6 +120,14 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
119 if (!ro->recv_own_msgs && oskb->sk == sk) 120 if (!ro->recv_own_msgs && oskb->sk == sk)
120 return; 121 return;
121 122
123 /* do not pass frames with DLC > 8 to a legacy socket */
124 if (!ro->fd_frames) {
125 struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
126
127 if (unlikely(cfd->len > CAN_MAX_DLEN))
128 return;
129 }
130
122 /* clone the given skb to be able to enqueue it into the rcv queue */ 131 /* clone the given skb to be able to enqueue it into the rcv queue */
123 skb = skb_clone(oskb, GFP_ATOMIC); 132 skb = skb_clone(oskb, GFP_ATOMIC);
124 if (!skb) 133 if (!skb)
@@ -291,6 +300,7 @@ static int raw_init(struct sock *sk)
291 /* set default loopback behaviour */ 300 /* set default loopback behaviour */
292 ro->loopback = 1; 301 ro->loopback = 1;
293 ro->recv_own_msgs = 0; 302 ro->recv_own_msgs = 0;
303 ro->fd_frames = 0;
294 304
295 /* set notifier */ 305 /* set notifier */
296 ro->notifier.notifier_call = raw_notifier; 306 ro->notifier.notifier_call = raw_notifier;
@@ -569,6 +579,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
569 579
570 break; 580 break;
571 581
582 case CAN_RAW_FD_FRAMES:
583 if (optlen != sizeof(ro->fd_frames))
584 return -EINVAL;
585
586 if (copy_from_user(&ro->fd_frames, optval, optlen))
587 return -EFAULT;
588
589 break;
590
572 default: 591 default:
573 return -ENOPROTOOPT; 592 return -ENOPROTOOPT;
574 } 593 }
@@ -627,6 +646,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
627 val = &ro->recv_own_msgs; 646 val = &ro->recv_own_msgs;
628 break; 647 break;
629 648
649 case CAN_RAW_FD_FRAMES:
650 if (len > sizeof(int))
651 len = sizeof(int);
652 val = &ro->fd_frames;
653 break;
654
630 default: 655 default:
631 return -ENOPROTOOPT; 656 return -ENOPROTOOPT;
632 } 657 }
@@ -662,8 +687,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
662 } else 687 } else
663 ifindex = ro->ifindex; 688 ifindex = ro->ifindex;
664 689
665 if (size != sizeof(struct can_frame)) 690 if (ro->fd_frames) {
666 return -EINVAL; 691 if (unlikely(size != CANFD_MTU && size != CAN_MTU))
692 return -EINVAL;
693 } else {
694 if (unlikely(size != CAN_MTU))
695 return -EINVAL;
696 }
667 697
668 dev = dev_get_by_index(&init_net, ifindex); 698 dev = dev_get_by_index(&init_net, ifindex);
669 if (!dev) 699 if (!dev)
@@ -681,9 +711,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
681 if (err < 0) 711 if (err < 0)
682 goto free_skb; 712 goto free_skb;
683 713
684 /* to be able to check the received tx sock reference in raw_rcv() */
685 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
686
687 skb->dev = dev; 714 skb->dev = dev;
688 skb->sk = sk; 715 skb->sk = sk;
689 716
@@ -708,7 +735,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
708 struct msghdr *msg, size_t size, int flags) 735 struct msghdr *msg, size_t size, int flags)
709{ 736{
710 struct sock *sk = sock->sk; 737 struct sock *sk = sock->sk;
738 struct raw_sock *ro = raw_sk(sk);
711 struct sk_buff *skb; 739 struct sk_buff *skb;
740 int rxmtu;
712 int err = 0; 741 int err = 0;
713 int noblock; 742 int noblock;
714 743
@@ -719,10 +748,20 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
719 if (!skb) 748 if (!skb)
720 return err; 749 return err;
721 750
722 if (size < skb->len) 751 /*
752 * when serving a legacy socket, the DLC <= 8 check has already been
753 * done inside raw_rcv(). Now check whether a canfd_frame is being
754 * passed to a legacy socket and cap the CANFD_MTU/CAN_MTU length to CAN_MTU
755 */
756 if (!ro->fd_frames)
757 rxmtu = CAN_MTU;
758 else
759 rxmtu = skb->len;
760
761 if (size < rxmtu)
723 msg->msg_flags |= MSG_TRUNC; 762 msg->msg_flags |= MSG_TRUNC;
724 else 763 else
725 size = skb->len; 764 size = rxmtu;
726 765
727 err = memcpy_toiovec(msg->msg_iov, skb->data, size); 766 err = memcpy_toiovec(msg->msg_iov, skb->data, size);
728 if (err < 0) { 767 if (err < 0) {
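The raw.c side wires up the new CAN_RAW_FD_FRAMES socket option: an opted-in socket may send either CAN_MTU or CANFD_MTU bytes per frame and may receive struct canfd_frame, while legacy sockets are shielded in raw_rcv() from frames carrying more than CAN_MAX_DLEN data bytes. A userspace sketch of opting in (assumes the uapi headers from a kernel with this series; "can0" is a placeholder interface name):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    int main(void)
    {
        int enable = 1;
        struct sockaddr_can addr = { .can_family = AF_CAN };
        struct ifreq ifr;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        if (s < 0) { perror("socket"); return 1; }

        /* allow canfd_frame on read/write; without this the socket
         * keeps the legacy behaviour and only ever sees can_frame */
        if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
                       &enable, sizeof(enable)) < 0)
            perror("setsockopt CAN_RAW_FD_FRAMES");

        strcpy(ifr.ifr_name, "can0");
        if (ioctl(s, SIOCGIFINDEX, &ifr) == 0) {
            addr.can_ifindex = ifr.ifr_ifindex;
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("bind");
        }

        /* the buffer must be sized for the larger frame; the kernel
         * returns either CAN_MTU or CANFD_MTU bytes per frame */
        struct canfd_frame frame;
        ssize_t n = read(s, &frame, sizeof(frame));
        if (n == CANFD_MTU)
            printf("CAN FD frame, %u data bytes\n", frame.len);
        else if (n == CAN_MTU)
            printf("classic CAN frame\n");

        close(s);
        return 0;
    }

Reading into a canfd_frame-sized buffer is safe in both modes, and the returned length tells the application which frame type arrived.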
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a776f751edbf..69e38db28e5f 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18 18
19 19
20#include <linux/ceph/ceph_features.h>
20#include <linux/ceph/libceph.h> 21#include <linux/ceph/libceph.h>
21#include <linux/ceph/debugfs.h> 22#include <linux/ceph/debugfs.h>
22#include <linux/ceph/decode.h> 23#include <linux/ceph/decode.h>
@@ -460,27 +461,23 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
460 client->auth_err = 0; 461 client->auth_err = 0;
461 462
462 client->extra_mon_dispatch = NULL; 463 client->extra_mon_dispatch = NULL;
463 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT | 464 client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
464 supported_features; 465 supported_features;
465 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT | 466 client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT |
466 required_features; 467 required_features;
467 468
468 /* msgr */ 469 /* msgr */
469 if (ceph_test_opt(client, MYIP)) 470 if (ceph_test_opt(client, MYIP))
470 myaddr = &client->options->my_addr; 471 myaddr = &client->options->my_addr;
471 client->msgr = ceph_messenger_create(myaddr, 472 ceph_messenger_init(&client->msgr, myaddr,
472 client->supported_features, 473 client->supported_features,
473 client->required_features); 474 client->required_features,
474 if (IS_ERR(client->msgr)) { 475 ceph_test_opt(client, NOCRC));
475 err = PTR_ERR(client->msgr);
476 goto fail;
477 }
478 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
479 476
480 /* subsystems */ 477 /* subsystems */
481 err = ceph_monc_init(&client->monc, client); 478 err = ceph_monc_init(&client->monc, client);
482 if (err < 0) 479 if (err < 0)
483 goto fail_msgr; 480 goto fail;
484 err = ceph_osdc_init(&client->osdc, client); 481 err = ceph_osdc_init(&client->osdc, client);
485 if (err < 0) 482 if (err < 0)
486 goto fail_monc; 483 goto fail_monc;
@@ -489,8 +486,6 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
489 486
490fail_monc: 487fail_monc:
491 ceph_monc_stop(&client->monc); 488 ceph_monc_stop(&client->monc);
492fail_msgr:
493 ceph_messenger_destroy(client->msgr);
494fail: 489fail:
495 kfree(client); 490 kfree(client);
496 return ERR_PTR(err); 491 return ERR_PTR(err);
@@ -501,22 +496,15 @@ void ceph_destroy_client(struct ceph_client *client)
501{ 496{
502 dout("destroy_client %p\n", client); 497 dout("destroy_client %p\n", client);
503 498
499 atomic_set(&client->msgr.stopping, 1);
500
504 /* unmount */ 501 /* unmount */
505 ceph_osdc_stop(&client->osdc); 502 ceph_osdc_stop(&client->osdc);
506 503
507 /*
508 * make sure osd connections close out before destroying the
509 * auth module, which is needed to free those connections'
510 * ceph_authorizers.
511 */
512 ceph_msgr_flush();
513
514 ceph_monc_stop(&client->monc); 504 ceph_monc_stop(&client->monc);
515 505
516 ceph_debugfs_client_cleanup(client); 506 ceph_debugfs_client_cleanup(client);
517 507
518 ceph_messenger_destroy(client->msgr);
519
520 ceph_destroy_options(client->options); 508 ceph_destroy_options(client->options);
521 509
522 kfree(client); 510 kfree(client);
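In ceph_common.c the messenger stops being a separately allocated object: ceph_messenger_init() fills a struct embedded in ceph_client and takes the NOCRC option directly, so client creation loses an allocation that could fail (and with it the fail_msgr unwind label), while teardown becomes setting msgr.stopping and stopping the subsystems. The shape of that refactor in miniature (stand-in types, not the ceph API):

    #include <stdio.h>
    #include <stdlib.h>

    struct messenger { int nocrc; };

    /* init cannot fail once memory for the parent exists */
    static void messenger_init(struct messenger *m, int nocrc)
    {
        m->nocrc = nocrc;
    }

    struct client {
        struct messenger msgr;   /* embedded: lives and dies with client */
    };

    static struct client *client_create(int nocrc)
    {
        struct client *c = malloc(sizeof(*c));
        if (!c)
            return NULL;
        messenger_init(&c->msgr, nocrc);  /* no error path to unwind */
        return c;
    }

    int main(void)
    {
        struct client *c = client_create(1);
        printf("nocrc=%d\n", c ? c->msgr.nocrc : -1);
        free(c);
        return 0;
    }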
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index d7edc24333b8..35fce755ce10 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -306,7 +306,6 @@ static int crush_choose(const struct crush_map *map,
306 int item = 0; 306 int item = 0;
307 int itemtype; 307 int itemtype;
308 int collide, reject; 308 int collide, reject;
309 const unsigned int orig_tries = 5; /* attempts before we fall back to search */
310 309
311 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "", 310 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
312 bucket->id, x, outpos, numrep); 311 bucket->id, x, outpos, numrep);
@@ -351,8 +350,9 @@ static int crush_choose(const struct crush_map *map,
351 reject = 1; 350 reject = 1;
352 goto reject; 351 goto reject;
353 } 352 }
354 if (flocal >= (in->size>>1) && 353 if (map->choose_local_fallback_tries > 0 &&
355 flocal > orig_tries) 354 flocal >= (in->size>>1) &&
355 flocal > map->choose_local_fallback_tries)
356 item = bucket_perm_choose(in, x, r); 356 item = bucket_perm_choose(in, x, r);
357 else 357 else
358 item = crush_bucket_choose(in, x, r); 358 item = crush_bucket_choose(in, x, r);
@@ -422,13 +422,14 @@ reject:
422 ftotal++; 422 ftotal++;
423 flocal++; 423 flocal++;
424 424
425 if (collide && flocal < 3) 425 if (collide && flocal <= map->choose_local_tries)
426 /* retry locally a few times */ 426 /* retry locally a few times */
427 retry_bucket = 1; 427 retry_bucket = 1;
428 else if (flocal <= in->size + orig_tries) 428 else if (map->choose_local_fallback_tries > 0 &&
429 flocal <= in->size + map->choose_local_fallback_tries)
429 /* exhaustive bucket search */ 430 /* exhaustive bucket search */
430 retry_bucket = 1; 431 retry_bucket = 1;
431 else if (ftotal < 20) 432 else if (ftotal <= map->choose_total_tries)
432 /* then retry descent */ 433 /* then retry descent */
433 retry_descent = 1; 434 retry_descent = 1;
434 else 435 else
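The crush/mapper.c hunks replace three hard-coded retry limits (flocal < 3, the orig_tries = 5 fallback threshold, and ftotal < 20) with per-map tunables read from struct crush_map: choose_local_tries, choose_local_fallback_tries and choose_total_tries, so a map can carry its own placement-retry policy. The comparisons also flip from strict < to <=, so the legacy-equivalent defaults would presumably be 2, 5 and 19. A condensed, self-contained sketch of the resulting retry decision:

    #include <stdbool.h>
    #include <stdio.h>

    struct crush_tunables {
        int choose_local_tries;           /* was the literal 3 */
        int choose_local_fallback_tries;  /* was orig_tries = 5 */
        int choose_total_tries;           /* was the literal 20 */
    };

    static bool decide_retry(const struct crush_tunables *t, bool collide,
                             int flocal, int ftotal, int bucket_size,
                             bool *retry_bucket, bool *retry_descent)
    {
        *retry_bucket = *retry_descent = false;
        if (collide && flocal <= t->choose_local_tries)
            *retry_bucket = true;          /* retry locally a few times */
        else if (t->choose_local_fallback_tries > 0 &&
                 flocal <= bucket_size + t->choose_local_fallback_tries)
            *retry_bucket = true;          /* exhaustive bucket search */
        else if (ftotal <= t->choose_total_tries)
            *retry_descent = true;         /* then retry descent */
        return *retry_bucket || *retry_descent;
    }

    int main(void)
    {
        struct crush_tunables t = { 2, 5, 19 };  /* legacy-equivalent */
        bool rb, rd;

        decide_retry(&t, true, 1, 4, 8, &rb, &rd);
        printf("retry_bucket=%d retry_descent=%d\n", rb, rd);
        return 0;
    }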
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index b780cb7947dd..9da7fdd3cd8a 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) {
466 struct ceph_crypto_key *ckey = key->payload.data; 466 struct ceph_crypto_key *ckey = key->payload.data;
467 467
468 ceph_crypto_key_destroy(ckey); 468 ceph_crypto_key_destroy(ckey);
469 kfree(ckey);
469} 470}
470 471
471struct key_type key_type_ceph = { 472struct key_type key_type_ceph = {
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 1919d1550d75..3572dc518bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -16,7 +16,8 @@ struct ceph_crypto_key {
16 16
17static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) 17static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
18{ 18{
19 kfree(key->key); 19 if (key)
20 kfree(key->key);
20} 21}
21 22
22extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst, 23extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
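The two crypto hunks fix a small leak and harden teardown: ceph_crypto_key_destroy() only releases the key material, so ceph_key_destroy() must also kfree() the containing ceph_crypto_key, and the destroy helper now tolerates a NULL pointer. The ownership split in plain C (simplified types):

    #include <stdlib.h>

    struct crypto_key {
        void  *key;    /* secret material, owned by the struct */
        size_t len;
    };

    /* releases what the object owns, but not the object itself */
    static void crypto_key_destroy(struct crypto_key *k)
    {
        if (k)         /* the NULL guard added in crypto.h */
            free(k->key);
    }

    int main(void)
    {
        struct crypto_key *ckey = calloc(1, sizeof(*ckey));

        if (!ckey)
            return 1;
        ckey->key = malloc(16);

        crypto_key_destroy(ckey);
        free(ckey);    /* the kfree(ckey) this patch adds */
        return 0;
    }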
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 524f4e4f598b..b9796750034a 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -29,6 +29,74 @@
29 * the sender. 29 * the sender.
30 */ 30 */
31 31
32/*
33 * We track the state of the socket on a given connection using
34 * values defined below. The transition to a new socket state is
35 * handled by a function which verifies we aren't coming from an
36 * unexpected state.
37 *
 38 *      --------
 39 *      | NEW* |  transient initial state
 40 *      --------
 41 *          | con_sock_state_init()
 42 *          v
 43 *      ----------
 44 *      | CLOSED |  initialized, but no socket (and no
 45 *      ----------  TCP connection)
 46 *       ^      \
 47 *       |       \ con_sock_state_connecting()
 48 *       |        ----------------------
 49 *       |                              \
 50 *       + con_sock_state_closed()       \
 51 *       |+---------------------------    \
 52 *       | \                           \    \
 53 *       |  -----------                 \    \
 54 *       |  | CLOSING |  socket event;   \    \
 55 *       |  -----------  await close      \    \
 56 *       |       ^                         \    |
 57 *       |       |                          \   |
 58 *       |       + con_sock_state_closing()  \  |
 59 *       |      / \                           | |
 60 *       |     /   ---------------            | |
 61 *       |    /                   \           v v
 62 *       |   /                    --------------
 63 *       |  /    -----------------| CONNECTING |  socket created, TCP
 64 *       |  |   /                 --------------  connect initiated
 65 *       |  |   | con_sock_state_connected()
 66 *       |  |   v
 67 *      -------------
 68 *      | CONNECTED |  TCP connection established
 69 *      -------------
70 *
71 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
72 */
73
74#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
75#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
76#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
77#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
78#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
79
80/*
81 * connection states
82 */
83#define CON_STATE_CLOSED 1 /* -> PREOPEN */
84#define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
85#define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
86#define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
87#define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
88#define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
89
90/*
91 * ceph_connection flag bits
92 */
93#define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
94 * messages on errors */
95#define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
96#define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
97#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
98#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
99
32/* static tag bytes (protocol control messages) */ 100/* static tag bytes (protocol control messages) */
33static char tag_msg = CEPH_MSGR_TAG_MSG; 101static char tag_msg = CEPH_MSGR_TAG_MSG;
34static char tag_ack = CEPH_MSGR_TAG_ACK; 102static char tag_ack = CEPH_MSGR_TAG_ACK;
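The block comment above, together with the CON_SOCK_STATE_*, CON_STATE_* and CON_FLAG_* definitions, is the core of the messenger rework: socket state becomes one atomic value whose transitions are verified, connection state collapses to a single CON_STATE_* value protected by the mutex, and the previously overloaded state bits move to a separate flags word. The checked-transition pattern the con_sock_state_*() helpers in the next hunk implement, modelled with C11 atomics in userspace where the kernel uses atomic_xchg() and WARN_ON():

    #include <stdatomic.h>
    #include <stdio.h>

    enum { SOCK_NEW, SOCK_CLOSED, SOCK_CONNECTING,
           SOCK_CONNECTED, SOCK_CLOSING };

    static atomic_int sock_state = SOCK_NEW;

    /* swap in the new state, then warn if the state we came from
     * was not one of the legal predecessors */
    static void transition(int new_state, const int *allowed_from, int n)
    {
        int old = atomic_exchange(&sock_state, new_state);
        for (int i = 0; i < n; i++)
            if (old == allowed_from[i])
                return;
        fprintf(stderr, "unexpected old state %d -> %d\n", old, new_state);
    }

    int main(void)
    {
        transition(SOCK_CLOSED, (const int[]){ SOCK_NEW }, 1);
        transition(SOCK_CONNECTING, (const int[]){ SOCK_CLOSED }, 1);
        transition(SOCK_CONNECTED, (const int[]){ SOCK_CONNECTING }, 1);
        /* illegal jump: CONNECTED -> CONNECTING triggers the warning */
        transition(SOCK_CONNECTING, (const int[]){ SOCK_CLOSED }, 1);
        return 0;
    }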
@@ -147,72 +215,130 @@ void ceph_msgr_flush(void)
147} 215}
148EXPORT_SYMBOL(ceph_msgr_flush); 216EXPORT_SYMBOL(ceph_msgr_flush);
149 217
218/* Connection socket state transition functions */
219
220static void con_sock_state_init(struct ceph_connection *con)
221{
222 int old_state;
223
224 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
225 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
226 printk("%s: unexpected old state %d\n", __func__, old_state);
227 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
228 CON_SOCK_STATE_CLOSED);
229}
230
231static void con_sock_state_connecting(struct ceph_connection *con)
232{
233 int old_state;
234
235 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
236 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
237 printk("%s: unexpected old state %d\n", __func__, old_state);
238 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
239 CON_SOCK_STATE_CONNECTING);
240}
241
242static void con_sock_state_connected(struct ceph_connection *con)
243{
244 int old_state;
245
246 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
247 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
248 printk("%s: unexpected old state %d\n", __func__, old_state);
249 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
250 CON_SOCK_STATE_CONNECTED);
251}
252
253static void con_sock_state_closing(struct ceph_connection *con)
254{
255 int old_state;
256
257 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
258 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
259 old_state != CON_SOCK_STATE_CONNECTED &&
260 old_state != CON_SOCK_STATE_CLOSING))
261 printk("%s: unexpected old state %d\n", __func__, old_state);
262 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
263 CON_SOCK_STATE_CLOSING);
264}
265
266static void con_sock_state_closed(struct ceph_connection *con)
267{
268 int old_state;
269
270 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
271 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
272 old_state != CON_SOCK_STATE_CLOSING &&
273 old_state != CON_SOCK_STATE_CONNECTING &&
274 old_state != CON_SOCK_STATE_CLOSED))
275 printk("%s: unexpected old state %d\n", __func__, old_state);
276 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
277 CON_SOCK_STATE_CLOSED);
278}
150 279
151/* 280/*
152 * socket callback functions 281 * socket callback functions
153 */ 282 */
154 283
155/* data available on socket, or listen socket received a connect */ 284/* data available on socket, or listen socket received a connect */
156static void ceph_data_ready(struct sock *sk, int count_unused) 285static void ceph_sock_data_ready(struct sock *sk, int count_unused)
157{ 286{
158 struct ceph_connection *con = sk->sk_user_data; 287 struct ceph_connection *con = sk->sk_user_data;
288 if (atomic_read(&con->msgr->stopping)) {
289 return;
290 }
159 291
160 if (sk->sk_state != TCP_CLOSE_WAIT) { 292 if (sk->sk_state != TCP_CLOSE_WAIT) {
161 dout("ceph_data_ready on %p state = %lu, queueing work\n", 293 dout("%s on %p state = %lu, queueing work\n", __func__,
162 con, con->state); 294 con, con->state);
163 queue_con(con); 295 queue_con(con);
164 } 296 }
165} 297}
166 298
167/* socket has buffer space for writing */ 299/* socket has buffer space for writing */
168static void ceph_write_space(struct sock *sk) 300static void ceph_sock_write_space(struct sock *sk)
169{ 301{
170 struct ceph_connection *con = sk->sk_user_data; 302 struct ceph_connection *con = sk->sk_user_data;
171 303
172 /* only queue to workqueue if there is data we want to write, 304 /* only queue to workqueue if there is data we want to write,
173 * and there is sufficient space in the socket buffer to accept 305 * and there is sufficient space in the socket buffer to accept
174 * more data. clear SOCK_NOSPACE so that ceph_write_space() 306 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
175 * doesn't get called again until try_write() fills the socket 307 * doesn't get called again until try_write() fills the socket
176 * buffer. See net/ipv4/tcp_input.c:tcp_check_space() 308 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
177 * and net/core/stream.c:sk_stream_write_space(). 309 * and net/core/stream.c:sk_stream_write_space().
178 */ 310 */
179 if (test_bit(WRITE_PENDING, &con->state)) { 311 if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
180 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 312 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
181 dout("ceph_write_space %p queueing write work\n", con); 313 dout("%s %p queueing write work\n", __func__, con);
182 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 314 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
183 queue_con(con); 315 queue_con(con);
184 } 316 }
185 } else { 317 } else {
186 dout("ceph_write_space %p nothing to write\n", con); 318 dout("%s %p nothing to write\n", __func__, con);
187 } 319 }
188} 320}
189 321
190/* socket's state has changed */ 322/* socket's state has changed */
191static void ceph_state_change(struct sock *sk) 323static void ceph_sock_state_change(struct sock *sk)
192{ 324{
193 struct ceph_connection *con = sk->sk_user_data; 325 struct ceph_connection *con = sk->sk_user_data;
194 326
195 dout("ceph_state_change %p state = %lu sk_state = %u\n", 327 dout("%s %p state = %lu sk_state = %u\n", __func__,
196 con, con->state, sk->sk_state); 328 con, con->state, sk->sk_state);
197 329
198 if (test_bit(CLOSED, &con->state))
199 return;
200
201 switch (sk->sk_state) { 330 switch (sk->sk_state) {
202 case TCP_CLOSE: 331 case TCP_CLOSE:
203 dout("ceph_state_change TCP_CLOSE\n"); 332 dout("%s TCP_CLOSE\n", __func__);
204 case TCP_CLOSE_WAIT: 333 case TCP_CLOSE_WAIT:
205 dout("ceph_state_change TCP_CLOSE_WAIT\n"); 334 dout("%s TCP_CLOSE_WAIT\n", __func__);
206 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) { 335 con_sock_state_closing(con);
207 if (test_bit(CONNECTING, &con->state)) 336 set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
208 con->error_msg = "connection failed"; 337 queue_con(con);
209 else
210 con->error_msg = "socket closed";
211 queue_con(con);
212 }
213 break; 338 break;
214 case TCP_ESTABLISHED: 339 case TCP_ESTABLISHED:
215 dout("ceph_state_change TCP_ESTABLISHED\n"); 340 dout("%s TCP_ESTABLISHED\n", __func__);
341 con_sock_state_connected(con);
216 queue_con(con); 342 queue_con(con);
217 break; 343 break;
218 default: /* Everything else is uninteresting */ 344 default: /* Everything else is uninteresting */
@@ -228,9 +354,9 @@ static void set_sock_callbacks(struct socket *sock,
228{ 354{
229 struct sock *sk = sock->sk; 355 struct sock *sk = sock->sk;
230 sk->sk_user_data = con; 356 sk->sk_user_data = con;
231 sk->sk_data_ready = ceph_data_ready; 357 sk->sk_data_ready = ceph_sock_data_ready;
232 sk->sk_write_space = ceph_write_space; 358 sk->sk_write_space = ceph_sock_write_space;
233 sk->sk_state_change = ceph_state_change; 359 sk->sk_state_change = ceph_sock_state_change;
234} 360}
235 361
236 362
@@ -262,6 +388,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
262 388
263 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); 389 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
264 390
391 con_sock_state_connecting(con);
265 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), 392 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
266 O_NONBLOCK); 393 O_NONBLOCK);
267 if (ret == -EINPROGRESS) { 394 if (ret == -EINPROGRESS) {
@@ -277,7 +404,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
277 return ret; 404 return ret;
278 } 405 }
279 con->sock = sock; 406 con->sock = sock;
280
281 return 0; 407 return 0;
282} 408}
283 409
@@ -333,16 +459,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
333 */ 459 */
334static int con_close_socket(struct ceph_connection *con) 460static int con_close_socket(struct ceph_connection *con)
335{ 461{
336 int rc; 462 int rc = 0;
337 463
338 dout("con_close_socket on %p sock %p\n", con, con->sock); 464 dout("con_close_socket on %p sock %p\n", con, con->sock);
339 if (!con->sock) 465 if (con->sock) {
340 return 0; 466 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
341 set_bit(SOCK_CLOSED, &con->state); 467 sock_release(con->sock);
342 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); 468 con->sock = NULL;
343 sock_release(con->sock); 469 }
344 con->sock = NULL; 470
345 clear_bit(SOCK_CLOSED, &con->state); 471 /*
472 * Forcibly clear the SOCK_CLOSED flag. It gets set
473 * independently of the connection mutex, and we could have
474 * received a socket close event before we had the chance to
475 * shut the socket down.
476 */
477 clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
478
479 con_sock_state_closed(con);
346 return rc; 480 return rc;
347} 481}
348 482
@@ -353,6 +487,10 @@ static int con_close_socket(struct ceph_connection *con)
353static void ceph_msg_remove(struct ceph_msg *msg) 487static void ceph_msg_remove(struct ceph_msg *msg)
354{ 488{
355 list_del_init(&msg->list_head); 489 list_del_init(&msg->list_head);
490 BUG_ON(msg->con == NULL);
491 msg->con->ops->put(msg->con);
492 msg->con = NULL;
493
356 ceph_msg_put(msg); 494 ceph_msg_put(msg);
357} 495}
358static void ceph_msg_remove_list(struct list_head *head) 496static void ceph_msg_remove_list(struct list_head *head)
@@ -372,8 +510,11 @@ static void reset_connection(struct ceph_connection *con)
372 ceph_msg_remove_list(&con->out_sent); 510 ceph_msg_remove_list(&con->out_sent);
373 511
374 if (con->in_msg) { 512 if (con->in_msg) {
513 BUG_ON(con->in_msg->con != con);
514 con->in_msg->con = NULL;
375 ceph_msg_put(con->in_msg); 515 ceph_msg_put(con->in_msg);
376 con->in_msg = NULL; 516 con->in_msg = NULL;
517 con->ops->put(con);
377 } 518 }
378 519
379 con->connect_seq = 0; 520 con->connect_seq = 0;
@@ -391,32 +532,44 @@ static void reset_connection(struct ceph_connection *con)
391 */ 532 */
392void ceph_con_close(struct ceph_connection *con) 533void ceph_con_close(struct ceph_connection *con)
393{ 534{
535 mutex_lock(&con->mutex);
394 dout("con_close %p peer %s\n", con, 536 dout("con_close %p peer %s\n", con,
395 ceph_pr_addr(&con->peer_addr.in_addr)); 537 ceph_pr_addr(&con->peer_addr.in_addr));
396 set_bit(CLOSED, &con->state); /* in case there's queued work */ 538 con->state = CON_STATE_CLOSED;
397 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ 539
398 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ 540 clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
399 clear_bit(KEEPALIVE_PENDING, &con->state); 541 clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
400 clear_bit(WRITE_PENDING, &con->state); 542 clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
401 mutex_lock(&con->mutex); 543 clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
544 clear_bit(CON_FLAG_BACKOFF, &con->flags);
545
402 reset_connection(con); 546 reset_connection(con);
403 con->peer_global_seq = 0; 547 con->peer_global_seq = 0;
404 cancel_delayed_work(&con->work); 548 cancel_delayed_work(&con->work);
549 con_close_socket(con);
405 mutex_unlock(&con->mutex); 550 mutex_unlock(&con->mutex);
406 queue_con(con);
407} 551}
408EXPORT_SYMBOL(ceph_con_close); 552EXPORT_SYMBOL(ceph_con_close);
409 553
410/* 554/*
411 * Reopen a closed connection, with a new peer address. 555 * Reopen a closed connection, with a new peer address.
412 */ 556 */
413void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) 557void ceph_con_open(struct ceph_connection *con,
558 __u8 entity_type, __u64 entity_num,
559 struct ceph_entity_addr *addr)
414{ 560{
561 mutex_lock(&con->mutex);
415 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 562 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
416 set_bit(OPENING, &con->state); 563
417 clear_bit(CLOSED, &con->state); 564 BUG_ON(con->state != CON_STATE_CLOSED);
565 con->state = CON_STATE_PREOPEN;
566
567 con->peer_name.type = (__u8) entity_type;
568 con->peer_name.num = cpu_to_le64(entity_num);
569
418 memcpy(&con->peer_addr, addr, sizeof(*addr)); 570 memcpy(&con->peer_addr, addr, sizeof(*addr));
419 con->delay = 0; /* reset backoff memory */ 571 con->delay = 0; /* reset backoff memory */
572 mutex_unlock(&con->mutex);
420 queue_con(con); 573 queue_con(con);
421} 574}
422EXPORT_SYMBOL(ceph_con_open); 575EXPORT_SYMBOL(ceph_con_open);
@@ -430,42 +583,26 @@ bool ceph_con_opened(struct ceph_connection *con)
430} 583}
431 584
432/* 585/*
433 * generic get/put
434 */
435struct ceph_connection *ceph_con_get(struct ceph_connection *con)
436{
437 int nref = __atomic_add_unless(&con->nref, 1, 0);
438
439 dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);
440
441 return nref ? con : NULL;
442}
443
444void ceph_con_put(struct ceph_connection *con)
445{
446 int nref = atomic_dec_return(&con->nref);
447
448 BUG_ON(nref < 0);
449 if (nref == 0) {
450 BUG_ON(con->sock);
451 kfree(con);
452 }
453 dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
454}
455
456/*
457 * initialize a new connection. 586 * initialize a new connection.
458 */ 587 */
459void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) 588void ceph_con_init(struct ceph_connection *con, void *private,
589 const struct ceph_connection_operations *ops,
590 struct ceph_messenger *msgr)
460{ 591{
461 dout("con_init %p\n", con); 592 dout("con_init %p\n", con);
462 memset(con, 0, sizeof(*con)); 593 memset(con, 0, sizeof(*con));
463 atomic_set(&con->nref, 1); 594 con->private = private;
595 con->ops = ops;
464 con->msgr = msgr; 596 con->msgr = msgr;
597
598 con_sock_state_init(con);
599
465 mutex_init(&con->mutex); 600 mutex_init(&con->mutex);
466 INIT_LIST_HEAD(&con->out_queue); 601 INIT_LIST_HEAD(&con->out_queue);
467 INIT_LIST_HEAD(&con->out_sent); 602 INIT_LIST_HEAD(&con->out_sent);
468 INIT_DELAYED_WORK(&con->work, con_work); 603 INIT_DELAYED_WORK(&con->work, con_work);
604
605 con->state = CON_STATE_CLOSED;
469} 606}
470EXPORT_SYMBOL(ceph_con_init); 607EXPORT_SYMBOL(ceph_con_init);
471 608
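The same hunk deletes the generic ceph_con_get()/ceph_con_put() pair and the embedded nref: a connection now lives inside a longer-lived owner, ceph_con_init() records that owner's ops table, and each queued message pins its connection through con->ops->get(), released again in ceph_msg_remove(). A toy model of that ownership scheme (stand-in types, not the ceph structs):

    #include <stdio.h>

    struct connection;

    struct connection_ops {
        struct connection *(*get)(struct connection *);
        void (*put)(struct connection *);
    };

    struct connection {
        const struct connection_ops *ops;
        int *owner_refs;         /* owner's refcount, e.g. a session */
    };

    struct msg {
        struct connection *con;  /* each queued msg pins its connection */
    };

    static struct connection *owner_get(struct connection *c)
    {
        ++*c->owner_refs;
        return c;
    }

    static void owner_put(struct connection *c)
    {
        --*c->owner_refs;
    }

    static const struct connection_ops ops = { owner_get, owner_put };

    int main(void)
    {
        int refs = 1;
        struct connection con = { &ops, &refs };
        struct msg m;

        m.con = con.ops->get(&con);  /* queueing a msg pins the con */
        printf("refs while queued: %d\n", refs);
        m.con->ops->put(m.con);      /* ceph_msg_remove() drops it */
        m.con = NULL;
        printf("refs after remove: %d\n", refs);
        return 0;
    }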
@@ -486,14 +623,14 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
486 return ret; 623 return ret;
487} 624}
488 625
489static void ceph_con_out_kvec_reset(struct ceph_connection *con) 626static void con_out_kvec_reset(struct ceph_connection *con)
490{ 627{
491 con->out_kvec_left = 0; 628 con->out_kvec_left = 0;
492 con->out_kvec_bytes = 0; 629 con->out_kvec_bytes = 0;
493 con->out_kvec_cur = &con->out_kvec[0]; 630 con->out_kvec_cur = &con->out_kvec[0];
494} 631}
495 632
496static void ceph_con_out_kvec_add(struct ceph_connection *con, 633static void con_out_kvec_add(struct ceph_connection *con,
497 size_t size, void *data) 634 size_t size, void *data)
498{ 635{
499 int index; 636 int index;
@@ -507,6 +644,53 @@ static void ceph_con_out_kvec_add(struct ceph_connection *con,
507 con->out_kvec_bytes += size; 644 con->out_kvec_bytes += size;
508} 645}
509 646
647#ifdef CONFIG_BLOCK
648static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
649{
650 if (!bio) {
651 *iter = NULL;
652 *seg = 0;
653 return;
654 }
655 *iter = bio;
656 *seg = bio->bi_idx;
657}
658
659static void iter_bio_next(struct bio **bio_iter, int *seg)
660{
661 if (*bio_iter == NULL)
662 return;
663
664 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
665
666 (*seg)++;
667 if (*seg == (*bio_iter)->bi_vcnt)
668 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
669}
670#endif
671
672static void prepare_write_message_data(struct ceph_connection *con)
673{
674 struct ceph_msg *msg = con->out_msg;
675
676 BUG_ON(!msg);
677 BUG_ON(!msg->hdr.data_len);
678
679 /* initialize page iterator */
680 con->out_msg_pos.page = 0;
681 if (msg->pages)
682 con->out_msg_pos.page_pos = msg->page_alignment;
683 else
684 con->out_msg_pos.page_pos = 0;
685#ifdef CONFIG_BLOCK
686 if (msg->bio)
687 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
688#endif
689 con->out_msg_pos.data_pos = 0;
690 con->out_msg_pos.did_page_crc = false;
691 con->out_more = 1; /* data + footer will follow */
692}
693
510/* 694/*
511 * Prepare footer for currently outgoing message, and finish things 695 * Prepare footer for currently outgoing message, and finish things
512 * off. Assumes out_kvec* are already valid.. we just add on to the end. 696 * off. Assumes out_kvec* are already valid.. we just add on to the end.
@@ -516,6 +700,8 @@ static void prepare_write_message_footer(struct ceph_connection *con)
516 struct ceph_msg *m = con->out_msg; 700 struct ceph_msg *m = con->out_msg;
517 int v = con->out_kvec_left; 701 int v = con->out_kvec_left;
518 702
703 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
704
519 dout("prepare_write_message_footer %p\n", con); 705 dout("prepare_write_message_footer %p\n", con);
520 con->out_kvec_is_msg = true; 706 con->out_kvec_is_msg = true;
521 con->out_kvec[v].iov_base = &m->footer; 707 con->out_kvec[v].iov_base = &m->footer;
@@ -534,7 +720,7 @@ static void prepare_write_message(struct ceph_connection *con)
534 struct ceph_msg *m; 720 struct ceph_msg *m;
535 u32 crc; 721 u32 crc;
536 722
537 ceph_con_out_kvec_reset(con); 723 con_out_kvec_reset(con);
538 con->out_kvec_is_msg = true; 724 con->out_kvec_is_msg = true;
539 con->out_msg_done = false; 725 con->out_msg_done = false;
540 726
@@ -542,14 +728,16 @@ static void prepare_write_message(struct ceph_connection *con)
542 * TCP packet that's a good thing. */ 728 * TCP packet that's a good thing. */
543 if (con->in_seq > con->in_seq_acked) { 729 if (con->in_seq > con->in_seq_acked) {
544 con->in_seq_acked = con->in_seq; 730 con->in_seq_acked = con->in_seq;
545 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 731 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
546 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 732 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
547 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), 733 con_out_kvec_add(con, sizeof (con->out_temp_ack),
548 &con->out_temp_ack); 734 &con->out_temp_ack);
549 } 735 }
550 736
737 BUG_ON(list_empty(&con->out_queue));
551 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); 738 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
552 con->out_msg = m; 739 con->out_msg = m;
740 BUG_ON(m->con != con);
553 741
554 /* put message on sent list */ 742 /* put message on sent list */
555 ceph_msg_get(m); 743 ceph_msg_get(m);
@@ -563,6 +751,10 @@ static void prepare_write_message(struct ceph_connection *con)
563 m->hdr.seq = cpu_to_le64(++con->out_seq); 751 m->hdr.seq = cpu_to_le64(++con->out_seq);
564 m->needs_out_seq = false; 752 m->needs_out_seq = false;
565 } 753 }
754#ifdef CONFIG_BLOCK
755 else
756 m->bio_iter = NULL;
757#endif
566 758
567 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", 759 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
568 m, con->out_seq, le16_to_cpu(m->hdr.type), 760 m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -572,18 +764,18 @@ static void prepare_write_message(struct ceph_connection *con)
572 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); 764 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
573 765
574 /* tag + hdr + front + middle */ 766 /* tag + hdr + front + middle */
575 ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); 767 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
576 ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); 768 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
577 ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); 769 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
578 770
579 if (m->middle) 771 if (m->middle)
580 ceph_con_out_kvec_add(con, m->middle->vec.iov_len, 772 con_out_kvec_add(con, m->middle->vec.iov_len,
581 m->middle->vec.iov_base); 773 m->middle->vec.iov_base);
582 774
583 /* fill in crc (except data pages), footer */ 775 /* fill in crc (except data pages), footer */
584 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); 776 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
585 con->out_msg->hdr.crc = cpu_to_le32(crc); 777 con->out_msg->hdr.crc = cpu_to_le32(crc);
586 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE; 778 con->out_msg->footer.flags = 0;
587 779
588 crc = crc32c(0, m->front.iov_base, m->front.iov_len); 780 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
589 con->out_msg->footer.front_crc = cpu_to_le32(crc); 781 con->out_msg->footer.front_crc = cpu_to_le32(crc);
@@ -593,28 +785,19 @@ static void prepare_write_message(struct ceph_connection *con)
593 con->out_msg->footer.middle_crc = cpu_to_le32(crc); 785 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
594 } else 786 } else
595 con->out_msg->footer.middle_crc = 0; 787 con->out_msg->footer.middle_crc = 0;
596 con->out_msg->footer.data_crc = 0; 788 dout("%s front_crc %u middle_crc %u\n", __func__,
597 dout("prepare_write_message front_crc %u data_crc %u\n",
598 le32_to_cpu(con->out_msg->footer.front_crc), 789 le32_to_cpu(con->out_msg->footer.front_crc),
599 le32_to_cpu(con->out_msg->footer.middle_crc)); 790 le32_to_cpu(con->out_msg->footer.middle_crc));
600 791
601 /* is there a data payload? */ 792 /* is there a data payload? */
602 if (le32_to_cpu(m->hdr.data_len) > 0) { 793 con->out_msg->footer.data_crc = 0;
603 /* initialize page iterator */ 794 if (m->hdr.data_len)
604 con->out_msg_pos.page = 0; 795 prepare_write_message_data(con);
605 if (m->pages) 796 else
606 con->out_msg_pos.page_pos = m->page_alignment;
607 else
608 con->out_msg_pos.page_pos = 0;
609 con->out_msg_pos.data_pos = 0;
610 con->out_msg_pos.did_page_crc = false;
611 con->out_more = 1; /* data + footer will follow */
612 } else {
613 /* no, queue up footer too and be done */ 797 /* no, queue up footer too and be done */
614 prepare_write_message_footer(con); 798 prepare_write_message_footer(con);
615 }
616 799
617 set_bit(WRITE_PENDING, &con->state); 800 set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
618} 801}
619 802
620/* 803/*
@@ -626,16 +809,16 @@ static void prepare_write_ack(struct ceph_connection *con)
626 con->in_seq_acked, con->in_seq); 809 con->in_seq_acked, con->in_seq);
627 con->in_seq_acked = con->in_seq; 810 con->in_seq_acked = con->in_seq;
628 811
629 ceph_con_out_kvec_reset(con); 812 con_out_kvec_reset(con);
630 813
631 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 814 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
632 815
633 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 816 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
634 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), 817 con_out_kvec_add(con, sizeof (con->out_temp_ack),
635 &con->out_temp_ack); 818 &con->out_temp_ack);
636 819
637 con->out_more = 1; /* more will follow.. eventually.. */ 820 con->out_more = 1; /* more will follow.. eventually.. */
638 set_bit(WRITE_PENDING, &con->state); 821 set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
639} 822}
640 823
641/* 824/*
@@ -644,9 +827,9 @@ static void prepare_write_ack(struct ceph_connection *con)
644static void prepare_write_keepalive(struct ceph_connection *con) 827static void prepare_write_keepalive(struct ceph_connection *con)
645{ 828{
646 dout("prepare_write_keepalive %p\n", con); 829 dout("prepare_write_keepalive %p\n", con);
647 ceph_con_out_kvec_reset(con); 830 con_out_kvec_reset(con);
648 ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); 831 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
649 set_bit(WRITE_PENDING, &con->state); 832 set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
650} 833}
651 834
652/* 835/*
@@ -661,27 +844,21 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
661 if (!con->ops->get_authorizer) { 844 if (!con->ops->get_authorizer) {
662 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN; 845 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
663 con->out_connect.authorizer_len = 0; 846 con->out_connect.authorizer_len = 0;
664
665 return NULL; 847 return NULL;
666 } 848 }
667 849
668 /* Can't hold the mutex while getting authorizer */ 850 /* Can't hold the mutex while getting authorizer */
669
670 mutex_unlock(&con->mutex); 851 mutex_unlock(&con->mutex);
671
672 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry); 852 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
673
674 mutex_lock(&con->mutex); 853 mutex_lock(&con->mutex);
675 854
676 if (IS_ERR(auth)) 855 if (IS_ERR(auth))
677 return auth; 856 return auth;
678 if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state)) 857 if (con->state != CON_STATE_NEGOTIATING)
679 return ERR_PTR(-EAGAIN); 858 return ERR_PTR(-EAGAIN);
680 859
681 con->auth_reply_buf = auth->authorizer_reply_buf; 860 con->auth_reply_buf = auth->authorizer_reply_buf;
682 con->auth_reply_buf_len = auth->authorizer_reply_buf_len; 861 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
683
684
685 return auth; 862 return auth;
686} 863}
687 864
@@ -690,12 +867,12 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
690 */ 867 */
691static void prepare_write_banner(struct ceph_connection *con) 868static void prepare_write_banner(struct ceph_connection *con)
692{ 869{
693 ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); 870 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
694 ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr), 871 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
695 &con->msgr->my_enc_addr); 872 &con->msgr->my_enc_addr);
696 873
697 con->out_more = 0; 874 con->out_more = 0;
698 set_bit(WRITE_PENDING, &con->state); 875 set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
699} 876}
700 877
701static int prepare_write_connect(struct ceph_connection *con) 878static int prepare_write_connect(struct ceph_connection *con)
@@ -738,14 +915,15 @@ static int prepare_write_connect(struct ceph_connection *con)
738 con->out_connect.authorizer_len = auth ? 915 con->out_connect.authorizer_len = auth ?
739 cpu_to_le32(auth->authorizer_buf_len) : 0; 916 cpu_to_le32(auth->authorizer_buf_len) : 0;
740 917
741 ceph_con_out_kvec_add(con, sizeof (con->out_connect), 918 con_out_kvec_reset(con);
919 con_out_kvec_add(con, sizeof (con->out_connect),
742 &con->out_connect); 920 &con->out_connect);
743 if (auth && auth->authorizer_buf_len) 921 if (auth && auth->authorizer_buf_len)
744 ceph_con_out_kvec_add(con, auth->authorizer_buf_len, 922 con_out_kvec_add(con, auth->authorizer_buf_len,
745 auth->authorizer_buf); 923 auth->authorizer_buf);
746 924
747 con->out_more = 0; 925 con->out_more = 0;
748 set_bit(WRITE_PENDING, &con->state); 926 set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
749 927
750 return 0; 928 return 0;
751} 929}
@@ -793,30 +971,34 @@ out:
793 return ret; /* done! */ 971 return ret; /* done! */
794} 972}
795 973
796#ifdef CONFIG_BLOCK 974static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
797static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) 975 size_t len, size_t sent, bool in_trail)
798{ 976{
799 if (!bio) { 977 struct ceph_msg *msg = con->out_msg;
800 *iter = NULL;
801 *seg = 0;
802 return;
803 }
804 *iter = bio;
805 *seg = bio->bi_idx;
806}
807 978
808static void iter_bio_next(struct bio **bio_iter, int *seg) 979 BUG_ON(!msg);
809{ 980 BUG_ON(!sent);
810 if (*bio_iter == NULL)
811 return;
812 981
813 BUG_ON(*seg >= (*bio_iter)->bi_vcnt); 982 con->out_msg_pos.data_pos += sent;
983 con->out_msg_pos.page_pos += sent;
984 if (sent < len)
985 return;
814 986
815 (*seg)++; 987 BUG_ON(sent != len);
816 if (*seg == (*bio_iter)->bi_vcnt) 988 con->out_msg_pos.page_pos = 0;
817 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); 989 con->out_msg_pos.page++;
818} 990 con->out_msg_pos.did_page_crc = false;
991 if (in_trail)
992 list_move_tail(&page->lru,
993 &msg->trail->head);
994 else if (msg->pagelist)
995 list_move_tail(&page->lru,
996 &msg->pagelist->head);
997#ifdef CONFIG_BLOCK
998 else if (msg->bio)
999 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
819#endif 1000#endif
1001}
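
The removed bio iterator helpers give way to out_msg_pos_next(), which centralizes the send-cursor bookkeeping that previously lived inline in write_partial_msg_pages(). A minimal userspace sketch of the same invariant (struct and names simplified; illustrative only, not the kernel code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ 4096

/* Simplified send cursor, mirroring the fields touched above. */
struct msg_pos {
    size_t data_pos;   /* payload bytes sent so far */
    size_t page_pos;   /* offset within the current page */
    int page;          /* index of the current page */
    int did_page_crc;  /* CRC already folded in for this page? */
};

/*
 * Advance the cursor by 'sent' bytes of a 'len'-byte chunk. A short
 * write only moves within the page; a complete write steps to the
 * next page and resets the per-page state.
 */
static void msg_pos_next(struct msg_pos *pos, size_t len, size_t sent)
{
    assert(sent > 0 && sent <= len);
    pos->data_pos += sent;
    pos->page_pos += sent;
    if (sent < len)
        return;                 /* short write: stay on this page */
    pos->page_pos = 0;
    pos->page++;
    pos->did_page_crc = 0;
}

int main(void)
{
    struct msg_pos pos = {0};

    msg_pos_next(&pos, PAGE_SZ, 1000);                  /* partial */
    msg_pos_next(&pos, PAGE_SZ - 1000, PAGE_SZ - 1000); /* page done */
    printf("page=%d page_pos=%zu data_pos=%zu\n",
           pos.page, pos.page_pos, pos.data_pos);
    return 0;
}
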
820 1002
821/* 1003/*
822 * Write as much message data payload as we can. If we finish, queue 1004 * Write as much message data payload as we can. If we finish, queue
@@ -833,41 +1015,36 @@ static int write_partial_msg_pages(struct ceph_connection *con)
833 bool do_datacrc = !con->msgr->nocrc; 1015 bool do_datacrc = !con->msgr->nocrc;
834 int ret; 1016 int ret;
835 int total_max_write; 1017 int total_max_write;
836 int in_trail = 0; 1018 bool in_trail = false;
837 size_t trail_len = (msg->trail ? msg->trail->length : 0); 1019 const size_t trail_len = (msg->trail ? msg->trail->length : 0);
1020 const size_t trail_off = data_len - trail_len;
838 1021
839 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", 1022 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
840 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, 1023 con, msg, con->out_msg_pos.page, msg->nr_pages,
841 con->out_msg_pos.page_pos); 1024 con->out_msg_pos.page_pos);
842 1025
843#ifdef CONFIG_BLOCK 1026 /*
844 if (msg->bio && !msg->bio_iter) 1027 * Iterate through each page that contains data to be
845 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); 1028 * written, and send as much as possible for each.
846#endif 1029 *
847 1030 * If we are calculating the data crc (the default), we will
1031 * need to map the page. If we have no pages, they have
1032 * been revoked, so use the zero page.
1033 */
848 while (data_len > con->out_msg_pos.data_pos) { 1034 while (data_len > con->out_msg_pos.data_pos) {
849 struct page *page = NULL; 1035 struct page *page = NULL;
850 int max_write = PAGE_SIZE; 1036 int max_write = PAGE_SIZE;
851 int bio_offset = 0; 1037 int bio_offset = 0;
852 1038
853 total_max_write = data_len - trail_len - 1039 in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
854 con->out_msg_pos.data_pos; 1040 if (!in_trail)
855 1041 total_max_write = trail_off - con->out_msg_pos.data_pos;
856 /*
857 * if we are calculating the data crc (the default), we need
858 * to map the page. if our pages[] has been revoked, use the
859 * zero page.
860 */
861
862 /* have we reached the trail part of the data? */
863 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
864 in_trail = 1;
865 1042
1043 if (in_trail) {
866 total_max_write = data_len - con->out_msg_pos.data_pos; 1044 total_max_write = data_len - con->out_msg_pos.data_pos;
867 1045
868 page = list_first_entry(&msg->trail->head, 1046 page = list_first_entry(&msg->trail->head,
869 struct page, lru); 1047 struct page, lru);
870 max_write = PAGE_SIZE;
871 } else if (msg->pages) { 1048 } else if (msg->pages) {
872 page = msg->pages[con->out_msg_pos.page]; 1049 page = msg->pages[con->out_msg_pos.page];
873 } else if (msg->pagelist) { 1050 } else if (msg->pagelist) {
@@ -890,15 +1067,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
890 1067
891 if (do_datacrc && !con->out_msg_pos.did_page_crc) { 1068 if (do_datacrc && !con->out_msg_pos.did_page_crc) {
892 void *base; 1069 void *base;
893 u32 crc; 1070 u32 crc = le32_to_cpu(msg->footer.data_crc);
894 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
895 char *kaddr; 1071 char *kaddr;
896 1072
897 kaddr = kmap(page); 1073 kaddr = kmap(page);
898 BUG_ON(kaddr == NULL); 1074 BUG_ON(kaddr == NULL);
899 base = kaddr + con->out_msg_pos.page_pos + bio_offset; 1075 base = kaddr + con->out_msg_pos.page_pos + bio_offset;
900 crc = crc32c(tmpcrc, base, len); 1076 crc = crc32c(crc, base, len);
901 con->out_msg->footer.data_crc = cpu_to_le32(crc); 1077 msg->footer.data_crc = cpu_to_le32(crc);
902 con->out_msg_pos.did_page_crc = true; 1078 con->out_msg_pos.did_page_crc = true;
903 } 1079 }
904 ret = ceph_tcp_sendpage(con->sock, page, 1080 ret = ceph_tcp_sendpage(con->sock, page,
@@ -911,31 +1087,15 @@ static int write_partial_msg_pages(struct ceph_connection *con)
911 if (ret <= 0) 1087 if (ret <= 0)
912 goto out; 1088 goto out;
913 1089
914 con->out_msg_pos.data_pos += ret; 1090 out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
915 con->out_msg_pos.page_pos += ret;
916 if (ret == len) {
917 con->out_msg_pos.page_pos = 0;
918 con->out_msg_pos.page++;
919 con->out_msg_pos.did_page_crc = false;
920 if (in_trail)
921 list_move_tail(&page->lru,
922 &msg->trail->head);
923 else if (msg->pagelist)
924 list_move_tail(&page->lru,
925 &msg->pagelist->head);
926#ifdef CONFIG_BLOCK
927 else if (msg->bio)
928 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
929#endif
930 }
931 } 1091 }
932 1092
933 dout("write_partial_msg_pages %p msg %p done\n", con, msg); 1093 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
934 1094
935 /* prepare and queue up footer, too */ 1095 /* prepare and queue up footer, too */
936 if (!do_datacrc) 1096 if (!do_datacrc)
937 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 1097 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
938 ceph_con_out_kvec_reset(con); 1098 con_out_kvec_reset(con);
939 prepare_write_message_footer(con); 1099 prepare_write_message_footer(con);
940 ret = 1; 1100 ret = 1;
941out: 1101out:
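
write_partial_msg_pages() accumulates the data CRC one page at a time: the running value is read out of footer.data_crc, folded over the next chunk, and stored back. The same incremental pattern, sketched in userspace with zlib's crc32() standing in for the kernel's crc32c() (a stand-in chosen for illustration; build with -lz):

#include <stdio.h>
#include <zlib.h>   /* link with -lz */

int main(void)
{
    const char data[] = "payload split across two chunks";
    size_t half = sizeof(data) / 2;

    /* One-shot CRC over the whole buffer. */
    uLong whole = crc32(0L, (const Bytef *)data, sizeof(data));

    /* Incremental: fold in one chunk at a time, carrying the running
     * value between calls, which is what the messenger does with
     * footer.data_crc as each page goes out. */
    uLong run = crc32(0L, (const Bytef *)data, half);

    run = crc32(run, (const Bytef *)data + half, sizeof(data) - half);
    printf("whole=%08lx incremental=%08lx match=%d\n",
           whole, run, whole == run);
    return 0;
}
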
@@ -1347,20 +1507,14 @@ static int process_banner(struct ceph_connection *con)
1347 ceph_pr_addr(&con->msgr->inst.addr.in_addr)); 1507 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1348 } 1508 }
1349 1509
1350 set_bit(NEGOTIATING, &con->state);
1351 prepare_read_connect(con);
1352 return 0; 1510 return 0;
1353} 1511}
1354 1512
1355static void fail_protocol(struct ceph_connection *con) 1513static void fail_protocol(struct ceph_connection *con)
1356{ 1514{
1357 reset_connection(con); 1515 reset_connection(con);
1358 set_bit(CLOSED, &con->state); /* in case there's queued work */ 1516 BUG_ON(con->state != CON_STATE_NEGOTIATING);
1359 1517 con->state = CON_STATE_CLOSED;
1360 mutex_unlock(&con->mutex);
1361 if (con->ops->bad_proto)
1362 con->ops->bad_proto(con);
1363 mutex_lock(&con->mutex);
1364} 1518}
1365 1519
1366static int process_connect(struct ceph_connection *con) 1520static int process_connect(struct ceph_connection *con)
@@ -1403,7 +1557,6 @@ static int process_connect(struct ceph_connection *con)
1403 return -1; 1557 return -1;
1404 } 1558 }
1405 con->auth_retry = 1; 1559 con->auth_retry = 1;
1406 ceph_con_out_kvec_reset(con);
1407 ret = prepare_write_connect(con); 1560 ret = prepare_write_connect(con);
1408 if (ret < 0) 1561 if (ret < 0)
1409 return ret; 1562 return ret;
@@ -1419,12 +1572,11 @@ static int process_connect(struct ceph_connection *con)
1419 * dropped messages. 1572 * dropped messages.
1420 */ 1573 */
1421 dout("process_connect got RESET peer seq %u\n", 1574 dout("process_connect got RESET peer seq %u\n",
1422 le32_to_cpu(con->in_connect.connect_seq)); 1575 le32_to_cpu(con->in_reply.connect_seq));
1423 pr_err("%s%lld %s connection reset\n", 1576 pr_err("%s%lld %s connection reset\n",
1424 ENTITY_NAME(con->peer_name), 1577 ENTITY_NAME(con->peer_name),
1425 ceph_pr_addr(&con->peer_addr.in_addr)); 1578 ceph_pr_addr(&con->peer_addr.in_addr));
1426 reset_connection(con); 1579 reset_connection(con);
1427 ceph_con_out_kvec_reset(con);
1428 ret = prepare_write_connect(con); 1580 ret = prepare_write_connect(con);
1429 if (ret < 0) 1581 if (ret < 0)
1430 return ret; 1582 return ret;
@@ -1436,8 +1588,7 @@ static int process_connect(struct ceph_connection *con)
1436 if (con->ops->peer_reset) 1588 if (con->ops->peer_reset)
1437 con->ops->peer_reset(con); 1589 con->ops->peer_reset(con);
1438 mutex_lock(&con->mutex); 1590 mutex_lock(&con->mutex);
1439 if (test_bit(CLOSED, &con->state) || 1591 if (con->state != CON_STATE_NEGOTIATING)
1440 test_bit(OPENING, &con->state))
1441 return -EAGAIN; 1592 return -EAGAIN;
1442 break; 1593 break;
1443 1594
@@ -1446,11 +1597,10 @@ static int process_connect(struct ceph_connection *con)
1446 * If we sent a smaller connect_seq than the peer has, try 1597 * If we sent a smaller connect_seq than the peer has, try
1447 * again with a larger value. 1598 * again with a larger value.
1448 */ 1599 */
1449 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n", 1600 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
1450 le32_to_cpu(con->out_connect.connect_seq), 1601 le32_to_cpu(con->out_connect.connect_seq),
1451 le32_to_cpu(con->in_connect.connect_seq)); 1602 le32_to_cpu(con->in_reply.connect_seq));
1452 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); 1603 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
1453 ceph_con_out_kvec_reset(con);
1454 ret = prepare_write_connect(con); 1604 ret = prepare_write_connect(con);
1455 if (ret < 0) 1605 if (ret < 0)
1456 return ret; 1606 return ret;
@@ -1464,10 +1614,9 @@ static int process_connect(struct ceph_connection *con)
1464 */ 1614 */
1465 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 1615 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1466 con->peer_global_seq, 1616 con->peer_global_seq,
1467 le32_to_cpu(con->in_connect.global_seq)); 1617 le32_to_cpu(con->in_reply.global_seq));
1468 get_global_seq(con->msgr, 1618 get_global_seq(con->msgr,
1469 le32_to_cpu(con->in_connect.global_seq)); 1619 le32_to_cpu(con->in_reply.global_seq));
1470 ceph_con_out_kvec_reset(con);
1471 ret = prepare_write_connect(con); 1620 ret = prepare_write_connect(con);
1472 if (ret < 0) 1621 if (ret < 0)
1473 return ret; 1622 return ret;
@@ -1485,7 +1634,10 @@ static int process_connect(struct ceph_connection *con)
1485 fail_protocol(con); 1634 fail_protocol(con);
1486 return -1; 1635 return -1;
1487 } 1636 }
1488 clear_bit(CONNECTING, &con->state); 1637
1638 BUG_ON(con->state != CON_STATE_NEGOTIATING);
1639 con->state = CON_STATE_OPEN;
1640
1489 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 1641 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1490 con->connect_seq++; 1642 con->connect_seq++;
1491 con->peer_features = server_feat; 1643 con->peer_features = server_feat;
@@ -1497,7 +1649,9 @@ static int process_connect(struct ceph_connection *con)
1497 le32_to_cpu(con->in_reply.connect_seq)); 1649 le32_to_cpu(con->in_reply.connect_seq));
1498 1650
1499 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 1651 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1500 set_bit(LOSSYTX, &con->state); 1652 set_bit(CON_FLAG_LOSSYTX, &con->flags);
1653
1654 con->delay = 0; /* reset backoff memory */
1501 1655
1502 prepare_read_tag(con); 1656 prepare_read_tag(con);
1503 break; 1657 break;
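
Taken together, these hunks replace the old overlapping state bits (CONNECTING, NEGOTIATING, CLOSED, ...) with one exclusive con->state value plus a separate con->flags word. A compact sketch of the happy-path transitions as they appear in this patch (the enum values below are local stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Exclusive connection states as introduced by this patch; exactly
 * one is current at a time, unlike the old overlapping state bits. */
enum con_state {
    CON_STATE_CLOSED,
    CON_STATE_PREOPEN,
    CON_STATE_CONNECTING,
    CON_STATE_NEGOTIATING,
    CON_STATE_OPEN,
    CON_STATE_STANDBY,
};

static const char *happy_path_next(enum con_state s)
{
    switch (s) {
    case CON_STATE_PREOPEN:     return "CONNECTING (socket opened in try_write)";
    case CON_STATE_CONNECTING:  return "NEGOTIATING (banner verified in try_read)";
    case CON_STATE_NEGOTIATING: return "OPEN (READY reply in process_connect)";
    case CON_STATE_OPEN:        return "OPEN until close or fault (STANDBY/PREOPEN)";
    default:                    return "idle or terminal";
    }
}

int main(void)
{
    enum con_state path[] = {
        CON_STATE_PREOPEN, CON_STATE_CONNECTING,
        CON_STATE_NEGOTIATING, CON_STATE_OPEN,
    };

    for (unsigned i = 0; i < sizeof(path) / sizeof(path[0]); i++)
        printf("%d -> %s\n", (int)path[i], happy_path_next(path[i]));
    return 0;
}
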
@@ -1583,10 +1737,7 @@ static int read_partial_message_section(struct ceph_connection *con,
1583 return 1; 1737 return 1;
1584} 1738}
1585 1739
1586static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, 1740static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
1587 struct ceph_msg_header *hdr,
1588 int *skip);
1589
1590 1741
1591static int read_partial_message_pages(struct ceph_connection *con, 1742static int read_partial_message_pages(struct ceph_connection *con,
1592 struct page **pages, 1743 struct page **pages,
@@ -1629,9 +1780,6 @@ static int read_partial_message_bio(struct ceph_connection *con,
1629 void *p; 1780 void *p;
1630 int ret, left; 1781 int ret, left;
1631 1782
1632 if (IS_ERR(bv))
1633 return PTR_ERR(bv);
1634
1635 left = min((int)(data_len - con->in_msg_pos.data_pos), 1783 left = min((int)(data_len - con->in_msg_pos.data_pos),
1636 (int)(bv->bv_len - con->in_msg_pos.page_pos)); 1784 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1637 1785
@@ -1668,7 +1816,6 @@ static int read_partial_message(struct ceph_connection *con)
1668 int ret; 1816 int ret;
1669 unsigned int front_len, middle_len, data_len; 1817 unsigned int front_len, middle_len, data_len;
1670 bool do_datacrc = !con->msgr->nocrc; 1818 bool do_datacrc = !con->msgr->nocrc;
1671 int skip;
1672 u64 seq; 1819 u64 seq;
1673 u32 crc; 1820 u32 crc;
1674 1821
@@ -1719,10 +1866,13 @@ static int read_partial_message(struct ceph_connection *con)
1719 1866
1720 /* allocate message? */ 1867 /* allocate message? */
1721 if (!con->in_msg) { 1868 if (!con->in_msg) {
1869 int skip = 0;
1870
1722 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 1871 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1723 con->in_hdr.front_len, con->in_hdr.data_len); 1872 con->in_hdr.front_len, con->in_hdr.data_len);
1724 skip = 0; 1873 ret = ceph_con_in_msg_alloc(con, &skip);
1725 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); 1874 if (ret < 0)
1875 return ret;
1726 if (skip) { 1876 if (skip) {
1727 /* skip this message */ 1877 /* skip this message */
1728 dout("alloc_msg said skip message\n"); 1878 dout("alloc_msg said skip message\n");
@@ -1733,11 +1883,9 @@ static int read_partial_message(struct ceph_connection *con)
1733 con->in_seq++; 1883 con->in_seq++;
1734 return 0; 1884 return 0;
1735 } 1885 }
1736 if (!con->in_msg) { 1886
1737 con->error_msg = 1887 BUG_ON(!con->in_msg);
1738 "error allocating memory for incoming message"; 1888 BUG_ON(con->in_msg->con != con);
1739 return -ENOMEM;
1740 }
1741 m = con->in_msg; 1889 m = con->in_msg;
1742 m->front.iov_len = 0; /* haven't read it yet */ 1890 m->front.iov_len = 0; /* haven't read it yet */
1743 if (m->middle) 1891 if (m->middle)
@@ -1749,6 +1897,11 @@ static int read_partial_message(struct ceph_connection *con)
1749 else 1897 else
1750 con->in_msg_pos.page_pos = 0; 1898 con->in_msg_pos.page_pos = 0;
1751 con->in_msg_pos.data_pos = 0; 1899 con->in_msg_pos.data_pos = 0;
1900
1901#ifdef CONFIG_BLOCK
1902 if (m->bio)
1903 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1904#endif
1752 } 1905 }
1753 1906
1754 /* front */ 1907 /* front */
@@ -1765,10 +1918,6 @@ static int read_partial_message(struct ceph_connection *con)
1765 if (ret <= 0) 1918 if (ret <= 0)
1766 return ret; 1919 return ret;
1767 } 1920 }
1768#ifdef CONFIG_BLOCK
1769 if (m->bio && !m->bio_iter)
1770 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1771#endif
1772 1921
1773 /* (page) data */ 1922 /* (page) data */
1774 while (con->in_msg_pos.data_pos < data_len) { 1923 while (con->in_msg_pos.data_pos < data_len) {
@@ -1779,7 +1928,7 @@ static int read_partial_message(struct ceph_connection *con)
1779 return ret; 1928 return ret;
1780#ifdef CONFIG_BLOCK 1929#ifdef CONFIG_BLOCK
1781 } else if (m->bio) { 1930 } else if (m->bio) {
1782 1931 BUG_ON(!m->bio_iter);
1783 ret = read_partial_message_bio(con, 1932 ret = read_partial_message_bio(con,
1784 &m->bio_iter, &m->bio_seg, 1933 &m->bio_iter, &m->bio_seg,
1785 data_len, do_datacrc); 1934 data_len, do_datacrc);
@@ -1833,8 +1982,11 @@ static void process_message(struct ceph_connection *con)
1833{ 1982{
1834 struct ceph_msg *msg; 1983 struct ceph_msg *msg;
1835 1984
1985 BUG_ON(con->in_msg->con != con);
1986 con->in_msg->con = NULL;
1836 msg = con->in_msg; 1987 msg = con->in_msg;
1837 con->in_msg = NULL; 1988 con->in_msg = NULL;
1989 con->ops->put(con);
1838 1990
1839 /* if first message, set peer_name */ 1991 /* if first message, set peer_name */
1840 if (con->peer_name.type == 0) 1992 if (con->peer_name.type == 0)
@@ -1854,7 +2006,6 @@ static void process_message(struct ceph_connection *con)
1854 con->ops->dispatch(con, msg); 2006 con->ops->dispatch(con, msg);
1855 2007
1856 mutex_lock(&con->mutex); 2008 mutex_lock(&con->mutex);
1857 prepare_read_tag(con);
1858} 2009}
1859 2010
1860 2011
@@ -1866,22 +2017,19 @@ static int try_write(struct ceph_connection *con)
1866{ 2017{
1867 int ret = 1; 2018 int ret = 1;
1868 2019
1869 dout("try_write start %p state %lu nref %d\n", con, con->state, 2020 dout("try_write start %p state %lu\n", con, con->state);
1870 atomic_read(&con->nref));
1871 2021
1872more: 2022more:
1873 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2023 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1874 2024
1875 /* open the socket first? */ 2025 /* open the socket first? */
1876 if (con->sock == NULL) { 2026 if (con->state == CON_STATE_PREOPEN) {
1877 ceph_con_out_kvec_reset(con); 2027 BUG_ON(con->sock);
2028 con->state = CON_STATE_CONNECTING;
2029
2030 con_out_kvec_reset(con);
1878 prepare_write_banner(con); 2031 prepare_write_banner(con);
1879 ret = prepare_write_connect(con);
1880 if (ret < 0)
1881 goto out;
1882 prepare_read_banner(con); 2032 prepare_read_banner(con);
1883 set_bit(CONNECTING, &con->state);
1884 clear_bit(NEGOTIATING, &con->state);
1885 2033
1886 BUG_ON(con->in_msg); 2034 BUG_ON(con->in_msg);
1887 con->in_tag = CEPH_MSGR_TAG_READY; 2035 con->in_tag = CEPH_MSGR_TAG_READY;
@@ -1928,7 +2076,7 @@ more_kvec:
1928 } 2076 }
1929 2077
1930do_next: 2078do_next:
1931 if (!test_bit(CONNECTING, &con->state)) { 2079 if (con->state == CON_STATE_OPEN) {
1932 /* is anything else pending? */ 2080 /* is anything else pending? */
1933 if (!list_empty(&con->out_queue)) { 2081 if (!list_empty(&con->out_queue)) {
1934 prepare_write_message(con); 2082 prepare_write_message(con);
@@ -1938,14 +2086,15 @@ do_next:
1938 prepare_write_ack(con); 2086 prepare_write_ack(con);
1939 goto more; 2087 goto more;
1940 } 2088 }
1941 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) { 2089 if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
2090 &con->flags)) {
1942 prepare_write_keepalive(con); 2091 prepare_write_keepalive(con);
1943 goto more; 2092 goto more;
1944 } 2093 }
1945 } 2094 }
1946 2095
1947 /* Nothing to do! */ 2096 /* Nothing to do! */
1948 clear_bit(WRITE_PENDING, &con->state); 2097 clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
1949 dout("try_write nothing else to write.\n"); 2098 dout("try_write nothing else to write.\n");
1950 ret = 0; 2099 ret = 0;
1951out: 2100out:
@@ -1962,38 +2111,42 @@ static int try_read(struct ceph_connection *con)
1962{ 2111{
1963 int ret = -1; 2112 int ret = -1;
1964 2113
1965 if (!con->sock) 2114more:
1966 return 0; 2115 dout("try_read start on %p state %lu\n", con, con->state);
1967 2116 if (con->state != CON_STATE_CONNECTING &&
1968 if (test_bit(STANDBY, &con->state)) 2117 con->state != CON_STATE_NEGOTIATING &&
2118 con->state != CON_STATE_OPEN)
1969 return 0; 2119 return 0;
1970 2120
1971 dout("try_read start on %p\n", con); 2121 BUG_ON(!con->sock);
1972 2122
1973more:
1974 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2123 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1975 con->in_base_pos); 2124 con->in_base_pos);
1976 2125
1977 /* 2126 if (con->state == CON_STATE_CONNECTING) {
1978 * process_connect and process_message drop and re-take 2127 dout("try_read connecting\n");
1979 * con->mutex. make sure we handle a racing close or reopen. 2128 ret = read_partial_banner(con);
1980 */ 2129 if (ret <= 0)
1981 if (test_bit(CLOSED, &con->state) || 2130 goto out;
1982 test_bit(OPENING, &con->state)) { 2131 ret = process_banner(con);
1983 ret = -EAGAIN; 2132 if (ret < 0)
2133 goto out;
2134
2135 BUG_ON(con->state != CON_STATE_CONNECTING);
2136 con->state = CON_STATE_NEGOTIATING;
2137
2138 /* Banner is good, exchange connection info */
2139 ret = prepare_write_connect(con);
2140 if (ret < 0)
2141 goto out;
2142 prepare_read_connect(con);
2143
2144 /* Send connection info before awaiting response */
1984 goto out; 2145 goto out;
1985 } 2146 }
1986 2147
1987 if (test_bit(CONNECTING, &con->state)) { 2148 if (con->state == CON_STATE_NEGOTIATING) {
1988 if (!test_bit(NEGOTIATING, &con->state)) { 2149 dout("try_read negotiating\n");
1989 dout("try_read connecting\n");
1990 ret = read_partial_banner(con);
1991 if (ret <= 0)
1992 goto out;
1993 ret = process_banner(con);
1994 if (ret < 0)
1995 goto out;
1996 }
1997 ret = read_partial_connect(con); 2150 ret = read_partial_connect(con);
1998 if (ret <= 0) 2151 if (ret <= 0)
1999 goto out; 2152 goto out;
@@ -2003,6 +2156,8 @@ more:
2003 goto more; 2156 goto more;
2004 } 2157 }
2005 2158
2159 BUG_ON(con->state != CON_STATE_OPEN);
2160
2006 if (con->in_base_pos < 0) { 2161 if (con->in_base_pos < 0) {
2007 /* 2162 /*
2008 * skipping + discarding content. 2163 * skipping + discarding content.
@@ -2036,7 +2191,8 @@ more:
2036 prepare_read_ack(con); 2191 prepare_read_ack(con);
2037 break; 2192 break;
2038 case CEPH_MSGR_TAG_CLOSE: 2193 case CEPH_MSGR_TAG_CLOSE:
2039 set_bit(CLOSED, &con->state); /* fixme */ 2194 con_close_socket(con);
2195 con->state = CON_STATE_CLOSED;
2040 goto out; 2196 goto out;
2041 default: 2197 default:
2042 goto bad_tag; 2198 goto bad_tag;
@@ -2059,6 +2215,8 @@ more:
2059 if (con->in_tag == CEPH_MSGR_TAG_READY) 2215 if (con->in_tag == CEPH_MSGR_TAG_READY)
2060 goto more; 2216 goto more;
2061 process_message(con); 2217 process_message(con);
2218 if (con->state == CON_STATE_OPEN)
2219 prepare_read_tag(con);
2062 goto more; 2220 goto more;
2063 } 2221 }
2064 if (con->in_tag == CEPH_MSGR_TAG_ACK) { 2222 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
@@ -2087,12 +2245,6 @@ bad_tag:
2087 */ 2245 */
2088static void queue_con(struct ceph_connection *con) 2246static void queue_con(struct ceph_connection *con)
2089{ 2247{
2090 if (test_bit(DEAD, &con->state)) {
2091 dout("queue_con %p ignoring: DEAD\n",
2092 con);
2093 return;
2094 }
2095
2096 if (!con->ops->get(con)) { 2248 if (!con->ops->get(con)) {
2097 dout("queue_con %p ref count 0\n", con); 2249 dout("queue_con %p ref count 0\n", con);
2098 return; 2250 return;
@@ -2117,7 +2269,26 @@ static void con_work(struct work_struct *work)
2117 2269
2118 mutex_lock(&con->mutex); 2270 mutex_lock(&con->mutex);
2119restart: 2271restart:
2120 if (test_and_clear_bit(BACKOFF, &con->state)) { 2272 if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
2273 switch (con->state) {
2274 case CON_STATE_CONNECTING:
2275 con->error_msg = "connection failed";
2276 break;
2277 case CON_STATE_NEGOTIATING:
2278 con->error_msg = "negotiation failed";
2279 break;
2280 case CON_STATE_OPEN:
2281 con->error_msg = "socket closed";
2282 break;
2283 default:
2284 dout("unrecognized con state %d\n", (int)con->state);
2285 con->error_msg = "unrecognized con state";
2286 BUG();
2287 }
2288 goto fault;
2289 }
2290
2291 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
2121 dout("con_work %p backing off\n", con); 2292 dout("con_work %p backing off\n", con);
2122 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2293 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2123 round_jiffies_relative(con->delay))) { 2294 round_jiffies_relative(con->delay))) {
@@ -2131,35 +2302,35 @@ restart:
2131 } 2302 }
2132 } 2303 }
2133 2304
2134 if (test_bit(STANDBY, &con->state)) { 2305 if (con->state == CON_STATE_STANDBY) {
2135 dout("con_work %p STANDBY\n", con); 2306 dout("con_work %p STANDBY\n", con);
2136 goto done; 2307 goto done;
2137 } 2308 }
2138 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 2309 if (con->state == CON_STATE_CLOSED) {
2139 dout("con_work CLOSED\n"); 2310 dout("con_work %p CLOSED\n", con);
2140 con_close_socket(con); 2311 BUG_ON(con->sock);
2141 goto done; 2312 goto done;
2142 } 2313 }
2143 if (test_and_clear_bit(OPENING, &con->state)) { 2314 if (con->state == CON_STATE_PREOPEN) {
2144 /* reopen w/ new peer */
2145 dout("con_work OPENING\n"); 2315 dout("con_work OPENING\n");
2146 con_close_socket(con); 2316 BUG_ON(con->sock);
2147 } 2317 }
2148 2318
2149 if (test_and_clear_bit(SOCK_CLOSED, &con->state))
2150 goto fault;
2151
2152 ret = try_read(con); 2319 ret = try_read(con);
2153 if (ret == -EAGAIN) 2320 if (ret == -EAGAIN)
2154 goto restart; 2321 goto restart;
2155 if (ret < 0) 2322 if (ret < 0) {
2323 con->error_msg = "socket error on read";
2156 goto fault; 2324 goto fault;
2325 }
2157 2326
2158 ret = try_write(con); 2327 ret = try_write(con);
2159 if (ret == -EAGAIN) 2328 if (ret == -EAGAIN)
2160 goto restart; 2329 goto restart;
2161 if (ret < 0) 2330 if (ret < 0) {
2331 con->error_msg = "socket error on write";
2162 goto fault; 2332 goto fault;
2333 }
2163 2334
2164done: 2335done:
2165 mutex_unlock(&con->mutex); 2336 mutex_unlock(&con->mutex);
@@ -2168,7 +2339,6 @@ done_unlocked:
2168 return; 2339 return;
2169 2340
2170fault: 2341fault:
2171 mutex_unlock(&con->mutex);
2172 ceph_fault(con); /* error/fault path */ 2342 ceph_fault(con); /* error/fault path */
2173 goto done_unlocked; 2343 goto done_unlocked;
2174} 2344}
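
con_work() now consumes socket-closed and backoff events by atomically clearing a flag bit before acting on it, so each event fires exactly once even if it is raised from interrupt or callback context. The same test-and-clear idiom in userspace, with C11 atomics standing in for the kernel's test_and_clear_bit() (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_SOCK_CLOSED (1u << 0)
#define FLAG_BACKOFF     (1u << 1)

static atomic_uint flags;

/* Userspace analogue of the kernel's test_and_clear_bit():
 * atomically clear 'bit' and report whether it had been set. */
static int test_and_clear(unsigned int bit)
{
    unsigned int old = atomic_fetch_and(&flags, ~bit);

    return (old & bit) != 0;
}

static void con_work(void)
{
    if (test_and_clear(FLAG_SOCK_CLOSED)) {
        printf("fault: socket closed\n");
        return;
    }
    if (test_and_clear(FLAG_BACKOFF))
        printf("backing off before retrying\n");
    printf("normal read/write processing\n");
}

int main(void)
{
    atomic_fetch_or(&flags, FLAG_BACKOFF);
    con_work();     /* consumes BACKOFF exactly once */
    con_work();     /* flag already cleared: plain pass */
    return 0;
}
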
@@ -2179,26 +2349,31 @@ fault:
2179 * exponential backoff 2349 * exponential backoff
2180 */ 2350 */
2181static void ceph_fault(struct ceph_connection *con) 2351static void ceph_fault(struct ceph_connection *con)
2352 __releases(con->mutex)
2182{ 2353{
2183 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2354 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2184 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2355 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2185 dout("fault %p state %lu to peer %s\n", 2356 dout("fault %p state %lu to peer %s\n",
2186 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2357 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2187 2358
2188 if (test_bit(LOSSYTX, &con->state)) { 2359 BUG_ON(con->state != CON_STATE_CONNECTING &&
2189 dout("fault on LOSSYTX channel\n"); 2360 con->state != CON_STATE_NEGOTIATING &&
2190 goto out; 2361 con->state != CON_STATE_OPEN);
2191 }
2192
2193 mutex_lock(&con->mutex);
2194 if (test_bit(CLOSED, &con->state))
2195 goto out_unlock;
2196 2362
2197 con_close_socket(con); 2363 con_close_socket(con);
2198 2364
2365 if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
2366 dout("fault on LOSSYTX channel, marking CLOSED\n");
2367 con->state = CON_STATE_CLOSED;
2368 goto out_unlock;
2369 }
2370
2199 if (con->in_msg) { 2371 if (con->in_msg) {
2372 BUG_ON(con->in_msg->con != con);
2373 con->in_msg->con = NULL;
2200 ceph_msg_put(con->in_msg); 2374 ceph_msg_put(con->in_msg);
2201 con->in_msg = NULL; 2375 con->in_msg = NULL;
2376 con->ops->put(con);
2202 } 2377 }
2203 2378
2204 /* Requeue anything that hasn't been acked */ 2379 /* Requeue anything that hasn't been acked */
@@ -2207,12 +2382,13 @@ static void ceph_fault(struct ceph_connection *con)
2207 /* If there are no messages queued or keepalive pending, place 2382 /* If there are no messages queued or keepalive pending, place
2208 * the connection in a STANDBY state */ 2383 * the connection in a STANDBY state */
2209 if (list_empty(&con->out_queue) && 2384 if (list_empty(&con->out_queue) &&
2210 !test_bit(KEEPALIVE_PENDING, &con->state)) { 2385 !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
2211 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2386 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2212 clear_bit(WRITE_PENDING, &con->state); 2387 clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
2213 set_bit(STANDBY, &con->state); 2388 con->state = CON_STATE_STANDBY;
2214 } else { 2389 } else {
2215 /* retry after a delay. */ 2390 /* retry after a delay. */
2391 con->state = CON_STATE_PREOPEN;
2216 if (con->delay == 0) 2392 if (con->delay == 0)
2217 con->delay = BASE_DELAY_INTERVAL; 2393 con->delay = BASE_DELAY_INTERVAL;
2218 else if (con->delay < MAX_DELAY_INTERVAL) 2394 else if (con->delay < MAX_DELAY_INTERVAL)
@@ -2233,13 +2409,12 @@ static void ceph_fault(struct ceph_connection *con)
2233 * that when con_work restarts we schedule the 2409 * that when con_work restarts we schedule the
2234 * delay then. 2410 * delay then.
2235 */ 2411 */
2236 set_bit(BACKOFF, &con->state); 2412 set_bit(CON_FLAG_BACKOFF, &con->flags);
2237 } 2413 }
2238 } 2414 }
2239 2415
2240out_unlock: 2416out_unlock:
2241 mutex_unlock(&con->mutex); 2417 mutex_unlock(&con->mutex);
2242out:
2243 /* 2418 /*
2244 * in case we faulted due to authentication, invalidate our 2419 * in case we faulted due to authentication, invalidate our
2245 * current tickets so that we can get new ones. 2420 * current tickets so that we can get new ones.
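
The retry path above keeps the existing capped exponential backoff: con->delay starts at BASE_DELAY_INTERVAL and doubles until MAX_DELAY_INTERVAL; only the bookkeeping moved from state bits to con->flags. A sketch of the resulting schedule (the interval constants here are invented for illustration; the kernel's are jiffies-based):

#include <stdio.h>

#define BASE_DELAY_INTERVAL 1   /* invented units, not the kernel's */
#define MAX_DELAY_INTERVAL  64

int main(void)
{
    unsigned long delay = 0;

    for (int fault = 1; fault <= 10; fault++) {
        if (delay == 0)
            delay = BASE_DELAY_INTERVAL;
        else if (delay < MAX_DELAY_INTERVAL)
            delay *= 2;        /* double, but never past the cap */
        printf("fault %2d: retry in %lu\n", fault, delay);
    }
    return 0;
}
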
@@ -2256,18 +2431,14 @@ out:
2256 2431
2257 2432
2258/* 2433/*
2259 * create a new messenger instance 2434 * initialize a new messenger instance
2260 */ 2435 */
2261struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr, 2436void ceph_messenger_init(struct ceph_messenger *msgr,
2262 u32 supported_features, 2437 struct ceph_entity_addr *myaddr,
2263 u32 required_features) 2438 u32 supported_features,
2439 u32 required_features,
2440 bool nocrc)
2264{ 2441{
2265 struct ceph_messenger *msgr;
2266
2267 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
2268 if (msgr == NULL)
2269 return ERR_PTR(-ENOMEM);
2270
2271 msgr->supported_features = supported_features; 2442 msgr->supported_features = supported_features;
2272 msgr->required_features = required_features; 2443 msgr->required_features = required_features;
2273 2444
@@ -2280,30 +2451,23 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
2280 msgr->inst.addr.type = 0; 2451 msgr->inst.addr.type = 0;
2281 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2452 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2282 encode_my_addr(msgr); 2453 encode_my_addr(msgr);
2454 msgr->nocrc = nocrc;
2283 2455
2284 dout("messenger_create %p\n", msgr); 2456 atomic_set(&msgr->stopping, 0);
2285 return msgr;
2286}
2287EXPORT_SYMBOL(ceph_messenger_create);
2288 2457
2289void ceph_messenger_destroy(struct ceph_messenger *msgr) 2458 dout("%s %p\n", __func__, msgr);
2290{
2291 dout("destroy %p\n", msgr);
2292 kfree(msgr);
2293 dout("destroyed messenger %p\n", msgr);
2294} 2459}
2295EXPORT_SYMBOL(ceph_messenger_destroy); 2460EXPORT_SYMBOL(ceph_messenger_init);
2296 2461
2297static void clear_standby(struct ceph_connection *con) 2462static void clear_standby(struct ceph_connection *con)
2298{ 2463{
2299 /* come back from STANDBY? */ 2464 /* come back from STANDBY? */
2300 if (test_and_clear_bit(STANDBY, &con->state)) { 2465 if (con->state == CON_STATE_STANDBY) {
2301 mutex_lock(&con->mutex);
2302 dout("clear_standby %p and ++connect_seq\n", con); 2466 dout("clear_standby %p and ++connect_seq\n", con);
2467 con->state = CON_STATE_PREOPEN;
2303 con->connect_seq++; 2468 con->connect_seq++;
2304 WARN_ON(test_bit(WRITE_PENDING, &con->state)); 2469 WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
2305 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); 2470 WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
2306 mutex_unlock(&con->mutex);
2307 } 2471 }
2308} 2472}
2309 2473
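
ceph_messenger_create()/ceph_messenger_destroy() become ceph_messenger_init(), which fills in a messenger embedded in the client structure: setup can no longer fail with ENOMEM and no separate free step is needed. (clear_standby() likewise now expects its caller to hold con->mutex instead of taking it itself.) The shape of the embedded-init pattern, sketched with abbreviated field and type names:

#include <stdio.h>
#include <string.h>

struct messenger {
    unsigned supported, required;
    int nocrc;
};

/* The client embeds the messenger, so setup is plain initialization:
 * no allocation, no error return, no separate destroy step. */
struct client {
    struct messenger msgr;
};

static void messenger_init(struct messenger *m, unsigned supported,
                           unsigned required, int nocrc)
{
    memset(m, 0, sizeof(*m));
    m->supported = supported;
    m->required = required;
    m->nocrc = nocrc;
}

int main(void)
{
    struct client cl;

    messenger_init(&cl.msgr, 0x3, 0x1, 0);  /* cannot fail */
    printf("msgr ready: supported=%#x required=%#x\n",
           cl.msgr.supported, cl.msgr.required);
    return 0;
}
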
@@ -2312,21 +2476,24 @@ static void clear_standby(struct ceph_connection *con)
2312 */ 2476 */
2313void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2477void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2314{ 2478{
2315 if (test_bit(CLOSED, &con->state)) {
2316 dout("con_send %p closed, dropping %p\n", con, msg);
2317 ceph_msg_put(msg);
2318 return;
2319 }
2320
2321 /* set src+dst */ 2479 /* set src+dst */
2322 msg->hdr.src = con->msgr->inst.name; 2480 msg->hdr.src = con->msgr->inst.name;
2323
2324 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2481 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2325
2326 msg->needs_out_seq = true; 2482 msg->needs_out_seq = true;
2327 2483
2328 /* queue */
2329 mutex_lock(&con->mutex); 2484 mutex_lock(&con->mutex);
2485
2486 if (con->state == CON_STATE_CLOSED) {
2487 dout("con_send %p closed, dropping %p\n", con, msg);
2488 ceph_msg_put(msg);
2489 mutex_unlock(&con->mutex);
2490 return;
2491 }
2492
2493 BUG_ON(msg->con != NULL);
2494 msg->con = con->ops->get(con);
2495 BUG_ON(msg->con == NULL);
2496
2330 BUG_ON(!list_empty(&msg->list_head)); 2497 BUG_ON(!list_empty(&msg->list_head));
2331 list_add_tail(&msg->list_head, &con->out_queue); 2498 list_add_tail(&msg->list_head, &con->out_queue);
2332 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2499 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
@@ -2335,12 +2502,13 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2335 le32_to_cpu(msg->hdr.front_len), 2502 le32_to_cpu(msg->hdr.front_len),
2336 le32_to_cpu(msg->hdr.middle_len), 2503 le32_to_cpu(msg->hdr.middle_len),
2337 le32_to_cpu(msg->hdr.data_len)); 2504 le32_to_cpu(msg->hdr.data_len));
2505
2506 clear_standby(con);
2338 mutex_unlock(&con->mutex); 2507 mutex_unlock(&con->mutex);
2339 2508
2340 /* if there wasn't anything waiting to send before, queue 2509 /* if there wasn't anything waiting to send before, queue
2341 * new work */ 2510 * new work */
2342 clear_standby(con); 2511 if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
2343 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2344 queue_con(con); 2512 queue_con(con);
2345} 2513}
2346EXPORT_SYMBOL(ceph_con_send); 2514EXPORT_SYMBOL(ceph_con_send);
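
ceph_con_send() keeps the producer-side half of the flag protocol: work is queued only by the caller that flips WRITE_PENDING from clear to set, so a burst of sends wakes the worker once. Sketched with a C11 atomic_flag in place of test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag write_pending = ATOMIC_FLAG_INIT;

static void queue_con(void) { printf("work queued\n"); }

/* Many senders may add messages, but only the one that transitions
 * the flag from clear to set schedules the work item. */
static void con_send(int msgid)
{
    printf("queued msg %d\n", msgid);
    if (!atomic_flag_test_and_set(&write_pending))
        queue_con();
}

int main(void)
{
    con_send(1);   /* queues work */
    con_send(2);   /* flag already set: no second wakeup */
    return 0;
}
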
@@ -2348,24 +2516,34 @@ EXPORT_SYMBOL(ceph_con_send);
2348/* 2516/*
2349 * Revoke a message that was previously queued for send 2517 * Revoke a message that was previously queued for send
2350 */ 2518 */
2351void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) 2519void ceph_msg_revoke(struct ceph_msg *msg)
2352{ 2520{
2521 struct ceph_connection *con = msg->con;
2522
2523 if (!con)
2524 return; /* Message not in our possession */
2525
2353 mutex_lock(&con->mutex); 2526 mutex_lock(&con->mutex);
2354 if (!list_empty(&msg->list_head)) { 2527 if (!list_empty(&msg->list_head)) {
2355 dout("con_revoke %p msg %p - was on queue\n", con, msg); 2528 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
2356 list_del_init(&msg->list_head); 2529 list_del_init(&msg->list_head);
2357 ceph_msg_put(msg); 2530 BUG_ON(msg->con == NULL);
2531 msg->con->ops->put(msg->con);
2532 msg->con = NULL;
2358 msg->hdr.seq = 0; 2533 msg->hdr.seq = 0;
2534
2535 ceph_msg_put(msg);
2359 } 2536 }
2360 if (con->out_msg == msg) { 2537 if (con->out_msg == msg) {
2361 dout("con_revoke %p msg %p - was sending\n", con, msg); 2538 dout("%s %p msg %p - was sending\n", __func__, con, msg);
2362 con->out_msg = NULL; 2539 con->out_msg = NULL;
2363 if (con->out_kvec_is_msg) { 2540 if (con->out_kvec_is_msg) {
2364 con->out_skip = con->out_kvec_bytes; 2541 con->out_skip = con->out_kvec_bytes;
2365 con->out_kvec_is_msg = false; 2542 con->out_kvec_is_msg = false;
2366 } 2543 }
2367 ceph_msg_put(msg);
2368 msg->hdr.seq = 0; 2544 msg->hdr.seq = 0;
2545
2546 ceph_msg_put(msg);
2369 } 2547 }
2370 mutex_unlock(&con->mutex); 2548 mutex_unlock(&con->mutex);
2371} 2549}
@@ -2373,17 +2551,27 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
2373/* 2551/*
2374 * Revoke a message that we may be reading data into 2552 * Revoke a message that we may be reading data into
2375 */ 2553 */
2376void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) 2554void ceph_msg_revoke_incoming(struct ceph_msg *msg)
2377{ 2555{
2556 struct ceph_connection *con;
2557
2558 BUG_ON(msg == NULL);
2559 if (!msg->con) {
2560 dout("%s msg %p null con\n", __func__, msg);
2561
2562 return; /* Message not in our possession */
2563 }
2564
2565 con = msg->con;
2378 mutex_lock(&con->mutex); 2566 mutex_lock(&con->mutex);
2379 if (con->in_msg && con->in_msg == msg) { 2567 if (con->in_msg == msg) {
2380 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 2568 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
2381 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 2569 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
2382 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 2570 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
2383 2571
2384 /* skip rest of message */ 2572 /* skip rest of message */
2385 dout("con_revoke_pages %p msg %p revoked\n", con, msg); 2573 dout("%s %p msg %p revoked\n", __func__, con, msg);
2386 con->in_base_pos = con->in_base_pos - 2574 con->in_base_pos = con->in_base_pos -
2387 sizeof(struct ceph_msg_header) - 2575 sizeof(struct ceph_msg_header) -
2388 front_len - 2576 front_len -
2389 middle_len - 2577 middle_len -
@@ -2394,8 +2582,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2394 con->in_tag = CEPH_MSGR_TAG_READY; 2582 con->in_tag = CEPH_MSGR_TAG_READY;
2395 con->in_seq++; 2583 con->in_seq++;
2396 } else { 2584 } else {
2397 dout("con_revoke_pages %p msg %p pages %p no-op\n", 2585 dout("%s %p in_msg %p msg %p no-op\n",
2398 con, con->in_msg, msg); 2586 __func__, con, con->in_msg, msg);
2399 } 2587 }
2400 mutex_unlock(&con->mutex); 2588 mutex_unlock(&con->mutex);
2401} 2589}
@@ -2406,9 +2594,11 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2406void ceph_con_keepalive(struct ceph_connection *con) 2594void ceph_con_keepalive(struct ceph_connection *con)
2407{ 2595{
2408 dout("con_keepalive %p\n", con); 2596 dout("con_keepalive %p\n", con);
2597 mutex_lock(&con->mutex);
2409 clear_standby(con); 2598 clear_standby(con);
2410 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2599 mutex_unlock(&con->mutex);
2411 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2600 if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
2601 test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
2412 queue_con(con); 2602 queue_con(con);
2413} 2603}
2414EXPORT_SYMBOL(ceph_con_keepalive); 2604EXPORT_SYMBOL(ceph_con_keepalive);
@@ -2427,6 +2617,8 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2427 if (m == NULL) 2617 if (m == NULL)
2428 goto out; 2618 goto out;
2429 kref_init(&m->kref); 2619 kref_init(&m->kref);
2620
2621 m->con = NULL;
2430 INIT_LIST_HEAD(&m->list_head); 2622 INIT_LIST_HEAD(&m->list_head);
2431 2623
2432 m->hdr.tid = 0; 2624 m->hdr.tid = 0;
@@ -2522,46 +2714,77 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2522} 2714}
2523 2715
2524/* 2716/*
2525 * Generic message allocator, for incoming messages. 2717 * Allocate a message for receiving an incoming message on a
2718 * connection, and save the result in con->in_msg. Uses the
2719 * connection's private alloc_msg op if available.
2720 *
2721 * Returns 0 on success, or a negative error code.
2722 *
2723 * On success, if we set *skip = 1:
2724 * - the next message should be skipped and ignored.
2725 * - con->in_msg == NULL
2726 * or if we set *skip = 0:
2727 * - con->in_msg is non-null.
2728 * On error (ENOMEM, EAGAIN, ...),
2729 * - con->in_msg == NULL
2526 */ 2730 */
2527static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, 2731static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
2528 struct ceph_msg_header *hdr,
2529 int *skip)
2530{ 2732{
2733 struct ceph_msg_header *hdr = &con->in_hdr;
2531 int type = le16_to_cpu(hdr->type); 2734 int type = le16_to_cpu(hdr->type);
2532 int front_len = le32_to_cpu(hdr->front_len); 2735 int front_len = le32_to_cpu(hdr->front_len);
2533 int middle_len = le32_to_cpu(hdr->middle_len); 2736 int middle_len = le32_to_cpu(hdr->middle_len);
2534 struct ceph_msg *msg = NULL; 2737 int ret = 0;
2535 int ret; 2738
2739 BUG_ON(con->in_msg != NULL);
2536 2740
2537 if (con->ops->alloc_msg) { 2741 if (con->ops->alloc_msg) {
2742 struct ceph_msg *msg;
2743
2538 mutex_unlock(&con->mutex); 2744 mutex_unlock(&con->mutex);
2539 msg = con->ops->alloc_msg(con, hdr, skip); 2745 msg = con->ops->alloc_msg(con, hdr, skip);
2540 mutex_lock(&con->mutex); 2746 mutex_lock(&con->mutex);
2541 if (!msg || *skip) 2747 if (con->state != CON_STATE_OPEN) {
2542 return NULL; 2748 ceph_msg_put(msg);
2749 return -EAGAIN;
2750 }
2751 con->in_msg = msg;
2752 if (con->in_msg) {
2753 con->in_msg->con = con->ops->get(con);
2754 BUG_ON(con->in_msg->con == NULL);
2755 }
2756 if (*skip) {
2757 con->in_msg = NULL;
2758 return 0;
2759 }
2760 if (!con->in_msg) {
2761 con->error_msg =
2762 "error allocating memory for incoming message";
2763 return -ENOMEM;
2764 }
2543 } 2765 }
2544 if (!msg) { 2766 if (!con->in_msg) {
2545 *skip = 0; 2767 con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
2546 msg = ceph_msg_new(type, front_len, GFP_NOFS, false); 2768 if (!con->in_msg) {
2547 if (!msg) {
2548 pr_err("unable to allocate msg type %d len %d\n", 2769 pr_err("unable to allocate msg type %d len %d\n",
2549 type, front_len); 2770 type, front_len);
2550 return NULL; 2771 return -ENOMEM;
2551 } 2772 }
2552 msg->page_alignment = le16_to_cpu(hdr->data_off); 2773 con->in_msg->con = con->ops->get(con);
2774 BUG_ON(con->in_msg->con == NULL);
2775 con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
2553 } 2776 }
2554 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 2777 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2555 2778
2556 if (middle_len && !msg->middle) { 2779 if (middle_len && !con->in_msg->middle) {
2557 ret = ceph_alloc_middle(con, msg); 2780 ret = ceph_alloc_middle(con, con->in_msg);
2558 if (ret < 0) { 2781 if (ret < 0) {
2559 ceph_msg_put(msg); 2782 ceph_msg_put(con->in_msg);
2560 return NULL; 2783 con->in_msg = NULL;
2561 } 2784 }
2562 } 2785 }
2563 2786
2564 return msg; 2787 return ret;
2565} 2788}
2566 2789
2567 2790
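
The comment block above pins down the ceph_con_in_msg_alloc() contract: a negative return means failure with con->in_msg left NULL; 0 with *skip set means the incoming message should be dropped; 0 otherwise means con->in_msg is ready to receive into. A hypothetical caller honoring that contract (all names below are invented for illustration):

#include <stdio.h>

struct msg { int type; };
struct conn { struct msg *in_msg; };

/* Stand-in with the same contract as ceph_con_in_msg_alloc():
 * returns <0 on error (in_msg stays NULL), or 0 with *skip telling
 * the caller whether to discard the message. */
static int in_msg_alloc(struct conn *c, int *skip);

static int read_message(struct conn *c)
{
    int skip = 0;
    int ret = in_msg_alloc(c, &skip);

    if (ret < 0)
        return ret;        /* allocation or race failure */
    if (skip) {
        printf("skipping unwanted message\n");
        return 0;          /* c->in_msg is NULL here */
    }
    printf("reading into msg type %d\n", c->in_msg->type);
    return 1;
}

static struct msg one = { 42 };

static int in_msg_alloc(struct conn *c, int *skip)
{
    *skip = 0;
    c->in_msg = &one;      /* pretend allocation succeeded */
    return 0;
}

int main(void)
{
    struct conn c = { 0 };

    return read_message(&c) == 1 ? 0 : 1;
}
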
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 10d6008d31f2..105d533b55f3 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -106,9 +106,9 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
106 monc->pending_auth = 1; 106 monc->pending_auth = 1;
107 monc->m_auth->front.iov_len = len; 107 monc->m_auth->front.iov_len = len;
108 monc->m_auth->hdr.front_len = cpu_to_le32(len); 108 monc->m_auth->hdr.front_len = cpu_to_le32(len);
109 ceph_con_revoke(monc->con, monc->m_auth); 109 ceph_msg_revoke(monc->m_auth);
110 ceph_msg_get(monc->m_auth); /* keep our ref */ 110 ceph_msg_get(monc->m_auth); /* keep our ref */
111 ceph_con_send(monc->con, monc->m_auth); 111 ceph_con_send(&monc->con, monc->m_auth);
112} 112}
113 113
114/* 114/*
@@ -117,8 +117,11 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
117static void __close_session(struct ceph_mon_client *monc) 117static void __close_session(struct ceph_mon_client *monc)
118{ 118{
119 dout("__close_session closing mon%d\n", monc->cur_mon); 119 dout("__close_session closing mon%d\n", monc->cur_mon);
120 ceph_con_revoke(monc->con, monc->m_auth); 120 ceph_msg_revoke(monc->m_auth);
121 ceph_con_close(monc->con); 121 ceph_msg_revoke_incoming(monc->m_auth_reply);
122 ceph_msg_revoke(monc->m_subscribe);
123 ceph_msg_revoke_incoming(monc->m_subscribe_ack);
124 ceph_con_close(&monc->con);
122 monc->cur_mon = -1; 125 monc->cur_mon = -1;
123 monc->pending_auth = 0; 126 monc->pending_auth = 0;
124 ceph_auth_reset(monc->auth); 127 ceph_auth_reset(monc->auth);
@@ -142,9 +145,8 @@ static int __open_session(struct ceph_mon_client *monc)
142 monc->want_next_osdmap = !!monc->want_next_osdmap; 145 monc->want_next_osdmap = !!monc->want_next_osdmap;
143 146
144 dout("open_session mon%d opening\n", monc->cur_mon); 147 dout("open_session mon%d opening\n", monc->cur_mon);
145 monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON; 148 ceph_con_open(&monc->con,
146 monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); 149 CEPH_ENTITY_TYPE_MON, monc->cur_mon,
147 ceph_con_open(monc->con,
148 &monc->monmap->mon_inst[monc->cur_mon].addr); 150 &monc->monmap->mon_inst[monc->cur_mon].addr);
149 151
 150 /* initiate authentication handshake */ 152
@@ -226,8 +228,8 @@ static void __send_subscribe(struct ceph_mon_client *monc)
226 228
227 msg->front.iov_len = p - msg->front.iov_base; 229 msg->front.iov_len = p - msg->front.iov_base;
228 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 230 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
229 ceph_con_revoke(monc->con, msg); 231 ceph_msg_revoke(msg);
230 ceph_con_send(monc->con, ceph_msg_get(msg)); 232 ceph_con_send(&monc->con, ceph_msg_get(msg));
231 233
232 monc->sub_sent = jiffies | 1; /* never 0 */ 234 monc->sub_sent = jiffies | 1; /* never 0 */
233 } 235 }
@@ -247,7 +249,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
247 if (monc->hunting) { 249 if (monc->hunting) {
248 pr_info("mon%d %s session established\n", 250 pr_info("mon%d %s session established\n",
249 monc->cur_mon, 251 monc->cur_mon,
250 ceph_pr_addr(&monc->con->peer_addr.in_addr)); 252 ceph_pr_addr(&monc->con.peer_addr.in_addr));
251 monc->hunting = false; 253 monc->hunting = false;
252 } 254 }
253 dout("handle_subscribe_ack after %d seconds\n", seconds); 255 dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -439,6 +441,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
439 m = NULL; 441 m = NULL;
440 } else { 442 } else {
441 dout("get_generic_reply %lld got %p\n", tid, req->reply); 443 dout("get_generic_reply %lld got %p\n", tid, req->reply);
444 *skip = 0;
442 m = ceph_msg_get(req->reply); 445 m = ceph_msg_get(req->reply);
443 /* 446 /*
444 * we don't need to track the connection reading into 447 * we don't need to track the connection reading into
@@ -461,7 +464,7 @@ static int do_generic_request(struct ceph_mon_client *monc,
461 req->request->hdr.tid = cpu_to_le64(req->tid); 464 req->request->hdr.tid = cpu_to_le64(req->tid);
462 __insert_generic_request(monc, req); 465 __insert_generic_request(monc, req);
463 monc->num_generic_requests++; 466 monc->num_generic_requests++;
464 ceph_con_send(monc->con, ceph_msg_get(req->request)); 467 ceph_con_send(&monc->con, ceph_msg_get(req->request));
465 mutex_unlock(&monc->mutex); 468 mutex_unlock(&monc->mutex);
466 469
467 err = wait_for_completion_interruptible(&req->completion); 470 err = wait_for_completion_interruptible(&req->completion);
@@ -684,8 +687,9 @@ static void __resend_generic_request(struct ceph_mon_client *monc)
684 687
685 for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { 688 for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
686 req = rb_entry(p, struct ceph_mon_generic_request, node); 689 req = rb_entry(p, struct ceph_mon_generic_request, node);
687 ceph_con_revoke(monc->con, req->request); 690 ceph_msg_revoke(req->request);
688 ceph_con_send(monc->con, ceph_msg_get(req->request)); 691 ceph_msg_revoke_incoming(req->reply);
692 ceph_con_send(&monc->con, ceph_msg_get(req->request));
689 } 693 }
690} 694}
691 695
@@ -705,7 +709,7 @@ static void delayed_work(struct work_struct *work)
705 __close_session(monc); 709 __close_session(monc);
706 __open_session(monc); /* continue hunting */ 710 __open_session(monc); /* continue hunting */
707 } else { 711 } else {
708 ceph_con_keepalive(monc->con); 712 ceph_con_keepalive(&monc->con);
709 713
710 __validate_auth(monc); 714 __validate_auth(monc);
711 715
@@ -760,19 +764,12 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
760 goto out; 764 goto out;
761 765
762 /* connection */ 766 /* connection */
763 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
764 if (!monc->con)
765 goto out_monmap;
766 ceph_con_init(monc->client->msgr, monc->con);
767 monc->con->private = monc;
768 monc->con->ops = &mon_con_ops;
769
770 /* authentication */ 767 /* authentication */
771 monc->auth = ceph_auth_init(cl->options->name, 768 monc->auth = ceph_auth_init(cl->options->name,
772 cl->options->key); 769 cl->options->key);
773 if (IS_ERR(monc->auth)) { 770 if (IS_ERR(monc->auth)) {
774 err = PTR_ERR(monc->auth); 771 err = PTR_ERR(monc->auth);
775 goto out_con; 772 goto out_monmap;
776 } 773 }
777 monc->auth->want_keys = 774 monc->auth->want_keys =
778 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | 775 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
@@ -801,6 +798,9 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
801 if (!monc->m_auth) 798 if (!monc->m_auth)
802 goto out_auth_reply; 799 goto out_auth_reply;
803 800
801 ceph_con_init(&monc->con, monc, &mon_con_ops,
802 &monc->client->msgr);
803
804 monc->cur_mon = -1; 804 monc->cur_mon = -1;
805 monc->hunting = true; 805 monc->hunting = true;
806 monc->sub_renew_after = jiffies; 806 monc->sub_renew_after = jiffies;
@@ -824,8 +824,6 @@ out_subscribe_ack:
824 ceph_msg_put(monc->m_subscribe_ack); 824 ceph_msg_put(monc->m_subscribe_ack);
825out_auth: 825out_auth:
826 ceph_auth_destroy(monc->auth); 826 ceph_auth_destroy(monc->auth);
827out_con:
828 monc->con->ops->put(monc->con);
829out_monmap: 827out_monmap:
830 kfree(monc->monmap); 828 kfree(monc->monmap);
831out: 829out:
@@ -841,12 +839,16 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
841 mutex_lock(&monc->mutex); 839 mutex_lock(&monc->mutex);
842 __close_session(monc); 840 __close_session(monc);
843 841
844 monc->con->private = NULL;
845 monc->con->ops->put(monc->con);
846 monc->con = NULL;
847
848 mutex_unlock(&monc->mutex); 842 mutex_unlock(&monc->mutex);
849 843
844 /*
845 * flush msgr queue before we destroy ourselves to ensure that:
846 * - any work that references our embedded con is finished.
847 * - any osd_client or other work that may reference an authorizer
848 * finishes before we shut down the auth subsystem.
849 */
850 ceph_msgr_flush();
851
850 ceph_auth_destroy(monc->auth); 852 ceph_auth_destroy(monc->auth);
851 853
852 ceph_msg_put(monc->m_auth); 854 ceph_msg_put(monc->m_auth);
@@ -880,8 +882,8 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
880 } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { 882 } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
881 dout("authenticated, starting session\n"); 883 dout("authenticated, starting session\n");
882 884
883 monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; 885 monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
884 monc->client->msgr->inst.name.num = 886 monc->client->msgr.inst.name.num =
885 cpu_to_le64(monc->auth->global_id); 887 cpu_to_le64(monc->auth->global_id);
886 888
887 __send_subscribe(monc); 889 __send_subscribe(monc);
@@ -992,6 +994,8 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
992 case CEPH_MSG_MDS_MAP: 994 case CEPH_MSG_MDS_MAP:
993 case CEPH_MSG_OSD_MAP: 995 case CEPH_MSG_OSD_MAP:
994 m = ceph_msg_new(type, front_len, GFP_NOFS, false); 996 m = ceph_msg_new(type, front_len, GFP_NOFS, false);
997 if (!m)
998 return NULL; /* ENOMEM--return skip == 0 */
995 break; 999 break;
996 } 1000 }
997 1001
@@ -1021,7 +1025,7 @@ static void mon_fault(struct ceph_connection *con)
1021 if (!monc->hunting) 1025 if (!monc->hunting)
1022 pr_info("mon%d %s session lost, " 1026 pr_info("mon%d %s session lost, "
1023 "hunting for new mon\n", monc->cur_mon, 1027 "hunting for new mon\n", monc->cur_mon,
1024 ceph_pr_addr(&monc->con->peer_addr.in_addr)); 1028 ceph_pr_addr(&monc->con.peer_addr.in_addr));
1025 1029
1026 __close_session(monc); 1030 __close_session(monc);
1027 if (!monc->hunting) { 1031 if (!monc->hunting) {
@@ -1036,9 +1040,23 @@ out:
1036 mutex_unlock(&monc->mutex); 1040 mutex_unlock(&monc->mutex);
1037} 1041}
1038 1042
1043/*
1044 * We can ignore refcounting on the connection struct, as all references
1045 * will come from the messenger workqueue, which is drained prior to
1046 * mon_client destruction.
1047 */
1048static struct ceph_connection *con_get(struct ceph_connection *con)
1049{
1050 return con;
1051}
1052
1053static void con_put(struct ceph_connection *con)
1054{
1055}
1056
1039static const struct ceph_connection_operations mon_con_ops = { 1057static const struct ceph_connection_operations mon_con_ops = {
1040 .get = ceph_con_get, 1058 .get = con_get,
1041 .put = ceph_con_put, 1059 .put = con_put,
1042 .dispatch = dispatch, 1060 .dispatch = dispatch,
1043 .fault = mon_fault, 1061 .fault = mon_fault,
1044 .alloc_msg = mon_alloc_msg, 1062 .alloc_msg = mon_alloc_msg,
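
The no-op con_get()/con_put() above rely on the lifetime rule stated in the comment: every reference comes from the messenger workqueue, and ceph_monc_stop() flushes that queue (ceph_msgr_flush()) before the embedded connection can go away. A toy illustration of drain-before-free (names invented):

#include <stdio.h>

struct con { const char *owner; };

/* No-op "refcounting": safe only because all users run on one
 * workqueue that is flushed before the owner is torn down. */
static struct con *con_get(struct con *c) { return c; }
static void con_put(struct con *c) { (void)c; }

static void worker(struct con *c)
{
    struct con *ref = con_get(c);   /* would be a real ref otherwise */

    printf("work on con owned by %s\n", ref->owner);
    con_put(ref);
}

int main(void)
{
    struct con c = { "mon_client" };

    worker(&c);        /* all queued work runs... */
    /* ...then the owner may tear down the embedded con safely. */
    return 0;
}
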
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 11d5f4196a73..ddec1c10ac80 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
12 struct ceph_msgpool *pool = arg; 12 struct ceph_msgpool *pool = arg;
13 struct ceph_msg *msg; 13 struct ceph_msg *msg;
14 14
15 msg = ceph_msg_new(0, pool->front_len, gfp_mask, true); 15 msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
16 if (!msg) { 16 if (!msg) {
17 dout("msgpool_alloc %s failed\n", pool->name); 17 dout("msgpool_alloc %s failed\n", pool->name);
18 } else { 18 } else {
@@ -32,10 +32,11 @@ static void msgpool_free(void *element, void *arg)
32 ceph_msg_put(msg); 32 ceph_msg_put(msg);
33} 33}
34 34
35int ceph_msgpool_init(struct ceph_msgpool *pool, 35int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
36 int front_len, int size, bool blocking, const char *name) 36 int front_len, int size, bool blocking, const char *name)
37{ 37{
38 dout("msgpool %s init\n", name); 38 dout("msgpool %s init\n", name);
39 pool->type = type;
39 pool->front_len = front_len; 40 pool->front_len = front_len;
40 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); 41 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
41 if (!pool->pool) 42 if (!pool->pool)
@@ -61,7 +62,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
61 WARN_ON(1); 62 WARN_ON(1);
62 63
63 /* try to alloc a fresh message */ 64 /* try to alloc a fresh message */
64 return ceph_msg_new(0, front_len, GFP_NOFS, false); 65 return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
65 } 66 }
66 67
67 msg = mempool_alloc(pool->pool, GFP_NOFS); 68 msg = mempool_alloc(pool->pool, GFP_NOFS);
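
The msgpool now records a message type at init time, so pooled and fallback allocations both carry the correct type; previously pool messages were created with type 0 and callers such as osd_client patched hdr.type afterwards (see the removed assignment in ceph_osdc_alloc_request below). A userspace sketch of the pool-with-fallback shape, with malloc standing in for the kernel mempool (illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct msg { int type; size_t front_len; };

struct msgpool {
    int type;          /* stamped on every message from this pool */
    size_t front_len;
};

static struct msg *msg_new(int type, size_t front_len)
{
    struct msg *m = malloc(sizeof(*m));

    if (!m)
        return NULL;
    m->type = type;
    m->front_len = front_len;
    return m;
}

/* Oversized requests bypass the pool but still get the pool's type,
 * mirroring the ceph_msgpool_get() fallback path. */
static struct msg *msgpool_get(struct msgpool *p, size_t front_len)
{
    if (front_len > p->front_len)
        return msg_new(p->type, front_len);
    return msg_new(p->type, p->front_len); /* stand-in for mempool_alloc */
}

int main(void)
{
    struct msgpool pool = { .type = 42, .front_len = 4096 };
    struct msg *m = msgpool_get(&pool, 8192);

    if (m) {
        printf("msg type=%d front_len=%zu\n", m->type, m->front_len);
        free(m);
    }
    return 0;
}
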
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1ffebed5ce0f..42119c05e82c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -139,15 +139,14 @@ void ceph_osdc_release_request(struct kref *kref)
 
 	if (req->r_request)
 		ceph_msg_put(req->r_request);
-	if (req->r_reply)
-		ceph_msg_put(req->r_reply);
 	if (req->r_con_filling_msg) {
-		dout("release_request revoking pages %p from con %p\n",
+		dout("%s revoking pages %p from con %p\n", __func__,
 		     req->r_pages, req->r_con_filling_msg);
-		ceph_con_revoke_message(req->r_con_filling_msg,
-					req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		ceph_msg_revoke_incoming(req->r_reply);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 	}
+	if (req->r_reply)
+		ceph_msg_put(req->r_reply);
 	if (req->r_own_pages)
 		ceph_release_page_vector(req->r_pages,
 					 req->r_num_pages);
@@ -214,10 +213,13 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	kref_init(&req->r_kref);
 	init_completion(&req->r_completion);
 	init_completion(&req->r_safe_completion);
+	rb_init_node(&req->r_node);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 	INIT_LIST_HEAD(&req->r_linger_item);
 	INIT_LIST_HEAD(&req->r_linger_osd);
 	INIT_LIST_HEAD(&req->r_req_lru_item);
+	INIT_LIST_HEAD(&req->r_osd_item);
+
 	req->r_flags = flags;
 
 	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -243,6 +245,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 		}
 		ceph_pagelist_init(req->r_trail);
 	}
+
 	/* create request message; allow space for oid */
 	msg_size += MAX_OBJ_NAME_SIZE;
 	if (snapc)
@@ -256,7 +259,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 		return NULL;
 	}
 
-	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
 	memset(msg->front.iov_base, 0, msg->front.iov_len);
 
 	req->r_request = msg;
@@ -624,7 +626,7 @@ static void osd_reset(struct ceph_connection *con)
 /*
  * Track open sessions with osds.
  */
-static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
+static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
 {
 	struct ceph_osd *osd;
 
@@ -634,15 +636,13 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
 
 	atomic_set(&osd->o_ref, 1);
 	osd->o_osdc = osdc;
+	osd->o_osd = onum;
 	INIT_LIST_HEAD(&osd->o_requests);
 	INIT_LIST_HEAD(&osd->o_linger_requests);
 	INIT_LIST_HEAD(&osd->o_osd_lru);
 	osd->o_incarnation = 1;
 
-	ceph_con_init(osdc->client->msgr, &osd->o_con);
-	osd->o_con.private = osd;
-	osd->o_con.ops = &osd_con_ops;
-	osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
+	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 
 	INIT_LIST_HEAD(&osd->o_keepalive_item);
 	return osd;
@@ -688,7 +688,7 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 
 static void remove_all_osds(struct ceph_osd_client *osdc)
 {
-	dout("__remove_old_osds %p\n", osdc);
+	dout("%s %p\n", __func__, osdc);
 	mutex_lock(&osdc->request_mutex);
 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
@@ -752,7 +752,8 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 		ret = -EAGAIN;
 	} else {
 		ceph_con_close(&osd->o_con);
-		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+			      &osdc->osdmap->osd_addr[osd->o_osd]);
 		osd->o_incarnation++;
 	}
 	return ret;
@@ -853,7 +854,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
 
 	if (req->r_osd) {
 		/* make sure the original request isn't in flight. */
-		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+		ceph_msg_revoke(req->r_request);
 
 		list_del_init(&req->r_osd_item);
 		if (list_empty(&req->r_osd->o_requests) &&
@@ -880,7 +881,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
 static void __cancel_request(struct ceph_osd_request *req)
 {
 	if (req->r_sent && req->r_osd) {
-		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+		ceph_msg_revoke(req->r_request);
 		req->r_sent = 0;
 	}
 }
@@ -890,7 +891,9 @@ static void __register_linger_request(struct ceph_osd_client *osdc,
 {
 	dout("__register_linger_request %p\n", req);
 	list_add_tail(&req->r_linger_item, &osdc->req_linger);
-	list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
+	if (req->r_osd)
+		list_add_tail(&req->r_linger_osd,
+			      &req->r_osd->o_linger_requests);
 }
 
 static void __unregister_linger_request(struct ceph_osd_client *osdc,
@@ -998,18 +1001,18 @@ static int __map_request(struct ceph_osd_client *osdc,
 	req->r_osd = __lookup_osd(osdc, o);
 	if (!req->r_osd && o >= 0) {
 		err = -ENOMEM;
-		req->r_osd = create_osd(osdc);
+		req->r_osd = create_osd(osdc, o);
 		if (!req->r_osd) {
 			list_move(&req->r_req_lru_item, &osdc->req_notarget);
 			goto out;
 		}
 
 		dout("map_request osd %p is osd%d\n", req->r_osd, o);
-		req->r_osd->o_osd = o;
-		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
 		__insert_osd(osdc, req->r_osd);
 
-		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+		ceph_con_open(&req->r_osd->o_con,
+			      CEPH_ENTITY_TYPE_OSD, o,
+			      &osdc->osdmap->osd_addr[o]);
 	}
 
 	if (req->r_osd) {
@@ -1216,7 +1219,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	if (req->r_con_filling_msg == con && req->r_reply == msg) {
 		dout(" dropping con_filling_msg ref %p\n", con);
 		req->r_con_filling_msg = NULL;
-		ceph_con_put(con);
+		con->ops->put(con);
 	}
 
 	if (!req->r_got_reply) {
@@ -1304,8 +1307,9 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 
 	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
 	mutex_lock(&osdc->request_mutex);
-	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+	for (p = rb_first(&osdc->requests); p; ) {
 		req = rb_entry(p, struct ceph_osd_request, r_node);
+		p = rb_next(p);
 		err = __map_request(osdc, req, force_resend);
 		if (err < 0)
 			continue; /* error */
@@ -1313,10 +1317,23 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
 			needmap++; /* request a newer map */
 		} else if (err > 0) {
-			dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
-			     req->r_osd ? req->r_osd->o_osd : -1);
-			if (!req->r_linger)
+			if (!req->r_linger) {
+				dout("%p tid %llu requeued on osd%d\n", req,
+				     req->r_tid,
+				     req->r_osd ? req->r_osd->o_osd : -1);
 				req->r_flags |= CEPH_OSD_FLAG_RETRY;
+			}
+		}
+		if (req->r_linger && list_empty(&req->r_linger_item)) {
+			/*
+			 * register as a linger so that we will
+			 * re-submit below and get a new tid
+			 */
+			dout("%p tid %llu restart on osd%d\n",
+			     req, req->r_tid,
+			     req->r_osd ? req->r_osd->o_osd : -1);
+			__register_linger_request(osdc, req);
+			__unregister_request(osdc, req);
 		}
 	}
 
@@ -1391,7 +1408,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 			     epoch, maplen);
 			newmap = osdmap_apply_incremental(&p, next,
 							  osdc->osdmap,
-							  osdc->client->msgr);
+							  &osdc->client->msgr);
 			if (IS_ERR(newmap)) {
 				err = PTR_ERR(newmap);
 				goto bad;
@@ -1839,11 +1856,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	if (!osdc->req_mempool)
 		goto out;
 
-	err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
+	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
+				OSD_OP_FRONT_LEN, 10, true,
 				"osd_op");
 	if (err < 0)
 		goto out_mempool;
-	err = ceph_msgpool_init(&osdc->msgpool_op_reply,
+	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
 				OSD_OPREPLY_FRONT_LEN, 10, true,
 				"osd_op_reply");
 	if (err < 0)
@@ -2019,16 +2037,16 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	if (!req) {
 		*skip = 1;
 		m = NULL;
-		pr_info("get_reply unknown tid %llu from osd%d\n", tid,
+		dout("get_reply unknown tid %llu from osd%d\n", tid,
 			osd->o_osd);
 		goto out;
 	}
 
 	if (req->r_con_filling_msg) {
-		dout("get_reply revoking msg %p from old con %p\n",
+		dout("%s revoking msg %p from old con %p\n", __func__,
 		     req->r_reply, req->r_con_filling_msg);
-		ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		ceph_msg_revoke_incoming(req->r_reply);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 		req->r_con_filling_msg = NULL;
 	}
 
@@ -2063,7 +2081,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 #endif
 	}
 	*skip = 0;
-	req->r_con_filling_msg = ceph_con_get(con);
+	req->r_con_filling_msg = con->ops->get(con);
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out:
@@ -2080,6 +2098,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
 	int type = le16_to_cpu(hdr->type);
 	int front = le32_to_cpu(hdr->front_len);
 
+	*skip = 0;
 	switch (type) {
 	case CEPH_MSG_OSD_MAP:
 	case CEPH_MSG_WATCH_NOTIFY:
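The kick_requests() rework above is the subtle part of this file: __unregister_request() can remove the current node from the osdc->requests rb-tree, so the loop now advances the cursor with rb_next() before the body runs. A minimal user-space sketch of the same iterate-while-unlinking discipline, using a hypothetical singly linked request list in place of the kernel rb-tree:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical request node; stands in for ceph_osd_request. */
struct req {
	int tid;
	struct req *next;
};

/* Unlink and free one request, as __unregister_request() removes the
 * node from osdc->requests and drops its reference. */
static void unregister(struct req **head, struct req *r)
{
	struct req **pp = head;

	while (*pp && *pp != r)
		pp = &(*pp)->next;
	if (*pp)
		*pp = r->next;
	free(r);
}

int main(void)
{
	struct req *head = NULL, *p, *next;

	for (int i = 3; i >= 1; i--) {
		p = malloc(sizeof(*p));
		p->tid = i;
		p->next = head;
		head = p;
	}

	/* Advance the cursor *before* the body may unregister the
	 * current node -- the same fix as the kick_requests() loop. */
	for (p = head; p; p = next) {
		next = p->next;
		if (p->tid == 2)
			unregister(&head, p);	/* invalidates p */
	}

	for (p = head; p; p = p->next)
		printf("tid %d survives\n", p->tid);
	return 0;
}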
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 81e3b84a77ef..3124b71a8883 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -135,6 +135,21 @@ bad:
 	return -EINVAL;
 }
 
+static int skip_name_map(void **p, void *end)
+{
+	int len;
+	ceph_decode_32_safe(p, end, len ,bad);
+	while (len--) {
+		int strlen;
+		*p += sizeof(u32);
+		ceph_decode_32_safe(p, end, strlen, bad);
+		*p += strlen;
+}
+	return 0;
+bad:
+	return -EINVAL;
+}
+
 static struct crush_map *crush_decode(void *pbyval, void *end)
 {
 	struct crush_map *c;
@@ -143,6 +158,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 	void **p = &pbyval;
 	void *start = pbyval;
 	u32 magic;
+	u32 num_name_maps;
 
 	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
 
@@ -150,6 +166,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 	if (c == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	/* set tunables to default values */
+	c->choose_local_tries = 2;
+	c->choose_local_fallback_tries = 5;
+	c->choose_total_tries = 19;
+
 	ceph_decode_need(p, end, 4*sizeof(u32), bad);
 	magic = ceph_decode_32(p);
 	if (magic != CRUSH_MAGIC) {
@@ -297,7 +318,25 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 	}
 
 	/* ignore trailing name maps. */
+	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
+		err = skip_name_map(p, end);
+		if (err < 0)
+			goto done;
+	}
+
+	/* tunables */
+	ceph_decode_need(p, end, 3*sizeof(u32), done);
+	c->choose_local_tries = ceph_decode_32(p);
+	c->choose_local_fallback_tries = ceph_decode_32(p);
+	c->choose_total_tries = ceph_decode_32(p);
+	dout("crush decode tunable choose_local_tries = %d",
+	     c->choose_local_tries);
+	dout("crush decode tunable choose_local_fallback_tries = %d",
+	     c->choose_local_fallback_tries);
+	dout("crush decode tunable choose_total_tries = %d",
+	     c->choose_total_tries);
 
+done:
 	dout("crush_decode success\n");
 	return c;
 
@@ -488,15 +527,16 @@ static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
 		ceph_decode_32_safe(p, end, pool, bad);
 		ceph_decode_32_safe(p, end, len, bad);
 		dout(" pool %d len %d\n", pool, len);
+		ceph_decode_need(p, end, len, bad);
 		pi = __lookup_pg_pool(&map->pg_pools, pool);
 		if (pi) {
+			char *name = kstrndup(*p, len, GFP_NOFS);
+
+			if (!name)
+				return -ENOMEM;
 			kfree(pi->name);
-			pi->name = kmalloc(len + 1, GFP_NOFS);
-			if (pi->name) {
-				memcpy(pi->name, *p, len);
-				pi->name[len] = '\0';
-				dout(" name is %s\n", pi->name);
-			}
+			pi->name = name;
+			dout(" name is %s\n", pi->name);
 		}
 		*p += len;
 	}
@@ -666,6 +706,9 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
 		ceph_decode_copy(p, &pgid, sizeof(pgid));
 		n = ceph_decode_32(p);
+		err = -EINVAL;
+		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
+			goto bad;
 		ceph_decode_need(p, end, n * sizeof(u32), bad);
 		err = -ENOMEM;
 		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
@@ -889,6 +932,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		(void) __remove_pg_mapping(&map->pg_temp, pgid);
 
 		/* insert */
+		if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
+			err = -EINVAL;
+			goto bad;
+		}
 		pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
 		if (!pg) {
 			err = -ENOMEM;
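Both new UINT_MAX guards exist because n and pglen come straight off the wire: with a 32-bit size_t, sizeof(*pg) + n*sizeof(u32) can wrap, and kmalloc() would hand back a buffer shorter than the loop that fills it. A user-space sketch of the same guard, using a hypothetical pg_mapping struct:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's pg-temp mapping. */
struct pg_mapping {
	uint64_t pgid;
	uint32_t len;
	uint32_t osds[];	/* flexible array, n entries follow */
};

/* Refuse counts that would wrap the size computation, mirroring the
 * "n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)" check added above. */
static struct pg_mapping *alloc_mapping(uint32_t n)
{
	if (n > (UINT_MAX - sizeof(struct pg_mapping)) / sizeof(uint32_t))
		return NULL;	/* size would overflow */
	return malloc(sizeof(struct pg_mapping) + n * sizeof(uint32_t));
}

int main(void)
{
	struct pg_mapping *ok = alloc_mapping(16);
	struct pg_mapping *bad = alloc_mapping(UINT32_MAX);

	printf("sane count:    %s\n", ok ? "allocated" : "rejected");
	printf("hostile count: %s\n", bad ? "allocated" : "rejected");
	free(ok);
	free(bad);
	return 0;
}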
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 13cb409a7bba..665cd23020ff 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -72,8 +72,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
 }
 EXPORT_SYMBOL(ceph_pagelist_append);
 
-/**
- * Allocate enough pages for a pagelist to append the given amount
+/* Allocate enough pages for a pagelist to append the given amount
  * of data without without allocating.
  * Returns: 0 on success, -ENOMEM on error.
  */
@@ -95,9 +94,7 @@ int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
 }
 EXPORT_SYMBOL(ceph_pagelist_reserve);
 
-/**
- * Free any pages that have been preallocated.
- */
+/* Free any pages that have been preallocated. */
 int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
 {
 	while (!list_empty(&pl->free_list)) {
@@ -112,9 +109,7 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
 }
 EXPORT_SYMBOL(ceph_pagelist_free_reserve);
 
-/**
- * Create a truncation point.
- */
+/* Create a truncation point. */
 void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
 			      struct ceph_pagelist_cursor *c)
 {
@@ -124,8 +119,7 @@ void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
 }
 EXPORT_SYMBOL(ceph_pagelist_set_cursor);
 
-/**
- * Truncate a pagelist to the given point. Move extra pages to reserve.
+/* Truncate a pagelist to the given point. Move extra pages to reserve.
  * This won't sleep.
  * Returns: 0 on success,
  *          -EINVAL if the pagelist doesn't match the trunc point pagelist
diff --git a/net/compat.c b/net/compat.c
index 1b96281892de..74ed1d7a84a2 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -221,6 +221,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
 {
 	struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
 	struct compat_cmsghdr cmhdr;
+	struct compat_timeval ctv;
+	struct compat_timespec cts[3];
 	int cmlen;
 
 	if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
@@ -229,8 +231,6 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
 	}
 
 	if (!COMPAT_USE_64BIT_TIME) {
-		struct compat_timeval ctv;
-		struct compat_timespec cts[3];
 		if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
 			struct timeval *tv = (struct timeval *)data;
 			ctv.tv_sec = tv->tv_sec;
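Hoisting ctv and cts to function scope matters because data is pointed at them inside the !COMPAT_USE_64BIT_TIME block and consumed after the block closes; with block scope that is a read through a dangling pointer. A compact sketch of the bug and the fix, with hypothetical helper names:

#include <stdio.h>

/* BROKEN: leaves 'data' pointing into a dead block scope. */
static void bad_copy(const long *src, long *out)
{
	const long *data = src;

	if (*src < 0) {
		long fixed = -*src;	/* lifetime ends at the brace */
		data = &fixed;
	}			/* 'fixed' is out of scope here... */
	*out = *data;		/* ...but may still be read: UB */
}

/* FIXED: hoist the temporary to function scope, as the patch hoists
 * ctv/cts out of the !COMPAT_USE_64BIT_TIME block. */
static void good_copy(const long *src, long *out)
{
	long fixed;
	const long *data = src;

	if (*src < 0) {
		fixed = -*src;
		data = &fixed;	/* 'fixed' now outlives the if block */
	}
	*out = *data;
}

int main(void)
{
	long in = -42, out = 0;

	good_copy(&in, &out);
	printf("%ld\n", out);	/* 42 */
	(void)bad_copy;		/* never call the broken variant */
	return 0;
}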
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ae6acf6a3dea..0337e2b76862 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -248,7 +248,6 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 	unlock_sock_fast(sk, slow);
 
 	/* skb is now orphaned, can be freed outside of locked section */
-	trace_kfree_skb(skb, skb_free_datagram_locked);
 	__kfree_skb(skb);
 }
 EXPORT_SYMBOL(skb_free_datagram_locked);
diff --git a/net/core/dev.c b/net/core/dev.c
index cd0981977f5c..0cb3fe8d8e72 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
 	no_module = request_module("netdev-%s", name);
 	if (no_module && capable(CAP_SYS_MODULE)) {
 		if (!request_module("%s", name))
-			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
 			       name);
 	}
 }
 EXPORT_SYMBOL(dev_load);
@@ -1172,6 +1172,7 @@ static int __dev_open(struct net_device *dev)
 		net_dmaengine_get();
 		dev_set_rx_mode(dev);
 		dev_activate(dev);
+		add_device_randomness(dev->dev_addr, dev->addr_len);
 	}
 
 	return ret;
@@ -1632,6 +1633,8 @@ static inline int deliver_skb(struct sk_buff *skb,
 			      struct packet_type *pt_prev,
 			      struct net_device *orig_dev)
 {
+	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+		return -ENOMEM;
 	atomic_inc(&skb->users);
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
@@ -1691,7 +1694,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
-/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+/**
+ * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  * @dev: Network device
  * @txq: number of queues available
  *
@@ -1793,6 +1797,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
 #endif
 
+/**
+ * netif_get_num_default_rss_queues - default number of RSS queues
+ *
+ * This routine should set an upper limit on the number of RSS queues
+ * used by default by multiqueue devices.
+ */
+int netif_get_num_default_rss_queues(void)
+{
+	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+}
+EXPORT_SYMBOL(netif_get_num_default_rss_queues);
+
 static inline void __netif_reschedule(struct Qdisc *q)
 {
 	struct softnet_data *sd;
@@ -2089,25 +2105,6 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 	return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-
-	if (sk && !skb_shinfo(skb)->tx_flags) {
-		/* skb_tx_hash() wont be able to get sk.
-		 * We copy sk_hash into skb->rxhash
-		 */
-		if (!skb->rxhash)
-			skb->rxhash = sk->sk_hash;
-		skb_orphan(skb);
-	}
-}
-
 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
 	return ((features & NETIF_F_GEN_CSUM) ||
@@ -2193,8 +2190,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		if (!list_empty(&ptype_all))
 			dev_queue_xmit_nit(skb, dev);
 
-		skb_orphan_try(skb);
-
 		features = netif_skb_features(skb);
 
 		if (vlan_tx_tag_present(skb) &&
@@ -2304,7 +2299,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol ^ skb->rxhash;
+		hash = (__force u16) skb->protocol;
 	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
@@ -2465,8 +2460,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
@@ -2476,6 +2475,23 @@ static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
 /**
+ * dev_loopback_xmit - loop back @skb
+ * @skb: buffer to transmit
+ */
+int dev_loopback_xmit(struct sk_buff *skb)
+{
+	skb_reset_mac_header(skb);
+	__skb_pull(skb, skb_network_offset(skb));
+	skb->pkt_type = PACKET_LOOPBACK;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	WARN_ON(!skb_dst(skb));
+	skb_dst_force(skb);
+	netif_rx_ni(skb);
+	return 0;
+}
+EXPORT_SYMBOL(dev_loopback_xmit);
+
+/**
  * dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit
  *
@@ -3140,6 +3156,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
+/*
+ * Limit the use of PFMEMALLOC reserves to those protocols that implement
+ * the special handling of PFMEMALLOC skbs.
+ */
+static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_ARP):
+	case __constant_htons(ETH_P_IP):
+	case __constant_htons(ETH_P_IPV6):
+	case __constant_htons(ETH_P_8021Q):
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -3149,17 +3182,28 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
+	unsigned long pflags = current->flags;
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
 
+	/*
+	 * PFMEMALLOC skbs are special, they should
+	 * - be delivered to SOCK_MEMALLOC sockets only
+	 * - stay away from userspace
+	 * - have bounded memory usage
	 *
+	 * Use PF_MEMALLOC as this saves us from propagating the allocation
+	 * context down to all allocation sites.
+	 */
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+		current->flags |= PF_MEMALLOC;
+
 	/* if we've gotten here through NAPI, check netpoll */
 	if (netpoll_receive_skb(skb))
-		return NET_RX_DROP;
+		goto out;
 
-	if (!skb->skb_iif)
-		skb->skb_iif = skb->dev->ifindex;
 	orig_dev = skb->dev;
 
 	skb_reset_network_header(skb);
@@ -3171,13 +3215,14 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	rcu_read_lock();
 
 another_round:
+	skb->skb_iif = skb->dev->ifindex;
 
 	__this_cpu_inc(softnet_data.processed);
 
 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 		skb = vlan_untag(skb);
 		if (unlikely(!skb))
-			goto out;
+			goto unlock;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3187,6 +3232,9 @@ another_round:
 	}
 #endif
 
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+		goto skip_taps;
+
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
 		if (!ptype->dev || ptype->dev == skb->dev) {
 			if (pt_prev)
@@ -3195,13 +3243,18 @@ another_round:
 		}
 	}
 
+skip_taps:
 #ifdef CONFIG_NET_CLS_ACT
 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 	if (!skb)
-		goto out;
+		goto unlock;
 ncls:
 #endif
 
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
+				&& !skb_pfmemalloc_protocol(skb))
+		goto drop;
+
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (vlan_tx_tag_present(skb)) {
 		if (pt_prev) {
@@ -3211,7 +3264,7 @@ ncls:
 		if (vlan_do_receive(&skb, !rx_handler))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto out;
+			goto unlock;
 	}
 
 	if (rx_handler) {
@@ -3221,7 +3274,7 @@ ncls:
 		}
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
-			goto out;
+			goto unlock;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3249,8 +3302,12 @@ ncls:
 	}
 
 	if (pt_prev) {
-		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+			ret = -ENOMEM;
+		else
+			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
+drop:
 		atomic_long_inc(&skb->dev->rx_dropped);
 		kfree_skb(skb);
 		/* Jamal, now you will not able to escape explaining
@@ -3259,8 +3316,10 @@ ncls:
 		ret = NET_RX_DROP;
 	}
 
-out:
+unlock:
 	rcu_read_unlock();
+out:
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 	return ret;
 }
 
@@ -4784,6 +4843,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 	err = ops->ndo_set_mac_address(dev, sa);
 	if (!err)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	add_device_randomness(dev->dev_addr, dev->addr_len);
 	return err;
 }
 EXPORT_SYMBOL(dev_set_mac_address);
@@ -5562,6 +5622,7 @@ int register_netdevice(struct net_device *dev)
 	dev_init_scheduler(dev);
 	dev_hold(dev);
 	list_netdevice(dev);
+	add_device_randomness(dev->dev_addr, dev->addr_len);
 
 	/* Notify protocols, that a new device appeared. */
 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -5663,7 +5724,7 @@ int netdev_refcnt_read(const struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_refcnt_read);
 
-/*
+/**
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
@@ -6300,7 +6361,8 @@ static struct hlist_head *netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
-	INIT_LIST_HEAD(&net->dev_base_head);
+	if (net != &init_net)
+		INIT_LIST_HEAD(&net->dev_base_head);
 
 	net->dev_name_head = netdev_create_hash();
 	if (net->dev_name_head == NULL)
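The __netif_receive_skb() changes above hinge on one idiom: snapshot current->flags, possibly set PF_MEMALLOC while handling a pfmemalloc skb, and restore only that bit on every exit path (hence the split unlock:/out: labels). A user-space sketch of the restore-only-what-you-touched pattern; the flag value and helper names here are stand-ins for the kernel's tsk_restore_flags():

#include <stdio.h>

#define PF_MEMALLOC 0x0800	/* the one bit this function may set */

static unsigned int task_flags;	/* stands in for current->flags */

/* Restore only the masked bits to their saved values, leaving any
 * other bits that changed in the meantime alone -- the contract of
 * the kernel's tsk_restore_flags(). */
static void restore_flags(unsigned int saved, unsigned int mask)
{
	task_flags = (task_flags & ~mask) | (saved & mask);
}

static int receive(int from_reserves)
{
	unsigned int pflags = task_flags;	/* snapshot on entry */
	int ret = 0;

	if (from_reserves)
		task_flags |= PF_MEMALLOC;

	/* ... allocation-heavy processing; every early exit funnels
	 * through the restore below, like the unlock:/out: labels ... */

	restore_flags(pflags, PF_MEMALLOC);
	return ret;
}

int main(void)
{
	receive(1);
	printf("PF_MEMALLOC left set: %s\n",
	       (task_flags & PF_MEMALLOC) ? "yes" : "no");	/* no */
	return 0;
}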
diff --git a/net/core/dst.c b/net/core/dst.c
index 43d94cedbf7c..069d51d29414 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -94,7 +94,7 @@ loop:
 		 * But we do not have state "obsoleted, but
 		 * referenced by parent", so it is right.
 		 */
-		if (dst->obsolete > 1)
+		if (dst->obsolete > 0)
 			continue;
 
 		___dst_free(dst);
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(dst_discard);
 const u32 dst_default_metrics[RTAX_MAX];
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-		int initial_ref, int initial_obsolete, int flags)
+		int initial_ref, int initial_obsolete, unsigned short flags)
 {
 	struct dst_entry *dst;
 
@@ -171,7 +171,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst_init_metrics(dst, dst_default_metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
-	RCU_INIT_POINTER(dst->_neighbour, NULL);
 #ifdef CONFIG_XFRM
 	dst->xfrm = NULL;
 #endif
@@ -188,6 +187,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->__use = 0;
 	dst->lastuse = jiffies;
 	dst->flags = flags;
+	dst->pending_confirm = 0;
 	dst->next = NULL;
 	if (!(flags & DST_NOCOUNT))
 		dst_entries_add(ops, 1);
@@ -202,7 +202,7 @@ static void ___dst_free(struct dst_entry *dst)
 	 */
 	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
 		dst->input = dst->output = dst_discard;
-	dst->obsolete = 2;
+	dst->obsolete = DST_OBSOLETE_DEAD;
 }
 
 void __dst_free(struct dst_entry *dst)
@@ -224,19 +224,12 @@ EXPORT_SYMBOL(__dst_free);
 struct dst_entry *dst_destroy(struct dst_entry * dst)
 {
 	struct dst_entry *child;
-	struct neighbour *neigh;
 
 	smp_rmb();
 
 again:
-	neigh = rcu_dereference_protected(dst->_neighbour, 1);
 	child = dst->child;
 
-	if (neigh) {
-		RCU_INIT_POINTER(dst->_neighbour, NULL);
-		neigh_release(neigh);
-	}
-
 	if (!(dst->flags & DST_NOCOUNT))
 		dst_entries_add(dst->ops, -1);
 
@@ -360,19 +353,9 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 	if (!unregister) {
 		dst->input = dst->output = dst_discard;
 	} else {
-		struct neighbour *neigh;
-
 		dst->dev = dev_net(dst->dev)->loopback_dev;
 		dev_hold(dst->dev);
 		dev_put(dev);
-		rcu_read_lock();
-		neigh = dst_get_neighbour_noref(dst);
-		if (neigh && neigh->dev == dev) {
-			neigh->dev = dst->dev;
-			dev_hold(dst->dev);
-			dev_put(dev);
-		}
-		rcu_read_unlock();
 	}
 }
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9c2afb480270..cbf033dcaf1f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -729,6 +729,40 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
 	return dev->ethtool_ops->set_wol(dev, &wol);
 }
 
+static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
+{
+	struct ethtool_eee edata;
+	int rc;
+
+	if (!dev->ethtool_ops->get_eee)
+		return -EOPNOTSUPP;
+
+	memset(&edata, 0, sizeof(struct ethtool_eee));
+	edata.cmd = ETHTOOL_GEEE;
+	rc = dev->ethtool_ops->get_eee(dev, &edata);
+
+	if (rc)
+		return rc;
+
+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
+{
+	struct ethtool_eee edata;
+
+	if (!dev->ethtool_ops->set_eee)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&edata, useraddr, sizeof(edata)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_eee(dev, &edata);
+}
+
 static int ethtool_nway_reset(struct net_device *dev)
 {
 	if (!dev->ethtool_ops->nway_reset)
@@ -1409,6 +1443,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GSET:
 	case ETHTOOL_GDRVINFO:
 	case ETHTOOL_GMSGLVL:
+	case ETHTOOL_GLINK:
 	case ETHTOOL_GCOALESCE:
 	case ETHTOOL_GRINGPARAM:
 	case ETHTOOL_GPAUSEPARAM:
@@ -1417,6 +1452,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GSG:
 	case ETHTOOL_GSSET_INFO:
 	case ETHTOOL_GSTRINGS:
+	case ETHTOOL_GSTATS:
 	case ETHTOOL_GTSO:
 	case ETHTOOL_GPERMADDR:
 	case ETHTOOL_GUFO:
@@ -1429,8 +1465,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
+	case ETHTOOL_GRXFHINDIR:
 	case ETHTOOL_GFEATURES:
+	case ETHTOOL_GCHANNELS:
 	case ETHTOOL_GET_TS_INFO:
+	case ETHTOOL_GEEE:
 		break;
 	default:
 		if (!capable(CAP_NET_ADMIN))
@@ -1471,6 +1510,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 		rc = ethtool_set_value_void(dev, useraddr,
 				       dev->ethtool_ops->set_msglevel);
 		break;
+	case ETHTOOL_GEEE:
+		rc = ethtool_get_eee(dev, useraddr);
+		break;
+	case ETHTOOL_SEEE:
+		rc = ethtool_set_eee(dev, useraddr);
+		break;
 	case ETHTOOL_NWAY_RST:
 		rc = ethtool_nway_reset(dev);
 		break;
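The new ETHTOOL_GEEE/ETHTOOL_SEEE handlers follow the stock copy_from_user/copy_to_user shape and are reached from user space through the SIOCETHTOOL ioctl. A sketch of querying EEE state, assuming kernel headers recent enough to provide struct ethtool_eee and ETHTOOL_GEEE:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_eee eee;
	struct ifreq ifr;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&eee, 0, sizeof(eee));
	eee.cmd = ETHTOOL_GEEE;		/* mirrors ethtool_get_eee() */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");	/* EOPNOTSUPP if no get_eee op */
	else
		printf("EEE %s\n", eee.eee_enabled ? "enabled" : "disabled");

	close(fd);
	return 0;
}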
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 72cceb79d0d4..ab7db83236c9 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -151,6 +151,8 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
 
 	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
 		list_del_rcu(&rule->list);
+		if (ops->delete)
+			ops->delete(rule);
 		fib_rule_put(rule);
 	}
 }
@@ -499,6 +501,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
 				   NETLINK_CB(skb).pid);
+		if (ops->delete)
+			ops->delete(rule);
 		fib_rule_put(rule);
 		flush_route_cache(ops);
 		rules_ops_put(ops);
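ops->delete is an optional per-family hook, so both call sites test for NULL before invoking it. The check-then-call shape for optional ops, sketched with hypothetical types:

#include <stdio.h>

struct rule {
	int id;
};

/* Per-family operations; 'delete' is optional, as in fib_rules_ops. */
struct rules_ops {
	void (*delete)(struct rule *);
};

static void my_delete(struct rule *r)
{
	printf("family hook for rule %d\n", r->id);
}

static void del_rule(const struct rules_ops *ops, struct rule *r)
{
	/* Call the hook only when the family registered one... */
	if (ops->delete)
		ops->delete(r);
	/* ...then drop the common reference (fib_rule_put). */
}

int main(void)
{
	struct rule r = { .id = 1 };
	struct rules_ops with = { .delete = my_delete };
	struct rules_ops without = { 0 };

	del_rule(&with, &r);
	del_rule(&without, &r);	/* safe: hook absent */
	return 0;
}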
diff --git a/net/core/filter.c b/net/core/filter.c
index d4ce2dc712e3..907efd27ec77 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -83,6 +83,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 	int err;
 	struct sk_filter *filter;
 
+	/*
+	 * If the skb was allocated from pfmemalloc reserves, only
+	 * allow SOCK_MEMALLOC sockets to use it as this socket is
+	 * helping free memory
+	 */
+	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+		return -ENOMEM;
+
 	err = security_sock_rcv_skb(sk, skb);
 	if (err)
 		return err;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a225089df5b6..466820b6e344 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -4,6 +4,7 @@
 #include <linux/ipv6.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 #include <linux/if_tunnel.h>
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
@@ -55,8 +56,8 @@ ipv6:
 			return false;
 
 		ip_proto = iph->nexthdr;
-		flow->src = iph->saddr.s6_addr32[3];
-		flow->dst = iph->daddr.s6_addr32[3];
+		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 		nhoff += sizeof(struct ipv6hdr);
 		break;
 	}
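Keying flows on s6_addr32[3] alone collapses every pair of addresses that share their low 32 bits; folding the whole 128-bit address avoids that. A sketch of the fold, assuming ipv6_addr_hash() XORs the four address words (its historical shape; check include/net/ipv6.h of the matching tree):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct in6_addr's word view. */
struct in6_addr_words {
	uint32_t s6_addr32[4];	/* 128-bit address as four words */
};

/* Fold all 128 bits into one word so flows that differ only in their
 * upper bits still get distinct keys (assumed XOR fold, as in
 * ipv6_addr_hash()). */
static uint32_t addr_fold(const struct in6_addr_words *a)
{
	return a->s6_addr32[0] ^ a->s6_addr32[1] ^
	       a->s6_addr32[2] ^ a->s6_addr32[3];
}

int main(void)
{
	/* Two addresses equal in the low 32 bits only. */
	struct in6_addr_words a = { { 0x20010db8u, 0, 0, 0x1 } };
	struct in6_addr_words b = { { 0xfe800000u, 0, 0, 0x1 } };

	printf("low-word key: %08x vs %08x (collide)\n",
	       (unsigned)a.s6_addr32[3], (unsigned)b.s6_addr32[3]);
	printf("folded key:   %08x vs %08x (distinct)\n",
	       (unsigned)addr_fold(&a), (unsigned)addr_fold(&b));
	return 0;
}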
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d81d026138f0..117afaf51268 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -474,8 +474,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 }
 EXPORT_SYMBOL(neigh_lookup_nodev);
 
-struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
-			       struct net_device *dev)
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+				 struct net_device *dev, bool want_ref)
 {
 	u32 hash_val;
 	int key_len = tbl->key_len;
@@ -535,14 +535,16 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
 	     n1 = rcu_dereference_protected(n1->next,
 			lockdep_is_held(&tbl->lock))) {
 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
-			neigh_hold(n1);
+			if (want_ref)
+				neigh_hold(n1);
 			rc = n1;
 			goto out_tbl_unlock;
 		}
 	}
 
 	n->dead = 0;
-	neigh_hold(n);
+	if (want_ref)
+		neigh_hold(n);
 	rcu_assign_pointer(n->next,
 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 						     lockdep_is_held(&tbl->lock)));
@@ -558,7 +560,7 @@ out_neigh_release:
 	neigh_release(n);
 	goto out;
 }
-EXPORT_SYMBOL(neigh_create);
+EXPORT_SYMBOL(__neigh_create);
 
 static u32 pneigh_hash(const void *pkey, int key_len)
 {
@@ -1199,10 +1201,23 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 			write_unlock_bh(&neigh->lock);
 
 			rcu_read_lock();
-			/* On shaper/eql skb->dst->neighbour != neigh :( */
-			if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
-				n1 = n2;
+
+			/* Why not just use 'neigh' as-is? The problem is that
+			 * things such as shaper, eql, and sch_teql can end up
+			 * using alternative, different, neigh objects to output
+			 * the packet in the output path. So what we need to do
+			 * here is re-lookup the top-level neigh in the path so
+			 * we can reinject the packet there.
+			 */
+			n2 = NULL;
+			if (dst) {
+				n2 = dst_neigh_lookup_skb(dst, skb);
+				if (n2)
+					n1 = n2;
+			}
 			n1->output(n1, skb);
+			if (n2)
+				neigh_release(n2);
 			rcu_read_unlock();
 
 			write_lock_bh(&neigh->lock);
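__neigh_create() takes a want_ref flag so callers that do not keep the entry can skip the hold/release round trip; presumably the old neigh_create() survives as a thin wrapper passing want_ref = true (the usual pattern for such renames, not visible in these hunks). A sketch of that wrapper shape with toy types:

#include <stdbool.h>
#include <stdio.h>

struct neigh {
	int refcnt;
};

static struct neigh table_entry = { .refcnt = 1 };

/* Core lookup/insert; takes a reference only when asked to, as the
 * patched __neigh_create() does. */
static struct neigh *neigh_create_core(bool want_ref)
{
	struct neigh *n = &table_entry;

	if (want_ref)
		n->refcnt++;	/* neigh_hold() */
	return n;
}

/* Assumed shape of the compatibility entry point: always grab a
 * reference on behalf of the caller. */
static inline struct neigh *neigh_create_compat(void)
{
	return neigh_create_core(true);
}

int main(void)
{
	neigh_create_core(false);	/* caller doesn't keep the entry */
	neigh_create_compat();		/* caller must release it later */
	printf("refcnt = %d\n", table_entry.refcnt);	/* 2 */
	return 0;
}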
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fdf9e61d0651..72607174ea5a 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -417,72 +417,6 @@ static struct attribute_group netstat_group = {
 	.name = "statistics",
 	.attrs = netstat_attrs,
 };
-
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
-/* helper function that does all the locking etc for wireless stats */
-static ssize_t wireless_show(struct device *d, char *buf,
-			     ssize_t (*format)(const struct iw_statistics *,
					       char *))
-{
-	struct net_device *dev = to_net_dev(d);
-	const struct iw_statistics *iw;
-	ssize_t ret = -EINVAL;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-	if (dev_isalive(dev)) {
-		iw = get_wireless_stats(dev);
-		if (iw)
-			ret = (*format)(iw, buf);
-	}
-	rtnl_unlock();
-
-	return ret;
-}
-
-/* show function template for wireless fields */
-#define WIRELESS_SHOW(name, field, format_string)			\
-static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
-{									\
-	return sprintf(buf, format_string, iw->field);			\
-}									\
-static ssize_t show_iw_##name(struct device *d,			\
-			      struct device_attribute *attr, char *buf)	\
-{									\
-	return wireless_show(d, buf, format_iw_##name);			\
-}									\
-static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
-
-WIRELESS_SHOW(status, status, fmt_hex);
-WIRELESS_SHOW(link, qual.qual, fmt_dec);
-WIRELESS_SHOW(level, qual.level, fmt_dec);
-WIRELESS_SHOW(noise, qual.noise, fmt_dec);
-WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
-WIRELESS_SHOW(crypt, discard.code, fmt_dec);
-WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
-WIRELESS_SHOW(misc, discard.misc, fmt_dec);
-WIRELESS_SHOW(retries, discard.retries, fmt_dec);
-WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
-
-static struct attribute *wireless_attrs[] = {
-	&dev_attr_status.attr,
-	&dev_attr_link.attr,
-	&dev_attr_level.attr,
-	&dev_attr_noise.attr,
-	&dev_attr_nwid.attr,
-	&dev_attr_crypt.attr,
-	&dev_attr_fragment.attr,
-	&dev_attr_retries.attr,
-	&dev_attr_misc.attr,
-	&dev_attr_beacon.attr,
-	NULL
-};
-
-static struct attribute_group wireless_group = {
-	.name = "wireless",
-	.attrs = wireless_attrs,
-};
-#endif
 #endif /* CONFIG_SYSFS */
 
 #ifdef CONFIG_RPS
@@ -1463,14 +1397,6 @@ int netdev_register_kobject(struct net_device *net)
 		groups++;
 
 	*groups++ = &netstat_group;
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
-	if (net->ieee80211_ptr)
-		*groups++ = &wireless_group;
-#ifdef CONFIG_WIRELESS_EXT
-	else if (net->wireless_handlers)
-		*groups++ = &wireless_group;
-#endif
-#endif
 #endif /* CONFIG_SYSFS */
 
 	error = device_add(dev);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index dddbacb8f28c..42f1e1c7514f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,7 +27,9 @@ static DEFINE_MUTEX(net_mutex);
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
-struct net init_net;
+struct net init_net = {
+	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
 EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
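Statically initializing init_net.dev_base_head (paired with the net != &init_net test added to netdev_init() in the net/core/dev.c hunk above) makes the device list safe to use before netdev_init() runs at boot. The same compile-time list-head idiom in plain C:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Compile-time equivalent of the kernel's LIST_HEAD_INIT(): the list
 * is already valid (empty, self-linked) before any init code runs. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct net {
	struct list_head dev_base_head;
};

static struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};

int main(void)
{
	int empty = init_net.dev_base_head.next == &init_net.dev_base_head;

	printf("usable before runtime init: %s\n", empty ? "yes" : "no");
	return 0;
}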
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index f9f40b932e4b..b4c90e42b443 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -715,14 +715,16 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
-	struct net_device *ndev = np->dev;
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
 	unsigned long flags;
 	int err;
 
+	np->dev = ndev;
+	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+
 	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
 	    !ndev->netdev_ops->ndo_poll_controller) {
 		np_err(np, "%s doesn't support polling, aborting\n",
@@ -851,13 +853,11 @@ int netpoll_setup(struct netpoll *np)
 		np_info(np, "local IP %pI4\n", &np->local_ip);
 	}
 
-	np->dev = ndev;
-
 	/* fill up the skb queue */
 	refill_skbs();
 
 	rtnl_lock();
-	err = __netpoll_setup(np);
+	err = __netpoll_setup(np, ndev);
 	rtnl_unlock();
 
 	if (err)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..ed0c0431fcd8 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -25,6 +25,8 @@
25#include <net/sock.h> 25#include <net/sock.h>
26#include <net/netprio_cgroup.h> 26#include <net/netprio_cgroup.h>
27 27
28#include <linux/fdtable.h>
29
28#define PRIOIDX_SZ 128 30#define PRIOIDX_SZ 128
29 31
30static unsigned long prioidx_map[PRIOIDX_SZ]; 32static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -49,8 +51,9 @@ static int get_prioidx(u32 *prio)
49 return -ENOSPC; 51 return -ENOSPC;
50 } 52 }
51 set_bit(prioidx, prioidx_map); 53 set_bit(prioidx, prioidx_map);
54 if (atomic_read(&max_prioidx) < prioidx)
55 atomic_set(&max_prioidx, prioidx);
52 spin_unlock_irqrestore(&prioidx_map_lock, flags); 56 spin_unlock_irqrestore(&prioidx_map_lock, flags);
53 atomic_set(&max_prioidx, prioidx);
54 *prio = prioidx; 57 *prio = prioidx;
55 return 0; 58 return 0;
56} 59}
@@ -64,7 +67,7 @@ static void put_prioidx(u32 idx)
64 spin_unlock_irqrestore(&prioidx_map_lock, flags); 67 spin_unlock_irqrestore(&prioidx_map_lock, flags);
65} 68}
66 69
67static void extend_netdev_table(struct net_device *dev, u32 new_len) 70static int extend_netdev_table(struct net_device *dev, u32 new_len)
68{ 71{
69 size_t new_size = sizeof(struct netprio_map) + 72 size_t new_size = sizeof(struct netprio_map) +
70 ((sizeof(u32) * new_len)); 73 ((sizeof(u32) * new_len));
@@ -76,7 +79,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
76 79
77 if (!new_priomap) { 80 if (!new_priomap) {
78 pr_warn("Unable to alloc new priomap!\n"); 81 pr_warn("Unable to alloc new priomap!\n");
79 return; 82 return -ENOMEM;
80 } 83 }
81 84
82 for (i = 0; 85 for (i = 0;
@@ -89,46 +92,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
89 rcu_assign_pointer(dev->priomap, new_priomap); 92 rcu_assign_pointer(dev->priomap, new_priomap);
90 if (old_priomap) 93 if (old_priomap)
91 kfree_rcu(old_priomap, rcu); 94 kfree_rcu(old_priomap, rcu);
95 return 0;
96}
97
98static int write_update_netdev_table(struct net_device *dev)
99{
100 int ret = 0;
101 u32 max_len;
102 struct netprio_map *map;
103
104 rtnl_lock();
105 max_len = atomic_read(&max_prioidx) + 1;
106 map = rtnl_dereference(dev->priomap);
107 if (!map || map->priomap_len < max_len)
108 ret = extend_netdev_table(dev, max_len);
109 rtnl_unlock();
110
111 return ret;
92} 112}
93 113
94static void update_netdev_tables(void) 114static int update_netdev_tables(void)
95{ 115{
116 int ret = 0;
96 struct net_device *dev; 117 struct net_device *dev;
97 u32 max_len = atomic_read(&max_prioidx) + 1; 118 u32 max_len;
98 struct netprio_map *map; 119 struct netprio_map *map;
99 120
100 rtnl_lock(); 121 rtnl_lock();
122 max_len = atomic_read(&max_prioidx) + 1;
101 for_each_netdev(&init_net, dev) { 123 for_each_netdev(&init_net, dev) {
102 map = rtnl_dereference(dev->priomap); 124 map = rtnl_dereference(dev->priomap);
103 if ((!map) || 125 /*
104 (map->priomap_len < max_len)) 126 * don't allocate priomap if we didn't
105 extend_netdev_table(dev, max_len); 127 * change net_prio.ifpriomap (map == NULL),
128 * this will speed up skb_update_prio.
129 */
130 if (map && map->priomap_len < max_len) {
131 ret = extend_netdev_table(dev, max_len);
132 if (ret < 0)
133 break;
134 }
106 } 135 }
107 rtnl_unlock(); 136 rtnl_unlock();
137 return ret;
108} 138}
109 139
110static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) 140static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
111{ 141{
112 struct cgroup_netprio_state *cs; 142 struct cgroup_netprio_state *cs;
113 int ret; 143 int ret = -EINVAL;
114 144
115 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 145 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
116 if (!cs) 146 if (!cs)
117 return ERR_PTR(-ENOMEM); 147 return ERR_PTR(-ENOMEM);
118 148
119 if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { 149 if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
120 kfree(cs); 150 goto out;
121 return ERR_PTR(-EINVAL);
122 }
123 151
124 ret = get_prioidx(&cs->prioidx); 152 ret = get_prioidx(&cs->prioidx);
125 if (ret != 0) { 153 if (ret < 0) {
126 pr_warn("No space in priority index array\n"); 154 pr_warn("No space in priority index array\n");
127 kfree(cs); 155 goto out;
128 return ERR_PTR(ret); 156 }
157
158 ret = update_netdev_tables();
159 if (ret < 0) {
160 put_prioidx(cs->prioidx);
161 goto out;
129 } 162 }
130 163
131 return &cs->css; 164 return &cs->css;
165out:
166 kfree(cs);
167 return ERR_PTR(ret);
132} 168}
133 169
134static void cgrp_destroy(struct cgroup *cgrp) 170static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +177,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
141 rtnl_lock(); 177 rtnl_lock();
142 for_each_netdev(&init_net, dev) { 178 for_each_netdev(&init_net, dev) {
143 map = rtnl_dereference(dev->priomap); 179 map = rtnl_dereference(dev->priomap);
144 if (map) 180 if (map && cs->prioidx < map->priomap_len)
145 map->priomap[cs->prioidx] = 0; 181 map->priomap[cs->prioidx] = 0;
146 } 182 }
147 rtnl_unlock(); 183 rtnl_unlock();
@@ -165,7 +201,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
165 rcu_read_lock(); 201 rcu_read_lock();
166 for_each_netdev_rcu(&init_net, dev) { 202 for_each_netdev_rcu(&init_net, dev) {
167 map = rcu_dereference(dev->priomap); 203 map = rcu_dereference(dev->priomap);
168 priority = map ? map->priomap[prioidx] : 0; 204 priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
169 cb->fill(cb, dev->name, priority); 205 cb->fill(cb, dev->name, priority);
170 } 206 }
171 rcu_read_unlock(); 207 rcu_read_unlock();
@@ -198,7 +234,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
198 234
199 /* 235 /*
200 *Separate the devname from the associated priority 236 *Separate the devname from the associated priority
201 *and advance the priostr poitner to the priority value 237 *and advance the priostr pointer to the priority value
202 */ 238 */
203 *priostr = '\0'; 239 *priostr = '\0';
204 priostr++; 240 priostr++;
@@ -220,13 +256,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
220 if (!dev) 256 if (!dev)
221 goto out_free_devname; 257 goto out_free_devname;
222 258
223 update_netdev_tables(); 259 ret = write_update_netdev_table(dev);
224 ret = 0; 260 if (ret < 0)
261 goto out_put_dev;
262
225 rcu_read_lock(); 263 rcu_read_lock();
226 map = rcu_dereference(dev->priomap); 264 map = rcu_dereference(dev->priomap);
227 if (map) 265 if (map)
228 map->priomap[prioidx] = priority; 266 map->priomap[prioidx] = priority;
229 rcu_read_unlock(); 267 rcu_read_unlock();
268
269out_put_dev:
230 dev_put(dev); 270 dev_put(dev);
231 271
232out_free_devname: 272out_free_devname:
@@ -234,6 +274,56 @@ out_free_devname:
234 return ret; 274 return ret;
235} 275}
236 276
277void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
278{
279 struct task_struct *p;
280 char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
281
282 if (!tmp) {
283 pr_warn("Unable to attach cgrp due to alloc failure!\n");
284 return;
285 }
286
287 cgroup_taskset_for_each(p, cgrp, tset) {
288 unsigned int fd;
289 struct fdtable *fdt;
290 struct files_struct *files;
291
292 task_lock(p);
293 files = p->files;
294 if (!files) {
295 task_unlock(p);
296 continue;
297 }
298
299 rcu_read_lock();
300 fdt = files_fdtable(files);
301 for (fd = 0; fd < fdt->max_fds; fd++) {
302 char *path;
303 struct file *file;
304 struct socket *sock;
305 unsigned long s;
306 int rv, err = 0;
307
308 file = fcheck_files(files, fd);
309 if (!file)
310 continue;
311
312 path = d_path(&file->f_path, tmp, PAGE_SIZE);
313 rv = sscanf(path, "socket:[%lu]", &s);
314 if (rv <= 0)
315 continue;
316
317 sock = sock_from_file(file, &err);
318 if (!err)
319 sock_update_netprioidx(sock->sk, p);
320 }
321 rcu_read_unlock();
322 task_unlock(p);
323 }
324 kfree(tmp);
325}
326
237static struct cftype ss_files[] = { 327static struct cftype ss_files[] = {
238 { 328 {
239 .name = "prioidx", 329 .name = "prioidx",
@@ -251,6 +341,7 @@ struct cgroup_subsys net_prio_subsys = {
251 .name = "net_prio", 341 .name = "net_prio",
252 .create = cgrp_create, 342 .create = cgrp_create,
253 .destroy = cgrp_destroy, 343 .destroy = cgrp_destroy,
344 .attach = net_prio_attach,
254#ifdef CONFIG_NETPRIO_CGROUP 345#ifdef CONFIG_NETPRIO_CGROUP
255 .subsys_id = net_prio_subsys_id, 346 .subsys_id = net_prio_subsys_id,
256#endif 347#endif
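
The attach handler added above walks each task's descriptor table and recognizes sockets by the "socket:[inode]" name that d_path() yields for socket pseudo-files. One detail worth flagging: tmp is allocated PATH_MAX bytes but d_path() is passed PAGE_SIZE; the two happen to be equal on common configurations, but the sizes should agree. A minimal userspace analog of the matching step; the /proc scan below is illustrative, not part of the patch:

#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char link[PATH_MAX], target[PATH_MAX];
	struct dirent *de;
	DIR *dir = opendir("/proc/self/fd");

	if (!dir)
		return 1;
	while ((de = readdir(dir)) != NULL) {
		unsigned long ino;
		ssize_t n;

		snprintf(link, sizeof(link), "/proc/self/fd/%s", de->d_name);
		n = readlink(link, target, sizeof(target) - 1);
		if (n < 0)
			continue;
		target[n] = '\0';
		/* same pattern the attach handler matches via d_path() */
		if (sscanf(target, "socket:[%lu]", &ino) == 1)
			printf("fd %s -> socket inode %lu\n", de->d_name, ino);
	}
	closedir(dir);
	return 0;
}
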
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 21318d15bbc3..2c5a0a06c4ce 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -541,19 +541,6 @@ static const int rta_max[RTM_NR_FAMILIES] =
541 [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX, 541 [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
542}; 542};
543 543
544void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
545{
546 struct rtattr *rta;
547 int size = RTA_LENGTH(attrlen);
548
549 rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
550 rta->rta_type = attrtype;
551 rta->rta_len = size;
552 memcpy(RTA_DATA(rta), data, attrlen);
553 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
554}
555EXPORT_SYMBOL(__rta_fill);
556
557int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 544int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
558{ 545{
559 struct sock *rtnl = net->rtnl; 546 struct sock *rtnl = net->rtnl;
@@ -628,7 +615,7 @@ nla_put_failure:
628EXPORT_SYMBOL(rtnetlink_put_metrics); 615EXPORT_SYMBOL(rtnetlink_put_metrics);
629 616
630int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 617int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
631 u32 ts, u32 tsage, long expires, u32 error) 618 long expires, u32 error)
632{ 619{
633 struct rta_cacheinfo ci = { 620 struct rta_cacheinfo ci = {
634 .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse), 621 .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
@@ -636,13 +623,15 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
636 .rta_clntref = atomic_read(&(dst->__refcnt)), 623 .rta_clntref = atomic_read(&(dst->__refcnt)),
637 .rta_error = error, 624 .rta_error = error,
638 .rta_id = id, 625 .rta_id = id,
639 .rta_ts = ts,
640 .rta_tsage = tsage,
641 }; 626 };
642 627
643 if (expires) 628 if (expires) {
644 ci.rta_expires = jiffies_to_clock_t(expires); 629 unsigned long clock;
645 630
631 clock = jiffies_to_clock_t(abs(expires));
632 clock = min_t(unsigned long, clock, INT_MAX);
633 ci.rta_expires = (expires > 0) ? clock : -clock;
634 }
646 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); 635 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
647} 636}
648EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 637EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
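
rta_cacheinfo's rta_expires is a 32-bit field, so the rework above converts the magnitude, clamps it to INT_MAX, and re-applies the sign rather than truncating silently. A standalone sketch of that step, with jiffies_to_clock_t() stubbed out:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for jiffies_to_clock_t(); the real conversion scales by HZ */
static unsigned long ticks_to_clock_t(unsigned long ticks)
{
	return ticks;
}

static int clamp_expires(long expires)
{
	unsigned long clock = ticks_to_clock_t(labs(expires));

	if (clock > INT_MAX)	/* rta_expires is only 32 bits wide */
		clock = INT_MAX;
	return expires > 0 ? (int)clock : -(int)clock;
}

int main(void)
{
	printf("%d %d\n", clamp_expires(5), clamp_expires(-5));   /* 5 -5 */
	return 0;
}
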
@@ -674,6 +663,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
674 } 663 }
675} 664}
676 665
666static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
667{
668 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
669 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
670}
671
677static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, 672static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
678 const struct ifinfomsg *ifm) 673 const struct ifinfomsg *ifm)
679{ 674{
@@ -682,7 +677,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
682 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ 677 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
683 if (ifm->ifi_change) 678 if (ifm->ifi_change)
684 flags = (flags & ifm->ifi_change) | 679 flags = (flags & ifm->ifi_change) |
685 (dev->flags & ~ifm->ifi_change); 680 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
686 681
687 return flags; 682 return flags;
688} 683}
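
rtnl_dev_get_flags() substitutes the user-requested gflags for IFF_PROMISC and IFF_ALLMULTI so userspace sees the flags it set rather than the reference-counted kernel state; the combine step then takes changed bits from the request and unchanged bits from the device. A small sketch of that mask merge, with illustrative values:

#include <stdio.h>

static unsigned int combine_flags(unsigned int dev_flags,
				  unsigned int req_flags,
				  unsigned int change)
{
	if (!change)		/* 0 historically means "change everything" */
		change = ~0U;
	return (req_flags & change) | (dev_flags & ~change);
}

int main(void)
{
	/* set bit 0x2, leave every other bit as the device had it */
	printf("%#x\n", combine_flags(0x1001, 0x2, 0x2));	/* 0x1003 */
	return 0;
}
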
@@ -786,6 +781,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
786 + nla_total_size(4) /* IFLA_LINK */ 781 + nla_total_size(4) /* IFLA_LINK */
787 + nla_total_size(4) /* IFLA_MASTER */ 782 + nla_total_size(4) /* IFLA_MASTER */
788 + nla_total_size(4) /* IFLA_PROMISCUITY */ 783 + nla_total_size(4) /* IFLA_PROMISCUITY */
784 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
785 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
789 + nla_total_size(1) /* IFLA_OPERSTATE */ 786 + nla_total_size(1) /* IFLA_OPERSTATE */
790 + nla_total_size(1) /* IFLA_LINKMODE */ 787 + nla_total_size(1) /* IFLA_LINKMODE */
791 + nla_total_size(ext_filter_mask 788 + nla_total_size(ext_filter_mask
@@ -904,6 +901,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
904 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 901 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
905 nla_put_u32(skb, IFLA_GROUP, dev->group) || 902 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
906 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || 903 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
904 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
905#ifdef CONFIG_RPS
906 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
907#endif
907 (dev->ifindex != dev->iflink && 908 (dev->ifindex != dev->iflink &&
908 nla_put_u32(skb, IFLA_LINK, dev->iflink)) || 909 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
909 (dev->master && 910 (dev->master &&
@@ -1121,6 +1122,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1121 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1122 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1122 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1123 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1123 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 1124 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1125 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1126 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1124}; 1127};
1125EXPORT_SYMBOL(ifla_policy); 1128EXPORT_SYMBOL(ifla_policy);
1126 1129
@@ -1378,6 +1381,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1378 goto errout; 1381 goto errout;
1379 send_addr_notify = 1; 1382 send_addr_notify = 1;
1380 modified = 1; 1383 modified = 1;
1384 add_device_randomness(dev->dev_addr, dev->addr_len);
1381 } 1385 }
1382 1386
1383 if (tb[IFLA_MTU]) { 1387 if (tb[IFLA_MTU]) {
@@ -1639,17 +1643,22 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1639{ 1643{
1640 int err; 1644 int err;
1641 struct net_device *dev; 1645 struct net_device *dev;
1642 unsigned int num_queues = 1; 1646 unsigned int num_tx_queues = 1;
1647 unsigned int num_rx_queues = 1;
1643 1648
1644 if (ops->get_tx_queues) { 1649 if (tb[IFLA_NUM_TX_QUEUES])
1645 err = ops->get_tx_queues(src_net, tb); 1650 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
1646 if (err < 0) 1651 else if (ops->get_num_tx_queues)
1647 goto err; 1652 num_tx_queues = ops->get_num_tx_queues();
1648 num_queues = err; 1653
1649 } 1654 if (tb[IFLA_NUM_RX_QUEUES])
1655 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
1656 else if (ops->get_num_rx_queues)
1657 num_rx_queues = ops->get_num_rx_queues();
1650 1658
1651 err = -ENOMEM; 1659 err = -ENOMEM;
1652 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); 1660 dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
1661 num_tx_queues, num_rx_queues);
1653 if (!dev) 1662 if (!dev)
1654 goto err; 1663 goto err;
1655 1664
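
Queue counts now come from a three-level fallback: an explicit IFLA_NUM_TX_QUEUES or IFLA_NUM_RX_QUEUES attribute wins, a driver-supplied default comes next, and 1 is the last resort. A sketch of that precedence, names illustrative:

#include <stdio.h>

struct link_ops {
	unsigned int (*get_num_tx_queues)(void);
};

static unsigned int pick_tx_queues(const unsigned int *attr,
				   const struct link_ops *ops)
{
	if (attr)			/* explicit attribute wins */
		return *attr;
	if (ops->get_num_tx_queues)	/* then the driver's default */
		return ops->get_num_tx_queues();
	return 1;			/* last resort */
}

static unsigned int eight(void)
{
	return 8;
}

int main(void)
{
	unsigned int want = 4;
	struct link_ops ops = { .get_num_tx_queues = eight };

	printf("%u %u\n", pick_tx_queues(&want, &ops),	/* 4 */
	       pick_tx_queues(NULL, &ops));		/* 8 */
	return 0;
}
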
@@ -2189,7 +2198,7 @@ skip:
2189} 2198}
2190 2199
2191/** 2200/**
2192 * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table. 2201 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
2193 * @nlh: netlink message header 2202 * @nlh: netlink message header
2194 * @dev: netdevice 2203 * @dev: netdevice
2195 * 2204 *
@@ -2366,8 +2375,13 @@ static struct notifier_block rtnetlink_dev_notifier = {
2366static int __net_init rtnetlink_net_init(struct net *net) 2375static int __net_init rtnetlink_net_init(struct net *net)
2367{ 2376{
2368 struct sock *sk; 2377 struct sock *sk;
2369 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 2378 struct netlink_kernel_cfg cfg = {
2370 rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); 2379 .groups = RTNLGRP_MAX,
2380 .input = rtnetlink_rcv,
2381 .cb_mutex = &rtnl_mutex,
2382 };
2383
2384 sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
2371 if (!sk) 2385 if (!sk)
2372 return -ENOMEM; 2386 return -ENOMEM;
2373 net->rtnl = sk; 2387 net->rtnl = sk;
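
netlink_kernel_create() now takes a struct netlink_kernel_cfg instead of a growing list of positional arguments: designated initializers zero whatever a caller does not set, and the signature stays stable as fields are added. A sketch of the same API shape with made-up names:

#include <stddef.h>

struct listener_cfg {
	unsigned int groups;
	void (*input)(void *msg);
	void *cb_mutex;
};

static int listener_create(int proto, const struct listener_cfg *cfg)
{
	unsigned int groups = (cfg && cfg->groups) ? cfg->groups : 32;

	(void)proto;
	(void)groups;
	/* ... allocate and register, applying defaults for unset fields ... */
	return 0;
}

int main(void)
{
	struct listener_cfg cfg = {
		.groups = 16,	/* .input and .cb_mutex default to zero */
	};

	return listener_create(0, &cfg);
}
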
diff --git a/net/core/scm.c b/net/core/scm.c
index 611c5efd4cb0..8f6ccfd68ef4 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -109,25 +109,9 @@ void __scm_destroy(struct scm_cookie *scm)
109 109
110 if (fpl) { 110 if (fpl) {
111 scm->fp = NULL; 111 scm->fp = NULL;
112 if (current->scm_work_list) { 112 for (i=fpl->count-1; i>=0; i--)
113 list_add_tail(&fpl->list, current->scm_work_list); 113 fput(fpl->fp[i]);
114 } else { 114 kfree(fpl);
115 LIST_HEAD(work_list);
116
117 current->scm_work_list = &work_list;
118
119 list_add(&fpl->list, &work_list);
120 while (!list_empty(&work_list)) {
121 fpl = list_first_entry(&work_list, struct scm_fp_list, list);
122
123 list_del(&fpl->list);
124 for (i=fpl->count-1; i>=0; i--)
125 fput(fpl->fp[i]);
126 kfree(fpl);
127 }
128
129 current->scm_work_list = NULL;
130 }
131 } 115 }
132} 116}
133EXPORT_SYMBOL(__scm_destroy); 117EXPORT_SYMBOL(__scm_destroy);
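
The deleted branch was the classic recursion-flattening idiom: the outermost __scm_destroy() drained a per-task work list while re-entrant calls (an fput() here can itself drop an SCM file list) merely appended to it. With final fput now deferred elsewhere, the direct loop suffices. A sketch of the removed idiom in isolation, with illustrative types:

#include <stdlib.h>

struct node {
	struct node *next;	/* children discovered during teardown */
	struct node *work;	/* work-list linkage */
};

static struct node **work_list;	/* per-"task", NULL when not destroying */

static void destroy(struct node *n)
{
	if (work_list) {	/* re-entered: queue and unwind */
		n->work = *work_list;
		*work_list = n;
		return;
	}

	struct node *head = n;	/* outermost call: drain the list */

	n->work = NULL;
	work_list = &head;
	while (head) {
		struct node *cur = head;

		head = cur->work;
		while (cur->next) {	/* may re-enter destroy() */
			struct node *child = cur->next;

			cur->next = child->next;
			destroy(child);
		}
		free(cur);
	}
	work_list = NULL;
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->next = b;
	destroy(a);		/* frees both at constant stack depth */
	return 0;
}
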
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d78671e9d545..fe00d1208167 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -145,6 +145,43 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
145 BUG(); 145 BUG();
146} 146}
147 147
148
149/*
150 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
151 * the caller if emergency pfmemalloc reserves are being used. If it is and
152 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
153 * may be used. Otherwise, the packet data may be discarded until enough
154 * memory is free
155 */
156#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
157 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
158void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
159 bool *pfmemalloc)
160{
161 void *obj;
162 bool ret_pfmemalloc = false;
163
164 /*
165 * Try a regular allocation, when that fails and we're not entitled
166 * to the reserves, fail.
167 */
168 obj = kmalloc_node_track_caller(size,
169 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
170 node);
171 if (obj || !(gfp_pfmemalloc_allowed(flags)))
172 goto out;
173
174 /* Try again but now we are using pfmemalloc reserves */
175 ret_pfmemalloc = true;
176 obj = kmalloc_node_track_caller(size, flags, node);
177
178out:
179 if (pfmemalloc)
180 *pfmemalloc = ret_pfmemalloc;
181
182 return obj;
183}
184
148/* Allocate a new skbuff. We do this ourselves so we can fill in a few 185/* Allocate a new skbuff. We do this ourselves so we can fill in a few
149 * 'private' fields and also do memory statistics to find all the 186 * 'private' fields and also do memory statistics to find all the
150 * [BEEP] leaks. 187 * [BEEP] leaks.
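
__kmalloc_reserve() encodes a two-stage policy: first try with __GFP_NOMEMALLOC so the attempt cannot touch emergency reserves, and only retry with reserves when the context is entitled to them, reporting back which pool the buffer came from. A userspace stand-in for the shape (plain malloc stands in for both attempts):

#include <stdbool.h>
#include <stdlib.h>

static void *alloc_reserve(size_t size, bool may_use_reserves,
			   bool *pfmemalloc)
{
	bool from_reserves = false;
	/* first try: the kernel adds __GFP_NOMEMALLOC | __GFP_NOWARN here */
	void *obj = malloc(size);

	if (!obj && may_use_reserves) {
		/* second try: allowed to dip into emergency reserves */
		from_reserves = true;
		obj = malloc(size);
	}
	if (pfmemalloc)
		*pfmemalloc = from_reserves;	/* caller tags the skb */
	return obj;
}

int main(void)
{
	bool pfmemalloc;
	void *buf = alloc_reserve(256, true, &pfmemalloc);

	free(buf);
	return 0;
}
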
@@ -155,26 +192,33 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
155 * __alloc_skb - allocate a network buffer 192 * __alloc_skb - allocate a network buffer
156 * @size: size to allocate 193 * @size: size to allocate
157 * @gfp_mask: allocation mask 194 * @gfp_mask: allocation mask
158 * @fclone: allocate from fclone cache instead of head cache 195 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
159 * and allocate a cloned (child) skb 196 * instead of head cache and allocate a cloned (child) skb.
197 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
198 * allocations in case the data is required for writeback
160 * @node: numa node to allocate memory on 199 * @node: numa node to allocate memory on
161 * 200 *
162 * Allocate a new &sk_buff. The returned buffer has no headroom and a 201 * Allocate a new &sk_buff. The returned buffer has no headroom and a
163 * tail room of size bytes. The object has a reference count of one. 202 * tail room of at least size bytes. The object has a reference count
164 * The return is the buffer. On a failure the return is %NULL. 203 * of one. The return is the buffer. On a failure the return is %NULL.
165 * 204 *
166 * Buffers may only be allocated from interrupts using a @gfp_mask of 205 * Buffers may only be allocated from interrupts using a @gfp_mask of
167 * %GFP_ATOMIC. 206 * %GFP_ATOMIC.
168 */ 207 */
169struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 208struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
170 int fclone, int node) 209 int flags, int node)
171{ 210{
172 struct kmem_cache *cache; 211 struct kmem_cache *cache;
173 struct skb_shared_info *shinfo; 212 struct skb_shared_info *shinfo;
174 struct sk_buff *skb; 213 struct sk_buff *skb;
175 u8 *data; 214 u8 *data;
215 bool pfmemalloc;
176 216
177 cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; 217 cache = (flags & SKB_ALLOC_FCLONE)
218 ? skbuff_fclone_cache : skbuff_head_cache;
219
220 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
221 gfp_mask |= __GFP_MEMALLOC;
178 222
179 /* Get the HEAD */ 223 /* Get the HEAD */
180 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); 224 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
@@ -189,7 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
189 */ 233 */
190 size = SKB_DATA_ALIGN(size); 234 size = SKB_DATA_ALIGN(size);
191 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 235 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
192 data = kmalloc_node_track_caller(size, gfp_mask, node); 236 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
193 if (!data) 237 if (!data)
194 goto nodata; 238 goto nodata;
195 /* kmalloc(size) might give us more room than requested. 239 /* kmalloc(size) might give us more room than requested.
@@ -207,6 +251,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
207 memset(skb, 0, offsetof(struct sk_buff, tail)); 251 memset(skb, 0, offsetof(struct sk_buff, tail));
208 /* Account for allocated memory : skb + skb->head */ 252 /* Account for allocated memory : skb + skb->head */
209 skb->truesize = SKB_TRUESIZE(size); 253 skb->truesize = SKB_TRUESIZE(size);
254 skb->pfmemalloc = pfmemalloc;
210 atomic_set(&skb->users, 1); 255 atomic_set(&skb->users, 1);
211 skb->head = data; 256 skb->head = data;
212 skb->data = data; 257 skb->data = data;
@@ -222,7 +267,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
222 atomic_set(&shinfo->dataref, 1); 267 atomic_set(&shinfo->dataref, 1);
223 kmemcheck_annotate_variable(shinfo->destructor_arg); 268 kmemcheck_annotate_variable(shinfo->destructor_arg);
224 269
225 if (fclone) { 270 if (flags & SKB_ALLOC_FCLONE) {
226 struct sk_buff *child = skb + 1; 271 struct sk_buff *child = skb + 1;
227 atomic_t *fclone_ref = (atomic_t *) (child + 1); 272 atomic_t *fclone_ref = (atomic_t *) (child + 1);
228 273
@@ -232,6 +277,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
232 atomic_set(fclone_ref, 1); 277 atomic_set(fclone_ref, 1);
233 278
234 child->fclone = SKB_FCLONE_UNAVAILABLE; 279 child->fclone = SKB_FCLONE_UNAVAILABLE;
280 child->pfmemalloc = pfmemalloc;
235 } 281 }
236out: 282out:
237 return skb; 283 return skb;
@@ -296,17 +342,13 @@ EXPORT_SYMBOL(build_skb);
296struct netdev_alloc_cache { 342struct netdev_alloc_cache {
297 struct page *page; 343 struct page *page;
298 unsigned int offset; 344 unsigned int offset;
345 unsigned int pagecnt_bias;
299}; 346};
300static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); 347static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
301 348
302/** 349#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
303 * netdev_alloc_frag - allocate a page fragment 350
304 * @fragsz: fragment size 351static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
305 *
306 * Allocates a frag from a page for receive buffer.
307 * Uses GFP_ATOMIC allocations.
308 */
309void *netdev_alloc_frag(unsigned int fragsz)
310{ 352{
311 struct netdev_alloc_cache *nc; 353 struct netdev_alloc_cache *nc;
312 void *data = NULL; 354 void *data = NULL;
@@ -316,21 +358,42 @@ void *netdev_alloc_frag(unsigned int fragsz)
316 nc = &__get_cpu_var(netdev_alloc_cache); 358 nc = &__get_cpu_var(netdev_alloc_cache);
317 if (unlikely(!nc->page)) { 359 if (unlikely(!nc->page)) {
318refill: 360refill:
319 nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD); 361 nc->page = alloc_page(gfp_mask);
362 if (unlikely(!nc->page))
363 goto end;
364recycle:
365 atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
366 nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
320 nc->offset = 0; 367 nc->offset = 0;
321 } 368 }
322 if (likely(nc->page)) { 369
323 if (nc->offset + fragsz > PAGE_SIZE) { 370 if (nc->offset + fragsz > PAGE_SIZE) {
324 put_page(nc->page); 371 /* avoid unnecessary locked operations if possible */
325 goto refill; 372 if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
326 } 373 atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
327 data = page_address(nc->page) + nc->offset; 374 goto recycle;
328 nc->offset += fragsz; 375 goto refill;
329 get_page(nc->page);
330 } 376 }
377
378 data = page_address(nc->page) + nc->offset;
379 nc->offset += fragsz;
380 nc->pagecnt_bias--;
381end:
331 local_irq_restore(flags); 382 local_irq_restore(flags);
332 return data; 383 return data;
333} 384}
385
386/**
387 * netdev_alloc_frag - allocate a page fragment
388 * @fragsz: fragment size
389 *
390 * Allocates a frag from a page for receive buffer.
391 * Uses GFP_ATOMIC allocations.
392 */
393void *netdev_alloc_frag(unsigned int fragsz)
394{
395 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
396}
334EXPORT_SYMBOL(netdev_alloc_frag); 397EXPORT_SYMBOL(netdev_alloc_frag);
335 398
336/** 399/**
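
The reworked fragment cache pre-charges the page's _count with NETDEV_PAGECNT_BIAS and hands one reference out with every fragment; on wrap-around it subtracts the references it still holds, and if that lands on zero every fragment has already been freed, so the same page is reused without a new allocation. A single-threaded sketch of the bias trick, with no atomics, where put_page() stands in for the real refcount drop (the kernel derives the page with virt_to_head_page()):

#include <stdlib.h>

#define PAGE_SZ	4096
#define BIAS	64

struct page { int refs; char data[PAGE_SZ]; };

static struct page *cur;
static unsigned int off;
static int bias;

static void put_page(struct page *p)
{
	if (--p->refs == 0)		/* last reference frees the page */
		free(p);
}

/* fragsz must not exceed PAGE_SZ; the kernel callers guarantee this */
static void *frag_alloc(unsigned int fragsz)
{
	if (!cur) {
refill:
		cur = malloc(sizeof(*cur));
		if (!cur)
			return NULL;
recycle:
		cur->refs = BIAS;	/* pre-charge the refcount */
		bias = BIAS;
		off = 0;
	}
	if (off + fragsz > PAGE_SZ) {
		cur->refs -= bias;	/* drop the references we never used */
		if (cur->refs == 0)	/* every fragment came back: reuse */
			goto recycle;
		cur = NULL;		/* live fragments keep the old page */
		goto refill;
	}
	bias--;				/* one reference leaves with the frag */
	off += fragsz;
	return cur->data + off - fragsz;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		if (frag_alloc(2048))
			put_page(cur);	/* consumer drops its reference */
	if (cur) {
		cur->refs -= bias;	/* cache teardown */
		if (cur->refs == 0)
			free(cur);
	}
	return 0;
}
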
@@ -353,8 +416,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
353 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + 416 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
354 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 417 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
355 418
356 if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { 419 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
357 void *data = netdev_alloc_frag(fragsz); 420 void *data;
421
422 if (sk_memalloc_socks())
423 gfp_mask |= __GFP_MEMALLOC;
424
425 data = __netdev_alloc_frag(fragsz, gfp_mask);
358 426
359 if (likely(data)) { 427 if (likely(data)) {
360 skb = build_skb(data, fragsz); 428 skb = build_skb(data, fragsz);
@@ -362,7 +430,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
362 put_page(virt_to_head_page(data)); 430 put_page(virt_to_head_page(data));
363 } 431 }
364 } else { 432 } else {
365 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); 433 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
434 SKB_ALLOC_RX, NUMA_NO_NODE);
366 } 435 }
367 if (likely(skb)) { 436 if (likely(skb)) {
368 skb_reserve(skb, NET_SKB_PAD); 437 skb_reserve(skb, NET_SKB_PAD);
@@ -644,6 +713,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
644#if IS_ENABLED(CONFIG_IP_VS) 713#if IS_ENABLED(CONFIG_IP_VS)
645 new->ipvs_property = old->ipvs_property; 714 new->ipvs_property = old->ipvs_property;
646#endif 715#endif
716 new->pfmemalloc = old->pfmemalloc;
647 new->protocol = old->protocol; 717 new->protocol = old->protocol;
648 new->mark = old->mark; 718 new->mark = old->mark;
649 new->skb_iif = old->skb_iif; 719 new->skb_iif = old->skb_iif;
@@ -713,7 +783,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
713} 783}
714EXPORT_SYMBOL_GPL(skb_morph); 784EXPORT_SYMBOL_GPL(skb_morph);
715 785
716/* skb_copy_ubufs - copy userspace skb frags buffers to kernel 786/**
787 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
717 * @skb: the skb to modify 788 * @skb: the skb to modify
718 * @gfp_mask: allocation priority 789 * @gfp_mask: allocation priority
719 * 790 *
@@ -738,7 +809,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
738 u8 *vaddr; 809 u8 *vaddr;
739 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 810 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
740 811
741 page = alloc_page(GFP_ATOMIC); 812 page = alloc_page(gfp_mask);
742 if (!page) { 813 if (!page) {
743 while (head) { 814 while (head) {
744 struct page *next = (struct page *)head->private; 815 struct page *next = (struct page *)head->private;
@@ -756,22 +827,22 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
756 } 827 }
757 828
758 /* skb frags release userspace buffers */ 829 /* skb frags release userspace buffers */
759 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 830 for (i = 0; i < num_frags; i++)
760 skb_frag_unref(skb, i); 831 skb_frag_unref(skb, i);
761 832
762 uarg->callback(uarg); 833 uarg->callback(uarg);
763 834
764 /* skb frags point to kernel buffers */ 835 /* skb frags point to kernel buffers */
765 for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) { 836 for (i = num_frags - 1; i >= 0; i--) {
766 __skb_fill_page_desc(skb, i-1, head, 0, 837 __skb_fill_page_desc(skb, i, head, 0,
767 skb_shinfo(skb)->frags[i - 1].size); 838 skb_shinfo(skb)->frags[i].size);
768 head = (struct page *)head->private; 839 head = (struct page *)head->private;
769 } 840 }
770 841
771 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 842 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
772 return 0; 843 return 0;
773} 844}
774 845EXPORT_SYMBOL_GPL(skb_copy_ubufs);
775 846
776/** 847/**
777 * skb_clone - duplicate an sk_buff 848 * skb_clone - duplicate an sk_buff
@@ -791,10 +862,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
791{ 862{
792 struct sk_buff *n; 863 struct sk_buff *n;
793 864
794 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 865 if (skb_orphan_frags(skb, gfp_mask))
795 if (skb_copy_ubufs(skb, gfp_mask)) 866 return NULL;
796 return NULL;
797 }
798 867
799 n = skb + 1; 868 n = skb + 1;
800 if (skb->fclone == SKB_FCLONE_ORIG && 869 if (skb->fclone == SKB_FCLONE_ORIG &&
@@ -803,6 +872,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
803 n->fclone = SKB_FCLONE_CLONE; 872 n->fclone = SKB_FCLONE_CLONE;
804 atomic_inc(fclone_ref); 873 atomic_inc(fclone_ref);
805 } else { 874 } else {
875 if (skb_pfmemalloc(skb))
876 gfp_mask |= __GFP_MEMALLOC;
877
806 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 878 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
807 if (!n) 879 if (!n)
808 return NULL; 880 return NULL;
@@ -839,6 +911,13 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
839 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 911 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
840} 912}
841 913
914static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
915{
916 if (skb_pfmemalloc(skb))
917 return SKB_ALLOC_RX;
918 return 0;
919}
920
842/** 921/**
843 * skb_copy - create private copy of an sk_buff 922 * skb_copy - create private copy of an sk_buff
844 * @skb: buffer to copy 923 * @skb: buffer to copy
@@ -860,7 +939,8 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
860{ 939{
861 int headerlen = skb_headroom(skb); 940 int headerlen = skb_headroom(skb);
862 unsigned int size = skb_end_offset(skb) + skb->data_len; 941 unsigned int size = skb_end_offset(skb) + skb->data_len;
863 struct sk_buff *n = alloc_skb(size, gfp_mask); 942 struct sk_buff *n = __alloc_skb(size, gfp_mask,
943 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
864 944
865 if (!n) 945 if (!n)
866 return NULL; 946 return NULL;
@@ -895,7 +975,8 @@ EXPORT_SYMBOL(skb_copy);
895struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) 975struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
896{ 976{
897 unsigned int size = skb_headlen(skb) + headroom; 977 unsigned int size = skb_headlen(skb) + headroom;
898 struct sk_buff *n = alloc_skb(size, gfp_mask); 978 struct sk_buff *n = __alloc_skb(size, gfp_mask,
979 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
899 980
900 if (!n) 981 if (!n)
901 goto out; 982 goto out;
@@ -914,12 +995,10 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
914 if (skb_shinfo(skb)->nr_frags) { 995 if (skb_shinfo(skb)->nr_frags) {
915 int i; 996 int i;
916 997
917 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 998 if (skb_orphan_frags(skb, gfp_mask)) {
918 if (skb_copy_ubufs(skb, gfp_mask)) { 999 kfree_skb(n);
919 kfree_skb(n); 1000 n = NULL;
920 n = NULL; 1001 goto out;
921 goto out;
922 }
923 } 1002 }
924 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1003 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
925 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 1004 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -970,8 +1049,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
970 1049
971 size = SKB_DATA_ALIGN(size); 1050 size = SKB_DATA_ALIGN(size);
972 1051
973 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 1052 if (skb_pfmemalloc(skb))
974 gfp_mask); 1053 gfp_mask |= __GFP_MEMALLOC;
1054 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1055 gfp_mask, NUMA_NO_NODE, NULL);
975 if (!data) 1056 if (!data)
976 goto nodata; 1057 goto nodata;
977 size = SKB_WITH_OVERHEAD(ksize(data)); 1058 size = SKB_WITH_OVERHEAD(ksize(data));
@@ -992,10 +1073,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
992 */ 1073 */
993 if (skb_cloned(skb)) { 1074 if (skb_cloned(skb)) {
994 /* copy this zero copy skb frags */ 1075 /* copy this zero copy skb frags */
995 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1076 if (skb_orphan_frags(skb, gfp_mask))
996 if (skb_copy_ubufs(skb, gfp_mask)) 1077 goto nofrags;
997 goto nofrags;
998 }
999 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1078 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1000 skb_frag_ref(skb, i); 1079 skb_frag_ref(skb, i);
1001 1080
@@ -1085,8 +1164,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1085 /* 1164 /*
1086 * Allocate the copy buffer 1165 * Allocate the copy buffer
1087 */ 1166 */
1088 struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, 1167 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1089 gfp_mask); 1168 gfp_mask, skb_alloc_rx_flag(skb),
1169 NUMA_NO_NODE);
1090 int oldheadroom = skb_headroom(skb); 1170 int oldheadroom = skb_headroom(skb);
1091 int head_copy_len, head_copy_off; 1171 int head_copy_len, head_copy_off;
1092 int off; 1172 int off;
@@ -1755,6 +1835,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1755 struct splice_pipe_desc spd = { 1835 struct splice_pipe_desc spd = {
1756 .pages = pages, 1836 .pages = pages,
1757 .partial = partial, 1837 .partial = partial,
1838 .nr_pages_max = MAX_SKB_FRAGS,
1758 .flags = flags, 1839 .flags = flags,
1759 .ops = &sock_pipe_buf_ops, 1840 .ops = &sock_pipe_buf_ops,
1760 .spd_release = sock_spd_release, 1841 .spd_release = sock_spd_release,
@@ -2613,7 +2694,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2613EXPORT_SYMBOL(skb_find_text); 2694EXPORT_SYMBOL(skb_find_text);
2614 2695
2615/** 2696/**
2616 * skb_append_datato_frags: - append the user data to a skb 2697 * skb_append_datato_frags - append the user data to a skb
2617 * @sk: sock structure 2698 * @sk: sock structure
2618 * @skb: skb structure to be appened with user data. 2699 * @skb: skb structure to be appened with user data.
2619 * @getfrag: call back function to be used for getting the user data 2700 * @getfrag: call back function to be used for getting the user data
@@ -2767,8 +2848,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2767 skb_release_head_state(nskb); 2848 skb_release_head_state(nskb);
2768 __skb_push(nskb, doffset); 2849 __skb_push(nskb, doffset);
2769 } else { 2850 } else {
2770 nskb = alloc_skb(hsize + doffset + headroom, 2851 nskb = __alloc_skb(hsize + doffset + headroom,
2771 GFP_ATOMIC); 2852 GFP_ATOMIC, skb_alloc_rx_flag(skb),
2853 NUMA_NO_NODE);
2772 2854
2773 if (unlikely(!nskb)) 2855 if (unlikely(!nskb))
2774 goto err; 2856 goto err;
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e5b71fda6ec..6b654b3ddfda 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@
142static DEFINE_MUTEX(proto_list_mutex); 142static DEFINE_MUTEX(proto_list_mutex);
143static LIST_HEAD(proto_list); 143static LIST_HEAD(proto_list);
144 144
145#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 145#ifdef CONFIG_MEMCG_KMEM
146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
147{ 147{
148 struct proto *proto; 148 struct proto *proto;
@@ -271,6 +271,61 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
271int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 271int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
272EXPORT_SYMBOL(sysctl_optmem_max); 272EXPORT_SYMBOL(sysctl_optmem_max);
273 273
274struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
275EXPORT_SYMBOL_GPL(memalloc_socks);
276
277/**
278 * sk_set_memalloc - sets %SOCK_MEMALLOC
279 * @sk: socket to set it on
280 *
281 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
282 * It's the responsibility of the admin to adjust min_free_kbytes
283 * to meet the requirements
284 */
285void sk_set_memalloc(struct sock *sk)
286{
287 sock_set_flag(sk, SOCK_MEMALLOC);
288 sk->sk_allocation |= __GFP_MEMALLOC;
289 static_key_slow_inc(&memalloc_socks);
290}
291EXPORT_SYMBOL_GPL(sk_set_memalloc);
292
293void sk_clear_memalloc(struct sock *sk)
294{
295 sock_reset_flag(sk, SOCK_MEMALLOC);
296 sk->sk_allocation &= ~__GFP_MEMALLOC;
297 static_key_slow_dec(&memalloc_socks);
298
299 /*
300 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
301 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
302 * it has rmem allocations there is a risk that the user of the
303 * socket cannot make forward progress due to exceeding the rmem
304 * limits. By rights, sk_clear_memalloc() should only be called
305 * on sockets being torn down but warn and reset the accounting if
306 * that assumption breaks.
307 */
308 if (WARN_ON(sk->sk_forward_alloc))
309 sk_mem_reclaim(sk);
310}
311EXPORT_SYMBOL_GPL(sk_clear_memalloc);
312
313int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
314{
315 int ret;
316 unsigned long pflags = current->flags;
317
318 /* these should have been dropped before queueing */
319 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
320
321 current->flags |= PF_MEMALLOC;
322 ret = sk->sk_backlog_rcv(sk, skb);
323 tsk_restore_flags(current, pflags, PF_MEMALLOC);
324
325 return ret;
326}
327EXPORT_SYMBOL(__sk_backlog_rcv);
328
274#if defined(CONFIG_CGROUPS) 329#if defined(CONFIG_CGROUPS)
275#if !defined(CONFIG_NET_CLS_CGROUP) 330#if !defined(CONFIG_NET_CLS_CGROUP)
276int net_cls_subsys_id = -1; 331int net_cls_subsys_id = -1;
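
__sk_backlog_rcv() above brackets the receive path with PF_MEMALLOC and then restores only that one bit, so a flag an outer caller had already set survives. A sketch of the save-set-restore idiom on an illustrative flag word:

#define PF_MEMALLOC_BIT	0x1u

static unsigned int task_flags;

static void restore_flags(unsigned int saved, unsigned int mask)
{
	/* restore only the masked bit; other bits keep their new value */
	task_flags = (task_flags & ~mask) | (saved & mask);
}

static int dummy_rcv(void)
{
	/* allocations made here may dip into emergency reserves */
	return 0;
}

static int backlog_rcv_with_reserves(int (*rcv)(void))
{
	unsigned int saved = task_flags;
	int ret;

	task_flags |= PF_MEMALLOC_BIT;
	ret = rcv();
	restore_flags(saved, PF_MEMALLOC_BIT);	/* outer setter survives */
	return ret;
}

int main(void)
{
	return backlog_rcv_with_reserves(dummy_rcv);
}
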
@@ -353,7 +408,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
353 if (err) 408 if (err)
354 return err; 409 return err;
355 410
356 if (!sk_rmem_schedule(sk, skb->truesize)) { 411 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
357 atomic_inc(&sk->sk_drops); 412 atomic_inc(&sk->sk_drops);
358 return -ENOBUFS; 413 return -ENOBUFS;
359 } 414 }
@@ -1180,12 +1235,12 @@ void sock_update_classid(struct sock *sk)
1180} 1235}
1181EXPORT_SYMBOL(sock_update_classid); 1236EXPORT_SYMBOL(sock_update_classid);
1182 1237
1183void sock_update_netprioidx(struct sock *sk) 1238void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
1184{ 1239{
1185 if (in_interrupt()) 1240 if (in_interrupt())
1186 return; 1241 return;
1187 1242
1188 sk->sk_cgrp_prioidx = task_netprioidx(current); 1243 sk->sk_cgrp_prioidx = task_netprioidx(task);
1189} 1244}
1190EXPORT_SYMBOL_GPL(sock_update_netprioidx); 1245EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1191#endif 1246#endif
@@ -1215,7 +1270,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1215 atomic_set(&sk->sk_wmem_alloc, 1); 1270 atomic_set(&sk->sk_wmem_alloc, 1);
1216 1271
1217 sock_update_classid(sk); 1272 sock_update_classid(sk);
1218 sock_update_netprioidx(sk); 1273 sock_update_netprioidx(sk, current);
1219 } 1274 }
1220 1275
1221 return sk; 1276 return sk;
@@ -1465,6 +1520,11 @@ void sock_rfree(struct sk_buff *skb)
1465} 1520}
1466EXPORT_SYMBOL(sock_rfree); 1521EXPORT_SYMBOL(sock_rfree);
1467 1522
1523void sock_edemux(struct sk_buff *skb)
1524{
1525 sock_put(skb->sk);
1526}
1527EXPORT_SYMBOL(sock_edemux);
1468 1528
1469int sock_i_uid(struct sock *sk) 1529int sock_i_uid(struct sock *sk)
1470{ 1530{
@@ -2154,6 +2214,10 @@ void release_sock(struct sock *sk)
2154 spin_lock_bh(&sk->sk_lock.slock); 2214 spin_lock_bh(&sk->sk_lock.slock);
2155 if (sk->sk_backlog.tail) 2215 if (sk->sk_backlog.tail)
2156 __release_sock(sk); 2216 __release_sock(sk);
2217
2218 if (sk->sk_prot->release_cb)
2219 sk->sk_prot->release_cb(sk);
2220
2157 sk->sk_lock.owned = 0; 2221 sk->sk_lock.owned = 0;
2158 if (waitqueue_active(&sk->sk_lock.wq)) 2222 if (waitqueue_active(&sk->sk_lock.wq))
2159 wake_up(&sk->sk_lock.wq); 2223 wake_up(&sk->sk_lock.wq);
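
The new release_cb hook lets a protocol defer work that arrives while the socket is owned and run it exactly once as the owner releases the lock, before waiters are woken. A minimal model of the shape, names illustrative:

struct lock_obj {
	int owned;
	void (*release_cb)(struct lock_obj *);
};

static void tcp_like_release_cb(struct lock_obj *obj)
{
	/* flush work that was queued while the lock was owned */
	(void)obj;
}

static void obj_release(struct lock_obj *obj)
{
	if (obj->release_cb)
		obj->release_cb(obj);	/* runs before waiters are woken */
	obj->owned = 0;
	/* ... wake up waiters ... */
}

int main(void)
{
	struct lock_obj o = { .owned = 1, .release_cb = tcp_like_release_cb };

	obj_release(&o);
	return o.owned;
}
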
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 5fd146720f39..9d8755e4a7a5 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -4,7 +4,6 @@
4#include <net/netlink.h> 4#include <net/netlink.h>
5#include <net/net_namespace.h> 5#include <net/net_namespace.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/rtnetlink.h>
8#include <net/sock.h> 7#include <net/sock.h>
9 8
10#include <linux/inet_diag.h> 9#include <linux/inet_diag.h>
@@ -35,9 +34,7 @@ EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
35 34
36int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) 35int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
37{ 36{
38 __u32 *mem; 37 u32 mem[SK_MEMINFO_VARS];
39
40 mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
41 38
42 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); 39 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
43 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; 40 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
@@ -46,11 +43,9 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
46 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; 43 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
47 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; 44 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
48 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 45 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
46 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
49 47
50 return 0; 48 return nla_put(skb, attrtype, sizeof(mem), &mem);
51
52rtattr_failure:
53 return -EMSGSIZE;
54} 49}
55EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); 50EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
56 51
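
sock_diag_put_meminfo() now gathers the values in a plain stack array and emits them with one nla_put() instead of reserving message space up front and filling it in place, which also removes the rtattr_failure unwinding. A sketch of the build-then-emit shape with stand-in framing:

#include <string.h>

enum { MI_RMEM_ALLOC, MI_RCVBUF, MI_VARS };

static int put_attr(unsigned char *msg, unsigned int *len, unsigned int cap,
		    const void *data, unsigned int size)
{
	if (*len + size > cap)
		return -90;			/* -EMSGSIZE */
	memcpy(msg + *len, data, size);
	*len += size;
	return 0;
}

static int put_meminfo(unsigned char *msg, unsigned int *len, unsigned int cap,
		       unsigned int rmem, unsigned int rcvbuf)
{
	unsigned int mem[MI_VARS];

	mem[MI_RMEM_ALLOC] = rmem;		/* gather on the stack... */
	mem[MI_RCVBUF] = rcvbuf;
	return put_attr(msg, len, cap, mem, sizeof(mem)); /* ...emit once */
}

int main(void)
{
	unsigned char msg[64];
	unsigned int len = 0;

	return put_meminfo(msg, &len, sizeof(msg), 1, 2) < 0;
}
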
@@ -120,7 +115,7 @@ static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
120static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 115static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
121{ 116{
122 int err; 117 int err;
123 struct sock_diag_req *req = NLMSG_DATA(nlh); 118 struct sock_diag_req *req = nlmsg_data(nlh);
124 const struct sock_diag_handler *hndl; 119 const struct sock_diag_handler *hndl;
125 120
126 if (nlmsg_len(nlh) < sizeof(*req)) 121 if (nlmsg_len(nlh) < sizeof(*req))
@@ -171,19 +166,36 @@ static void sock_diag_rcv(struct sk_buff *skb)
171 mutex_unlock(&sock_diag_mutex); 166 mutex_unlock(&sock_diag_mutex);
172} 167}
173 168
174struct sock *sock_diag_nlsk; 169static int __net_init diag_net_init(struct net *net)
175EXPORT_SYMBOL_GPL(sock_diag_nlsk); 170{
171 struct netlink_kernel_cfg cfg = {
172 .input = sock_diag_rcv,
173 };
174
175 net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG,
176 THIS_MODULE, &cfg);
177 return net->diag_nlsk == NULL ? -ENOMEM : 0;
178}
179
180static void __net_exit diag_net_exit(struct net *net)
181{
182 netlink_kernel_release(net->diag_nlsk);
183 net->diag_nlsk = NULL;
184}
185
186static struct pernet_operations diag_net_ops = {
187 .init = diag_net_init,
188 .exit = diag_net_exit,
189};
176 190
177static int __init sock_diag_init(void) 191static int __init sock_diag_init(void)
178{ 192{
179 sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0, 193 return register_pernet_subsys(&diag_net_ops);
180 sock_diag_rcv, NULL, THIS_MODULE);
181 return sock_diag_nlsk == NULL ? -ENOMEM : 0;
182} 194}
183 195
184static void __exit sock_diag_exit(void) 196static void __exit sock_diag_exit(void)
185{ 197{
186 netlink_kernel_release(sock_diag_nlsk); 198 unregister_pernet_subsys(&diag_net_ops);
187} 199}
188 200
189module_init(sock_diag_init); 201module_init(sock_diag_init);
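
sock_diag moves its netlink socket from a module-wide global into struct net via pernet_operations, so each namespace gets its own socket from .init and tears it down in .exit. A sketch of the paired-lifecycle pattern with stand-in types:

#include <stdlib.h>

struct net { void *diag_nlsk; };

static int diag_net_init(struct net *net)
{
	net->diag_nlsk = malloc(1);	/* stand-in for netlink_kernel_create */
	return net->diag_nlsk ? 0 : -1;
}

static void diag_net_exit(struct net *net)
{
	free(net->diag_nlsk);		/* stand-in for netlink_kernel_release */
	net->diag_nlsk = NULL;
}

static struct pernet_ops {
	int (*init)(struct net *);
	void (*exit)(struct net *);
} diag_ops = { .init = diag_net_init, .exit = diag_net_exit };

int main(void)
{
	struct net ns;

	if (diag_ops.init(&ns))		/* each namespace gets its own socket */
		return 1;
	diag_ops.exit(&ns);
	return 0;
}
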
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 656c7c75b192..81f2bb62dea3 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -28,8 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <net/sock.h> 29#include <net/sock.h>
30 30
31/** 31/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
32 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33 * intended to allow network traffic with differing requirements 32 * intended to allow network traffic with differing requirements
34 * (highly reliable, no drops vs. best effort vs. low latency) to operate 33 * (highly reliable, no drops vs. best effort vs. low latency) to operate
35 * and co-exist on Ethernet. Current DCB features are: 34 * and co-exist on Ethernet. Current DCB features are:
@@ -196,92 +195,66 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
196static LIST_HEAD(dcb_app_list); 195static LIST_HEAD(dcb_app_list);
197static DEFINE_SPINLOCK(dcb_lock); 196static DEFINE_SPINLOCK(dcb_lock);
198 197
199/* standard netlink reply call */ 198static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
200static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, 199 u32 flags, struct nlmsghdr **nlhp)
201 u32 seq, u16 flags)
202{ 200{
203 struct sk_buff *dcbnl_skb; 201 struct sk_buff *skb;
204 struct dcbmsg *dcb; 202 struct dcbmsg *dcb;
205 struct nlmsghdr *nlh; 203 struct nlmsghdr *nlh;
206 int ret = -EINVAL;
207 204
208 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 205 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
209 if (!dcbnl_skb) 206 if (!skb)
210 return ret; 207 return NULL;
211 208
212 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags); 209 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
210 BUG_ON(!nlh);
213 211
214 dcb = NLMSG_DATA(nlh); 212 dcb = nlmsg_data(nlh);
215 dcb->dcb_family = AF_UNSPEC; 213 dcb->dcb_family = AF_UNSPEC;
216 dcb->cmd = cmd; 214 dcb->cmd = cmd;
217 dcb->dcb_pad = 0; 215 dcb->dcb_pad = 0;
218 216
219 ret = nla_put_u8(dcbnl_skb, attr, value); 217 if (nlhp)
220 if (ret) 218 *nlhp = nlh;
221 goto err;
222 219
223 /* end the message, assign the nlmsg_len. */ 220 return skb;
224 nlmsg_end(dcbnl_skb, nlh);
225 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
226 if (ret)
227 return -EINVAL;
228
229 return 0;
230nlmsg_failure:
231err:
232 kfree_skb(dcbnl_skb);
233 return ret;
234} 221}
235 222
236static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb, 223static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
237 u32 pid, u32 seq, u16 flags) 224 u32 seq, struct nlattr **tb, struct sk_buff *skb)
238{ 225{
239 int ret = -EINVAL;
240
241 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ 226 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
242 if (!netdev->dcbnl_ops->getstate) 227 if (!netdev->dcbnl_ops->getstate)
243 return ret; 228 return -EOPNOTSUPP;
244
245 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
246 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
247 229
248 return ret; 230 return nla_put_u8(skb, DCB_ATTR_STATE,
231 netdev->dcbnl_ops->getstate(netdev));
249} 232}
250 233
251static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, 234static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
252 u32 pid, u32 seq, u16 flags) 235 u32 seq, struct nlattr **tb, struct sk_buff *skb)
253{ 236{
254 struct sk_buff *dcbnl_skb;
255 struct nlmsghdr *nlh;
256 struct dcbmsg *dcb;
257 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; 237 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
258 u8 value; 238 u8 value;
259 int ret = -EINVAL; 239 int ret;
260 int i; 240 int i;
261 int getall = 0; 241 int getall = 0;
262 242
263 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg) 243 if (!tb[DCB_ATTR_PFC_CFG])
264 return ret; 244 return -EINVAL;
245
246 if (!netdev->dcbnl_ops->getpfccfg)
247 return -EOPNOTSUPP;
265 248
266 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 249 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
267 tb[DCB_ATTR_PFC_CFG], 250 tb[DCB_ATTR_PFC_CFG],
268 dcbnl_pfc_up_nest); 251 dcbnl_pfc_up_nest);
269 if (ret) 252 if (ret)
270 goto err_out; 253 return ret;
271
272 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
273 if (!dcbnl_skb)
274 goto err_out;
275
276 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
277
278 dcb = NLMSG_DATA(nlh);
279 dcb->dcb_family = AF_UNSPEC;
280 dcb->cmd = DCB_CMD_PFC_GCFG;
281 254
282 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG); 255 nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
283 if (!nest) 256 if (!nest)
284 goto err; 257 return -EMSGSIZE;
285 258
286 if (data[DCB_PFC_UP_ATTR_ALL]) 259 if (data[DCB_PFC_UP_ATTR_ALL])
287 getall = 1; 260 getall = 1;
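
The dcbnl rework above replaces per-handler message construction with one shape: dcbnl_newmsg() builds the header, each handler only appends attributes into the caller's skb and returns an errno, and a single dispatcher finalizes or frees the reply. A compact model of that fill-don't-send split; everything below is an illustrative stand-in, not the netlink API:

#include <stdlib.h>

struct msg { unsigned char buf[64]; unsigned int len; };

static struct msg *msg_new(unsigned char cmd)
{
	struct msg *m = calloc(1, sizeof(*m));

	if (m)
		m->buf[m->len++] = cmd;	/* stand-in for the dcbmsg header */
	return m;
}

static int msg_append_u8(struct msg *m, unsigned char attr, unsigned char val)
{
	if (m->len + 2 > sizeof(m->buf))
		return -90;		/* -EMSGSIZE; dispatcher cleans up */
	m->buf[m->len++] = attr;
	m->buf[m->len++] = val;
	return 0;
}

static int getstate_handler(struct msg *reply)
{
	/* a handler only fills attributes and reports errors */
	return msg_append_u8(reply, 1, 1);
}

static int dispatch(unsigned char cmd, int (*handler)(struct msg *))
{
	struct msg *reply = msg_new(cmd);
	int ret;

	if (!reply)
		return -12;		/* -ENOMEM */
	ret = handler(reply);
	/* on success a real dispatcher would finalize and unicast here */
	free(reply);
	return ret;
}

int main(void)
{
	return dispatch(2, getstate_handler) < 0;
}
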
@@ -292,103 +265,53 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
292 265
293 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, 266 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
294 &value); 267 &value);
295 ret = nla_put_u8(dcbnl_skb, i, value); 268 ret = nla_put_u8(skb, i, value);
296
297 if (ret) { 269 if (ret) {
298 nla_nest_cancel(dcbnl_skb, nest); 270 nla_nest_cancel(skb, nest);
299 goto err; 271 return ret;
300 } 272 }
301 } 273 }
302 nla_nest_end(dcbnl_skb, nest); 274 nla_nest_end(skb, nest);
303
304 nlmsg_end(dcbnl_skb, nlh);
305
306 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
307 if (ret)
308 goto err_out;
309 275
310 return 0; 276 return 0;
311nlmsg_failure:
312err:
313 kfree_skb(dcbnl_skb);
314err_out:
315 return -EINVAL;
316} 277}
317 278
318static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, 279static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
319 u32 pid, u32 seq, u16 flags) 280 u32 seq, struct nlattr **tb, struct sk_buff *skb)
320{ 281{
321 struct sk_buff *dcbnl_skb;
322 struct nlmsghdr *nlh;
323 struct dcbmsg *dcb;
324 u8 perm_addr[MAX_ADDR_LEN]; 282 u8 perm_addr[MAX_ADDR_LEN];
325 int ret = -EINVAL;
326 283
327 if (!netdev->dcbnl_ops->getpermhwaddr) 284 if (!netdev->dcbnl_ops->getpermhwaddr)
328 return ret; 285 return -EOPNOTSUPP;
329
330 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
331 if (!dcbnl_skb)
332 goto err_out;
333
334 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
335
336 dcb = NLMSG_DATA(nlh);
337 dcb->dcb_family = AF_UNSPEC;
338 dcb->cmd = DCB_CMD_GPERM_HWADDR;
339 286
340 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); 287 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
341 288
342 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), 289 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
343 perm_addr);
344
345 nlmsg_end(dcbnl_skb, nlh);
346
347 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
348 if (ret)
349 goto err_out;
350
351 return 0;
352
353nlmsg_failure:
354 kfree_skb(dcbnl_skb);
355err_out:
356 return -EINVAL;
357} 290}
358 291
359static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, 292static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
360 u32 pid, u32 seq, u16 flags) 293 u32 seq, struct nlattr **tb, struct sk_buff *skb)
361{ 294{
362 struct sk_buff *dcbnl_skb;
363 struct nlmsghdr *nlh;
364 struct dcbmsg *dcb;
365 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; 295 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
366 u8 value; 296 u8 value;
367 int ret = -EINVAL; 297 int ret;
368 int i; 298 int i;
369 int getall = 0; 299 int getall = 0;
370 300
371 if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap) 301 if (!tb[DCB_ATTR_CAP])
372 return ret; 302 return -EINVAL;
303
304 if (!netdev->dcbnl_ops->getcap)
305 return -EOPNOTSUPP;
373 306
374 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], 307 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
375 dcbnl_cap_nest); 308 dcbnl_cap_nest);
376 if (ret) 309 if (ret)
377 goto err_out; 310 return ret;
378
379 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
380 if (!dcbnl_skb)
381 goto err_out;
382
383 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
384
385 dcb = NLMSG_DATA(nlh);
386 dcb->dcb_family = AF_UNSPEC;
387 dcb->cmd = DCB_CMD_GCAP;
388 311
389 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP); 312 nest = nla_nest_start(skb, DCB_ATTR_CAP);
390 if (!nest) 313 if (!nest)
391 goto err; 314 return -EMSGSIZE;
392 315
393 if (data[DCB_CAP_ATTR_ALL]) 316 if (data[DCB_CAP_ATTR_ALL])
394 getall = 1; 317 getall = 1;
@@ -398,69 +321,41 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
398 continue; 321 continue;
399 322
400 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { 323 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
401 ret = nla_put_u8(dcbnl_skb, i, value); 324 ret = nla_put_u8(skb, i, value);
402
403 if (ret) { 325 if (ret) {
404 nla_nest_cancel(dcbnl_skb, nest); 326 nla_nest_cancel(skb, nest);
405 goto err; 327 return ret;
406 } 328 }
407 } 329 }
408 } 330 }
409 nla_nest_end(dcbnl_skb, nest); 331 nla_nest_end(skb, nest);
410
411 nlmsg_end(dcbnl_skb, nlh);
412
413 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
414 if (ret)
415 goto err_out;
416 332
417 return 0; 333 return 0;
418nlmsg_failure:
419err:
420 kfree_skb(dcbnl_skb);
421err_out:
422 return -EINVAL;
423} 334}
424 335
425static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, 336static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
426 u32 pid, u32 seq, u16 flags) 337 u32 seq, struct nlattr **tb, struct sk_buff *skb)
427{ 338{
428 struct sk_buff *dcbnl_skb;
429 struct nlmsghdr *nlh;
430 struct dcbmsg *dcb;
431 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; 339 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
432 u8 value; 340 u8 value;
433 int ret = -EINVAL; 341 int ret;
434 int i; 342 int i;
435 int getall = 0; 343 int getall = 0;
436 344
437 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs) 345 if (!tb[DCB_ATTR_NUMTCS])
438 return ret; 346 return -EINVAL;
347
348 if (!netdev->dcbnl_ops->getnumtcs)
349 return -EOPNOTSUPP;
439 350
440 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 351 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
441 dcbnl_numtcs_nest); 352 dcbnl_numtcs_nest);
442 if (ret) { 353 if (ret)
443 ret = -EINVAL; 354 return ret;
444 goto err_out;
445 }
446
447 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
448 if (!dcbnl_skb) {
449 ret = -EINVAL;
450 goto err_out;
451 }
452
453 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
454
455 dcb = NLMSG_DATA(nlh);
456 dcb->dcb_family = AF_UNSPEC;
457 dcb->cmd = DCB_CMD_GNUMTCS;
458 355
459 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS); 356 nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
460 if (!nest) { 357 if (!nest)
461 ret = -EINVAL; 358 return -EMSGSIZE;
462 goto err;
463 }
464 359
465 if (data[DCB_NUMTCS_ATTR_ALL]) 360 if (data[DCB_NUMTCS_ATTR_ALL])
466 getall = 1; 361 getall = 1;
@@ -471,53 +366,37 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
471 366
472 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); 367 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
473 if (!ret) { 368 if (!ret) {
474 ret = nla_put_u8(dcbnl_skb, i, value); 369 ret = nla_put_u8(skb, i, value);
475
476 if (ret) { 370 if (ret) {
477 nla_nest_cancel(dcbnl_skb, nest); 371 nla_nest_cancel(skb, nest);
478 ret = -EINVAL; 372 return ret;
479 goto err;
480 } 373 }
481 } else { 374 } else
482 goto err; 375 return -EINVAL;
483 }
484 }
485 nla_nest_end(dcbnl_skb, nest);
486
487 nlmsg_end(dcbnl_skb, nlh);
488
489 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
490 if (ret) {
491 ret = -EINVAL;
492 goto err_out;
493 } 376 }
377 nla_nest_end(skb, nest);
494 378
495 return 0; 379 return 0;
496nlmsg_failure:
497err:
498 kfree_skb(dcbnl_skb);
499err_out:
500 return ret;
501} 380}
502 381
503static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb, 382static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
504 u32 pid, u32 seq, u16 flags) 383 u32 seq, struct nlattr **tb, struct sk_buff *skb)
505{ 384{
506 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; 385 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
507 int ret = -EINVAL; 386 int ret;
508 u8 value; 387 u8 value;
509 int i; 388 int i;
510 389
511 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs) 390 if (!tb[DCB_ATTR_NUMTCS])
512 return ret; 391 return -EINVAL;
392
393 if (!netdev->dcbnl_ops->setnumtcs)
394 return -EOPNOTSUPP;
513 395
514 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 396 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
515 dcbnl_numtcs_nest); 397 dcbnl_numtcs_nest);
516 398 if (ret)
517 if (ret) { 399 return ret;
518 ret = -EINVAL;
519 goto err;
520 }
521 400
522 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 401 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
523 if (data[i] == NULL) 402 if (data[i] == NULL)
@@ -526,84 +405,68 @@ static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
526 value = nla_get_u8(data[i]); 405 value = nla_get_u8(data[i]);
527 406
528 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); 407 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
529
530 if (ret) 408 if (ret)
531 goto operr; 409 break;
532 } 410 }
533 411
534operr: 412 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
535 ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
536 DCB_ATTR_NUMTCS, pid, seq, flags);
537
538err:
539 return ret;
540} 413}
541 414
542static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb, 415static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
543 u32 pid, u32 seq, u16 flags) 416 u32 seq, struct nlattr **tb, struct sk_buff *skb)
544{ 417{
545 int ret = -EINVAL;
546
547 if (!netdev->dcbnl_ops->getpfcstate) 418 if (!netdev->dcbnl_ops->getpfcstate)
548 return ret; 419 return -EOPNOTSUPP;
549
550 ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
551 DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
552 pid, seq, flags);
553 420
554 return ret; 421 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
422 netdev->dcbnl_ops->getpfcstate(netdev));
555} 423}
556 424
557static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb, 425static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
558 u32 pid, u32 seq, u16 flags) 426 u32 seq, struct nlattr **tb, struct sk_buff *skb)
559{ 427{
560 int ret = -EINVAL;
561 u8 value; 428 u8 value;
562 429
563 if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate) 430 if (!tb[DCB_ATTR_PFC_STATE])
564 return ret; 431 return -EINVAL;
432
433 if (!netdev->dcbnl_ops->setpfcstate)
434 return -EOPNOTSUPP;
565 435
566 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); 436 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
567 437
568 netdev->dcbnl_ops->setpfcstate(netdev, value); 438 netdev->dcbnl_ops->setpfcstate(netdev, value);
569 439
570 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE, 440 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
571 pid, seq, flags);
572
573 return ret;
574} 441}
575 442
576static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, 443static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
577 u32 pid, u32 seq, u16 flags) 444 u32 seq, struct nlattr **tb, struct sk_buff *skb)
578{ 445{
579 struct sk_buff *dcbnl_skb;
580 struct nlmsghdr *nlh;
581 struct dcbmsg *dcb;
582 struct nlattr *app_nest; 446 struct nlattr *app_nest;
583 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 447 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
584 u16 id; 448 u16 id;
585 u8 up, idtype; 449 u8 up, idtype;
586 int ret = -EINVAL; 450 int ret;
587 451
588 if (!tb[DCB_ATTR_APP]) 452 if (!tb[DCB_ATTR_APP])
589 goto out; 453 return -EINVAL;
590 454
591 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 455 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
592 dcbnl_app_nest); 456 dcbnl_app_nest);
593 if (ret) 457 if (ret)
594 goto out; 458 return ret;
595 459
596 ret = -EINVAL;
597 /* all must be non-null */ 460 /* all must be non-null */
598 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 461 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
599 (!app_tb[DCB_APP_ATTR_ID])) 462 (!app_tb[DCB_APP_ATTR_ID]))
600 goto out; 463 return -EINVAL;
601 464
602 /* either by eth type or by socket number */ 465 /* either by eth type or by socket number */
603 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 466 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
604 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 467 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
605 (idtype != DCB_APP_IDTYPE_PORTNUM)) 468 (idtype != DCB_APP_IDTYPE_PORTNUM))
606 goto out; 469 return -EINVAL;
607 470
608 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 471 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
609 472
@@ -617,138 +480,106 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
 		up = dcb_getapp(netdev, &app);
 	}
 
-	/* send this back */
-	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!dcbnl_skb)
-		goto out;
-
-	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_GAPP;
-
-	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
 	if (!app_nest)
-		goto out_cancel;
+		return -EMSGSIZE;
 
-	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
+	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
 
-	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
+	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
 	if (ret)
 		goto out_cancel;
 
-	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
+	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
 	if (ret)
 		goto out_cancel;
 
-	nla_nest_end(dcbnl_skb, app_nest);
-	nlmsg_end(dcbnl_skb, nlh);
-
-	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
-	if (ret)
-		goto nlmsg_failure;
+	nla_nest_end(skb, app_nest);
 
-	goto out;
+	return 0;
 
 out_cancel:
-	nla_nest_cancel(dcbnl_skb, app_nest);
-nlmsg_failure:
-	kfree_skb(dcbnl_skb);
-out:
+	nla_nest_cancel(skb, app_nest);
 	return ret;
 }
 
-static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
-			u32 pid, u32 seq, u16 flags)
+static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
+			u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	int err, ret = -EINVAL;
+	int ret;
 	u16 id;
 	u8 up, idtype;
 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
 
 	if (!tb[DCB_ATTR_APP])
-		goto out;
+		return -EINVAL;
 
 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
 			       dcbnl_app_nest);
 	if (ret)
-		goto out;
+		return ret;
 
-	ret = -EINVAL;
 	/* all must be non-null */
 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
 	    (!app_tb[DCB_APP_ATTR_ID]) ||
 	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
-		goto out;
+		return -EINVAL;
 
 	/* either by eth type or by socket number */
 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
-		goto out;
+		return -EINVAL;
 
 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
 	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
 
 	if (netdev->dcbnl_ops->setapp) {
-		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
 	} else {
 		struct dcb_app app;
 		app.selector = idtype;
 		app.protocol = id;
 		app.priority = up;
-		err = dcb_setapp(netdev, &app);
+		ret = dcb_setapp(netdev, &app);
 	}
 
-	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
-			  pid, seq, flags);
+	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
 	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
-out:
+
 	return ret;
 }
 
-static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     struct nlattr **tb, struct sk_buff *skb, int dir)
 {
-	struct sk_buff *dcbnl_skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	struct nlattr *pg_nest, *param_nest, *data;
 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
 	u8 prio, pgid, tc_pct, up_map;
-	int ret = -EINVAL;
+	int ret;
 	int getall = 0;
 	int i;
 
-	if (!tb[DCB_ATTR_PG_CFG] ||
-	    !netdev->dcbnl_ops->getpgtccfgtx ||
+	if (!tb[DCB_ATTR_PG_CFG])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->getpgtccfgtx ||
 	    !netdev->dcbnl_ops->getpgtccfgrx ||
 	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
 	    !netdev->dcbnl_ops->getpgbwgcfgrx)
-		return ret;
+		return -EOPNOTSUPP;
 
 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
 			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
-
 	if (ret)
-		goto err_out;
-
-	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!dcbnl_skb)
-		goto err_out;
-
-	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
+		return ret;
 
-	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
+	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
 	if (!pg_nest)
-		goto err;
+		return -EMSGSIZE;
 
 	if (pg_tb[DCB_PG_ATTR_TC_ALL])
 		getall = 1;
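The GET paths all build their payload with the same netlink nesting idiom seen in dcbnl_getapp() above: open a nested attribute, emit the members, then either close the nest or cancel it so the message rolls back to a consistent state. The skeleton, with the attribute names used above:

	struct nlattr *nest;

	nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!nest)
		return -EMSGSIZE;		/* no room left in the skb */

	if (nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype) ||
	    nla_put_u16(skb, DCB_APP_ATTR_ID, id)) {
		nla_nest_cancel(skb, nest);	/* trim the partial nest away */
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);		/* patch the final nest length */
	return 0;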
@@ -766,7 +597,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
 		if (ret)
 			goto err_pg;
 
-		param_nest = nla_nest_start(dcbnl_skb, i);
+		param_nest = nla_nest_start(skb, i);
 		if (!param_nest)
 			goto err_pg;
 
@@ -789,33 +620,33 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
 
 		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
-			ret = nla_put_u8(dcbnl_skb,
+			ret = nla_put_u8(skb,
 					 DCB_TC_ATTR_PARAM_PGID, pgid);
 			if (ret)
 				goto err_param;
 		}
 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
-			ret = nla_put_u8(dcbnl_skb,
+			ret = nla_put_u8(skb,
 					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
 			if (ret)
 				goto err_param;
 		}
 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
-			ret = nla_put_u8(dcbnl_skb,
+			ret = nla_put_u8(skb,
 					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
 			if (ret)
 				goto err_param;
 		}
 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
-			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
+			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
 					 tc_pct);
 			if (ret)
 				goto err_param;
 		}
-		nla_nest_end(dcbnl_skb, param_nest);
+		nla_nest_end(skb, param_nest);
 	}
 
 	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
@@ -838,80 +669,71 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
 			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
 		}
-		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
-
+		ret = nla_put_u8(skb, i, tc_pct);
 		if (ret)
 			goto err_pg;
 	}
 
-	nla_nest_end(dcbnl_skb, pg_nest);
-
-	nlmsg_end(dcbnl_skb, nlh);
-
-	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
-	if (ret)
-		goto err_out;
+	nla_nest_end(skb, pg_nest);
 
 	return 0;
 
 err_param:
-	nla_nest_cancel(dcbnl_skb, param_nest);
+	nla_nest_cancel(skb, param_nest);
 err_pg:
-	nla_nest_cancel(dcbnl_skb, pg_nest);
-nlmsg_failure:
-err:
-	kfree_skb(dcbnl_skb);
-err_out:
-	ret = -EINVAL;
-	return ret;
+	nla_nest_cancel(skb, pg_nest);
+
+	return -EMSGSIZE;
 }
 
-static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
+	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
 }
 
-static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
+	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
 }
 
-static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
+static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
+			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	int ret = -EINVAL;
 	u8 value;
 
-	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
-		return ret;
+	if (!tb[DCB_ATTR_STATE])
+		return -EINVAL;
 
-	value = nla_get_u8(tb[DCB_ATTR_STATE]);
+	if (!netdev->dcbnl_ops->setstate)
+		return -EOPNOTSUPP;
 
-	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
-			  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
-			  pid, seq, flags);
+	value = nla_get_u8(tb[DCB_ATTR_STATE]);
 
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_STATE,
+			  netdev->dcbnl_ops->setstate(netdev, value));
 }
 
-static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
-			   u32 pid, u32 seq, u16 flags)
+static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
 	int i;
-	int ret = -EINVAL;
+	int ret;
 	u8 value;
 
-	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
-		return ret;
+	if (!tb[DCB_ATTR_PFC_CFG])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->setpfccfg)
+		return -EOPNOTSUPP;
 
 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
 			       tb[DCB_ATTR_PFC_CFG],
 			       dcbnl_pfc_up_nest);
 	if (ret)
-		goto err;
+		return ret;
 
 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
 		if (data[i] == NULL)
@@ -921,50 +743,53 @@ static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
 				data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
 	}
 
-	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
-			  pid, seq, flags);
-err:
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
 }
 
-static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
-			u32 pid, u32 seq, u16 flags)
+static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
+			u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	int ret = -EINVAL;
+	int ret;
 
-	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
-		return ret;
+	if (!tb[DCB_ATTR_SET_ALL])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->setall)
+		return -EOPNOTSUPP;
 
-	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
-			  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
+	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
+			 netdev->dcbnl_ops->setall(netdev));
 	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
 
 	return ret;
 }
 
-static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
+			     int dir)
 {
 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
-	int ret = -EINVAL;
+	int ret;
 	int i;
 	u8 pgid;
 	u8 up_map;
 	u8 prio;
 	u8 tc_pct;
 
-	if (!tb[DCB_ATTR_PG_CFG] ||
-	    !netdev->dcbnl_ops->setpgtccfgtx ||
+	if (!tb[DCB_ATTR_PG_CFG])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->setpgtccfgtx ||
 	    !netdev->dcbnl_ops->setpgtccfgrx ||
 	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
 	    !netdev->dcbnl_ops->setpgbwgcfgrx)
-		return ret;
+		return -EOPNOTSUPP;
 
 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
 			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
 	if (ret)
-		goto err;
+		return ret;
 
 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
 		if (!pg_tb[i])
@@ -973,7 +798,7 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
 				       pg_tb[i], dcbnl_tc_param_nest);
 		if (ret)
-			goto err;
+			return ret;
 
 		pgid = DCB_ATTR_VALUE_UNDEFINED;
 		prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1026,63 +851,47 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
 		}
 	}
 
-	ret = dcbnl_reply(0, RTM_SETDCB,
-			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
-			  DCB_ATTR_PG_CFG, pid, seq, flags);
-
-err:
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
 }
 
-static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
+	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
 }
 
-static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
-			     u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
+	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
 }
 
-static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
-			    u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	struct sk_buff *dcbnl_skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	struct nlattr *bcn_nest;
 	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
 	u8 value_byte;
 	u32 value_integer;
-	int ret = -EINVAL;
+	int ret;
 	bool getall = false;
 	int i;
 
-	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
+	if (!tb[DCB_ATTR_BCN])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->getbcnrp ||
 	    !netdev->dcbnl_ops->getbcncfg)
-		return ret;
+		return -EOPNOTSUPP;
 
 	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
 			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
-
 	if (ret)
-		goto err_out;
-
-	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!dcbnl_skb)
-		goto err_out;
-
-	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_BCN_GCFG;
+		return ret;
 
-	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
+	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
 	if (!bcn_nest)
-		goto err;
+		return -EMSGSIZE;
 
 	if (bcn_tb[DCB_BCN_ATTR_ALL])
 		getall = true;
@@ -1093,7 +902,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
 
 		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
 					    &value_byte);
-		ret = nla_put_u8(dcbnl_skb, i, value_byte);
+		ret = nla_put_u8(skb, i, value_byte);
 		if (ret)
 			goto err_bcn;
 	}
@@ -1104,49 +913,41 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
 
 		netdev->dcbnl_ops->getbcncfg(netdev, i,
 					     &value_integer);
-		ret = nla_put_u32(dcbnl_skb, i, value_integer);
+		ret = nla_put_u32(skb, i, value_integer);
 		if (ret)
 			goto err_bcn;
 	}
 
-	nla_nest_end(dcbnl_skb, bcn_nest);
-
-	nlmsg_end(dcbnl_skb, nlh);
-
-	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
-	if (ret)
-		goto err_out;
+	nla_nest_end(skb, bcn_nest);
 
 	return 0;
 
 err_bcn:
-	nla_nest_cancel(dcbnl_skb, bcn_nest);
-nlmsg_failure:
-err:
-	kfree_skb(dcbnl_skb);
-err_out:
-	ret = -EINVAL;
+	nla_nest_cancel(skb, bcn_nest);
 	return ret;
 }
 
-static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
-			    u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
 	int i;
-	int ret = -EINVAL;
+	int ret;
 	u8 value_byte;
 	u32 value_int;
 
-	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
+	if (!tb[DCB_ATTR_BCN])
+		return -EINVAL;
+
+	if (!netdev->dcbnl_ops->setbcncfg ||
 	    !netdev->dcbnl_ops->setbcnrp)
-		return ret;
+		return -EOPNOTSUPP;
 
 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
 			       tb[DCB_ATTR_BCN],
 			       dcbnl_pfc_up_nest);
 	if (ret)
-		goto err;
+		return ret;
 
 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
 		if (data[i] == NULL)
@@ -1164,10 +965,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
 					      i, value_int);
 	}
 
-	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
-			  pid, seq, flags);
-err:
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
 }
 
 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
@@ -1233,20 +1031,21 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 	struct dcb_app_type *itr;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
 	int dcbx;
-	int err = -EMSGSIZE;
+	int err;
 
 	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
-		goto nla_put_failure;
+		return -EMSGSIZE;
+
 	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
 	if (!ieee)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	if (ops->ieee_getets) {
 		struct ieee_ets ets;
 		err = ops->ieee_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	if (ops->ieee_getmaxrate) {
@@ -1256,7 +1055,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
 				      sizeof(maxrate), &maxrate);
 			if (err)
-				goto nla_put_failure;
+				return -EMSGSIZE;
 		}
 	}
 
@@ -1265,12 +1064,12 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 		err = ops->ieee_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
 	if (!app)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	spin_lock(&dcb_lock);
 	list_for_each_entry(itr, &dcb_app_list, list) {
@@ -1279,7 +1078,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 					 &itr->app);
 			if (err) {
 				spin_unlock(&dcb_lock);
-				goto nla_put_failure;
+				return -EMSGSIZE;
 			}
 		}
 	}
@@ -1298,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 		err = ops->ieee_peer_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	if (ops->ieee_peer_getpfc) {
@@ -1306,7 +1105,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 		err = ops->ieee_peer_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1315,20 +1114,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 					   DCB_ATTR_IEEE_APP_UNSPEC,
 					   DCB_ATTR_IEEE_APP);
 		if (err)
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	nla_nest_end(skb, ieee);
 	if (dcbx >= 0) {
 		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
 		if (err)
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 
 	return 0;
-
-nla_put_failure:
-	return err;
 }
 
 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
@@ -1340,13 +1136,13 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
 	struct nlattr *pg = nla_nest_start(skb, i);
 
 	if (!pg)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
 		struct nlattr *tc_nest = nla_nest_start(skb, i);
 
 		if (!tc_nest)
-			goto nla_put_failure;
+			return -EMSGSIZE;
 
 		pgid = DCB_ATTR_VALUE_UNDEFINED;
 		prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1364,7 +1160,7 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 		nla_nest_end(skb, tc_nest);
 	}
 
@@ -1378,13 +1174,10 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
 			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
 					   &tc_pct);
 		if (nla_put_u8(skb, i, tc_pct))
-			goto nla_put_failure;
+			return -EMSGSIZE;
 	}
 	nla_nest_end(skb, pg);
 	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
 }
 
 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
@@ -1531,27 +1324,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
 	int err;
 
 	if (!ops)
 		return -EOPNOTSUPP;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
 	if (!skb)
 		return -ENOBUFS;
 
-	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
-	if (nlh == NULL) {
-		nlmsg_free(skb);
-		return -EMSGSIZE;
-	}
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = cmd;
-
 	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
 		err = dcbnl_ieee_fill(skb, dev);
 	else
@@ -1559,8 +1341,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
 
 	if (err < 0) {
 		/* Report error to broadcast listeners */
-		nlmsg_cancel(skb, nlh);
-		kfree_skb(skb);
+		nlmsg_free(skb);
 		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
 	} else {
 		/* End nlmsg and notify broadcast listeners */
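dcbnl_notify() now delegates message construction to dcbnl_newmsg(), whose body lies outside the hunks shown here. Judging purely from the call sites in this diff (it takes the message type, DCB command, port id, sequence number and flags, returns an skb, and hands the nlmsghdr back through the last argument), a plausible sketch is the following; treat it as an inference from the call sites, not the committed implementation:

static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u16 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	if (!nlh) {
		nlmsg_free(skb);
		return NULL;
	}

	/* the fixed struct dcbmsg header every DCB message starts with */
	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	if (nlhp)
		*nlhp = nlh;

	return skb;
}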
@@ -1590,15 +1371,15 @@ EXPORT_SYMBOL(dcbnl_cee_notify);
  * No attempt is made to reconcile the case where only part of the
  * cmd can be completed.
  */
-static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
 	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
-	int err = -EOPNOTSUPP;
+	int err;
 
 	if (!ops)
-		return err;
+		return -EOPNOTSUPP;
 
 	if (!tb[DCB_ATTR_IEEE])
 		return -EINVAL;
@@ -1649,58 +1430,28 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 	}
 
 err:
-	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
-		    pid, seq, flags);
+	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
 	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
 	return err;
 }
 
-static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	struct net *net = dev_net(netdev);
-	struct sk_buff *skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-	int err;
 
 	if (!ops)
 		return -EOPNOTSUPP;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!skb)
-		return -ENOBUFS;
-
-	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-	if (nlh == NULL) {
-		nlmsg_free(skb);
-		return -EMSGSIZE;
-	}
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_IEEE_GET;
-
-	err = dcbnl_ieee_fill(skb, netdev);
-
-	if (err < 0) {
-		nlmsg_cancel(skb, nlh);
-		kfree_skb(skb);
-	} else {
-		nlmsg_end(skb, nlh);
-		err = rtnl_unicast(skb, net, pid);
-	}
-
-	return err;
+	return dcbnl_ieee_fill(skb, netdev);
 }
 
-static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
+			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
 	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
-	int err = -EOPNOTSUPP;
+	int err;
 
 	if (!ops)
 		return -EOPNOTSUPP;
@@ -1733,32 +1484,26 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
 	}
 
 err:
-	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
-		    pid, seq, flags);
+	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
 	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
 	return err;
 }
 
 
 /* DCBX configuration */
-static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
-			 u32 pid, u32 seq, u16 flags)
+static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	int ret;
-
 	if (!netdev->dcbnl_ops->getdcbx)
 		return -EOPNOTSUPP;
 
-	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
-			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
-
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_DCBX,
+			  netdev->dcbnl_ops->getdcbx(netdev));
 }
 
-static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
-			 u32 pid, u32 seq, u16 flags)
+static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	int ret;
 	u8 value;
 
 	if (!netdev->dcbnl_ops->setdcbx)
@@ -1769,19 +1514,13 @@ static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
 
 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
 
-	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
-			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
-			  pid, seq, flags);
-
-	return ret;
+	return nla_put_u8(skb, DCB_ATTR_DCBX,
+			  netdev->dcbnl_ops->setdcbx(netdev, value));
 }
 
-static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
-			    u32 pid, u32 seq, u16 flags)
+static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	struct sk_buff *dcbnl_skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
 	u8 value;
 	int ret, i;
@@ -1796,25 +1535,11 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
 			       dcbnl_featcfg_nest);
 	if (ret)
-		goto err_out;
-
-	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!dcbnl_skb) {
-		ret = -ENOBUFS;
-		goto err_out;
-	}
-
-	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_GFEATCFG;
+		return ret;
 
-	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
-	if (!nest) {
-		ret = -EMSGSIZE;
-		goto nla_put_failure;
-	}
+	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
+	if (!nest)
+		return -EMSGSIZE;
 
 	if (data[DCB_FEATCFG_ATTR_ALL])
 		getall = 1;
@@ -1825,28 +1550,21 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
 
 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
 		if (!ret)
-			ret = nla_put_u8(dcbnl_skb, i, value);
+			ret = nla_put_u8(skb, i, value);
 
 		if (ret) {
-			nla_nest_cancel(dcbnl_skb, nest);
+			nla_nest_cancel(skb, nest);
 			goto nla_put_failure;
 		}
 	}
-	nla_nest_end(dcbnl_skb, nest);
+	nla_nest_end(skb, nest);
 
-	nlmsg_end(dcbnl_skb, nlh);
-
-	return rtnl_unicast(dcbnl_skb, &init_net, pid);
 nla_put_failure:
-	nlmsg_cancel(dcbnl_skb, nlh);
-nlmsg_failure:
-	kfree_skb(dcbnl_skb);
-err_out:
 	return ret;
 }
 
-static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
-			    u32 pid, u32 seq, u16 flags)
+static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
 	int ret, i;
@@ -1876,60 +1594,73 @@ static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
 			goto err;
 	}
 err:
-	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
-		    pid, seq, flags);
+	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
 
 	return ret;
 }
 
 /* Handle CEE DCBX GET commands. */
-static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
-			 u32 pid, u32 seq, u16 flags)
+static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
 {
-	struct net *net = dev_net(netdev);
-	struct sk_buff *skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-	int err;
 
 	if (!ops)
 		return -EOPNOTSUPP;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!skb)
-		return -ENOBUFS;
-
-	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-	if (nlh == NULL) {
-		nlmsg_free(skb);
-		return -EMSGSIZE;
-	}
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_CEE_GET;
-
-	err = dcbnl_cee_fill(skb, netdev);
-
-	if (err < 0) {
-		nlmsg_cancel(skb, nlh);
-		nlmsg_free(skb);
-	} else {
-		nlmsg_end(skb, nlh);
-		err = rtnl_unicast(skb, net, pid);
-	}
-	return err;
+	return dcbnl_cee_fill(skb, netdev);
 }
 
+struct reply_func {
+	/* reply netlink message type */
+	int	type;
+
+	/* function to fill message contents */
+	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
+		    struct nlattr **, struct sk_buff *);
+};
+
+static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
+	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
+	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
+	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
+	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
+	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
+	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
+	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
+	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
+	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
+	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
+	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
+	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
+	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
+	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
+	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
+	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
+	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
+	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
+	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
+	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
+	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
+	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
+	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
+	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
+	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
+	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
+	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
+};
 
 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
 	struct net_device *netdev;
-	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
+	struct dcbmsg *dcb = nlmsg_data(nlh);
 	struct nlattr *tb[DCB_ATTR_MAX + 1];
 	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
 	int ret = -EINVAL;
+	struct sk_buff *reply_skb;
+	struct nlmsghdr *reply_nlh = NULL;
+	const struct reply_func *fn;
 
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
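The reply_funcs table above replaces the long switch in dcb_doit() with an array indexed by command number, built with C99 designated initializers; any slot not named stays zero-initialized, so a NULL ->cb doubles as "command not supported". A self-contained miniature of the same dispatch shape (all names here are illustrative, none are kernel APIs):

#include <stdio.h>

enum { CMD_GET, CMD_SET, CMD_MAX = 7 };

struct handler {
	int type;			/* reply type, as in reply_funcs */
	int (*cb)(int arg);		/* NULL for unhandled commands */
};

static int do_get(int arg) { return arg; }
static int do_set(int arg) { return -arg; }

static const struct handler handlers[CMD_MAX + 1] = {
	[CMD_GET] = { 1, do_get },
	[CMD_SET] = { 2, do_set },
};

int main(void)
{
	int cmd = CMD_GET;

	if (cmd > CMD_MAX || !handlers[cmd].cb)
		return 1;		/* -EOPNOTSUPP in the kernel code */
	printf("cb(42) -> %d (reply type %d)\n",
	       handlers[cmd].cb(42), handlers[cmd].type);
	return 0;
}

The bounds check before the table lookup matters: dcb->cmd comes from userspace, so indexing without the `dcb->cmd > DCB_CMD_MAX` test would read past the array.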
@@ -1939,136 +1670,78 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	if (ret < 0)
 		return ret;
 
+	if (dcb->cmd > DCB_CMD_MAX)
+		return -EINVAL;
+
+	/* check if a reply function has been defined for the command */
+	fn = &reply_funcs[dcb->cmd];
+	if (!fn->cb)
+		return -EOPNOTSUPP;
+
 	if (!tb[DCB_ATTR_IFNAME])
 		return -EINVAL;
 
 	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
 	if (!netdev)
-		return -EINVAL;
+		return -ENODEV;
 
-	if (!netdev->dcbnl_ops)
-		goto errout;
-
-	switch (dcb->cmd) {
-	case DCB_CMD_GSTATE:
-		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
-				     nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PFC_GCFG:
-		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
-				      nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GPERM_HWADDR:
-		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
-					   nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PGTX_GCFG:
-		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PGRX_GCFG:
-		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_BCN_GCFG:
-		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
-				       nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_SSTATE:
-		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
-				     nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PFC_SCFG:
-		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
-				      nlh->nlmsg_flags);
+	if (!netdev->dcbnl_ops) {
+		ret = -EOPNOTSUPP;
 		goto out;
+	}
 
-	case DCB_CMD_SET_ALL:
-		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
-				   nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PGTX_SCFG:
-		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PGRX_SCFG:
-		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GCAP:
-		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
-				   nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GNUMTCS:
-		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
-				      nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_SNUMTCS:
-		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
-				      nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PFC_GSTATE:
-		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_PFC_SSTATE:
-		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
-					nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_BCN_SCFG:
-		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
-				       nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GAPP:
-		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
-				   nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_SAPP:
-		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
-				   nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_IEEE_SET:
-		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
-				     nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_IEEE_GET:
-		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
-				     nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_IEEE_DEL:
-		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
-				     nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GDCBX:
-		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
-				    nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_SDCBX:
-		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
-				    nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_GFEATCFG:
-		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
-				       nlh->nlmsg_flags);
-		goto out;
-	case DCB_CMD_SFEATCFG:
-		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
-				       nlh->nlmsg_flags);
+	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
+				 nlh->nlmsg_flags, &reply_nlh);
+	if (!reply_skb) {
+		ret = -ENOBUFS;
 		goto out;
-	case DCB_CMD_CEE_GET:
-		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
-				    nlh->nlmsg_flags);
+	}
+
+	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
+	if (ret < 0) {
+		nlmsg_free(reply_skb);
 		goto out;
-	default:
-		goto errout;
 	}
-errout:
-	ret = -EINVAL;
+
+	nlmsg_end(reply_skb, reply_nlh);
+
+	ret = rtnl_unicast(reply_skb, &init_net, pid);
 out:
 	dev_put(netdev);
 	return ret;
 }
 
+static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
+					   int ifindex, int prio)
+{
+	struct dcb_app_type *itr;
+
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (itr->app.selector == app->selector &&
+		    itr->app.protocol == app->protocol &&
+		    itr->ifindex == ifindex &&
+		    (!prio || itr->app.priority == prio))
+			return itr;
+	}
+
+	return NULL;
+}
+
+static int dcb_app_add(const struct dcb_app *app, int ifindex)
+{
+	struct dcb_app_type *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	memcpy(&entry->app, app, sizeof(*app));
+	entry->ifindex = ifindex;
+	list_add(&entry->list, &dcb_app_list);
+
+	return 0;
+}
+
 /**
  * dcb_getapp - retrieve the DCBX application user priority
  *
@@ -2082,14 +1755,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
 	u8 prio = 0;
 
 	spin_lock(&dcb_lock);
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (itr->app.selector == app->selector &&
-		    itr->app.protocol == app->protocol &&
-		    itr->ifindex == dev->ifindex) {
-			prio = itr->app.priority;
-			break;
-		}
-	}
+	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+		prio = itr->app.priority;
 	spin_unlock(&dcb_lock);
 
 	return prio;
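dcb_app_lookup() folds four nearly identical list scans into one helper. The detail worth noting is the (!prio || itr->app.priority == prio) clause: passing prio == 0 turns the priority into a wildcard, which is how dcb_getapp() above matches any entry for the selector/protocol pair, while dcb_ieee_setapp() and dcb_ieee_delapp() pass a real priority for an exact match. The caller must hold dcb_lock, as in:

	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, 0);	/* 0 == any priority */
	if (itr)
		prio = itr->app.priority;
	spin_unlock(&dcb_lock);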
@@ -2107,6 +1774,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
 	struct dcb_app_type event;
+	int err = 0;
 
 	event.ifindex = dev->ifindex;
 	memcpy(&event.app, new, sizeof(event.app));
@@ -2115,36 +1783,23 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (itr->app.selector == new->selector &&
-		    itr->app.protocol == new->protocol &&
-		    itr->ifindex == dev->ifindex) {
-			if (new->priority)
-				itr->app.priority = new->priority;
-			else {
-				list_del(&itr->list);
-				kfree(itr);
-			}
-			goto out;
+	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+		if (new->priority)
+			itr->app.priority = new->priority;
+		else {
+			list_del(&itr->list);
+			kfree(itr);
 		}
+		goto out;
 	}
 	/* App type does not exist add new application type */
-	if (new->priority) {
-		struct dcb_app_type *entry;
-		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
-		if (!entry) {
-			spin_unlock(&dcb_lock);
-			return -ENOMEM;
-		}
-
-		memcpy(&entry->app, new, sizeof(*new));
-		entry->ifindex = dev->ifindex;
-		list_add(&entry->list, &dcb_app_list);
-	}
+	if (new->priority)
+		err = dcb_app_add(new, dev->ifindex);
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
-	return 0;
+	if (!err)
+		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+	return err;
 }
 EXPORT_SYMBOL(dcb_setapp);
 
@@ -2161,13 +1816,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
 	u8 prio = 0;
 
 	spin_lock(&dcb_lock);
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (itr->app.selector == app->selector &&
-		    itr->app.protocol == app->protocol &&
-		    itr->ifindex == dev->ifindex) {
-			prio |= 1 << itr->app.priority;
-		}
-	}
+	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+		prio |= 1 << itr->app.priority;
 	spin_unlock(&dcb_lock);
 
 	return prio;
@@ -2183,7 +1833,6 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
  */
 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 {
-	struct dcb_app_type *itr, *entry;
 	struct dcb_app_type event;
 	int err = 0;
 
@@ -2194,26 +1843,12 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and abort if found */
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (itr->app.selector == new->selector &&
-		    itr->app.protocol == new->protocol &&
-		    itr->app.priority == new->priority &&
-		    itr->ifindex == dev->ifindex) {
-			err = -EEXIST;
-			goto out;
-		}
-	}
-
-	/* App entry does not exist add new entry */
-	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
-	if (!entry) {
-		err = -ENOMEM;
+	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
+		err = -EEXIST;
 		goto out;
 	}
 
-	memcpy(&entry->app, new, sizeof(*new));
-	entry->ifindex = dev->ifindex;
-	list_add(&entry->list, &dcb_app_list);
+	err = dcb_app_add(new, dev->ifindex);
 out:
 	spin_unlock(&dcb_lock);
 	if (!err)
@@ -2240,19 +1875,12 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and remove it. */
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (itr->app.selector == del->selector &&
-		    itr->app.protocol == del->protocol &&
-		    itr->app.priority == del->priority &&
-		    itr->ifindex == dev->ifindex) {
-			list_del(&itr->list);
-			kfree(itr);
-			err = 0;
-			goto out;
-		}
+	if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
+		list_del(&itr->list);
+		kfree(itr);
+		err = 0;
 	}
 
-out:
 	spin_unlock(&dcb_lock);
 	if (!err)
 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
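One behavioral change worth flagging in dcb_setapp(): it used to return 0 unconditionally and always fire the DCB_APP_EVENT notifier; it now propagates the error from dcb_app_add() (-ENOMEM when the entry cannot be allocated) and only notifies on success, matching dcb_ieee_setapp(). Callers should therefore check the result, along these lines (an illustrative driver-side use; the protocol value is just an example):

	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,		/* example protocol id */
		.priority = 3,
	};
	int err;

	err = dcb_setapp(netdev, &app);
	if (err)
		return err;			/* entry was not stored */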
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index e2ab0627a5ff..a269aa7f7923 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -50,7 +50,8 @@ static inline u8 dccp_ackvec_state(const u8 *cell)
 	return *cell & ~DCCPAV_MAX_RUNLEN;
 }
 
-/** struct dccp_ackvec - Ack Vector main data structure
+/**
+ * struct dccp_ackvec - Ack Vector main data structure
  *
  * This implements a fixed-size circular buffer within an array and is largely
  * based on Appendix A of RFC 4340.
@@ -76,7 +77,8 @@ struct dccp_ackvec {
 	struct list_head av_records;
 };
 
-/** struct dccp_ackvec_record - Records information about sent Ack Vectors
+/**
+ * struct dccp_ackvec_record - Records information about sent Ack Vectors
  *
  * These list entries define the additional information which the HC-Receiver
  * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
@@ -121,6 +123,7 @@ static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
  * @len: length of @vec
  * @nonce: whether @vec had an ECN nonce of 0 or 1
  * @node: FIFO - arranged in descending order of ack_ackno
+ *
  * This structure is used by CCIDs to access Ack Vectors in a received skb.
  */
 struct dccp_ackvec_parsed {
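The dccp hunks in this file and the ones that follow are all the same kind of fix: turning malformed comment headers into valid kernel-doc, which requires a bare /** opener on its own line, a "name - summary" line, and a blank " *" line separating the @-parameter block from the free-form description. A minimal well-formed example (my_helper is a made-up name):

/**
 * my_helper - one-line summary of what the function does
 * @arg: meaning of the parameter
 *
 * Free-form description, separated from the @-parameter block by the
 * bare " *" line that several of these hunks add.
 */
static int my_helper(int arg);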
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 48b585a5cba7..597557254ddb 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -46,6 +46,7 @@ bool ccid_support_check(u8 const *ccid_array, u8 array_len)
  * ccid_get_builtin_ccids - Populate a list of built-in CCIDs
  * @ccid_array: pointer to copy into
  * @array_len: value to return length into
+ *
  * This function allocates memory - caller must see that it is freed after use.
  */
 int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8c67bedf85b0..d65e98798eca 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -113,6 +113,7 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
 /**
  * ccid3_hc_tx_update_x - Update allowed sending rate X
  * @stamp: most recent time if available - can be left NULL.
+ *
  * This function tracks draft rfc3448bis, check there for latest details.
  *
  * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
@@ -161,9 +162,11 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
 	}
 }
 
-/*
- * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
+/**
+ * ccid3_hc_tx_update_s - Track the mean packet size `s'
  * @len: DCCP packet payload size in bytes
+ *
+ * cf. RFC 4342, 5.3 and RFC 3448, 4.1
  */
 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
 {
@@ -270,6 +273,7 @@ out:
270/** 273/**
271 * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets 274 * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
272 * @skb: next packet candidate to send on @sk 275 * @skb: next packet candidate to send on @sk
276 *
273 * This function uses the convention of ccid_packet_dequeue_eval() and 277 * This function uses the convention of ccid_packet_dequeue_eval() and
274 * returns a millisecond-delay value between 0 and t_mbi = 64000 msec. 278 * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
275 */ 279 */
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 497723c4d4bb..57f9fd78c4df 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -133,6 +133,7 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
  * @rh: Receive history containing a fresh loss event
  * @calc_first_li: Caller-dependent routine to compute length of first interval
  * @sk: Used by @calc_first_li in caller-specific way (subtyping)
+ *
  * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
  */
 int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index de8fe294bf0b..08df7a3acb3d 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -315,6 +315,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
  * @ndp: The NDP count belonging to @skb
  * @calc_first_li: Caller-dependent computation of first loss interval in @lh
  * @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
+ *
  * Chooses action according to pending loss, updates LI database when a new
  * loss was detected, and does required post-processing. Returns 1 when caller
  * should send feedback, 0 otherwise.
@@ -387,7 +388,7 @@ static inline struct tfrc_rx_hist_entry *
 }
 
 /**
- * tfrc_rx_hist_rtt_prev_s: previously suitable (wrt rtt_last_s) RTT-sampling entry
+ * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
  */
 static inline struct tfrc_rx_hist_entry *
 			tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index a052a4377e26..88ef98285bec 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -611,6 +611,7 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
  * @s: packet size in bytes
  * @R: RTT scaled by 1000000 (i.e., microseconds)
  * @p: loss ratio estimate scaled by 1000000
+ *
  * Returns X_calc in bytes per second (not scaled).
  */
 u32 tfrc_calc_x(u16 s, u32 R, u32 p)
@@ -659,6 +660,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
 /**
  * tfrc_calc_x_reverse_lookup - try to find p given f(p)
  * @fvalue: function value to match, scaled by 1000000
+ *
  * Returns closest match for p, also scaled by 1000000
  */
 u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 9040be049d8c..708e75bf623d 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -352,6 +352,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
  * @dccpd_opt_len: total length of all options (5.8) in the packet
  * @dccpd_seq: sequence number
  * @dccpd_ack_seq: acknowledgment number subheader field value
+ *
  * This is used for transmission as well as for reception.
  */
 struct dccp_skb_cb {
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 78a2ad70e1b0..9733ddbc96cb 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -350,6 +350,7 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
  * @feat_num: feature to activate, one of %dccp_feature_numbers
  * @local: whether local (1) or remote (0) @feat_num is meant
  * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ *
  * For general use this function is preferable over __dccp_feat_activate().
  */
 static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
@@ -446,6 +447,7 @@ static struct dccp_feat_entry *dccp_feat_list_lookup(struct list_head *fn_list,
  * @head: list to add to
  * @feat: feature number
  * @local: whether the local (1) or remote feature with number @feat is meant
+ *
  * This is the only constructor and serves to ensure the above invariants.
  */
 static struct dccp_feat_entry *
@@ -504,6 +506,7 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
  * @feat: one of %dccp_feature_numbers
  * @local: whether local (1) or remote (0) @feat_num is being confirmed
  * @fval: pointer to NN/SP value to be inserted or NULL
+ *
  * Returns 0 on success, a Reset code for further processing otherwise.
  */
 static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
@@ -691,6 +694,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
  * @feat: an NN feature from %dccp_feature_numbers
  * @mandatory: use Mandatory option if 1
  * @nn_val: value to register (restricted to 4 bytes)
+ *
  * Note that NN features are local by definition (RFC 4340, 6.3.2).
  */
 static int __feat_register_nn(struct list_head *fn, u8 feat,
@@ -760,6 +764,7 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
  * dccp_feat_nn_get - Query current/pending value of NN feature
  * @sk: DCCP socket of an established connection
  * @feat: NN feature number from %dccp_feature_numbers
+ *
  * For a known NN feature, returns value currently being negotiated, or
  * current (confirmed) value if no negotiation is going on.
  */
@@ -790,6 +795,7 @@ EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
  * @sk: DCCP socket of an established connection
  * @feat: NN feature number from %dccp_feature_numbers
  * @nn_val: the new value to use
+ *
  * This function is used to communicate NN updates out-of-band.
  */
 int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
@@ -930,6 +936,7 @@ static const struct ccid_dependency *dccp_feat_ccid_deps(u8 ccid, bool is_local)
  * @fn: feature-negotiation list to update
  * @id: CCID number to track
  * @is_local: whether TX CCID (1) or RX CCID (0) is meant
+ *
  * This function needs to be called after registering all other features.
  */
 static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
@@ -953,6 +960,7 @@ static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
 /**
  * dccp_feat_finalise_settings - Finalise settings before starting negotiation
  * @dp: client or listening socket (settings will be inherited)
+ *
  * This is called after all registrations (socket initialisation, sysctls, and
 * sockopt calls), and before sending the first packet containing Change options
 * (ie. client-Request or server-Response), to ensure internal consistency.
@@ -1284,6 +1292,7 @@ confirmation_failed:
  * @feat: NN number, one of %dccp_feature_numbers
  * @val: NN value
  * @len: length of @val in bytes
+ *
  * This function combines the functionality of change_recv/confirm_recv, with
  * the following differences (reset codes are the same):
  * - cleanup after receiving the Confirm;
@@ -1379,6 +1388,7 @@ fast_path_failed:
  * @feat: one of %dccp_feature_numbers
  * @val: value contents of @opt
  * @len: length of @val in bytes
+ *
  * Returns 0 on success, a Reset code for ending the connection otherwise.
  */
 int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
diff --git a/net/dccp/input.c b/net/dccp/input.c
index bc93a333931e..14cdafad7a90 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -710,6 +710,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
 /**
  * dccp_sample_rtt - Validate and finalise computation of RTT sample
  * @delta: number of microseconds between packet and acknowledgment
+ *
  * The routine is kept generic to work in different contexts. It should be
  * called immediately when the ACK used for the RTT sample arrives.
  */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 07f5579ca756..176ecdba4a22 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -161,17 +161,10 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
 	if (sk->sk_state == DCCP_LISTEN)
 		return;
 
-	/* We don't check in the destentry if pmtu discovery is forbidden
-	 * on this route. We just assume that no packet_to_big packets
-	 * are send back when pmtu discovery is not active.
-	 * There is a small race when the user changes this flag in the
-	 * route, but I think that's acceptable.
-	 */
-	if ((dst = __sk_dst_check(sk, 0)) == NULL)
+	dst = inet_csk_update_pmtu(sk, mtu);
+	if (!dst)
 		return;
 
-	dst->ops->update_pmtu(dst, mtu);
-
 	/* Something is about to be wrong... Remember soft error
 	 * for the case, if this connection will not able to recover.
 	 */
@@ -195,6 +188,14 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
 	} /* else let the usual retransmit timer handle it */
 }
 
+static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_check(sk, 0);
+
+	if (dst)
+		dst->ops->redirect(dst, sk, skb);
+}
+
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -259,6 +260,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	}
 
 	switch (type) {
+	case ICMP_REDIRECT:
+		dccp_do_redirect(skb, sk);
+		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
 		goto out;
@@ -477,7 +481,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 	struct rtable *rt;
 	const struct iphdr *iph = ip_hdr(skb);
 	struct flowi4 fl4 = {
-		.flowi4_oif = skb_rtable(skb)->rt_iif,
+		.flowi4_oif = inet_iif(skb),
 		.daddr = iph->saddr,
 		.saddr = iph->daddr,
 		.flowi4_tos = RT_CONN_FLAGS(sk),
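Read together, the ipv4.c hunks replace DCCP's open-coded PMTU handling with the shared inet_csk_update_pmtu() helper and route ICMP redirects through the new dst_ops->redirect() hook. A condensed sketch of the resulting error-handler shape, assuming the 3.6-era signatures shown above (the proto_v4_err wrapper is hypothetical; the helpers are the ones the diff uses):

static void proto_v4_err(struct sock *sk, struct sk_buff *skb, int type, u32 info)
{
	struct dst_entry *dst;

	switch (type) {
	case ICMP_REDIRECT:			/* new case in this series */
		dst = __sk_dst_check(sk, 0);
		if (dst)
			dst->ops->redirect(dst, sk, skb);
		break;
	case ICMP_DEST_UNREACH:
		/* replaces __sk_dst_check() + dst->ops->update_pmtu() */
		dst = inet_csk_update_pmtu(sk, info);
		if (dst && inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		break;
	}
}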
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fa9512d86f3b..56840b249f3b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -130,6 +130,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	np = inet6_sk(sk);
 
+	if (type == NDISC_REDIRECT) {
+		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+
+		if (dst)
+			dst->ops->redirect(dst, sk, skb);
+	}
+
 	if (type == ICMPV6_PKT_TOOBIG) {
 		struct dst_entry *dst = NULL;
 
@@ -138,37 +145,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
 			goto out;
 
-		/* icmp should have updated the destination cache entry */
-		dst = __sk_dst_check(sk, np->dst_cookie);
-		if (dst == NULL) {
-			struct inet_sock *inet = inet_sk(sk);
-			struct flowi6 fl6;
-
-			/* BUGGG_FUTURE: Again, it is not clear how
-			   to handle rthdr case. Ignore this complexity
-			   for now.
-			 */
-			memset(&fl6, 0, sizeof(fl6));
-			fl6.flowi6_proto = IPPROTO_DCCP;
-			fl6.daddr = np->daddr;
-			fl6.saddr = np->saddr;
-			fl6.flowi6_oif = sk->sk_bound_dev_if;
-			fl6.fl6_dport = inet->inet_dport;
-			fl6.fl6_sport = inet->inet_sport;
-			security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
-			if (IS_ERR(dst)) {
-				sk->sk_err_soft = -PTR_ERR(dst);
-				goto out;
-			}
-		} else
-			dst_hold(dst);
+		dst = inet6_csk_update_pmtu(sk, ntohl(info));
+		if (!dst)
+			goto out;
 
-		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
+		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
 			dccp_sync_mss(sk, dst_mtu(dst));
-		} /* else let the usual retransmit timer handle it */
-		dst_release(dst);
 		goto out;
 	}
 
@@ -237,7 +219,6 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 	struct inet6_request_sock *ireq6 = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff *skb;
-	struct ipv6_txoptions *opt = NULL;
 	struct in6_addr *final_p, final;
 	struct flowi6 fl6;
 	int err = -1;
@@ -253,9 +234,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 	fl6.fl6_sport = inet_rsk(req)->loc_port;
 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
-	opt = np->opt;
 
-	final_p = fl6_update_dst(&fl6, opt, &final);
+	final_p = fl6_update_dst(&fl6, np->opt, &final);
 
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 	if (IS_ERR(dst)) {
@@ -272,13 +252,11 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 					 &ireq6->loc_addr,
 					 &ireq6->rmt_addr);
 		fl6.daddr = ireq6->rmt_addr;
-		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
 
 done:
-	if (opt != NULL && opt != np->opt)
-		sock_kfree_s(sk, opt, opt->tot_len);
 	dst_release(dst);
 	return err;
 }
@@ -473,7 +451,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	struct inet_sock *newinet;
 	struct dccp6_sock *newdp6;
 	struct sock *newsk;
-	struct ipv6_txoptions *opt;
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		/*
@@ -518,7 +495,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 		return newsk;
 	}
 
-	opt = np->opt;
 
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
@@ -530,7 +506,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_DCCP;
 	fl6.daddr = ireq6->rmt_addr;
-	final_p = fl6_update_dst(&fl6, opt, &final);
+	final_p = fl6_update_dst(&fl6, np->opt, &final);
 	fl6.saddr = ireq6->loc_addr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -595,11 +571,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	 * Yes, keeping reference count would be much more clever, but we make
 	 * one more one thing there: reattach optmem to newsk.
 	 */
-	if (opt != NULL) {
-		newnp->opt = ipv6_dup_options(newsk, opt);
-		if (opt != np->opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
-	}
+	if (np->opt != NULL)
+		newnp->opt = ipv6_dup_options(newsk, np->opt);
 
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (newnp->opt != NULL)
@@ -625,8 +598,6 @@ out_nonewsk:
 	dst_release(dst);
 out:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-	if (opt != NULL && opt != np->opt)
-		sock_kfree_s(sk, opt, opt->tot_len);
 	return NULL;
 }
 
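The ipv6.c side mirrors the ipv4 change: NDISC_REDIRECT is forwarded to dst->ops->redirect(), inet6_csk_update_pmtu() absorbs the whole hand-built flowi6 lookup, and with txoptions now read straight from np->opt the local `opt` variable and its conditional sock_kfree_s() cleanup disappear from all three paths. Roughly, the error handler reduces to the following (a sketch under the same 3.6-era assumptions; proto_v6_err is hypothetical):

static void proto_v6_err(struct sock *sk, struct sk_buff *skb, u8 type, __be32 info)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dst_entry *dst;

	if (type == NDISC_REDIRECT) {
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			return;
		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		/* no dst_release(): the helper hands back a cached entry */
	}
}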
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 68fa6b7a3e01..a58e0b634050 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -527,6 +527,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb)
  * @val: NN value or SP array (preferred element first) to copy
  * @len: true length of @val in bytes (excluding first element repetition)
  * @repeat_first: whether to copy the first element of @val twice
+ *
  * The last argument is used to construct Confirm options, where the preferred
  * value and the preference list appear separately (RFC 4340, 6.3.1). Preference
  * lists are kept such that the preferred entry is always first, so we only need
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 787367308797..d17fc90a74b6 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -214,6 +214,7 @@ void dccp_write_space(struct sock *sk)
  * dccp_wait_for_ccid - Await CCID send permission
  * @sk: socket to wait for
  * @delay: timeout in jiffies
+ *
  * This is used by CCIDs which need to delay the send time in process context.
  */
 static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 7eaf98799729..102d6106a942 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -505,6 +505,14 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
 	return 0;
 }
 
+static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
+{
+	if (rta[RTA_TABLE - 1])
+		table = nla_get_u32((struct nlattr *) rta[RTA_TABLE - 1]);
+
+	return table;
+}
+
 static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
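The new rtm_get_table() helper centralises one rule: an explicit RTA_TABLE attribute, when present, overrides the 8-bit rtm_table field carried in the header. A caller then reduces to a one-liner, roughly (sketch; dn_fib_get_table() is the existing DECnet table lookup, and the return handling is illustrative):

/* inside a rtnetlink handler; rta and r come from the parsed request */
struct dn_fib_table *tb;

tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 0);
if (tb == NULL)
	return -ESRCH;		/* unknown table: reject the request */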
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ac90f658586c..3aede1b459fd 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
-	struct neighbour *neigh = dst_get_neighbour_noref(dst);
+	struct neighbour *neigh = rt->n;
 	struct net_device *dev = neigh->dev;
 	char mac_addr[ETH_ALEN];
 	unsigned int seq;
@@ -240,7 +240,7 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = skb2;
 		net_info_ratelimited("dn_long_output: Increasing headroom\n");
 	}
@@ -283,7 +283,7 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = skb2;
 		net_info_ratelimited("dn_short_output: Increasing headroom\n");
 	}
@@ -322,7 +322,7 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = skb2;
 		net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
 	}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 564a6ad13ce7..8a96047c7c94 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -322,7 +322,7 @@ static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned c
 		/* Set "cross subchannel" bit in ackcrs */
 		ackcrs |= 0x2000;
 
-	ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen);
+	ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
 
 	*ptr++ = cpu_to_le16(acknum);
 	*ptr++ = cpu_to_le16(ackcrs);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 586302e557ad..85a3604c87c8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -114,10 +114,16 @@ static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
+static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+			       struct sk_buff *skb , u32 mtu);
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+			    struct sk_buff *skb);
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+					     struct sk_buff *skb,
+					     const void *daddr);
 static int dn_route_input(struct sk_buff *);
 static void dn_run_flush(unsigned long dummy);
 
@@ -138,17 +144,37 @@ static struct dst_ops dn_dst_ops = {
 	.mtu =			dn_dst_mtu,
 	.cow_metrics =		dst_cow_metrics_generic,
 	.destroy =		dn_dst_destroy,
+	.ifdown =		dn_dst_ifdown,
 	.negative_advice =	dn_dst_negative_advice,
 	.link_failure =		dn_dst_link_failure,
 	.update_pmtu =		dn_dst_update_pmtu,
+	.redirect =		dn_dst_redirect,
 	.neigh_lookup =		dn_dst_neigh_lookup,
 };
 
 static void dn_dst_destroy(struct dst_entry *dst)
 {
+	struct dn_route *rt = (struct dn_route *) dst;
+
+	if (rt->n)
+		neigh_release(rt->n);
 	dst_destroy_metrics_generic(dst);
 }
 
+static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
+{
+	if (how) {
+		struct dn_route *rt = (struct dn_route *) dst;
+		struct neighbour *n = rt->n;
+
+		if (n && n->dev == dev) {
+			n->dev = dev_net(dev)->loopback_dev;
+			dev_hold(n->dev);
+			dev_put(dev);
+		}
+	}
+}
+
 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
 {
 	__u16 tmp = (__u16 __force)(src ^ dst);
@@ -242,9 +268,11 @@ static int dn_dst_gc(struct dst_ops *ops)
  * We update both the mtu and the advertised mss (i.e. the segment size we
  * advertise to the other end).
  */
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+			       struct sk_buff *skb, u32 mtu)
 {
-	struct neighbour *n = dst_get_neighbour_noref(dst);
+	struct dn_route *rt = (struct dn_route *) dst;
+	struct neighbour *n = rt->n;
 	u32 min_mtu = 230;
 	struct dn_dev *dn;
 
@@ -269,6 +297,11 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 	}
 }
 
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+			    struct sk_buff *skb)
+{
+}
+
 /*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
@@ -713,7 +746,8 @@ out:
 static int dn_to_neigh_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *n = dst_get_neighbour_noref(dst);
+	struct dn_route *rt = (struct dn_route *) dst;
+	struct neighbour *n = rt->n;
 
 	return n->output(n, skb);
 }
@@ -727,7 +761,7 @@ static int dn_output(struct sk_buff *skb)
 
 	int err = -EINVAL;
 
-	if (dst_get_neighbour_noref(dst) == NULL)
+	if (rt->n == NULL)
 		goto error;
 
 	skb->dev = dev;
@@ -828,7 +862,9 @@ static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 	return mtu ? : dst->dev->mtu;
 }
 
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+					     struct sk_buff *skb,
+					     const void *daddr)
 {
 	return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
 }
@@ -848,11 +884,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 	}
 	rt->rt_type = res->type;
 
-	if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
+	if (dev != NULL && rt->n == NULL) {
 		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
-		dst_set_neighbour(&rt->dst, n);
+		rt->n = n;
 	}
 
 	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1140,7 +1176,7 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
@@ -1159,7 +1195,7 @@ make_route:
 	rt->rt_dst_map = fld.daddr;
 	rt->rt_src_map = fld.saddr;
 
-	dst_set_neighbour(&rt->dst, neigh);
+	rt->n = neigh;
 	neigh = NULL;
 
 	rt->dst.lastuse = jiffies;
@@ -1388,7 +1424,6 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	/* Packet was intra-ethernet, so we know its on-link */
 	if (cb->rt_flags & DN_RT_F_IE) {
 		gateway = cb->src;
-		flags |= RTCF_DIRECTSRC;
 		goto make_route;
 	}
 
@@ -1401,14 +1436,13 @@ static int dn_route_input_slow(struct sk_buff *skb)
 
 		/* Close eyes and pray */
 		gateway = cb->src;
-		flags |= RTCF_DIRECTSRC;
 		goto make_route;
 	default:
 		goto e_inval;
 	}
 
 make_route:
-	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
@@ -1429,7 +1463,7 @@ make_route:
 	rt->fld.flowidn_iif  = in_dev->ifindex;
 	rt->fld.flowidn_mark = fld.flowidn_mark;
 
-	dst_set_neighbour(&rt->dst, neigh);
+	rt->n = neigh;
 	rt->dst.lastuse = jiffies;
 	rt->dst.output = dn_rt_bug;
 	switch (res.type) {
@@ -1515,54 +1549,68 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
-	unsigned char *b = skb_tail_pointer(skb);
 	long expires;
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
-	r = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	r = nlmsg_data(nlh);
 	r->rtm_family = AF_DECnet;
 	r->rtm_dst_len = 16;
 	r->rtm_src_len = 0;
 	r->rtm_tos = 0;
 	r->rtm_table = RT_TABLE_MAIN;
-	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
 	r->rtm_type = rt->rt_type;
 	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
 	r->rtm_scope = RT_SCOPE_UNIVERSE;
 	r->rtm_protocol = RTPROT_UNSPEC;
+
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
-	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
+
+	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
+	    nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
+		goto errout;
+
 	if (rt->fld.saddr) {
 		r->rtm_src_len = 16;
-		RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
+		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
+			goto errout;
 	}
-	if (rt->dst.dev)
-		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
+	if (rt->dst.dev &&
+	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
+		goto errout;
+
 	/*
 	 * Note to self - change this if input routes reverse direction when
 	 * they deal only with inputs and not with replies like they do
 	 * currently.
 	 */
-	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
-	if (rt->rt_daddr != rt->rt_gateway)
-		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
+	if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
+		goto errout;
+
+	if (rt->rt_daddr != rt->rt_gateway &&
+	    nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
+		goto errout;
+
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
-		goto rtattr_failure;
+		goto errout;
+
 	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
-	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
+	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
 			       rt->dst.error) < 0)
-		goto rtattr_failure;
-	if (dn_is_input_route(rt))
-		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
+		goto errout;
 
-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-	return skb->len;
+	if (dn_is_input_route(rt) &&
+	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
+		goto errout;
 
-nlmsg_failure:
-rtattr_failure:
-	nlmsg_trim(skb, b);
-	return -1;
+	return nlmsg_end(skb, nlh);
+
+errout:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 /*
@@ -1572,7 +1620,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 {
 	struct net *net = sock_net(in_skb->sk);
 	struct rtattr **rta = arg;
-	struct rtmsg *rtm = NLMSG_DATA(nlh);
+	struct rtmsg *rtm = nlmsg_data(nlh);
 	struct dn_route *rt = NULL;
 	struct dn_skb_cb *cb;
 	int err;
@@ -1585,7 +1633,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
 	memset(&fld, 0, sizeof(fld));
 	fld.flowidn_proto = DNPROTO_NSP;
 
-	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (skb == NULL)
 		return -ENOBUFS;
 	skb_reset_mac_header(skb);
@@ -1663,13 +1711,16 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct dn_route *rt;
 	int h, s_h;
 	int idx, s_idx;
+	struct rtmsg *rtm;
 
 	if (!net_eq(net, &init_net))
 		return 0;
 
-	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
+	if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
 		return -EINVAL;
-	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
+
+	rtm = nlmsg_data(cb->nlh);
+	if (!(rtm->rtm_flags & RTM_F_CLONED))
 		return 0;
 
 	s_h = cb->args[0];
@@ -1769,12 +1820,11 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
 	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
 
 	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
 		   rt->dst.dev ? rt->dst.dev->name : "*",
 		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
 		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
 		   atomic_read(&rt->dst.__refcnt),
-		   rt->dst.__use,
-		   (int) dst_metric(&rt->dst, RTAX_RTT));
+		   rt->dst.__use, 0);
 	return 0;
 }
 
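dn_route.c absorbs two independent API moves. First, the dst_ops callbacks grow context arguments: update_pmtu() and the new redirect() hook now receive the socket and skb that triggered them, and neigh_lookup() gains the skb. Second, the cached neighbour leaves the generic dst helpers (dst_get_neighbour_noref()/dst_set_neighbour()) for a rt->n pointer that DECnet must reference-count itself, hence the new destroy/ifdown handlers. The ops wiring, trimmed from the hunks above to show only what changed:

static struct dst_ops example_dst_ops = {
	/* unchanged fields elided */
	.destroy	= dn_dst_destroy,	/* now drops rt->n via neigh_release() */
	.ifdown		= dn_dst_ifdown,	/* re-parents rt->n to loopback */
	.update_pmtu	= dn_dst_update_pmtu,	/* (dst, sk, skb, mtu) now */
	.redirect	= dn_dst_redirect,	/* new hook; empty for DECnet */
	.neigh_lookup	= dn_dst_neigh_lookup,	/* gains struct sk_buff *skb */
};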
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 650f3380c98a..16c986ab1228 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -297,61 +297,75 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
-	unsigned char *b = skb_tail_pointer(skb);
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
-	rtm = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	rtm = nlmsg_data(nlh);
 	rtm->rtm_family = AF_DECnet;
 	rtm->rtm_dst_len = dst_len;
 	rtm->rtm_src_len = 0;
 	rtm->rtm_tos = 0;
 	rtm->rtm_table = tb_id;
-	RTA_PUT_U32(skb, RTA_TABLE, tb_id);
 	rtm->rtm_flags = fi->fib_flags;
 	rtm->rtm_scope = scope;
 	rtm->rtm_type = type;
-	if (rtm->rtm_dst_len)
-		RTA_PUT(skb, RTA_DST, 2, dst);
 	rtm->rtm_protocol = fi->fib_protocol;
-	if (fi->fib_priority)
-		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+
+	if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
+		goto errout;
+
+	if (rtm->rtm_dst_len &&
+	    nla_put(skb, RTA_DST, 2, dst) < 0)
+		goto errout;
+
+	if (fi->fib_priority &&
+	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
+		goto errout;
+
 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
-		goto rtattr_failure;
+		goto errout;
+
 	if (fi->fib_nhs == 1) {
-		if (fi->fib_nh->nh_gw)
-			RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
-		if (fi->fib_nh->nh_oif)
-			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+		if (fi->fib_nh->nh_gw &&
+		    nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
+			goto errout;
+
+		if (fi->fib_nh->nh_oif &&
+		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
+			goto errout;
 	}
+
 	if (fi->fib_nhs > 1) {
 		struct rtnexthop *nhp;
-		struct rtattr *mp_head;
-		if (skb_tailroom(skb) <= RTA_SPACE(0))
-			goto rtattr_failure;
-		mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
+		struct nlattr *mp_head;
+
+		if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
+			goto errout;
 
 		for_nexthops(fi) {
-			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
-				goto rtattr_failure;
-			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
+				goto errout;
+
 			nhp->rtnh_flags = nh->nh_flags & 0xFF;
 			nhp->rtnh_hops = nh->nh_weight - 1;
 			nhp->rtnh_ifindex = nh->nh_oif;
-			if (nh->nh_gw)
-				RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
+
+			if (nh->nh_gw &&
+			    nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
+				goto errout;
+
 			nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
 		} endfor_nexthops(fi);
-		mp_head->rta_type = RTA_MULTIPATH;
-		mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
-	}
 
-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-	return skb->len;
+		nla_nest_end(skb, mp_head);
+	}
 
+	return nlmsg_end(skb, nlh);
 
-nlmsg_failure:
-rtattr_failure:
-	nlmsg_trim(skb, b);
+errout:
+	nlmsg_cancel(skb, nlh);
 	return -EMSGSIZE;
 }
 
@@ -476,7 +490,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		return 0;
 
 	if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
-	    ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
+	    ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
 		return dn_cache_dump(skb, cb);
 
 	s_h = cb->args[0];
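dn_rt_fill_info() above and dn_fib_dump_info() here get the same treatment: the jump-label NLMSG_NEW()/RTA_PUT() macros give way to the checked nla_* API, with every put tested, failures funnelled to nlmsg_cancel(), success ending in nlmsg_end(), and the hand-rolled multipath rtattr replaced by nla_nest_start()/nla_nest_end(). The skeleton both functions now follow (a sketch of the pattern, not a verbatim copy):

static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, int event)
{
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct rtmsg), 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0)
		goto errout;

	nest = nla_nest_start(skb, RTA_MULTIPATH);	/* replaces raw skb_put() */
	if (!nest)
		goto errout;
	/* per-nexthop attributes would be emitted here */
	nla_nest_end(skb, nest);

	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);		/* trims the message back to nlh */
	return -EMSGSIZE;
}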
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 44b890936fc0..11db0ecf342f 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -42,23 +42,23 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
 	size = NLMSG_SPACE(rt_skb->len);
 	size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
 	skb = alloc_skb(size, GFP_ATOMIC);
-	if (!skb)
-		goto nlmsg_failure;
+	if (!skb) {
+		*errp = -ENOMEM;
+		return NULL;
+	}
 	old_tail = skb->tail;
-	nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh));
+	nlh = nlmsg_put(skb, 0, 0, 0, size - sizeof(*nlh), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		*errp = -ENOMEM;
+		return NULL;
+	}
 	rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh);
 	rtm->nfdn_ifindex = rt_skb->dev->ifindex;
 	ptr = NFDN_RTMSG(rtm);
 	skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
 	nlh->nlmsg_len = skb->tail - old_tail;
 	return skb;
-
-nlmsg_failure:
-	if (skb)
-		kfree_skb(skb);
-	*errp = -ENOMEM;
-	net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
-	return NULL;
 }
 
 static void dnrmg_send_peer(struct sk_buff *skb)
@@ -117,7 +117,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 
 static struct nf_hook_ops dnrmg_ops __read_mostly = {
 	.hook = dnrmg_hook,
-	.pf = PF_DECnet,
+	.pf = NFPROTO_DECNET,
 	.hooknum = NF_DN_ROUTE,
 	.priority = NF_DN_PRI_DNRTMSG,
 };
@@ -125,11 +125,13 @@ static struct nf_hook_ops dnrmg_ops __read_mostly = {
 static int __init dn_rtmsg_init(void)
 {
 	int rv = 0;
+	struct netlink_kernel_cfg cfg = {
+		.groups = DNRNG_NLGRP_MAX,
+		.input = dnrmg_receive_user_skb,
+	};
 
 	dnrmg = netlink_kernel_create(&init_net,
-				      NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
-				      dnrmg_receive_user_skb,
-				      NULL, THIS_MODULE);
+				      NETLINK_DNRTMSG, THIS_MODULE, &cfg);
 	if (dnrmg == NULL) {
 		printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
 		return -ENOMEM;
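The driver-visible change in dn_rtmsg.c is the netlink_kernel_create() signature: the optional parameters (multicast group count, input callback, mutex, module) are now bundled in struct netlink_kernel_cfg rather than passed positionally, which is what the cfg initialiser above is for. In isolation, the new-style setup looks like this (sketch of the 3.6 signature shown in the diff):

struct netlink_kernel_cfg cfg = {
	.groups	= DNRNG_NLGRP_MAX,		/* multicast groups to expose */
	.input	= dnrmg_receive_user_skb,	/* rx callback, may be NULL */
};
struct sock *nlsk = netlink_kernel_create(&init_net, NETLINK_DNRTMSG,
					  THIS_MODULE, &cfg);
if (nlsk == NULL)
	return -ENOMEM;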
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 7cef1d8ace27..323177505404 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,5 +3,3 @@
 #
 
 obj-y += eth.o
-obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o
-obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 36e58800a9e3..4efad533e5f6 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -232,6 +232,7 @@ EXPORT_SYMBOL(eth_header_parse);
  * @neigh: source neighbour
  * @hh: destination cache entry
  * @type: Ethernet type field
+ *
  * Create an Ethernet header template from the neighbour.
  */
 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
@@ -274,6 +275,7 @@ EXPORT_SYMBOL(eth_header_cache_update);
  * eth_mac_addr - set new Ethernet hardware address
  * @dev: network device
  * @p: socket address
+ *
  * Change hardware address of device.
  *
  * This doesn't change hardware matching, so needs to be overridden
@@ -283,7 +285,7 @@ int eth_mac_addr(struct net_device *dev, void *p)
 {
 	struct sockaddr *addr = p;
 
-	if (netif_running(dev))
+	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
 		return -EBUSY;
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -331,6 +333,7 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
 /**
  * ether_setup - setup Ethernet network device
  * @dev: network device
+ *
  * Fill in the fields of the device structure with Ethernet-generic values.
  */
 void ether_setup(struct net_device *dev)
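The eth_mac_addr() hunk introduces an opt-in: a driver that sets IFF_LIVE_ADDR_CHANGE in priv_flags may change its MAC while the interface is up, while everyone else keeps the -EBUSY behaviour. A driver would opt in at setup time, roughly as follows (exampledrv_setup is hypothetical):

static void exampledrv_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* hardware can reprogram its MAC filter on the fly, so allow
	 * eth_mac_addr() to run while the interface is up */
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}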
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 32eb4179e8fa..6a095225148e 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,7 +55,6 @@
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
59#include <net/af_ieee802154.h> 58#include <net/af_ieee802154.h>
60#include <net/ieee802154.h> 59#include <net/ieee802154.h>
61#include <net/ieee802154_netdev.h> 60#include <net/ieee802154_netdev.h>
@@ -114,7 +113,6 @@ struct lowpan_dev_record {
114 113
115struct lowpan_fragment { 114struct lowpan_fragment {
116 struct sk_buff *skb; /* skb to be assembled */ 115 struct sk_buff *skb; /* skb to be assembled */
117 spinlock_t lock; /* concurency lock */
118 u16 length; /* length to be assemled */ 116 u16 length; /* length to be assemled */
119 u32 bytes_rcv; /* bytes received */ 117 u32 bytes_rcv; /* bytes received */
120 u16 tag; /* current fragment tag */ 118 u16 tag; /* current fragment tag */
@@ -124,7 +122,7 @@ struct lowpan_fragment {
124 122
125static unsigned short fragment_tag; 123static unsigned short fragment_tag;
126static LIST_HEAD(lowpan_fragments); 124static LIST_HEAD(lowpan_fragments);
127spinlock_t flist_lock; 125static DEFINE_SPINLOCK(flist_lock);
128 126
129static inline struct 127static inline struct
130lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) 128lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
@@ -240,8 +238,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
240 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); 238 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
241 } 239 }
242 240
243 pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount, 241 pr_debug("uncompressing %d + %d => ", prefcount, postcount);
244 postcount);
245 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); 242 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
246 243
247 return 0; 244 return 0;
@@ -252,13 +249,11 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
252{ 249{
253 struct udphdr *uh = udp_hdr(skb); 250 struct udphdr *uh = udp_hdr(skb);
254 251
255 pr_debug("(%s): UDP header compression\n", __func__);
256
257 if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == 252 if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
258 LOWPAN_NHC_UDP_4BIT_PORT) && 253 LOWPAN_NHC_UDP_4BIT_PORT) &&
259 ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == 254 ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
260 LOWPAN_NHC_UDP_4BIT_PORT)) { 255 LOWPAN_NHC_UDP_4BIT_PORT)) {
261 pr_debug("(%s): both ports compression to 4 bits\n", __func__); 256 pr_debug("UDP header: both ports compression to 4 bits\n");
262 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; 257 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
263 **(hc06_ptr + 1) = /* subtraction is faster */ 258 **(hc06_ptr + 1) = /* subtraction is faster */
264 (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + 259 (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
@@ -266,20 +261,20 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
266 *hc06_ptr += 2; 261 *hc06_ptr += 2;
267 } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == 262 } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
268 LOWPAN_NHC_UDP_8BIT_PORT) { 263 LOWPAN_NHC_UDP_8BIT_PORT) {
269 pr_debug("(%s): remove 8 bits of dest\n", __func__); 264 pr_debug("UDP header: remove 8 bits of dest\n");
270 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; 265 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
271 memcpy(*hc06_ptr + 1, &uh->source, 2); 266 memcpy(*hc06_ptr + 1, &uh->source, 2);
272 **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); 267 **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
273 *hc06_ptr += 4; 268 *hc06_ptr += 4;
274 } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == 269 } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
275 LOWPAN_NHC_UDP_8BIT_PORT) { 270 LOWPAN_NHC_UDP_8BIT_PORT) {
276 pr_debug("(%s): remove 8 bits of source\n", __func__); 271 pr_debug("UDP header: remove 8 bits of source\n");
277 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; 272 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
278 memcpy(*hc06_ptr + 1, &uh->dest, 2); 273 memcpy(*hc06_ptr + 1, &uh->dest, 2);
279 **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); 274 **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
280 *hc06_ptr += 4; 275 *hc06_ptr += 4;
281 } else { 276 } else {
282 pr_debug("(%s): can't compress header\n", __func__); 277 pr_debug("UDP header: can't compress\n");
283 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; 278 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
284 memcpy(*hc06_ptr + 1, &uh->source, 2); 279 memcpy(*hc06_ptr + 1, &uh->source, 2);
285 memcpy(*hc06_ptr + 3, &uh->dest, 2); 280 memcpy(*hc06_ptr + 3, &uh->dest, 2);
@@ -291,25 +286,26 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
291 *hc06_ptr += 2; 286 *hc06_ptr += 2;
292} 287}
293 288
294static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) 289static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
295{ 290{
296 u8 ret; 291 if (unlikely(!pskb_may_pull(skb, 1)))
292 return -EINVAL;
297 293
298 ret = skb->data[0]; 294 *val = skb->data[0];
299 skb_pull(skb, 1); 295 skb_pull(skb, 1);
300 296
301 return ret; 297 return 0;
302} 298}
303 299
304static u16 lowpan_fetch_skb_u16(struct sk_buff *skb) 300static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
305{ 301{
306 u16 ret; 302 if (unlikely(!pskb_may_pull(skb, 2)))
307 303 return -EINVAL;
308 BUG_ON(!pskb_may_pull(skb, 2));
309 304
310 ret = skb->data[0] | (skb->data[1] << 8); 305 *val = (skb->data[0] << 8) | skb->data[1];
311 skb_pull(skb, 2); 306 skb_pull(skb, 2);
312 return ret; 307
308 return 0;
313} 309}
314 310
315static int 311static int
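With the reworked helpers returning 0 or -EINVAL and writing through an output parameter, callers can chain fetches and drop truncated frames instead of tripping a BUG_ON(). Note also that lowpan_fetch_skb_u16() now assembles the value big-endian, (data[0] << 8) | data[1], matching the network byte order used for the fragment tag. A sketch of the resulting call pattern (the function name here is illustrative only):

    static int example_parse(struct sk_buff *skb)
    {
            u8 iphc0, iphc1;

            /* each fetch validates length via pskb_may_pull() first */
            if (lowpan_fetch_skb_u8(skb, &iphc0) ||
                lowpan_fetch_skb_u8(skb, &iphc1))
                    goto drop;      /* truncated frame: drop it, no BUG() */

            pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
            return 0;

    drop:
            kfree_skb(skb);
            return -EINVAL;
    }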
@@ -318,10 +314,14 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
318 struct udphdr *uh = udp_hdr(skb); 314 struct udphdr *uh = udp_hdr(skb);
319 u8 tmp; 315 u8 tmp;
320 316
321 tmp = lowpan_fetch_skb_u8(skb); 317 if (!uh)
318 goto err;
319
320 if (lowpan_fetch_skb_u8(skb, &tmp))
321 goto err;
322 322
323 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { 323 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
324 pr_debug("(%s): UDP header uncompression\n", __func__); 324 pr_debug("UDP header uncompression\n");
325 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { 325 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
326 case LOWPAN_NHC_UDP_CS_P_00: 326 case LOWPAN_NHC_UDP_CS_P_00:
327 memcpy(&uh->source, &skb->data[0], 2); 327 memcpy(&uh->source, &skb->data[0], 2);
@@ -347,19 +347,19 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
347 skb_pull(skb, 1); 347 skb_pull(skb, 1);
348 break; 348 break;
349 default: 349 default:
350 pr_debug("(%s) ERROR: unknown UDP format\n", __func__); 350 pr_debug("ERROR: unknown UDP format\n");
351 goto err; 351 goto err;
352 break; 352 break;
353 } 353 }
354 354
355 pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n", 355 pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
356 __func__, uh->source, uh->dest); 356 uh->source, uh->dest);
357 357
358 /* copy checksum */ 358 /* copy checksum */
359 memcpy(&uh->check, &skb->data[0], 2); 359 memcpy(&uh->check, &skb->data[0], 2);
360 skb_pull(skb, 2); 360 skb_pull(skb, 2);
361 } else { 361 } else {
362 pr_debug("(%s): ERROR: unsupported NH format\n", __func__); 362 pr_debug("ERROR: unsupported NH format\n");
363 goto err; 363 goto err;
364 } 364 }
365 365
@@ -392,10 +392,9 @@ static int lowpan_header_create(struct sk_buff *skb,
392 hdr = ipv6_hdr(skb); 392 hdr = ipv6_hdr(skb);
393 hc06_ptr = head + 2; 393 hc06_ptr = head + 2;
394 394
395 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" 395 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
396 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__, 396 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
397 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr, 397 ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
398 hdr->hop_limit);
399 398
400 lowpan_raw_dump_table(__func__, "raw skb network header dump", 399 lowpan_raw_dump_table(__func__, "raw skb network header dump",
401 skb_network_header(skb), sizeof(struct ipv6hdr)); 400 skb_network_header(skb), sizeof(struct ipv6hdr));
@@ -490,28 +489,28 @@ static int lowpan_header_create(struct sk_buff *skb,
490 break; 489 break;
491 default: 490 default:
492 *hc06_ptr = hdr->hop_limit; 491 *hc06_ptr = hdr->hop_limit;
492 hc06_ptr += 1;
493 break; 493 break;
494 } 494 }
495 495
496 /* source address compression */ 496 /* source address compression */
497 if (is_addr_unspecified(&hdr->saddr)) { 497 if (is_addr_unspecified(&hdr->saddr)) {
498 pr_debug("(%s): source address is unspecified, setting SAC\n", 498 pr_debug("source address is unspecified, setting SAC\n");
499 __func__);
500 iphc1 |= LOWPAN_IPHC_SAC; 499 iphc1 |= LOWPAN_IPHC_SAC;
501 /* TODO: context lookup */ 500 /* TODO: context lookup */
502 } else if (is_addr_link_local(&hdr->saddr)) { 501 } else if (is_addr_link_local(&hdr->saddr)) {
503 pr_debug("(%s): source address is link-local\n", __func__); 502 pr_debug("source address is link-local\n");
504 iphc1 |= lowpan_compress_addr_64(&hc06_ptr, 503 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
505 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr); 504 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
506 } else { 505 } else {
507 pr_debug("(%s): send the full source address\n", __func__); 506 pr_debug("send the full source address\n");
508 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); 507 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
509 hc06_ptr += 16; 508 hc06_ptr += 16;
510 } 509 }
511 510
512 /* destination address compression */ 511 /* destination address compression */
513 if (is_addr_mcast(&hdr->daddr)) { 512 if (is_addr_mcast(&hdr->daddr)) {
514 pr_debug("(%s): destination address is multicast", __func__); 513 pr_debug("destination address is multicast: ");
515 iphc1 |= LOWPAN_IPHC_M; 514 iphc1 |= LOWPAN_IPHC_M;
516 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { 515 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
517 pr_debug("compressed to 1 octet\n"); 516 pr_debug("compressed to 1 octet\n");
@@ -540,14 +539,13 @@ static int lowpan_header_create(struct sk_buff *skb,
540 hc06_ptr += 16; 539 hc06_ptr += 16;
541 } 540 }
542 } else { 541 } else {
543 pr_debug("(%s): destination address is unicast: ", __func__);
544 /* TODO: context lookup */ 542 /* TODO: context lookup */
545 if (is_addr_link_local(&hdr->daddr)) { 543 if (is_addr_link_local(&hdr->daddr)) {
546 pr_debug("destination address is link-local\n"); 544 pr_debug("dest address is unicast and link-local\n");
547 iphc1 |= lowpan_compress_addr_64(&hc06_ptr, 545 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
548 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr); 546 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
549 } else { 547 } else {
550 pr_debug("using full address\n"); 548 pr_debug("dest address is unicast: using full one\n");
551 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); 549 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
552 hc06_ptr += 16; 550 hc06_ptr += 16;
553 } 551 }
@@ -639,19 +637,15 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
639{ 637{
640 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; 638 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
641 639
642 pr_debug("%s: timer expired for frame with tag %d\n", __func__, 640 pr_debug("timer expired for frame with tag %d\n", entry->tag);
643 entry->tag);
644 641
645 spin_lock(&flist_lock);
646 list_del(&entry->list); 642 list_del(&entry->list);
647 spin_unlock(&flist_lock);
648
649 dev_kfree_skb(entry->skb); 643 dev_kfree_skb(entry->skb);
650 kfree(entry); 644 kfree(entry);
651} 645}
652 646
653static struct lowpan_fragment * 647static struct lowpan_fragment *
654lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag) 648lowpan_alloc_new_frame(struct sk_buff *skb, u8 len, u16 tag)
655{ 649{
656 struct lowpan_fragment *frame; 650 struct lowpan_fragment *frame;
657 651
@@ -662,12 +656,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
662 656
663 INIT_LIST_HEAD(&frame->list); 657 INIT_LIST_HEAD(&frame->list);
664 658
665 frame->length = (iphc0 & 7) | (len << 3); 659 frame->length = len;
666 frame->tag = tag; 660 frame->tag = tag;
667 661
668 /* allocate buffer for frame assembly */ 662 /* allocate buffer for frame assembly */
669 frame->skb = alloc_skb(frame->length + 663 frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
670 sizeof(struct ipv6hdr), GFP_ATOMIC); 664 sizeof(struct ipv6hdr));
671 665
672 if (!frame->skb) 666 if (!frame->skb)
673 goto skb_err; 667 goto skb_err;
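netdev_alloc_skb_ip_align() is shorthand for a netdev skb allocation with NET_IP_ALIGN bytes of extra headroom reserved, so the reassembled IPv6 header lands on a naturally aligned boundary and skb->dev is set up front. Roughly (a simplified sketch of the in-tree helper, not a verbatim copy):

    static inline struct sk_buff *
    sketch_netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int len)
    {
            /* NET_IP_ALIGN (2 on most architectures) offsets the data so
             * the network header following the link-layer header is 4-byte
             * aligned; the allocation is GFP_ATOMIC-safe. */
            struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

            if (skb)
                    skb_reserve(skb, NET_IP_ALIGN);
            return skb;
    }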
@@ -710,7 +704,9 @@ lowpan_process_data(struct sk_buff *skb)
710 /* at least two bytes will be used for the encoding */ 704 /* at least two bytes will be used for the encoding */
711 if (skb->len < 2) 705 if (skb->len < 2)
712 goto drop; 706 goto drop;
713 iphc0 = lowpan_fetch_skb_u8(skb); 707
708 if (lowpan_fetch_skb_u8(skb, &iphc0))
709 goto drop;
714 710
715 /* fragment reassembly */ 711 /* fragment reassembly */
716 switch (iphc0 & LOWPAN_DISPATCH_MASK) { 712 switch (iphc0 & LOWPAN_DISPATCH_MASK) {
@@ -718,18 +714,23 @@ lowpan_process_data(struct sk_buff *skb)
718 case LOWPAN_DISPATCH_FRAGN: 714 case LOWPAN_DISPATCH_FRAGN:
719 { 715 {
720 struct lowpan_fragment *frame; 716 struct lowpan_fragment *frame;
721 u8 len, offset; 717 /* slen stores the rightmost 8 bits of the 11-bit length */
722 u16 tag; 718 u8 slen, offset;
719 u16 len, tag;
723 bool found = false; 720 bool found = false;
724 721
725 len = lowpan_fetch_skb_u8(skb); /* frame length */ 722 if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
726 tag = lowpan_fetch_skb_u16(skb); 723 lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
724 goto drop;
725
726 /* adds the 3 MSBs to the 8 LSBs to retrieve the 11-bit length */
727 len = ((iphc0 & 7) << 8) | slen;
727 728
728 /* 729 /*
729 * check if frame assembly with the same tag is 730 * check if frame assembly with the same tag is
730 * already in progress 731 * already in progress
731 */ 732 */
732 spin_lock(&flist_lock); 733 spin_lock_bh(&flist_lock);
733 734
734 list_for_each_entry(frame, &lowpan_fragments, list) 735 list_for_each_entry(frame, &lowpan_fragments, list)
735 if (frame->tag == tag) { 736 if (frame->tag == tag) {
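A worked example of the corrected length handling, taking an 11-bit datagram size of 0x2ab (683 bytes): the dispatch byte carries the top 3 bits and slen the low 8, so reassembly recovers the full value for FRAG1 and FRAGN alike:

    /* iphc0 = LOWPAN_DISPATCH_FRAGN | 0x02    top 3 bits of 0x2ab
     * slen  = 0xab                            low 8 bits
     */
    u16 len = ((iphc0 & 7) << 8) | slen;    /* (0x2 << 8) | 0xab = 683 */

The old code stored this in a u8 and combined the fields the other way around, (iphc0 & 7) | (len << 3), truncating any datagram longer than 255 bytes.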
@@ -739,7 +740,7 @@ lowpan_process_data(struct sk_buff *skb)
739 740
740 /* alloc new frame structure */ 741 /* alloc new frame structure */
741 if (!found) { 742 if (!found) {
742 frame = lowpan_alloc_new_frame(skb, iphc0, len, tag); 743 frame = lowpan_alloc_new_frame(skb, len, tag);
743 if (!frame) 744 if (!frame)
744 goto unlock_and_drop; 745 goto unlock_and_drop;
745 } 746 }
@@ -747,7 +748,8 @@ lowpan_process_data(struct sk_buff *skb)
747 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) 748 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
748 goto unlock_and_drop; 749 goto unlock_and_drop;
749 750
750 offset = lowpan_fetch_skb_u8(skb); /* fetch offset */ 751 if (lowpan_fetch_skb_u8(skb, &offset)) /* fetch offset */
752 goto unlock_and_drop;
751 753
752 /* if payload fits buffer, copy it */ 754 /* if payload fits buffer, copy it */
753 if (likely((offset * 8 + skb->len) <= frame->length)) 755 if (likely((offset * 8 + skb->len) <= frame->length))
@@ -762,17 +764,20 @@ lowpan_process_data(struct sk_buff *skb)
762 if ((frame->bytes_rcv == frame->length) && 764 if ((frame->bytes_rcv == frame->length) &&
763 frame->timer.expires > jiffies) { 765 frame->timer.expires > jiffies) {
764 /* if the timer hasn't expired yet, delete it first */ 766 /* if the timer hasn't expired yet, delete it first */
765 del_timer(&frame->timer); 767 del_timer_sync(&frame->timer);
766 list_del(&frame->list); 768 list_del(&frame->list);
767 spin_unlock(&flist_lock); 769 spin_unlock_bh(&flist_lock);
768 770
769 dev_kfree_skb(skb); 771 dev_kfree_skb(skb);
770 skb = frame->skb; 772 skb = frame->skb;
771 kfree(frame); 773 kfree(frame);
772 iphc0 = lowpan_fetch_skb_u8(skb); 774
775 if (lowpan_fetch_skb_u8(skb, &iphc0))
776 goto drop;
777
773 break; 778 break;
774 } 779 }
775 spin_unlock(&flist_lock); 780 spin_unlock_bh(&flist_lock);
776 781
777 return kfree_skb(skb), 0; 782 return kfree_skb(skb), 0;
778 } 783 }
@@ -780,20 +785,19 @@ lowpan_process_data(struct sk_buff *skb)
780 break; 785 break;
781 } 786 }
782 787
783 iphc1 = lowpan_fetch_skb_u8(skb); 788 if (lowpan_fetch_skb_u8(skb, &iphc1))
789 goto drop;
784 790
785 _saddr = mac_cb(skb)->sa.hwaddr; 791 _saddr = mac_cb(skb)->sa.hwaddr;
786 _daddr = mac_cb(skb)->da.hwaddr; 792 _daddr = mac_cb(skb)->da.hwaddr;
787 793
788 pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1); 794 pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
789 795
790 /* another byte if the CID flag is set */ 796 /* another byte if the CID flag is set */
791 if (iphc1 & LOWPAN_IPHC_CID) { 797 if (iphc1 & LOWPAN_IPHC_CID) {
792 pr_debug("(%s): CID flag is set, increase header with one\n", 798 pr_debug("CID flag is set, increase header with one\n");
793 __func__); 799 if (lowpan_fetch_skb_u8(skb, &num_context))
794 if (!skb->len)
795 goto drop; 800 goto drop;
796 num_context = lowpan_fetch_skb_u8(skb);
797 } 801 }
798 802
799 hdr.version = 6; 803 hdr.version = 6;
@@ -805,9 +809,9 @@ lowpan_process_data(struct sk_buff *skb)
805 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) 809 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
806 */ 810 */
807 case 0: /* 00b */ 811 case 0: /* 00b */
808 if (!skb->len) 812 if (lowpan_fetch_skb_u8(skb, &tmp))
809 goto drop; 813 goto drop;
810 tmp = lowpan_fetch_skb_u8(skb); 814
811 memcpy(&hdr.flow_lbl, &skb->data[0], 3); 815 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
812 skb_pull(skb, 3); 816 skb_pull(skb, 3);
813 hdr.priority = ((tmp >> 2) & 0x0f); 817 hdr.priority = ((tmp >> 2) & 0x0f);
@@ -819,9 +823,9 @@ lowpan_process_data(struct sk_buff *skb)
819 * ECN + DSCP (1 byte), Flow Label is elided 823 * ECN + DSCP (1 byte), Flow Label is elided
820 */ 824 */
821 case 1: /* 10b */ 825 case 1: /* 10b */
822 if (!skb->len) 826 if (lowpan_fetch_skb_u8(skb, &tmp))
823 goto drop; 827 goto drop;
824 tmp = lowpan_fetch_skb_u8(skb); 828
825 hdr.priority = ((tmp >> 2) & 0x0f); 829 hdr.priority = ((tmp >> 2) & 0x0f);
826 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); 830 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
827 hdr.flow_lbl[1] = 0; 831 hdr.flow_lbl[1] = 0;
@@ -832,9 +836,9 @@ lowpan_process_data(struct sk_buff *skb)
832 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided 836 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
833 */ 837 */
834 case 2: /* 01b */ 838 case 2: /* 01b */
835 if (!skb->len) 839 if (lowpan_fetch_skb_u8(skb, &tmp))
836 goto drop; 840 goto drop;
837 tmp = lowpan_fetch_skb_u8(skb); 841
838 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); 842 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
839 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); 843 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
840 skb_pull(skb, 2); 844 skb_pull(skb, 2);
@@ -853,27 +857,26 @@ lowpan_process_data(struct sk_buff *skb)
853 /* Next Header */ 857 /* Next Header */
854 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { 858 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
855 /* Next header is carried inline */ 859 /* Next header is carried inline */
856 if (!skb->len) 860 if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
857 goto drop; 861 goto drop;
858 hdr.nexthdr = lowpan_fetch_skb_u8(skb); 862
859 pr_debug("(%s): NH flag is set, next header is carried " 863 pr_debug("NH flag is set, next header carried inline: %02x\n",
860 "inline: %02x\n", __func__, hdr.nexthdr); 864 hdr.nexthdr);
861 } 865 }
862 866
863 /* Hop Limit */ 867 /* Hop Limit */
864 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) 868 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
865 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; 869 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
866 else { 870 else {
867 if (!skb->len) 871 if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
868 goto drop; 872 goto drop;
869 hdr.hop_limit = lowpan_fetch_skb_u8(skb);
870 } 873 }
871 874
872 /* Extract SAM to the tmp variable */ 875 /* Extract SAM to the tmp variable */
873 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; 876 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
874 877
875 /* Source address uncompression */ 878 /* Source address uncompression */
876 pr_debug("(%s): source address stateless compression\n", __func__); 879 pr_debug("source address stateless compression\n");
877 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, 880 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
878 lowpan_unc_llconf[tmp], skb->data); 881 lowpan_unc_llconf[tmp], skb->data);
879 if (err) 882 if (err)
@@ -885,19 +888,15 @@ lowpan_process_data(struct sk_buff *skb)
885 /* check for Multicast Compression */ 888 /* check for Multicast Compression */
886 if (iphc1 & LOWPAN_IPHC_M) { 889 if (iphc1 & LOWPAN_IPHC_M) {
887 if (iphc1 & LOWPAN_IPHC_DAC) { 890 if (iphc1 & LOWPAN_IPHC_DAC) {
888 pr_debug("(%s): destination address context-based " 891 pr_debug("dest: context-based mcast compression\n");
889 "multicast compression\n", __func__);
890 /* TODO: implement this */ 892 /* TODO: implement this */
891 } else { 893 } else {
892 u8 prefix[] = {0xff, 0x02}; 894 u8 prefix[] = {0xff, 0x02};
893 895
894 pr_debug("(%s): destination address non-context-based" 896 pr_debug("dest: non context-based mcast compression\n");
895 " multicast compression\n", __func__);
896 if (0 < tmp && tmp < 3) { 897 if (0 < tmp && tmp < 3) {
897 if (!skb->len) 898 if (lowpan_fetch_skb_u8(skb, &prefix[1]))
898 goto drop; 899 goto drop;
899 else
900 prefix[1] = lowpan_fetch_skb_u8(skb);
901 } 900 }
902 901
903 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, 902 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
@@ -906,8 +905,7 @@ lowpan_process_data(struct sk_buff *skb)
906 goto drop; 905 goto drop;
907 } 906 }
908 } else { 907 } else {
909 pr_debug("(%s): destination address stateless compression\n", 908 pr_debug("dest: stateless compression\n");
910 __func__);
911 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, 909 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
912 lowpan_unc_llconf[tmp], skb->data); 910 lowpan_unc_llconf[tmp], skb->data);
913 if (err) 911 if (err)
@@ -922,11 +920,11 @@ lowpan_process_data(struct sk_buff *skb)
922 /* Not fragmented packet */ 920 /* Not fragmented packet */
923 hdr.payload_len = htons(skb->len); 921 hdr.payload_len = htons(skb->len);
924 922
925 pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__, 923 pr_debug("skb headroom size = %d, data length = %d\n",
926 skb_headroom(skb), skb->len); 924 skb_headroom(skb), skb->len);
927 925
928 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t" 926 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
929 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version, 927 "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
930 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit); 928 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
931 929
932 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, 930 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
@@ -934,12 +932,25 @@ lowpan_process_data(struct sk_buff *skb)
934 return lowpan_skb_deliver(skb, &hdr); 932 return lowpan_skb_deliver(skb, &hdr);
935 933
936unlock_and_drop: 934unlock_and_drop:
937 spin_unlock(&flist_lock); 935 spin_unlock_bh(&flist_lock);
938drop: 936drop:
939 kfree_skb(skb); 937 kfree_skb(skb);
940 return -EINVAL; 938 return -EINVAL;
941} 939}
942 940
941static int lowpan_set_address(struct net_device *dev, void *p)
942{
943 struct sockaddr *sa = p;
944
945 if (netif_running(dev))
946 return -EBUSY;
947
948 /* TODO: validate addr */
949 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
950
951 return 0;
952}
953
943static int lowpan_get_mac_header_length(struct sk_buff *skb) 954static int lowpan_get_mac_header_length(struct sk_buff *skb)
944{ 955{
945 /* 956 /*
@@ -997,10 +1008,10 @@ lowpan_skb_fragmentation(struct sk_buff *skb)
997 tag = fragment_tag++; 1008 tag = fragment_tag++;
998 1009
999 /* first fragment header */ 1010 /* first fragment header */
1000 head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7); 1011 head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
1001 head[1] = (payload_length >> 3) & 0xff; 1012 head[1] = payload_length & 0xff;
1002 head[2] = tag & 0xff; 1013 head[2] = tag >> 8;
1003 head[3] = tag >> 8; 1014 head[3] = tag & 0xff;
1004 1015
1005 err = lowpan_fragment_xmit(skb, head, header_length, 0, 0); 1016 err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
1006 1017
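On the transmit side the same example yields the RFC 4944 byte layout, big-endian for both the 11-bit length and the 16-bit tag (the previous code put the low length bits in the dispatch byte and emitted the tag byte-swapped):

    /* payload_length = 0x2ab, tag = 0x1234 */
    head[0] = LOWPAN_DISPATCH_FRAG1 | 0x02; /* (0x2ab >> 8) & 0x7 */
    head[1] = 0xab;                         /* 0x2ab & 0xff */
    head[2] = 0x12;                         /* tag >> 8 */
    head[3] = 0x34;                         /* tag & 0xff */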
@@ -1028,11 +1039,11 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1028{ 1039{
1029 int err = -1; 1040 int err = -1;
1030 1041
1031 pr_debug("(%s): package xmit\n", __func__); 1042 pr_debug("package xmit\n");
1032 1043
1033 skb->dev = lowpan_dev_info(dev)->real_dev; 1044 skb->dev = lowpan_dev_info(dev)->real_dev;
1034 if (skb->dev == NULL) { 1045 if (skb->dev == NULL) {
1035 pr_debug("(%s) ERROR: no real wpan device found\n", __func__); 1046 pr_debug("ERROR: no real wpan device found\n");
1036 goto error; 1047 goto error;
1037 } 1048 }
1038 1049
@@ -1041,14 +1052,13 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1041 goto out; 1052 goto out;
1042 } 1053 }
1043 1054
1044 pr_debug("(%s): frame is too big, fragmentation is needed\n", 1055 pr_debug("frame is too big, fragmentation is needed\n");
1045 __func__);
1046 err = lowpan_skb_fragmentation(skb); 1056 err = lowpan_skb_fragmentation(skb);
1047error: 1057error:
1048 dev_kfree_skb(skb); 1058 dev_kfree_skb(skb);
1049out: 1059out:
1050 if (err < 0) 1060 if (err < 0)
1051 pr_debug("(%s): ERROR: xmit failed\n", __func__); 1061 pr_debug("ERROR: xmit failed\n");
1052 1062
1053 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 1063 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
1054} 1064}
@@ -1083,7 +1093,7 @@ static struct header_ops lowpan_header_ops = {
1083 1093
1084static const struct net_device_ops lowpan_netdev_ops = { 1094static const struct net_device_ops lowpan_netdev_ops = {
1085 .ndo_start_xmit = lowpan_xmit, 1095 .ndo_start_xmit = lowpan_xmit,
1086 .ndo_set_mac_address = eth_mac_addr, 1096 .ndo_set_mac_address = lowpan_set_address,
1087}; 1097};
1088 1098
1089static struct ieee802154_mlme_ops lowpan_mlme = { 1099static struct ieee802154_mlme_ops lowpan_mlme = {
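eth_mac_addr() validates a 6-byte Ethernet address, so it could never accept the 8-byte IEEE 802.15.4 addresses this device uses; lowpan_set_address() copies dev->addr_len bytes instead. For context, a SIOCSIFHWADDR ioctl reaches it roughly as follows (schematic, not verbatim dev_set_mac_address()):

    err = dev->netdev_ops->ndo_set_mac_address(dev, &ifr->ifr_hwaddr);
    /* -> lowpan_set_address(): -EBUSY while the interface is running,
     *    otherwise memcpy of IEEE802154_ADDR_LEN (8) bytes */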
@@ -1094,8 +1104,6 @@ static struct ieee802154_mlme_ops lowpan_mlme = {
1094 1104
1095static void lowpan_setup(struct net_device *dev) 1105static void lowpan_setup(struct net_device *dev)
1096{ 1106{
1097 pr_debug("(%s)\n", __func__);
1098
1099 dev->addr_len = IEEE802154_ADDR_LEN; 1107 dev->addr_len = IEEE802154_ADDR_LEN;
1100 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); 1108 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
1101 dev->type = ARPHRD_IEEE802154; 1109 dev->type = ARPHRD_IEEE802154;
@@ -1115,8 +1123,6 @@ static void lowpan_setup(struct net_device *dev)
1115 1123
1116static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) 1124static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
1117{ 1125{
1118 pr_debug("(%s)\n", __func__);
1119
1120 if (tb[IFLA_ADDRESS]) { 1126 if (tb[IFLA_ADDRESS]) {
1121 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) 1127 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
1122 return -EINVAL; 1128 return -EINVAL;
@@ -1157,7 +1163,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1157 struct net_device *real_dev; 1163 struct net_device *real_dev;
1158 struct lowpan_dev_record *entry; 1164 struct lowpan_dev_record *entry;
1159 1165
1160 pr_debug("(%s)\n", __func__); 1166 pr_debug("adding new link\n");
1161 1167
1162 if (!tb[IFLA_LINK]) 1168 if (!tb[IFLA_LINK])
1163 return -EINVAL; 1169 return -EINVAL;
@@ -1183,8 +1189,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1183 list_add_tail(&entry->list, &lowpan_devices); 1189 list_add_tail(&entry->list, &lowpan_devices);
1184 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); 1190 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
1185 1191
1186 spin_lock_init(&flist_lock);
1187
1188 register_netdevice(dev); 1192 register_netdevice(dev);
1189 1193
1190 return 0; 1194 return 0;
@@ -1195,19 +1199,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
1195 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); 1199 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
1196 struct net_device *real_dev = lowpan_dev->real_dev; 1200 struct net_device *real_dev = lowpan_dev->real_dev;
1197 struct lowpan_dev_record *entry, *tmp; 1201 struct lowpan_dev_record *entry, *tmp;
1198 struct lowpan_fragment *frame, *tframe;
1199 1202
1200 ASSERT_RTNL(); 1203 ASSERT_RTNL();
1201 1204
1202 spin_lock(&flist_lock);
1203 list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
1204 del_timer(&frame->timer);
1205 list_del(&frame->list);
1206 dev_kfree_skb(frame->skb);
1207 kfree(frame);
1208 }
1209 spin_unlock(&flist_lock);
1210
1211 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1205 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1212 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { 1206 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1213 if (entry->ldev == dev) { 1207 if (entry->ldev == dev) {
@@ -1252,8 +1246,6 @@ static int __init lowpan_init_module(void)
1252{ 1246{
1253 int err = 0; 1247 int err = 0;
1254 1248
1255 pr_debug("(%s)\n", __func__);
1256
1257 err = lowpan_netlink_init(); 1249 err = lowpan_netlink_init();
1258 if (err < 0) 1250 if (err < 0)
1259 goto out; 1251 goto out;
@@ -1265,11 +1257,24 @@ out:
1265 1257
1266static void __exit lowpan_cleanup_module(void) 1258static void __exit lowpan_cleanup_module(void)
1267{ 1259{
1268 pr_debug("(%s)\n", __func__); 1260 struct lowpan_fragment *frame, *tframe;
1269 1261
1270 lowpan_netlink_fini(); 1262 lowpan_netlink_fini();
1271 1263
1272 dev_remove_pack(&lowpan_packet_type); 1264 dev_remove_pack(&lowpan_packet_type);
1265
1266 /* Now 6lowpan packet_type is removed, so no new fragments are
1267 * expected on RX, therefore that's the time to clean incomplete
1268 * fragments.
1269 */
1270 spin_lock_bh(&flist_lock);
1271 list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
1272 del_timer_sync(&frame->timer);
1273 list_del(&frame->list);
1274 dev_kfree_skb(frame->skb);
1275 kfree(frame);
1276 }
1277 spin_unlock_bh(&flist_lock);
1273} 1278}
1274 1279
1275module_init(lowpan_init_module); 1280module_init(lowpan_init_module);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
230 mtu = dev->mtu; 230 mtu = dev->mtu;
231 pr_debug("name = %s, mtu = %u\n", dev->name, mtu); 231 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
232 232
233 if (size > mtu) {
234 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
235 err = -EINVAL;
236 goto out_dev;
237 }
238
233 hlen = LL_RESERVED_SPACE(dev); 239 hlen = LL_RESERVED_SPACE(dev);
234 tlen = dev->needed_tailroom; 240 tlen = dev->needed_tailroom;
235 skb = sock_alloc_send_skb(sk, hlen + tlen + size, 241 skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
258 if (err < 0) 264 if (err < 0)
259 goto out_skb; 265 goto out_skb;
260 266
261 if (size > mtu) {
262 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
263 err = -EINVAL;
264 goto out_skb;
265 }
266
267 skb->dev = dev; 267 skb->dev = dev;
268 skb->sk = sk; 268 skb->sk = sk;
269 skb->protocol = htons(ETH_P_IEEE802154); 269 skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index c8097ae2482f..97351e1d07a4 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -44,7 +44,7 @@ struct genl_family nl802154_family = {
44struct sk_buff *ieee802154_nl_create(int flags, u8 req) 44struct sk_buff *ieee802154_nl_create(int flags, u8 req)
45{ 45{
46 void *hdr; 46 void *hdr;
47 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 47 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
48 unsigned long f; 48 unsigned long f;
49 49
50 if (!msg) 50 if (!msg)
@@ -80,7 +80,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
80 int flags, u8 req) 80 int flags, u8 req)
81{ 81{
82 void *hdr; 82 void *hdr;
83 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 83 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
84 84
85 if (!msg) 85 if (!msg)
86 return NULL; 86 return NULL;
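The size change is subtler than it looks: nlmsg_new(payload, flags) allocates payload plus a netlink header, so passing NLMSG_GOODSIZE (already a full page's worth) overshot one page, while NLMSG_DEFAULT_SIZE leaves room for the header. Paraphrasing the include/linux/netlink.h definitions (sketch, small-page case):

    #define NLMSG_GOODSIZE          SKB_WITH_OVERHEAD(PAGE_SIZE)
    #define NLMSG_DEFAULT_SIZE      (NLMSG_GOODSIZE - NLMSG_HDRLEN)

    /* nlmsg_new(NLMSG_DEFAULT_SIZE, ...) therefore yields a message
     * buffer that, header included, still fits in a single page. */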
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index ca92587720f4..1e9917124e75 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -530,7 +530,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
530 if (!dev) 530 if (!dev)
531 return -ENODEV; 531 return -ENODEV;
532 532
533 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 533 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
534 if (!msg) 534 if (!msg)
535 goto out_dev; 535 goto out_dev;
536 536
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index eed291626da6..d54be34cca94 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -101,7 +101,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
101 if (!phy) 101 if (!phy)
102 return -ENODEV; 102 return -ENODEV;
103 103
104 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 104 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
105 if (!msg) 105 if (!msg)
106 goto out_dev; 106 goto out_dev;
107 107
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 20f1cb5c8aba..5a19aeb86094 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -310,6 +310,17 @@ config SYN_COOKIES
310 310
311 If unsure, say N. 311 If unsure, say N.
312 312
313config NET_IPVTI
314 tristate "Virtual (secure) IP: tunneling"
315 select INET_TUNNEL
316 depends on INET_XFRM_MODE_TUNNEL
317 ---help---
318 Tunneling means encapsulating data of one protocol type within
319 another protocol and sending it over a channel that understands the
320 encapsulating protocol. This can be used with the xfrm tunnel mode to
321 provide a secure tunnel for IPsec and then run a routing protocol on
322 top of it.
323
313config INET_AH 324config INET_AH
314 tristate "IP: AH transformation" 325 tristate "IP: AH transformation"
315 select XFRM_ALGO 326 select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6a..15ca63ec604e 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -7,7 +7,7 @@ obj-y := route.o inetpeer.o protocol.o \
7 ip_output.o ip_sockglue.o inet_hashtables.o \ 7 ip_output.o ip_sockglue.o inet_hashtables.o \
8 inet_timewait_sock.o inet_connection_sock.o \ 8 inet_timewait_sock.o inet_connection_sock.o \
9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ 9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
10 tcp_minisocks.o tcp_cong.o \ 10 tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
11 datagram.o raw.o udp.o udplite.o \ 11 datagram.o raw.o udp.o udplite.o \
12 arp.o icmp.o devinet.o af_inet.o igmp.o \ 12 arp.o icmp.o devinet.o af_inet.o igmp.o \
13 fib_frontend.o fib_semantics.o fib_trie.o \ 13 fib_frontend.o fib_semantics.o fib_trie.o \
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
20obj-$(CONFIG_NET_IPIP) += ipip.o 20obj-$(CONFIG_NET_IPIP) += ipip.o
21obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o 21obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
22obj-$(CONFIG_NET_IPGRE) += ip_gre.o 22obj-$(CONFIG_NET_IPGRE) += ip_gre.o
23obj-$(CONFIG_NET_IPVTI) += ip_vti.o
23obj-$(CONFIG_SYN_COOKIES) += syncookies.o 24obj-$(CONFIG_SYN_COOKIES) += syncookies.o
24obj-$(CONFIG_INET_AH) += ah4.o 25obj-$(CONFIG_INET_AH) += ah4.o
25obj-$(CONFIG_INET_ESP) += esp4.o 26obj-$(CONFIG_INET_ESP) += esp4.o
@@ -48,7 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
48obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o 49obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
49obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o 50obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
50obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o 51obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
51obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o 52obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
52obj-$(CONFIG_NETLABEL) += cipso_ipv4.o 53obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
53 54
54obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ 55obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c8f7aee587d1..fe4582ca969a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -157,6 +157,7 @@ void inet_sock_destruct(struct sock *sk)
157 157
158 kfree(rcu_dereference_protected(inet->inet_opt, 1)); 158 kfree(rcu_dereference_protected(inet->inet_opt, 1));
159 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); 159 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
160 dst_release(sk->sk_rx_dst);
160 sk_refcnt_debug_dec(sk); 161 sk_refcnt_debug_dec(sk);
161} 162}
162EXPORT_SYMBOL(inet_sock_destruct); 163EXPORT_SYMBOL(inet_sock_destruct);
@@ -242,20 +243,18 @@ void build_ehash_secret(void)
242} 243}
243EXPORT_SYMBOL(build_ehash_secret); 244EXPORT_SYMBOL(build_ehash_secret);
244 245
245static inline int inet_netns_ok(struct net *net, int protocol) 246static inline int inet_netns_ok(struct net *net, __u8 protocol)
246{ 247{
247 int hash;
248 const struct net_protocol *ipprot; 248 const struct net_protocol *ipprot;
249 249
250 if (net_eq(net, &init_net)) 250 if (net_eq(net, &init_net))
251 return 1; 251 return 1;
252 252
253 hash = protocol & (MAX_INET_PROTOS - 1); 253 ipprot = rcu_dereference(inet_protos[protocol]);
254 ipprot = rcu_dereference(inet_protos[hash]); 254 if (ipprot == NULL) {
255
256 if (ipprot == NULL)
257 /* raw IP is OK */ 255 /* raw IP is OK */
258 return 1; 256 return 1;
257 }
259 return ipprot->netns_ok; 258 return ipprot->netns_ok;
260} 259}
261 260
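The dropped masking reflects that inet_protos[] is now a full 256-slot table indexed directly by the protocol number; with the parameter narrowed to __u8, no value can fall out of range. The assumption, sketched:

    /* one slot per possible IPv4 protocol number */
    const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];

    /* with MAX_INET_PROTOS == 256 and protocol a __u8,
     * "protocol & (MAX_INET_PROTOS - 1)" is a no-op and was removed;
     * the same simplification recurs in the GSO/GRO hunks below. */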
@@ -553,15 +552,16 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
553 552
554 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 553 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
555 return -EAGAIN; 554 return -EAGAIN;
556 return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); 555 return sk->sk_prot->connect(sk, uaddr, addr_len);
557} 556}
558EXPORT_SYMBOL(inet_dgram_connect); 557EXPORT_SYMBOL(inet_dgram_connect);
559 558
560static long inet_wait_for_connect(struct sock *sk, long timeo) 559static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
561{ 560{
562 DEFINE_WAIT(wait); 561 DEFINE_WAIT(wait);
563 562
564 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 563 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
564 sk->sk_write_pending += writebias;
565 565
566 /* Basic assumption: if someone sets sk->sk_err, he _must_ 566 /* Basic assumption: if someone sets sk->sk_err, he _must_
567 * change state of the socket from TCP_SYN_*. 567 * change state of the socket from TCP_SYN_*.
@@ -577,6 +577,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
577 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 577 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
578 } 578 }
579 finish_wait(sk_sleep(sk), &wait); 579 finish_wait(sk_sleep(sk), &wait);
580 sk->sk_write_pending -= writebias;
580 return timeo; 581 return timeo;
581} 582}
582 583
@@ -584,8 +585,8 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
584 * Connect to a remote host. There is regrettably still a little 585 * Connect to a remote host. There is regrettably still a little
585 * TCP 'magic' in here. 586 * TCP 'magic' in here.
586 */ 587 */
587int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, 588int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
588 int addr_len, int flags) 589 int addr_len, int flags)
589{ 590{
590 struct sock *sk = sock->sk; 591 struct sock *sk = sock->sk;
591 int err; 592 int err;
@@ -594,8 +595,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
594 if (addr_len < sizeof(uaddr->sa_family)) 595 if (addr_len < sizeof(uaddr->sa_family))
595 return -EINVAL; 596 return -EINVAL;
596 597
597 lock_sock(sk);
598
599 if (uaddr->sa_family == AF_UNSPEC) { 598 if (uaddr->sa_family == AF_UNSPEC) {
600 err = sk->sk_prot->disconnect(sk, flags); 599 err = sk->sk_prot->disconnect(sk, flags);
601 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; 600 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -635,8 +634,12 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
635 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 634 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
636 635
637 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 636 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
637 int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
638 tcp_sk(sk)->fastopen_req &&
639 tcp_sk(sk)->fastopen_req->data ? 1 : 0;
640
638 /* Error code is set above */ 641 /* Error code is set above */
639 if (!timeo || !inet_wait_for_connect(sk, timeo)) 642 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
640 goto out; 643 goto out;
641 644
642 err = sock_intr_errno(timeo); 645 err = sock_intr_errno(timeo);
@@ -658,7 +661,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
658 sock->state = SS_CONNECTED; 661 sock->state = SS_CONNECTED;
659 err = 0; 662 err = 0;
660out: 663out:
661 release_sock(sk);
662 return err; 664 return err;
663 665
664sock_error: 666sock_error:
@@ -668,6 +670,18 @@ sock_error:
668 sock->state = SS_DISCONNECTING; 670 sock->state = SS_DISCONNECTING;
669 goto out; 671 goto out;
670} 672}
673EXPORT_SYMBOL(__inet_stream_connect);
674
675int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
676 int addr_len, int flags)
677{
678 int err;
679
680 lock_sock(sock->sk);
681 err = __inet_stream_connect(sock, uaddr, addr_len, flags);
682 release_sock(sock->sk);
683 return err;
684}
671EXPORT_SYMBOL(inet_stream_connect); 685EXPORT_SYMBOL(inet_stream_connect);
672 686
673/* 687/*
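The split gives lock-holding callers a way to drive the connect state machine: inet_stream_connect() keeps the old locking contract, while __inet_stream_connect() assumes the caller already owns the socket lock, which the TCP Fast Open sendmsg() path added by this series relies on. Schematically, such a caller does:

    lock_sock(sk);
    /* ... stash pending fastopen data on the socket ... */
    err = __inet_stream_connect(sk->sk_socket, uaddr, addr_len, flags);
    /* ... */
    release_sock(sk);

The writebias argument threaded into inet_wait_for_connect() keeps sk->sk_write_pending raised while such a connect sleeps, telling the receive path that a sender is blocked with queued data.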
@@ -1216,8 +1230,8 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1216 1230
1217static int inet_gso_send_check(struct sk_buff *skb) 1231static int inet_gso_send_check(struct sk_buff *skb)
1218{ 1232{
1219 const struct iphdr *iph;
1220 const struct net_protocol *ops; 1233 const struct net_protocol *ops;
1234 const struct iphdr *iph;
1221 int proto; 1235 int proto;
1222 int ihl; 1236 int ihl;
1223 int err = -EINVAL; 1237 int err = -EINVAL;
@@ -1236,7 +1250,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
1236 __skb_pull(skb, ihl); 1250 __skb_pull(skb, ihl);
1237 skb_reset_transport_header(skb); 1251 skb_reset_transport_header(skb);
1238 iph = ip_hdr(skb); 1252 iph = ip_hdr(skb);
1239 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1253 proto = iph->protocol;
1240 err = -EPROTONOSUPPORT; 1254 err = -EPROTONOSUPPORT;
1241 1255
1242 rcu_read_lock(); 1256 rcu_read_lock();
@@ -1253,8 +1267,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1253 netdev_features_t features) 1267 netdev_features_t features)
1254{ 1268{
1255 struct sk_buff *segs = ERR_PTR(-EINVAL); 1269 struct sk_buff *segs = ERR_PTR(-EINVAL);
1256 struct iphdr *iph;
1257 const struct net_protocol *ops; 1270 const struct net_protocol *ops;
1271 struct iphdr *iph;
1258 int proto; 1272 int proto;
1259 int ihl; 1273 int ihl;
1260 int id; 1274 int id;
@@ -1286,7 +1300,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1286 skb_reset_transport_header(skb); 1300 skb_reset_transport_header(skb);
1287 iph = ip_hdr(skb); 1301 iph = ip_hdr(skb);
1288 id = ntohs(iph->id); 1302 id = ntohs(iph->id);
1289 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1303 proto = iph->protocol;
1290 segs = ERR_PTR(-EPROTONOSUPPORT); 1304 segs = ERR_PTR(-EPROTONOSUPPORT);
1291 1305
1292 rcu_read_lock(); 1306 rcu_read_lock();
@@ -1340,7 +1354,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1340 goto out; 1354 goto out;
1341 } 1355 }
1342 1356
1343 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1357 proto = iph->protocol;
1344 1358
1345 rcu_read_lock(); 1359 rcu_read_lock();
1346 ops = rcu_dereference(inet_protos[proto]); 1360 ops = rcu_dereference(inet_protos[proto]);
@@ -1398,11 +1412,11 @@ out:
1398 1412
1399static int inet_gro_complete(struct sk_buff *skb) 1413static int inet_gro_complete(struct sk_buff *skb)
1400{ 1414{
1401 const struct net_protocol *ops; 1415 __be16 newlen = htons(skb->len - skb_network_offset(skb));
1402 struct iphdr *iph = ip_hdr(skb); 1416 struct iphdr *iph = ip_hdr(skb);
1403 int proto = iph->protocol & (MAX_INET_PROTOS - 1); 1417 const struct net_protocol *ops;
1418 int proto = iph->protocol;
1404 int err = -ENOSYS; 1419 int err = -ENOSYS;
1405 __be16 newlen = htons(skb->len - skb_network_offset(skb));
1406 1420
1407 csum_replace2(&iph->check, iph->tot_len, newlen); 1421 csum_replace2(&iph->check, iph->tot_len, newlen);
1408 iph->tot_len = newlen; 1422 iph->tot_len = newlen;
@@ -1520,14 +1534,15 @@ static const struct net_protocol igmp_protocol = {
1520#endif 1534#endif
1521 1535
1522static const struct net_protocol tcp_protocol = { 1536static const struct net_protocol tcp_protocol = {
1523 .handler = tcp_v4_rcv, 1537 .early_demux = tcp_v4_early_demux,
1524 .err_handler = tcp_v4_err, 1538 .handler = tcp_v4_rcv,
1525 .gso_send_check = tcp_v4_gso_send_check, 1539 .err_handler = tcp_v4_err,
1526 .gso_segment = tcp_tso_segment, 1540 .gso_send_check = tcp_v4_gso_send_check,
1527 .gro_receive = tcp4_gro_receive, 1541 .gso_segment = tcp_tso_segment,
1528 .gro_complete = tcp4_gro_complete, 1542 .gro_receive = tcp4_gro_receive,
1529 .no_policy = 1, 1543 .gro_complete = tcp4_gro_complete,
1530 .netns_ok = 1, 1544 .no_policy = 1,
1545 .netns_ok = 1,
1531}; 1546};
1532 1547
1533static const struct net_protocol udp_protocol = { 1548static const struct net_protocol udp_protocol = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e8f2617ecd47..a0d8392491c3 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -398,16 +398,25 @@ static void ah4_err(struct sk_buff *skb, u32 info)
398 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); 398 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
399 struct xfrm_state *x; 399 struct xfrm_state *x;
400 400
401 if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || 401 switch (icmp_hdr(skb)->type) {
402 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 402 case ICMP_DEST_UNREACH:
403 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
404 return;
405 case ICMP_REDIRECT:
406 break;
407 default:
403 return; 408 return;
409 }
404 410
405 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 411 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
406 ah->spi, IPPROTO_AH, AF_INET); 412 ah->spi, IPPROTO_AH, AF_INET);
407 if (!x) 413 if (!x)
408 return; 414 return;
409 pr_debug("pmtu discovery on SA AH/%08x/%08x\n", 415
410 ntohl(ah->spi), ntohl(iph->daddr)); 416 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
417 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
418 else
419 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
411 xfrm_state_put(x); 420 xfrm_state_put(x);
412} 421}
413 422
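The switch relies on a deliberate fall-through: ICMP_DEST_UNREACH with code ICMP_FRAG_NEEDED falls into the ICMP_REDIRECT case's break, so exactly two message types reach the state lookup before being dispatched to the matching helper (the esp4_err() hunk below applies the same pattern). Equivalent straight-line logic:

    int type = icmp_hdr(skb)->type;

    if (type != ICMP_REDIRECT &&
        !(type == ICMP_DEST_UNREACH &&
          icmp_hdr(skb)->code == ICMP_FRAG_NEEDED))
            return;

    /* then: ipv4_update_pmtu() for DEST_UNREACH,
     *       ipv4_redirect() for REDIRECT */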
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cda37be02f8d..77e87aff419a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -475,8 +475,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
475 return 1; 475 return 1;
476 } 476 }
477 477
478 paddr = skb_rtable(skb)->rt_gateway; 478 paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);
479
480 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, 479 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
481 paddr, dev)) 480 paddr, dev))
482 return 0; 481 return 0;
@@ -790,7 +789,8 @@ static int arp_process(struct sk_buff *skb)
790 * Check for bad requests for 127.x.x.x and requests for multicast 789 * Check for bad requests for 127.x.x.x and requests for multicast
791 * addresses. If this is one such, delete it. 790 * addresses. If this is one such, delete it.
792 */ 791 */
793 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) 792 if (ipv4_is_multicast(tip) ||
793 (!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
794 goto out; 794 goto out;
795 795
796/* 796/*
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c48adc565e92..667c1d4ca984 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
1725 case CIPSO_V4_TAG_LOCAL: 1725 case CIPSO_V4_TAG_LOCAL:
1726 /* This is a non-standard tag that we only allow for 1726 /* This is a non-standard tag that we only allow for
1727 * local connections, so if the incoming interface is 1727 * local connections, so if the incoming interface is
1728 * not the loopback device drop the packet. */ 1728 * not the loopback device drop the packet. Further,
1729 if (!(skb->dev->flags & IFF_LOOPBACK)) { 1729 * there is no legitimate reason for setting this from
1730 * userspace so reject it if skb is NULL. */
1731 if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
1730 err_offset = opt_iter; 1732 err_offset = opt_iter;
1731 goto validate_return_locked; 1733 goto validate_return_locked;
1732 } 1734 }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 10e15a144e95..44bf82e3aef7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1500,7 +1500,8 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
1500 1500
1501 if (cnf == net->ipv4.devconf_dflt) 1501 if (cnf == net->ipv4.devconf_dflt)
1502 devinet_copy_dflt_conf(net, i); 1502 devinet_copy_dflt_conf(net, i);
1503 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1) 1503 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
1504 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
1504 if ((new_value == 0) && (old_value != 0)) 1505 if ((new_value == 0) && (old_value != 0))
1505 rt_cache_flush(net, 0); 1506 rt_cache_flush(net, 0);
1506 } 1507 }
@@ -1617,6 +1618,8 @@ static struct devinet_sysctl_table {
1617 "force_igmp_version"), 1618 "force_igmp_version"),
1618 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES, 1619 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
1619 "promote_secondaries"), 1620 "promote_secondaries"),
1621 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
1622 "route_localnet"),
1620 }, 1623 },
1621}; 1624};
1622 1625
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cb982a61536f..b61e9deb7c7e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -484,16 +484,25 @@ static void esp4_err(struct sk_buff *skb, u32 info)
484 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 484 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
485 struct xfrm_state *x; 485 struct xfrm_state *x;
486 486
487 if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || 487 switch (icmp_hdr(skb)->type) {
488 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 488 case ICMP_DEST_UNREACH:
489 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
490 return;
491 case ICMP_REDIRECT:
492 break;
493 default:
489 return; 494 return;
495 }
490 496
491 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 497 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
492 esph->spi, IPPROTO_ESP, AF_INET); 498 esph->spi, IPPROTO_ESP, AF_INET);
493 if (!x) 499 if (!x)
494 return; 500 return;
495 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 501
496 ntohl(esph->spi), ntohl(iph->daddr)); 502 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
503 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
504 else
505 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
497 xfrm_state_put(x); 506 xfrm_state_put(x);
498} 507}
499 508
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3854411fa37c..c43ae3fba792 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -31,6 +31,7 @@
31#include <linux/if_addr.h> 31#include <linux/if_addr.h>
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/cache.h>
34#include <linux/init.h> 35#include <linux/init.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
@@ -85,6 +86,24 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
85 tb = fib_trie_table(id); 86 tb = fib_trie_table(id);
86 if (!tb) 87 if (!tb)
87 return NULL; 88 return NULL;
89
90 switch (id) {
91 case RT_TABLE_LOCAL:
92 net->ipv4.fib_local = tb;
93 break;
94
95 case RT_TABLE_MAIN:
96 net->ipv4.fib_main = tb;
97 break;
98
99 case RT_TABLE_DEFAULT:
100 net->ipv4.fib_default = tb;
101 break;
102
103 default:
104 break;
105 }
106
88 h = id & (FIB_TABLE_HASHSZ - 1); 107 h = id & (FIB_TABLE_HASHSZ - 1);
89 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); 108 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
90 return tb; 109 return tb;
@@ -150,10 +169,6 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
150 if (ipv4_is_multicast(addr)) 169 if (ipv4_is_multicast(addr))
151 return RTN_MULTICAST; 170 return RTN_MULTICAST;
152 171
153#ifdef CONFIG_IP_MULTIPLE_TABLES
154 res.r = NULL;
155#endif
156
157 local_table = fib_get_table(net, RT_TABLE_LOCAL); 172 local_table = fib_get_table(net, RT_TABLE_LOCAL);
158 if (local_table) { 173 if (local_table) {
159 ret = RTN_UNICAST; 174 ret = RTN_UNICAST;
@@ -180,6 +195,44 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
180} 195}
181EXPORT_SYMBOL(inet_dev_addr_type); 196EXPORT_SYMBOL(inet_dev_addr_type);
182 197
198__be32 fib_compute_spec_dst(struct sk_buff *skb)
199{
200 struct net_device *dev = skb->dev;
201 struct in_device *in_dev;
202 struct fib_result res;
203 struct rtable *rt;
204 struct flowi4 fl4;
205 struct net *net;
206 int scope;
207
208 rt = skb_rtable(skb);
209 if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
210 RTCF_LOCAL)
211 return ip_hdr(skb)->daddr;
212
213 in_dev = __in_dev_get_rcu(dev);
214 BUG_ON(!in_dev);
215
216 net = dev_net(dev);
217
218 scope = RT_SCOPE_UNIVERSE;
219 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
220 fl4.flowi4_oif = 0;
221 fl4.flowi4_iif = net->loopback_dev->ifindex;
222 fl4.daddr = ip_hdr(skb)->saddr;
223 fl4.saddr = 0;
224 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
225 fl4.flowi4_scope = scope;
226 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
227 if (!fib_lookup(net, &fl4, &res))
228 return FIB_RES_PREFSRC(net, res);
229 } else {
230 scope = RT_SCOPE_LINK;
231 }
232
233 return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
234}
235
183/* Given (packet source, input interface) and optional (dst, oif, tos): 236/* Given (packet source, input interface) and optional (dst, oif, tos):
184 * - (main) check, that source is valid i.e. not broadcast or our local 237 * - (main) check, that source is valid i.e. not broadcast or our local
185 * address. 238 * address.
@@ -188,17 +241,15 @@ EXPORT_SYMBOL(inet_dev_addr_type);
188 * - check, that packet arrived from expected physical interface. 241 * - check, that packet arrived from expected physical interface.
189 * called with rcu_read_lock() 242 * called with rcu_read_lock()
190 */ 243 */
191int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, 244static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
192 int oif, struct net_device *dev, __be32 *spec_dst, 245 u8 tos, int oif, struct net_device *dev,
193 u32 *itag) 246 int rpf, struct in_device *idev, u32 *itag)
194{ 247{
195 struct in_device *in_dev; 248 int ret, no_addr, accept_local;
196 struct flowi4 fl4;
197 struct fib_result res; 249 struct fib_result res;
198 int no_addr, rpf, accept_local; 250 struct flowi4 fl4;
199 bool dev_match;
200 int ret;
201 struct net *net; 251 struct net *net;
252 bool dev_match;
202 253
203 fl4.flowi4_oif = 0; 254 fl4.flowi4_oif = 0;
204 fl4.flowi4_iif = oif; 255 fl4.flowi4_iif = oif;
@@ -207,20 +258,10 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
207 fl4.flowi4_tos = tos; 258 fl4.flowi4_tos = tos;
208 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 259 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
209 260
210 no_addr = rpf = accept_local = 0; 261 no_addr = idev->ifa_list == NULL;
211 in_dev = __in_dev_get_rcu(dev);
212 if (in_dev) {
213 no_addr = in_dev->ifa_list == NULL;
214
215 /* Ignore rp_filter for packets protected by IPsec. */
216 rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
217
218 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
219 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
220 }
221 262
222 if (in_dev == NULL) 263 accept_local = IN_DEV_ACCEPT_LOCAL(idev);
223 goto e_inval; 264 fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
224 265
225 net = dev_net(dev); 266 net = dev_net(dev);
226 if (fib_lookup(net, &fl4, &res)) 267 if (fib_lookup(net, &fl4, &res))
@@ -229,7 +270,6 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
229 if (res.type != RTN_LOCAL || !accept_local) 270 if (res.type != RTN_LOCAL || !accept_local)
230 goto e_inval; 271 goto e_inval;
231 } 272 }
232 *spec_dst = FIB_RES_PREFSRC(net, res);
233 fib_combine_itag(itag, &res); 273 fib_combine_itag(itag, &res);
234 dev_match = false; 274 dev_match = false;
235 275
@@ -258,17 +298,14 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
258 298
259 ret = 0; 299 ret = 0;
260 if (fib_lookup(net, &fl4, &res) == 0) { 300 if (fib_lookup(net, &fl4, &res) == 0) {
261 if (res.type == RTN_UNICAST) { 301 if (res.type == RTN_UNICAST)
262 *spec_dst = FIB_RES_PREFSRC(net, res);
263 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; 302 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
264 }
265 } 303 }
266 return ret; 304 return ret;
267 305
268last_resort: 306last_resort:
269 if (rpf) 307 if (rpf)
270 goto e_rpf; 308 goto e_rpf;
271 *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
272 *itag = 0; 309 *itag = 0;
273 return 0; 310 return 0;
274 311
@@ -278,6 +315,20 @@ e_rpf:
278 return -EXDEV; 315 return -EXDEV;
279} 316}
280 317
318/* Ignore rp_filter for packets protected by IPsec. */
319int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
320 u8 tos, int oif, struct net_device *dev,
321 struct in_device *idev, u32 *itag)
322{
323 int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
324
325 if (!r && !fib_num_tclassid_users(dev_net(dev))) {
326 *itag = 0;
327 return 0;
328 }
329 return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
330}
331
281static inline __be32 sk_extract_addr(struct sockaddr *addr) 332static inline __be32 sk_extract_addr(struct sockaddr *addr)
282{ 333{
283 return ((struct sockaddr_in *) addr)->sin_addr.s_addr; 334 return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
@@ -879,10 +930,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
879 .flowi4_scope = frn->fl_scope, 930 .flowi4_scope = frn->fl_scope,
880 }; 931 };
881 932
882#ifdef CONFIG_IP_MULTIPLE_TABLES
883 res.r = NULL;
884#endif
885
886 frn->err = -ENOENT; 933 frn->err = -ENOENT;
887 if (tb) { 934 if (tb) {
888 local_bh_disable(); 935 local_bh_disable();
@@ -935,8 +982,11 @@ static void nl_fib_input(struct sk_buff *skb)
935static int __net_init nl_fib_lookup_init(struct net *net) 982static int __net_init nl_fib_lookup_init(struct net *net)
936{ 983{
937 struct sock *sk; 984 struct sock *sk;
938 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 985 struct netlink_kernel_cfg cfg = {
939 nl_fib_input, NULL, THIS_MODULE); 986 .input = nl_fib_input,
987 };
988
989 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
940 if (sk == NULL) 990 if (sk == NULL)
941 return -EAFNOSUPPORT; 991 return -EAFNOSUPPORT;
942 net->ipv4.fibnl = sk; 992 net->ipv4.fibnl = sk;
@@ -996,6 +1046,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
996 1046
997 if (event == NETDEV_UNREGISTER) { 1047 if (event == NETDEV_UNREGISTER) {
998 fib_disable_ip(dev, 2, -1); 1048 fib_disable_ip(dev, 2, -1);
1049 rt_flush_dev(dev);
999 return NOTIFY_DONE; 1050 return NOTIFY_DONE;
1000 } 1051 }
1001 1052
@@ -1021,11 +1072,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1021 rt_cache_flush(dev_net(dev), 0); 1072 rt_cache_flush(dev_net(dev), 0);
1022 break; 1073 break;
1023 case NETDEV_UNREGISTER_BATCH: 1074 case NETDEV_UNREGISTER_BATCH:
1024 /* The batch unregister is only called on the first
1025 * device in the list of devices being unregistered.
1026 * Therefore we should not pass dev_net(dev) in here.
1027 */
1028 rt_cache_flush_batch(NULL);
1029 break; 1075 break;
1030 } 1076 }
1031 return NOTIFY_DONE; 1077 return NOTIFY_DONE;
@@ -1090,6 +1136,9 @@ static int __net_init fib_net_init(struct net *net)
 {
 	int error;
 
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	net->ipv4.fib_num_tclassid_users = 0;
+#endif
 	error = ip_fib_net_init(net);
 	if (error < 0)
 		goto out;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2d043f71ef70..a83d74e498d2 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -47,14 +47,7 @@ struct fib4_rule {
 #endif
 };
 
-#ifdef CONFIG_IP_ROUTE_CLASSID
-u32 fib_rules_tclass(const struct fib_result *res)
-{
-	return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
-}
-#endif
-
-int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 {
 	struct fib_lookup_arg arg = {
 		.result = res,
@@ -63,11 +56,15 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 	int err;
 
 	err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
-	res->r = arg.rule;
-
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	if (arg.rule)
+		res->tclassid = ((struct fib4_rule *)arg.rule)->tclassid;
+	else
+		res->tclassid = 0;
+#endif
 	return err;
 }
-EXPORT_SYMBOL_GPL(fib_lookup);
+EXPORT_SYMBOL_GPL(__fib_lookup);
 
 static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
 			    int flags, struct fib_lookup_arg *arg)
@@ -169,8 +166,11 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 		rule4->dst = nla_get_be32(tb[FRA_DST]);
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	if (tb[FRA_FLOW])
+	if (tb[FRA_FLOW]) {
 		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
+		if (rule4->tclassid)
+			net->ipv4.fib_num_tclassid_users++;
+	}
 #endif
 
 	rule4->src_len = frh->src_len;
@@ -179,11 +179,24 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 	rule4->dstmask = inet_make_mask(rule4->dst_len);
 	rule4->tos = frh->tos;
 
+	net->ipv4.fib_has_custom_rules = true;
 	err = 0;
 errout:
 	return err;
 }
 
+static void fib4_rule_delete(struct fib_rule *rule)
+{
+	struct net *net = rule->fr_net;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+
+	if (rule4->tclassid)
+		net->ipv4.fib_num_tclassid_users--;
+#endif
+	net->ipv4.fib_has_custom_rules = true;
+}
+
 static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 			     struct nlattr **tb)
 {
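Editorial note: the configure/delete pair keeps net->ipv4.fib_num_tclassid_users balanced -- every rule carrying a non-zero tclassid increments the counter on insert and decrements it on delete, which is what lets fib_validate_source() short-circuit when the counter reads zero. A toy model of that invariant (hypothetical names):

#include <assert.h>

static int tclassid_users;

static void rule_add(unsigned int tclassid)
{
	if (tclassid)
		tclassid_users++;
}

static void rule_del(unsigned int tclassid)
{
	if (tclassid)
		tclassid_users--;
}

int main(void)
{
	rule_add(7);
	rule_add(0);	/* rules without a classid don't count */
	rule_del(7);
	assert(tclassid_users == 0);
	return 0;
}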
@@ -256,6 +269,7 @@ static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
 	.action		= fib4_rule_action,
 	.match		= fib4_rule_match,
 	.configure	= fib4_rule_configure,
+	.delete		= fib4_rule_delete,
 	.compare	= fib4_rule_compare,
 	.fill		= fib4_rule_fill,
 	.default_pref	= fib_default_rule_pref,
@@ -295,6 +309,7 @@ int __net_init fib4_rules_init(struct net *net)
 	if (err < 0)
 		goto fail;
 	net->ipv4.rules_ops = ops;
+	net->ipv4.fib_has_custom_rules = false;
 	return 0;
 
 fail:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e5b7182fa099..da80dc14cc76 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -140,6 +140,62 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 	},
 };
 
+static void rt_fibinfo_free(struct rtable __rcu **rtp)
+{
+	struct rtable *rt = rcu_dereference_protected(*rtp, 1);
+
+	if (!rt)
+		return;
+
+	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
+	 * because we waited an RCU grace period before calling
+	 * free_fib_info_rcu()
+	 */
+
+	dst_free(&rt->dst);
+}
+
+static void free_nh_exceptions(struct fib_nh *nh)
+{
+	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+	int i;
+
+	for (i = 0; i < FNHE_HASH_SIZE; i++) {
+		struct fib_nh_exception *fnhe;
+
+		fnhe = rcu_dereference_protected(hash[i].chain, 1);
+		while (fnhe) {
+			struct fib_nh_exception *next;
+
+			next = rcu_dereference_protected(fnhe->fnhe_next, 1);
+
+			rt_fibinfo_free(&fnhe->fnhe_rth);
+
+			kfree(fnhe);
+
+			fnhe = next;
+		}
+	}
+	kfree(hash);
+}
+
+static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
+{
+	int cpu;
+
+	if (!rtp)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct rtable *rt;
+
+		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
+		if (rt)
+			dst_free(&rt->dst);
+	}
+	free_percpu(rtp);
+}
+
 /* Release a nexthop info record */
 static void free_fib_info_rcu(struct rcu_head *head)
 {
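Editorial note: free_nh_exceptions() above walks a fixed-size bucket array, and each bucket holds a singly linked chain whose nodes must be saved-then-freed. A self-contained sketch of that traversal order in userspace C (plain pointers in place of the RCU-protected ones):

#include <stdlib.h>

#define NBUCKETS 4

struct exception {
	struct exception *next;
};

static void free_all(struct exception **hash)
{
	int i;

	for (i = 0; i < NBUCKETS; i++) {
		struct exception *e = hash[i];

		while (e) {
			struct exception *next = e->next;	/* save before free */

			free(e);
			e = next;
		}
	}
	free(hash);
}

int main(void)
{
	struct exception **hash = calloc(NBUCKETS, sizeof(*hash));
	struct exception *e = calloc(1, sizeof(*e));

	if (!hash || !e)
		return 1;
	hash[1] = e;
	free_all(hash);
	return 0;
}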
@@ -148,6 +204,10 @@ static void free_fib_info_rcu(struct rcu_head *head)
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
 			dev_put(nexthop_nh->nh_dev);
+		if (nexthop_nh->nh_exceptions)
+			free_nh_exceptions(nexthop_nh);
+		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
+		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
 	} endfor_nexthops(fi);
 
 	release_net(fi->fib_net);
@@ -163,6 +223,12 @@ void free_fib_info(struct fib_info *fi)
 		return;
 	}
 	fib_info_cnt--;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	change_nexthops(fi) {
+		if (nexthop_nh->nh_tclassid)
+			fi->fib_net->ipv4.fib_num_tclassid_users--;
+	} endfor_nexthops(fi);
+#endif
 	call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
@@ -421,6 +487,8 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		nla = nla_find(attrs, attrlen, RTA_FLOW);
 		nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+		if (nexthop_nh->nh_tclassid)
+			fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
 	}
 
@@ -769,6 +837,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	fi->fib_nhs = nhs;
 	change_nexthops(fi) {
 		nexthop_nh->nh_parent = fi;
+		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
 	} endfor_nexthops(fi)
 
 	if (cfg->fc_mx) {
@@ -779,9 +848,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 			int type = nla_type(nla);
 
 			if (type) {
+				u32 val;
+
 				if (type > RTAX_MAX)
 					goto err_inval;
-				fi->fib_metrics[type - 1] = nla_get_u32(nla);
+				val = nla_get_u32(nla);
+				if (type == RTAX_ADVMSS && val > 65535 - 40)
+					val = 65535 - 40;
+				if (type == RTAX_MTU && val > 65535 - 15)
+					val = 65535 - 15;
+				fi->fib_metrics[type - 1] = val;
 			}
 		}
 	}
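Editorial note: the new bounds checks keep netlink-supplied metrics inside what the stack can represent -- advmss and mtu end up in 16-bit fields, so values are clamped to 65535 minus the relevant header overhead (40 bytes behind advmss, 15 behind mtu). The arithmetic, runnable on its own:

#include <assert.h>
#include <stdint.h>

static uint32_t clamp_advmss(uint32_t val)
{
	return val > 65535 - 40 ? 65535 - 40 : val;
}

static uint32_t clamp_mtu(uint32_t val)
{
	return val > 65535 - 15 ? 65535 - 15 : val;
}

int main(void)
{
	assert(clamp_advmss(1460)   == 1460);	/* sane value passes through */
	assert(clamp_advmss(100000) == 65495);	/* oversized value is clamped */
	assert(clamp_mtu(100000)    == 65520);
	return 0;
}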
@@ -810,6 +886,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		nh->nh_flags = cfg->fc_flags;
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		nh->nh_tclassid = cfg->fc_flow;
+		if (nh->nh_tclassid)
+			fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		nh->nh_weight = 1;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7b4bd6..f0cdb30921c0 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -159,7 +159,6 @@ struct trie {
 #endif
 };
 
-static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
 static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
 				  int wasfull);
 static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
@@ -473,7 +472,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
 	}
 
 	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-		 sizeof(struct rt_trie_node) << bits);
+		 sizeof(struct rt_trie_node *) << bits);
 	return tn;
 }
 
@@ -490,7 +489,7 @@ static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *
 	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
 }
 
-static inline void put_child(struct trie *t, struct tnode *tn, int i,
+static inline void put_child(struct tnode *tn, int i,
 			     struct rt_trie_node *n)
 {
 	tnode_put_child_reorg(tn, i, n, -1);
@@ -754,8 +753,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 				goto nomem;
 			}
 
-			put_child(t, tn, 2*i, (struct rt_trie_node *) left);
-			put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
+			put_child(tn, 2*i, (struct rt_trie_node *) left);
+			put_child(tn, 2*i+1, (struct rt_trie_node *) right);
 		}
 	}
 
@@ -776,9 +775,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 			if (tkey_extract_bits(node->key,
 					      oldtnode->pos + oldtnode->bits,
 					      1) == 0)
-				put_child(t, tn, 2*i, node);
+				put_child(tn, 2*i, node);
 			else
-				put_child(t, tn, 2*i+1, node);
+				put_child(tn, 2*i+1, node);
 			continue;
 		}
 
@@ -786,8 +785,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 		inode = (struct tnode *) node;
 
 		if (inode->bits == 1) {
-			put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
-			put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
+			put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
+			put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
 
 			tnode_free_safe(inode);
 			continue;
@@ -817,22 +816,22 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 		 */
 
 		left = (struct tnode *) tnode_get_child(tn, 2*i);
-		put_child(t, tn, 2*i, NULL);
+		put_child(tn, 2*i, NULL);
 
 		BUG_ON(!left);
 
 		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
-		put_child(t, tn, 2*i+1, NULL);
+		put_child(tn, 2*i+1, NULL);
 
 		BUG_ON(!right);
 
 		size = tnode_child_length(left);
 		for (j = 0; j < size; j++) {
-			put_child(t, left, j, rtnl_dereference(inode->child[j]));
-			put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
+			put_child(left, j, rtnl_dereference(inode->child[j]));
+			put_child(right, j, rtnl_dereference(inode->child[j + size]));
 		}
-		put_child(t, tn, 2*i, resize(t, left));
-		put_child(t, tn, 2*i+1, resize(t, right));
+		put_child(tn, 2*i, resize(t, left));
+		put_child(tn, 2*i+1, resize(t, right));
 
 		tnode_free_safe(inode);
 	}
@@ -877,7 +876,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
 			if (!newn)
 				goto nomem;
 
-			put_child(t, tn, i/2, (struct rt_trie_node *)newn);
+			put_child(tn, i/2, (struct rt_trie_node *)newn);
 		}
 
 	}
@@ -892,21 +891,21 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
 		if (left == NULL) {
 			if (right == NULL)    /* Both are empty */
 				continue;
-			put_child(t, tn, i/2, right);
+			put_child(tn, i/2, right);
 			continue;
 		}
 
 		if (right == NULL) {
-			put_child(t, tn, i/2, left);
+			put_child(tn, i/2, left);
 			continue;
 		}
 
 		/* Two nonempty children */
 		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
-		put_child(t, tn, i/2, NULL);
-		put_child(t, newBinNode, 0, left);
-		put_child(t, newBinNode, 1, right);
-		put_child(t, tn, i/2, resize(t, newBinNode));
+		put_child(tn, i/2, NULL);
+		put_child(newBinNode, 0, left);
+		put_child(newBinNode, 1, right);
+		put_child(tn, i/2, resize(t, newBinNode));
 	}
 	tnode_free_safe(oldtnode);
 	return tn;
@@ -1007,9 +1006,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 	while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
 		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
-		tn = (struct tnode *) resize(t, (struct tnode *)tn);
+		tn = (struct tnode *)resize(t, tn);
 
-		tnode_put_child_reorg((struct tnode *)tp, cindex,
+		tnode_put_child_reorg(tp, cindex,
 				      (struct rt_trie_node *)tn, wasfull);
 
 		tp = node_parent((struct rt_trie_node *) tn);
@@ -1024,7 +1023,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 
 	/* Handle last (top) tnode */
 	if (IS_TNODE(tn))
-		tn = (struct tnode *)resize(t, (struct tnode *)tn);
+		tn = (struct tnode *)resize(t, tn);
 
 	rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 	tnode_free_flush();
@@ -1125,7 +1124,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 		node_set_parent((struct rt_trie_node *)l, tp);
 
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-		put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
+		put_child(tp, cindex, (struct rt_trie_node *)l);
 	} else {
 		/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
 		/*
@@ -1155,13 +1154,12 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 		node_set_parent((struct rt_trie_node *)tn, tp);
 
 		missbit = tkey_extract_bits(key, newpos, 1);
-		put_child(t, tn, missbit, (struct rt_trie_node *)l);
-		put_child(t, tn, 1-missbit, n);
+		put_child(tn, missbit, (struct rt_trie_node *)l);
+		put_child(tn, 1-missbit, n);
 
 		if (tp) {
 			cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-			put_child(t, (struct tnode *)tp, cindex,
-				  (struct rt_trie_node *)tn);
+			put_child(tp, cindex, (struct rt_trie_node *)tn);
 		} else {
 			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 			tp = tn;
@@ -1620,7 +1618,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
 
 	if (tp) {
 		t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
-		put_child(t, (struct tnode *)tp, cindex, NULL);
+		put_child(tp, cindex, NULL);
 		trie_rebalance(t, tp);
 	} else
 		RCU_INIT_POINTER(t->trie, NULL);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c75efbdc71cb..f2eccd531746 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -95,6 +95,7 @@
 #include <net/checksum.h>
 #include <net/xfrm.h>
 #include <net/inet_common.h>
+#include <net/ip_fib.h>
 
 /*
  *	Build xmit assembly blocks
@@ -253,10 +254,10 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 
 	/* Limit if icmp type is enabled in ratemask. */
 	if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
-		if (!rt->peer)
-			rt_bind_peer(rt, fl4->daddr, 1);
-		rc = inet_peer_xrlim_allow(rt->peer,
+		struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+		rc = inet_peer_xrlim_allow(peer,
 					   net->ipv4.sysctl_icmp_ratelimit);
+		inet_putpeer(peer);
 	}
 out:
 	return rc;
@@ -334,7 +335,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct flowi4 fl4;
 	struct sock *sk;
 	struct inet_sock *inet;
-	__be32 daddr;
+	__be32 daddr, saddr;
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
 		return;
@@ -348,6 +349,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 
 	inet->tos = ip_hdr(skb)->tos;
 	daddr = ipc.addr = ip_hdr(skb)->saddr;
+	saddr = fib_compute_spec_dst(skb);
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
 	if (icmp_param->replyopts.opt.opt.optlen) {
@@ -357,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	}
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
-	fl4.saddr = rt->rt_spec_dst;
+	fl4.saddr = saddr;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -569,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	rcu_read_lock();
 	if (rt_is_input_route(rt) &&
 	    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
-		dev = dev_get_by_index_rcu(net, rt->rt_iif);
+		dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
 
 	if (dev)
 		saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -632,6 +634,27 @@ out:;
 EXPORT_SYMBOL(icmp_send);
 
 
+static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	const struct net_protocol *ipprot;
+	int protocol = iph->protocol;
+
+	/* Checkin full IP header plus 8 bytes of protocol to
+	 * avoid additional coding at protocol handlers.
+	 */
+	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
+		return;
+
+	raw_icmp_error(skb, protocol, info);
+
+	rcu_read_lock();
+	ipprot = rcu_dereference(inet_protos[protocol]);
+	if (ipprot && ipprot->err_handler)
+		ipprot->err_handler(skb, info);
+	rcu_read_unlock();
+}
+
 /*
  *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
  */
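Editorial note: icmp_socket_deliver() centralizes what icmp_unreach() and icmp_redirect() previously duplicated -- validate that the quoted header is pullable, hand the error to raw sockets, then dispatch through the protocol table to the registered err_handler. A compact userspace model of the table-dispatch step (all names invented for illustration):

#include <stdio.h>

#define MAX_PROTOS 256

struct proto_ops {
	void (*err_handler)(unsigned int info);
};

static void tcp_err(unsigned int info)
{
	printf("tcp error, info=%u\n", info);
}

static const struct proto_ops tcp_ops = { .err_handler = tcp_err };
static const struct proto_ops *protos[MAX_PROTOS] = { [6] = &tcp_ops };

static void socket_deliver(int protocol, unsigned int info)
{
	const struct proto_ops *ops = protos[protocol & (MAX_PROTOS - 1)];

	if (ops && ops->err_handler)	/* unregistered protocols fall through */
		ops->err_handler(info);
}

int main(void)
{
	socket_deliver(6, 1500);	/* dispatches to tcp_err */
	socket_deliver(17, 1500);	/* no handler registered: no-op */
	return 0;
}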
@@ -640,10 +663,8 @@ static void icmp_unreach(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
 	struct icmphdr *icmph;
-	int hash, protocol;
-	const struct net_protocol *ipprot;
-	u32 info = 0;
 	struct net *net;
+	u32 info = 0;
 
 	net = dev_net(skb_dst(skb)->dev);
 
@@ -674,9 +695,7 @@ static void icmp_unreach(struct sk_buff *skb)
 			LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
 				       &iph->daddr);
 		} else {
-			info = ip_rt_frag_needed(net, iph,
-						 ntohs(icmph->un.frag.mtu),
-						 skb->dev);
+			info = ntohs(icmph->un.frag.mtu);
 			if (!info)
 				goto out;
 		}
@@ -720,26 +739,7 @@ static void icmp_unreach(struct sk_buff *skb)
 		goto out;
 	}
 
-	/* Checkin full IP header plus 8 bytes of protocol to
-	 * avoid additional coding at protocol handlers.
-	 */
-	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
-		goto out;
-
-	iph = (const struct iphdr *)skb->data;
-	protocol = iph->protocol;
-
-	/*
-	 *	Deliver ICMP message to raw sockets. Pretty useless feature?
-	 */
-	raw_icmp_error(skb, protocol, info);
-
-	hash = protocol & (MAX_INET_PROTOS - 1);
-	rcu_read_lock();
-	ipprot = rcu_dereference(inet_protos[hash]);
-	if (ipprot && ipprot->err_handler)
-		ipprot->err_handler(skb, info);
-	rcu_read_unlock();
+	icmp_socket_deliver(skb, info);
 
 out:
 	return;
@@ -755,46 +755,15 @@ out_err:
 
 static void icmp_redirect(struct sk_buff *skb)
 {
-	const struct iphdr *iph;
-
-	if (skb->len < sizeof(struct iphdr))
-		goto out_err;
-
-	/*
-	 *	Get the copied header of the packet that caused the redirect
-	 */
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-		goto out;
-
-	iph = (const struct iphdr *)skb->data;
-
-	switch (icmp_hdr(skb)->code & 7) {
-	case ICMP_REDIR_NET:
-	case ICMP_REDIR_NETTOS:
-		/*
-		 * As per RFC recommendations now handle it as a host redirect.
-		 */
-	case ICMP_REDIR_HOST:
-	case ICMP_REDIR_HOSTTOS:
-		ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
-			       icmp_hdr(skb)->un.gateway,
-			       iph->saddr, skb->dev);
-		break;
+	if (skb->len < sizeof(struct iphdr)) {
+		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+		return;
 	}
 
-	/* Ping wants to see redirects.
-	 * Let's pretend they are errors of sorts... */
-	if (iph->protocol == IPPROTO_ICMP &&
-	    iph->ihl >= 5 &&
-	    pskb_may_pull(skb, (iph->ihl<<2)+8)) {
-		ping_err(skb, icmp_hdr(skb)->un.gateway);
-	}
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return;
 
-out:
-	return;
-out_err:
-	ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
-	goto out;
+	icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
 }
 
 /*
@@ -868,86 +837,6 @@ out_err:
 	goto out;
 }
 
-
-/*
- *	Handle ICMP_ADDRESS_MASK requests. (RFC950)
- *
- * RFC1122 (3.2.2.9).  A host MUST only send replies to
- * ADDRESS_MASK requests if it's been configured as an address mask
- * agent.  Receiving a request doesn't constitute implicit permission to
- * act as one. Of course, implementing this correctly requires (SHOULD)
- * a way to turn the functionality on and off.  Another one for sysctl(),
- * I guess. -- MS
- *
- * RFC1812 (4.3.3.9).	A router MUST implement it.
- *	A router SHOULD have switch turning it on/off.
- *	This switch MUST be ON by default.
- *
- * Gratuitous replies, zero-source replies are not implemented,
- * that complies with RFC. DO NOT implement them!!! All the idea
- * of broadcast addrmask replies as specified in RFC950 is broken.
- * The problem is that it is not uncommon to have several prefixes
- * on one physical interface. Moreover, addrmask agent can even be
- * not aware of existing another prefixes.
- * If source is zero, addrmask agent cannot choose correct prefix.
- * Gratuitous mask announcements suffer from the same problem.
- * RFC1812 explains it, but still allows to use ADDRMASK,
- * that is pretty silly. --ANK
- *
- * All these rules are so bizarre, that I removed kernel addrmask
- * support at all. It is wrong, it is obsolete, nobody uses it in
- * any case. --ANK
- *
- * Furthermore you can do it with a usermode address agent program
- * anyway...
- */
-
-static void icmp_address(struct sk_buff *skb)
-{
-#if 0
-	net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
-#endif
-}
-
-/*
- * RFC1812 (4.3.3.9).	A router SHOULD listen all replies, and complain
- *			loudly if an inconsistency is found.
- * called with rcu_read_lock()
- */
-
-static void icmp_address_reply(struct sk_buff *skb)
-{
-	struct rtable *rt = skb_rtable(skb);
-	struct net_device *dev = skb->dev;
-	struct in_device *in_dev;
-	struct in_ifaddr *ifa;
-
-	if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
-		return;
-
-	in_dev = __in_dev_get_rcu(dev);
-	if (!in_dev)
-		return;
-
-	if (in_dev->ifa_list &&
-	    IN_DEV_LOG_MARTIANS(in_dev) &&
-	    IN_DEV_FORWARD(in_dev)) {
-		__be32 _mask, *mp;
-
-		mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
-		BUG_ON(mp == NULL);
-		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
-			if (*mp == ifa->ifa_mask &&
-			    inet_ifa_match(ip_hdr(skb)->saddr, ifa))
-				break;
-		}
-		if (!ifa)
-			net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
-					     mp,
-					     dev->name, &ip_hdr(skb)->saddr);
-	}
-}
-
 static void icmp_discard(struct sk_buff *skb)
 {
 }
@@ -1111,10 +1000,10 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
 		.handler = icmp_discard,
 	},
 	[ICMP_ADDRESS] = {
-		.handler = icmp_address,
+		.handler = icmp_discard,
 	},
 	[ICMP_ADDRESSREPLY] = {
-		.handler = icmp_address_reply,
+		.handler = icmp_discard,
 	},
 };
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f9ee7417f6a0..db0cf17c00f7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -374,18 +374,19 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct ip_options_rcu *opt = inet_rsk(req)->opt;
 	struct net *net = sock_net(sk);
+	int flags = inet_sk_flowi_flags(sk);
 
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol,
-			   inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
+			   flags,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
 			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
 		goto route_err;
 	return &rt->dst;
 
@@ -418,7 +419,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
 		goto route_err;
 	return &rt->dst;
 
@@ -799,3 +800,49 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 }
 EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
 #endif
+
+static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
+{
+	const struct inet_sock *inet = inet_sk(sk);
+	const struct ip_options_rcu *inet_opt;
+	__be32 daddr = inet->inet_daddr;
+	struct flowi4 *fl4;
+	struct rtable *rt;
+
+	rcu_read_lock();
+	inet_opt = rcu_dereference(inet->inet_opt);
+	if (inet_opt && inet_opt->opt.srr)
+		daddr = inet_opt->opt.faddr;
+	fl4 = &fl->u.ip4;
+	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
+				   inet->inet_saddr, inet->inet_dport,
+				   inet->inet_sport, sk->sk_protocol,
+				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+	if (IS_ERR(rt))
+		rt = NULL;
+	if (rt)
+		sk_setup_caps(sk, &rt->dst);
+	rcu_read_unlock();
+
+	return &rt->dst;
+}
+
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+	struct dst_entry *dst = __sk_dst_check(sk, 0);
+	struct inet_sock *inet = inet_sk(sk);
+
+	if (!dst) {
+		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+		if (!dst)
+			goto out;
+	}
+	dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+	dst = __sk_dst_check(sk, 0);
+	if (!dst)
+		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+out:
+	return dst;
+}
+EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
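Editorial note: inet_csk_update_pmtu() above follows a check/rebuild/retry shape -- use the cached route if still valid, rebuild it if not, apply the PMTU update, then re-check because the update itself may invalidate the entry. The control flow reduced to a userspace sketch (hypothetical helpers, not the kernel API):

#include <stdio.h>

struct route {
	int valid;
	unsigned int mtu;
};

static struct route cached = { .valid = 0, .mtu = 1500 };

static struct route *check_cached(void)
{
	return cached.valid ? &cached : NULL;
}

static struct route *rebuild(void)
{
	cached.valid = 1;	/* stand-in for a fresh route lookup */
	return &cached;
}

static struct route *update_pmtu(unsigned int mtu)
{
	struct route *rt = check_cached();

	if (!rt && !(rt = rebuild()))
		return NULL;
	rt->mtu = mtu;		/* may invalidate the cached entry... */
	rt = check_cached();	/* ...so look again before returning it */
	return rt ? rt : rebuild();
}

int main(void)
{
	printf("mtu=%u\n", update_pmtu(1400)->mtu);
	return 0;
}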
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 46d1e7199a8c..570e61f9611f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -46,9 +46,6 @@ struct inet_diag_entry {
 	u16	userlocks;
 };
 
-#define INET_DIAG_PUT(skb, attrtype, attrlen) \
-	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
 static DEFINE_MUTEX(inet_diag_table_mutex);
 
 static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
@@ -78,24 +75,22 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 	const struct inet_sock *inet = inet_sk(sk);
 	struct inet_diag_msg *r;
 	struct nlmsghdr  *nlh;
+	struct nlattr *attr;
 	void *info = NULL;
-	struct inet_diag_meminfo  *minfo = NULL;
-	unsigned char	 *b = skb_tail_pointer(skb);
 	const struct inet_diag_handler *handler;
 	int ext = req->idiag_ext;
 
 	handler = inet_diag_table[req->sdiag_protocol];
 	BUG_ON(handler == NULL);
 
-	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
-	nlh->nlmsg_flags = nlmsg_flags;
+	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+			nlmsg_flags);
+	if (!nlh)
+		return -EMSGSIZE;
 
-	r = NLMSG_DATA(nlh);
+	r = nlmsg_data(nlh);
 	BUG_ON(sk->sk_state == TCP_TIME_WAIT);
 
-	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
-		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
-
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = sk->sk_state;
 	r->idiag_timer = 0;
@@ -113,7 +108,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 	 * hence this needs to be included regardless of socket family.
 	 */
 	if (ext & (1 << (INET_DIAG_TOS - 1)))
-		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
+			goto errout;
 
 #if IS_ENABLED(CONFIG_IPV6)
 	if (r->idiag_family == AF_INET6) {
@@ -121,24 +117,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
 		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
 		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
+
 		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+				goto errout;
 	}
 #endif
 
 	r->idiag_uid = sock_i_uid(sk);
 	r->idiag_inode = sock_i_ino(sk);
 
-	if (minfo) {
-		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
-		minfo->idiag_wmem = sk->sk_wmem_queued;
-		minfo->idiag_fmem = sk->sk_forward_alloc;
-		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
+		struct inet_diag_meminfo minfo = {
+			.idiag_rmem = sk_rmem_alloc_get(sk),
+			.idiag_wmem = sk->sk_wmem_queued,
+			.idiag_fmem = sk->sk_forward_alloc,
+			.idiag_tmem = sk_wmem_alloc_get(sk),
+		};
+
+		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
+			goto errout;
 	}
 
 	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
 		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
-			goto rtattr_failure;
+			goto errout;
 
 	if (icsk == NULL) {
 		handler->idiag_get_info(sk, r, NULL);
@@ -165,16 +168,20 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 	}
 #undef EXPIRES_IN_MS
 
-	if (ext & (1 << (INET_DIAG_INFO - 1)))
-		info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
-
-	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
-		const size_t len = strlen(icsk->icsk_ca_ops->name);
-
-		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
-		       icsk->icsk_ca_ops->name);
+	if (ext & (1 << (INET_DIAG_INFO - 1))) {
+		attr = nla_reserve(skb, INET_DIAG_INFO,
+				   sizeof(struct tcp_info));
+		if (!attr)
+			goto errout;
+
+		info = nla_data(attr);
 	}
 
+	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
+		if (nla_put_string(skb, INET_DIAG_CONG,
+				   icsk->icsk_ca_ops->name) < 0)
+			goto errout;
+
 	handler->idiag_get_info(sk, r, info);
 
 	if (sk->sk_state < TCP_TIME_WAIT &&
@@ -182,12 +189,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
 out:
-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-	return skb->len;
+	return nlmsg_end(skb, nlh);
 
-rtattr_failure:
-nlmsg_failure:
-	nlmsg_trim(skb, b);
+errout:
+	nlmsg_cancel(skb, nlh);
 	return -EMSGSIZE;
 }
 EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
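Editorial note: the nlmsg_put()/nla_put()/nlmsg_cancel() conversion replaces open-coded tail-pointer bookkeeping with helpers that either commit a whole message or roll it back. The essential mechanics, modeled in userspace on a simplified buffer (this is not libnl or the kernel API):

#include <stdio.h>
#include <string.h>

static char buf[256];
static size_t used;

/* Reserve space for a chunk; return its start offset or -1. */
static long msg_put(size_t len)
{
	if (used + len > sizeof(buf))
		return -1;
	used += len;
	return (long)(used - len);
}

/* Roll the buffer back to the message start: nothing partial leaks out. */
static void msg_cancel(long start)
{
	used = (size_t)start;
}

int main(void)
{
	long start = msg_put(16);

	if (start < 0)
		return 1;
	memcpy(buf + start, "header+payload", 15);
	if (msg_put(512) < 0) {		/* attribute doesn't fit... */
		msg_cancel(start);	/* ...drop the whole message */
		printf("cancelled, used=%zu\n", used);
	}
	return 0;
}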
@@ -208,14 +213,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 {
 	long tmo;
 	struct inet_diag_msg *r;
-	const unsigned char *previous_tail = skb_tail_pointer(skb);
-	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
-					 unlh->nlmsg_type, sizeof(*r));
+	struct nlmsghdr *nlh;
 
-	r = NLMSG_DATA(nlh);
-	BUG_ON(tw->tw_state != TCP_TIME_WAIT);
+	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+			nlmsg_flags);
+	if (!nlh)
+		return -EMSGSIZE;
 
-	nlh->nlmsg_flags = nlmsg_flags;
+	r = nlmsg_data(nlh);
+	BUG_ON(tw->tw_state != TCP_TIME_WAIT);
 
 	tmo = tw->tw_ttd - jiffies;
 	if (tmo < 0)
@@ -245,11 +251,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
 	}
 #endif
-	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
-	return skb->len;
-nlmsg_failure:
-	nlmsg_trim(skb, previous_tail);
-	return -EMSGSIZE;
+
+	return nlmsg_end(skb, nlh);
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -269,16 +272,17 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
 	int err;
 	struct sock *sk;
 	struct sk_buff *rep;
+	struct net *net = sock_net(in_skb->sk);
 
 	err = -EINVAL;
 	if (req->sdiag_family == AF_INET) {
-		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
+		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
 				 req->id.idiag_dport, req->id.idiag_src[0],
 				 req->id.idiag_sport, req->id.idiag_if);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (req->sdiag_family == AF_INET6) {
-		sk = inet6_lookup(&init_net, hashinfo,
+		sk = inet6_lookup(net, hashinfo,
 				  (struct in6_addr *)req->id.idiag_dst,
 				  req->id.idiag_dport,
 				  (struct in6_addr *)req->id.idiag_src,
@@ -298,23 +302,23 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
 	if (err)
 		goto out;
 
-	err = -ENOMEM;
-	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
-				     sizeof(struct inet_diag_meminfo) +
-				     sizeof(struct tcp_info) + 64)),
-			GFP_KERNEL);
-	if (!rep)
+	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+			sizeof(struct inet_diag_meminfo) +
+			sizeof(struct tcp_info) + 64, GFP_KERNEL);
+	if (!rep) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	err = sk_diag_fill(sk, rep, req,
 			   NETLINK_CB(in_skb).pid,
 			   nlh->nlmsg_seq, 0, nlh);
 	if (err < 0) {
 		WARN_ON(err == -EMSGSIZE);
-		kfree_skb(rep);
+		nlmsg_free(rep);
 		goto out;
 	}
-	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
 			      MSG_DONTWAIT);
 	if (err > 0)
 		err = 0;
@@ -592,15 +596,16 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct inet_sock *inet = inet_sk(sk);
-	unsigned char *b = skb_tail_pointer(skb);
 	struct inet_diag_msg *r;
 	struct nlmsghdr *nlh;
 	long tmo;
 
-	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
-	nlh->nlmsg_flags = NLM_F_MULTI;
-	r = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+			NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
 
+	r = nlmsg_data(nlh);
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = TCP_SYN_RECV;
 	r->idiag_timer = 1;
@@ -628,13 +633,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
 	}
 #endif
-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-
-	return skb->len;
 
-nlmsg_failure:
-	nlmsg_trim(skb, b);
-	return -1;
+	return nlmsg_end(skb, nlh);
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
@@ -725,6 +725,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 {
 	int i, num;
 	int s_i, s_num;
+	struct net *net = sock_net(skb->sk);
 
 	s_i = cb->args[1];
 	s_num = num = cb->args[2];
@@ -744,6 +745,9 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 		sk_nulls_for_each(sk, node, &ilb->head) {
 			struct inet_sock *inet = inet_sk(sk);
 
+			if (!net_eq(sock_net(sk), net))
+				continue;
+
 			if (num < s_num) {
 				num++;
 				continue;
@@ -814,6 +818,8 @@ skip_listen_ht:
 		sk_nulls_for_each(sk, node, &head->chain) {
 			struct inet_sock *inet = inet_sk(sk);
 
+			if (!net_eq(sock_net(sk), net))
+				continue;
 			if (num < s_num)
 				goto next_normal;
 			if (!(r->idiag_states & (1 << sk->sk_state)))
@@ -840,6 +846,8 @@ next_normal:
 
 			inet_twsk_for_each(tw, node,
 				    &head->twchain) {
+				if (!net_eq(twsk_net(tw), net))
+					continue;
 
 				if (num < s_num)
 					goto next_dying;
@@ -892,7 +900,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	if (nlmsg_attrlen(cb->nlh, hdrlen))
 		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
 
-	return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
+	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
 }
 
 static inline int inet_diag_type2proto(int type)
@@ -909,7 +917,7 @@ static inline int inet_diag_type2proto(int type)
909 917
910static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb) 918static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
911{ 919{
912 struct inet_diag_req *rc = NLMSG_DATA(cb->nlh); 920 struct inet_diag_req *rc = nlmsg_data(cb->nlh);
913 struct inet_diag_req_v2 req; 921 struct inet_diag_req_v2 req;
914 struct nlattr *bc = NULL; 922 struct nlattr *bc = NULL;
915 int hdrlen = sizeof(struct inet_diag_req); 923 int hdrlen = sizeof(struct inet_diag_req);
@@ -929,7 +937,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
 static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
 				      const struct nlmsghdr *nlh)
 {
-	struct inet_diag_req *rc = NLMSG_DATA(nlh);
+	struct inet_diag_req *rc = nlmsg_data(nlh);
 	struct inet_diag_req_v2 req;
 
 	req.sdiag_family = rc->idiag_family;
@@ -944,6 +952,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
 static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int hdrlen = sizeof(struct inet_diag_req);
+	struct net *net = sock_net(skb->sk);
 
 	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
 	    nlmsg_len(nlh) < hdrlen)
@@ -964,7 +973,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
 			struct netlink_dump_control c = {
 				.dump = inet_diag_dump_compat,
 			};
-			return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
+			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
 		}
 	}
 
@@ -974,6 +983,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
 static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
 	int hdrlen = sizeof(struct inet_diag_req_v2);
+	struct net *net = sock_net(skb->sk);
 
 	if (nlmsg_len(h) < hdrlen)
 		return -EINVAL;
@@ -992,11 +1002,11 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 			struct netlink_dump_control c = {
 				.dump = inet_diag_dump,
 			};
-			return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 		}
 	}
 
-	return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
+	return inet_diag_get_exact(skb, h, nlmsg_data(h));
 }
 
 static const struct sock_diag_handler inet_diag_handler = {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5ff2a51b6d0c..85190e69297b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -243,12 +243,12 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 	if (q == NULL)
 		return NULL;
 
+	q->net = nf;
 	f->constructor(q, arg);
 	atomic_add(f->qsize, &nf->mem);
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
 	atomic_set(&q->refcnt, 1);
-	q->net = nf;
 
 	return q;
 }
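Editorial note: the one-line move matters because f->constructor() may now dereference q->net (as ip4_frag_init() does below); any field a callback consumes has to be initialized before the callback runs. The bug pattern in miniature:

#include <assert.h>
#include <stddef.h>

struct queue {
	int *net;		/* must be set before the constructor runs */
};

static int netns = 7;
static int seen;

static void constructor(struct queue *q)
{
	assert(q->net != NULL);	/* would fail if q->net were set afterwards */
	seen = *q->net;
}

int main(void)
{
	struct queue q = { .net = NULL };

	q.net = &netns;		/* initialize first... */
	constructor(&q);	/* ...then call out */
	return seen == 7 ? 0 : 1;
}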
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b2509..e1e0a4e8fd34 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -82,23 +82,39 @@ static const struct inet_peer peer_fake_node = {
 	.avl_height	= 0
 };
 
-struct inet_peer_base {
-	struct inet_peer __rcu *root;
-	seqlock_t	lock;
-	int		total;
-};
+void inet_peer_base_init(struct inet_peer_base *bp)
+{
+	bp->root = peer_avl_empty_rcu;
+	seqlock_init(&bp->lock);
+	bp->flush_seq = ~0U;
+	bp->total = 0;
+}
+EXPORT_SYMBOL_GPL(inet_peer_base_init);
 
-static struct inet_peer_base v4_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
-	.total		= 0,
-};
+static atomic_t v4_seq = ATOMIC_INIT(0);
+static atomic_t v6_seq = ATOMIC_INIT(0);
 
-static struct inet_peer_base v6_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
-	.total		= 0,
-};
+static atomic_t *inetpeer_seq_ptr(int family)
+{
+	return (family == AF_INET ? &v4_seq : &v6_seq);
+}
+
+static inline void flush_check(struct inet_peer_base *base, int family)
+{
+	atomic_t *fp = inetpeer_seq_ptr(family);
+
+	if (unlikely(base->flush_seq != atomic_read(fp))) {
+		inetpeer_invalidate_tree(base);
+		base->flush_seq = atomic_read(fp);
+	}
+}
+
+void inetpeer_invalidate_family(int family)
+{
+	atomic_t *fp = inetpeer_seq_ptr(family);
+
+	atomic_inc(fp);
+}
 
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
@@ -110,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 
 static void inetpeer_gc_worker(struct work_struct *work)
 {
-	struct inet_peer *p, *n;
+	struct inet_peer *p, *n, *c;
 	LIST_HEAD(list);
 
 	spin_lock_bh(&gc_lock);
@@ -122,17 +138,19 @@ static void inetpeer_gc_worker(struct work_struct *work)
 
 	list_for_each_entry_safe(p, n, &list, gc_list) {
 
-		if(need_resched())
+		if (need_resched())
 			cond_resched();
 
-		if (p->avl_left != peer_avl_empty) {
-			list_add_tail(&p->avl_left->gc_list, &list);
-			p->avl_left = peer_avl_empty;
+		c = rcu_dereference_protected(p->avl_left, 1);
+		if (c != peer_avl_empty) {
+			list_add_tail(&c->gc_list, &list);
+			p->avl_left = peer_avl_empty_rcu;
 		}
 
-		if (p->avl_right != peer_avl_empty) {
-			list_add_tail(&p->avl_right->gc_list, &list);
-			p->avl_right = peer_avl_empty;
+		c = rcu_dereference_protected(p->avl_right, 1);
+		if (c != peer_avl_empty) {
+			list_add_tail(&c->gc_list, &list);
+			p->avl_right = peer_avl_empty_rcu;
 		}
 
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
@@ -401,11 +419,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
 	call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 
-static struct inet_peer_base *family_to_base(int family)
-{
-	return family == AF_INET ? &v4_peers : &v6_peers;
-}
-
 /* perform garbage collect on all items stacked during a lookup */
 static int inet_peer_gc(struct inet_peer_base *base,
 			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
@@ -443,14 +456,17 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	return cnt;
 }
 
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+			       const struct inetpeer_addr *daddr,
+			       int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
 	int invalidated, gccnt = 0;
 
+	flush_check(base, daddr->family);
+
 	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
@@ -492,13 +508,9 @@ relookup:
 			(daddr->family == AF_INET) ?
 			    secure_ip_id(daddr->addr.a4) :
 			    secure_ipv6_id(daddr->addr.a6));
-		p->tcp_ts_stamp = 0;
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
 		p->rate_last = 0;
-		p->pmtu_expires = 0;
-		p->pmtu_orig = 0;
-		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 		INIT_LIST_HEAD(&p->gc_list);
 
 		/* Link the node. */
@@ -571,26 +583,19 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
 	schedule_delayed_work(&gc_work, gc_delay);
 }
 
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct inet_peer_base *base)
 {
-	struct inet_peer *old, *new, *prev;
-	struct inet_peer_base *base = family_to_base(family);
+	struct inet_peer *root;
 
 	write_seqlock_bh(&base->lock);
 
-	old = base->root;
-	if (old == peer_avl_empty_rcu)
-		goto out;
-
-	new = peer_avl_empty_rcu;
-
-	prev = cmpxchg(&base->root, old, new);
-	if (prev == old) {
+	root = rcu_deref_locked(base->root, base);
+	if (root != peer_avl_empty) {
+		base->root = peer_avl_empty_rcu;
 		base->total = 0;
-		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
+		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
 	}
 
-out:
 	write_sequnlock_bh(&base->lock);
 }
 EXPORT_SYMBOL(inetpeer_invalidate_tree);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9dbd3dd6022d..8d07c973409c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -171,6 +171,10 @@ static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
 static void ip4_frag_init(struct inet_frag_queue *q, void *a)
 {
 	struct ipq *qp = container_of(q, struct ipq, q);
+	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
+					       frags);
+	struct net *net = container_of(ipv4, struct net, ipv4);
+
 	struct ip4_create_arg *arg = a;
 
 	qp->protocol = arg->iph->protocol;
@@ -180,7 +184,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
180 qp->daddr = arg->iph->daddr; 184 qp->daddr = arg->iph->daddr;
181 qp->user = arg->user; 185 qp->user = arg->user;
182 qp->peer = sysctl_ipfrag_max_dist ? 186 qp->peer = sysctl_ipfrag_max_dist ?
183 inet_getpeer_v4(arg->iph->saddr, 1) : NULL; 187 inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
184} 188}
185 189
186static __inline__ void ip4_frag_free(struct inet_frag_queue *q) 190static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
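ip4_frag_init() above only receives the inet_frag_queue, yet needs the owning struct net; the added lines climb out twice with container_of(), first to netns_ipv4 and then to struct net. A self-contained sketch of that recovery trick, with demo types and a single hop instead of two:

#include <stdio.h>
#include <stddef.h>

/* Same trick as the kernel macro: subtract the member offset to
 * walk back from an embedded member to its enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct netns_frags_demo { int high_thresh; };

struct net_demo {
	int id;
	struct netns_frags_demo frags;	/* embedded, as in struct netns_ipv4 */
};

int main(void)
{
	struct net_demo net = { .id = 42 };
	struct netns_frags_demo *f = &net.frags;	/* all the callback gets */

	struct net_demo *recovered = container_of(f, struct net_demo, frags);
	printf("recovered id = %d\n", recovered->id);	/* prints 42 */
	return 0;
}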
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f49047b79609..b062a98574f2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -516,9 +516,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
516 case ICMP_PORT_UNREACH: 516 case ICMP_PORT_UNREACH:
517 /* Impossible event. */ 517 /* Impossible event. */
518 return; 518 return;
519 case ICMP_FRAG_NEEDED:
520 /* Soft state for pmtu is maintained by IP core. */
521 return;
522 default: 519 default:
523 /* All others are translated to HOST_UNREACH. 520 /* All others are translated to HOST_UNREACH.
524 rfc2003 contains "deep thoughts" about NET_UNREACH, 521 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -531,6 +528,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
531 if (code != ICMP_EXC_TTL) 528 if (code != ICMP_EXC_TTL)
532 return; 529 return;
533 break; 530 break;
531
532 case ICMP_REDIRECT:
533 break;
534 } 534 }
535 535
536 rcu_read_lock(); 536 rcu_read_lock();
@@ -538,7 +538,20 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
538 flags & GRE_KEY ? 538 flags & GRE_KEY ?
539 *(((__be32 *)p) + (grehlen / 4) - 1) : 0, 539 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
540 p[1]); 540 p[1]);
541 if (t == NULL || t->parms.iph.daddr == 0 || 541 if (t == NULL)
542 goto out;
543
544 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
545 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
546 t->parms.link, 0, IPPROTO_GRE, 0);
547 goto out;
548 }
549 if (type == ICMP_REDIRECT) {
550 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
551 IPPROTO_GRE, 0);
552 goto out;
553 }
554 if (t->parms.iph.daddr == 0 ||
542 ipv4_is_multicast(t->parms.iph.daddr)) 555 ipv4_is_multicast(t->parms.iph.daddr))
543 goto out; 556 goto out;
544 557
@@ -753,7 +766,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
753 766
754 if (skb->protocol == htons(ETH_P_IP)) { 767 if (skb->protocol == htons(ETH_P_IP)) {
755 rt = skb_rtable(skb); 768 rt = skb_rtable(skb);
756 dst = rt->rt_gateway; 769 dst = rt_nexthop(rt, old_iph->daddr);
757 } 770 }
758#if IS_ENABLED(CONFIG_IPV6) 771#if IS_ENABLED(CONFIG_IPV6)
759 else if (skb->protocol == htons(ETH_P_IPV6)) { 772 else if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -820,7 +833,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
820 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; 833 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
821 834
822 if (skb_dst(skb)) 835 if (skb_dst(skb))
823 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 836 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
824 837
825 if (skb->protocol == htons(ETH_P_IP)) { 838 if (skb->protocol == htons(ETH_P_IP)) {
826 df |= (old_iph->frag_off&htons(IP_DF)); 839 df |= (old_iph->frag_off&htons(IP_DF));
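With PMTU soft state no longer maintained for tunnels by the IP core, the handler above now maps ICMP type/code pairs to explicit actions: FRAG_NEEDED updates the path MTU, REDIRECT updates the route, the rest fall through to host-unreachable accounting. A compact userspace sketch of that classification; the action names are hypothetical stand-ins for ipv4_update_pmtu()/ipv4_redirect():

#include <stdio.h>

/* ICMP constants as in <linux/icmp.h> (values per RFC 792). */
enum { ICMP_DEST_UNREACH = 3, ICMP_REDIRECT = 5, ICMP_TIME_EXCEEDED = 11 };
enum { ICMP_FRAG_NEEDED = 4, ICMP_EXC_TTL = 0 };

enum tunnel_action { ACT_IGNORE, ACT_UPDATE_PMTU, ACT_REDIRECT, ACT_HOST_UNREACH };

/* Hypothetical helper mirroring the flow in ipgre_err(), not kernel code. */
static enum tunnel_action classify(int type, int code)
{
	switch (type) {
	case ICMP_DEST_UNREACH:
		return code == ICMP_FRAG_NEEDED ? ACT_UPDATE_PMTU
						: ACT_HOST_UNREACH;
	case ICMP_REDIRECT:
		return ACT_REDIRECT;
	case ICMP_TIME_EXCEEDED:
		return code == ICMP_EXC_TTL ? ACT_HOST_UNREACH : ACT_IGNORE;
	default:
		return ACT_IGNORE;
	}
}

int main(void)
{
	printf("%d\n", classify(ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED)); /* 1 */
	printf("%d\n", classify(ICMP_REDIRECT, 0));                    /* 2 */
	return 0;
}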
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 8590144ca330..f1395a6fb35f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -198,14 +198,13 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
198 rcu_read_lock(); 198 rcu_read_lock();
199 { 199 {
200 int protocol = ip_hdr(skb)->protocol; 200 int protocol = ip_hdr(skb)->protocol;
201 int hash, raw;
202 const struct net_protocol *ipprot; 201 const struct net_protocol *ipprot;
202 int raw;
203 203
204 resubmit: 204 resubmit:
205 raw = raw_local_deliver(skb, protocol); 205 raw = raw_local_deliver(skb, protocol);
206 206
207 hash = protocol & (MAX_INET_PROTOS - 1); 207 ipprot = rcu_dereference(inet_protos[protocol]);
208 ipprot = rcu_dereference(inet_protos[hash]);
209 if (ipprot != NULL) { 208 if (ipprot != NULL) {
210 int ret; 209 int ret;
211 210
@@ -314,26 +313,35 @@ drop:
314 return true; 313 return true;
315} 314}
316 315
316int sysctl_ip_early_demux __read_mostly = 1;
317EXPORT_SYMBOL(sysctl_ip_early_demux);
318
317static int ip_rcv_finish(struct sk_buff *skb) 319static int ip_rcv_finish(struct sk_buff *skb)
318{ 320{
319 const struct iphdr *iph = ip_hdr(skb); 321 const struct iphdr *iph = ip_hdr(skb);
320 struct rtable *rt; 322 struct rtable *rt;
321 323
324 if (sysctl_ip_early_demux && !skb_dst(skb)) {
325 const struct net_protocol *ipprot;
326 int protocol = iph->protocol;
327
328 ipprot = rcu_dereference(inet_protos[protocol]);
329 if (ipprot && ipprot->early_demux) {
330 ipprot->early_demux(skb);
331 /* must reload iph, skb->head might have changed */
332 iph = ip_hdr(skb);
333 }
334 }
335
322 /* 336 /*
323 * Initialise the virtual path cache for the packet. It describes 337 * Initialise the virtual path cache for the packet. It describes
324 * how the packet travels inside Linux networking. 338 * how the packet travels inside Linux networking.
325 */ 339 */
326 if (skb_dst(skb) == NULL) { 340 if (!skb_dst(skb)) {
327 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, 341 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
328 iph->tos, skb->dev); 342 iph->tos, skb->dev);
329 if (unlikely(err)) { 343 if (unlikely(err)) {
330 if (err == -EHOSTUNREACH) 344 if (err == -EXDEV)
331 IP_INC_STATS_BH(dev_net(skb->dev),
332 IPSTATS_MIB_INADDRERRORS);
333 else if (err == -ENETUNREACH)
334 IP_INC_STATS_BH(dev_net(skb->dev),
335 IPSTATS_MIB_INNOROUTES);
336 else if (err == -EXDEV)
337 NET_INC_STATS_BH(dev_net(skb->dev), 345 NET_INC_STATS_BH(dev_net(skb->dev),
338 LINUX_MIB_IPRPFILTER); 346 LINUX_MIB_IPRPFILTER);
339 goto drop; 347 goto drop;
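ip_rcv_finish() above now probes the protocol's optional early_demux() hook once, before route lookup, gated on the new sysctl and on no dst being attached yet. The dispatch shape in isolation, with a toy handler table standing in for inet_protos[] (which is RCU-protected in the kernel):

#include <stdio.h>

struct proto_ops {
	void (*early_demux)(const char *pkt);	/* optional; may be NULL */
	void (*deliver)(const char *pkt);
};

static void tcp_early(const char *pkt)   { printf("early demux: %s\n", pkt); }
static void tcp_deliver(const char *pkt) { printf("deliver: %s\n", pkt); }

static const struct proto_ops tcp_ops = { tcp_early, tcp_deliver };
static const struct proto_ops *proto_table[256] = {
	[6] = &tcp_ops,		/* IPPROTO_TCP */
};

static int early_demux_enabled = 1;	/* stand-in for the sysctl knob */

static void rcv_finish(int protocol, const char *pkt)
{
	const struct proto_ops *ops = proto_table[protocol & 0xff];

	if (early_demux_enabled && ops && ops->early_demux)
		ops->early_demux(pkt);	/* may attach cached state early */
	if (ops)
		ops->deliver(pkt);
}

int main(void)
{
	rcv_finish(6, "tcp segment");
	return 0;
}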
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 708b99494e23..1dc01f9793d5 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -27,6 +27,7 @@
27#include <net/icmp.h> 27#include <net/icmp.h>
28#include <net/route.h> 28#include <net/route.h>
29#include <net/cipso_ipv4.h> 29#include <net/cipso_ipv4.h>
30#include <net/ip_fib.h>
30 31
31/* 32/*
32 * Write options to IP header, record destination address to 33 * Write options to IP header, record destination address to
@@ -92,7 +93,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
92 unsigned char *sptr, *dptr; 93 unsigned char *sptr, *dptr;
93 int soffset, doffset; 94 int soffset, doffset;
94 int optlen; 95 int optlen;
95 __be32 daddr;
96 96
97 memset(dopt, 0, sizeof(struct ip_options)); 97 memset(dopt, 0, sizeof(struct ip_options));
98 98
@@ -104,8 +104,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
104 sptr = skb_network_header(skb); 104 sptr = skb_network_header(skb);
105 dptr = dopt->__data; 105 dptr = dopt->__data;
106 106
107 daddr = skb_rtable(skb)->rt_spec_dst;
108
109 if (sopt->rr) { 107 if (sopt->rr) {
110 optlen = sptr[sopt->rr+1]; 108 optlen = sptr[sopt->rr+1];
111 soffset = sptr[sopt->rr+2]; 109 soffset = sptr[sopt->rr+2];
@@ -179,6 +177,8 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
179 doffset -= 4; 177 doffset -= 4;
180 } 178 }
181 if (doffset > 3) { 179 if (doffset > 3) {
180 __be32 daddr = fib_compute_spec_dst(skb);
181
182 memcpy(&start[doffset-1], &daddr, 4); 182 memcpy(&start[doffset-1], &daddr, 4);
183 dopt->faddr = faddr; 183 dopt->faddr = faddr;
184 dptr[0] = start[0]; 184 dptr[0] = start[0];
@@ -241,6 +241,15 @@ void ip_options_fragment(struct sk_buff *skb)
241 opt->ts_needtime = 0; 241 opt->ts_needtime = 0;
242} 242}
243 243
244/* helper used by ip_options_compile() to call fib_compute_spec_dst()
245 * at most one time.
246 */
247static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
248{
249 if (*spec_dst == htonl(INADDR_ANY))
250 *spec_dst = fib_compute_spec_dst(skb);
251}
252
244/* 253/*
245 * Verify options and fill pointers in struct options. 254 * Verify options and fill pointers in struct options.
246 * Caller should clear *opt, and set opt->data. 255 * Caller should clear *opt, and set opt->data.
@@ -250,12 +259,12 @@ void ip_options_fragment(struct sk_buff *skb)
250int ip_options_compile(struct net *net, 259int ip_options_compile(struct net *net,
251 struct ip_options *opt, struct sk_buff *skb) 260 struct ip_options *opt, struct sk_buff *skb)
252{ 261{
253 int l; 262 __be32 spec_dst = htonl(INADDR_ANY);
254 unsigned char *iph;
255 unsigned char *optptr;
256 int optlen;
257 unsigned char *pp_ptr = NULL; 263 unsigned char *pp_ptr = NULL;
258 struct rtable *rt = NULL; 264 struct rtable *rt = NULL;
265 unsigned char *optptr;
266 unsigned char *iph;
267 int optlen, l;
259 268
260 if (skb != NULL) { 269 if (skb != NULL) {
261 rt = skb_rtable(skb); 270 rt = skb_rtable(skb);
@@ -331,7 +340,8 @@ int ip_options_compile(struct net *net,
331 goto error; 340 goto error;
332 } 341 }
333 if (rt) { 342 if (rt) {
334 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 343 spec_dst_fill(&spec_dst, skb);
344 memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
335 opt->is_changed = 1; 345 opt->is_changed = 1;
336 } 346 }
337 optptr[2] += 4; 347 optptr[2] += 4;
@@ -373,7 +383,8 @@ int ip_options_compile(struct net *net,
373 } 383 }
374 opt->ts = optptr - iph; 384 opt->ts = optptr - iph;
375 if (rt) { 385 if (rt) {
376 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 386 spec_dst_fill(&spec_dst, skb);
387 memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
377 timeptr = &optptr[optptr[2]+3]; 388 timeptr = &optptr[optptr[2]+3];
378 } 389 }
379 opt->ts_needaddr = 1; 390 opt->ts_needaddr = 1;
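spec_dst_fill() above hides fib_compute_spec_dst() behind an INADDR_ANY sentinel, so the record-route and timestamp branches share at most one FIB lookup per packet; the scheme works because 0.0.0.0 is never a valid specific destination. The same compute-at-most-once shape, runnable, with a stand-in for the expensive call:

#include <stdint.h>
#include <stdio.h>

#define SENTINEL 0u	/* plays the role of htonl(INADDR_ANY) */

static uint32_t expensive_lookup(void)
{
	puts("lookup performed");	/* should appear exactly once */
	return 0x0a000001;		/* hypothetical result, 10.0.0.1 */
}

static void fill_once(uint32_t *cached)
{
	if (*cached == SENTINEL)
		*cached = expensive_lookup();
}

int main(void)
{
	uint32_t spec_dst = SENTINEL;

	fill_once(&spec_dst);	/* computes and caches */
	fill_once(&spec_dst);	/* hits the cache, no second lookup */
	printf("%#x\n", spec_dst);
	return 0;
}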
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 451f97c42eb4..ba39a52d18c1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -113,19 +113,6 @@ int ip_local_out(struct sk_buff *skb)
113} 113}
114EXPORT_SYMBOL_GPL(ip_local_out); 114EXPORT_SYMBOL_GPL(ip_local_out);
115 115
116/* dev_loopback_xmit for use with netfilter. */
117static int ip_dev_loopback_xmit(struct sk_buff *newskb)
118{
119 skb_reset_mac_header(newskb);
120 __skb_pull(newskb, skb_network_offset(newskb));
121 newskb->pkt_type = PACKET_LOOPBACK;
122 newskb->ip_summed = CHECKSUM_UNNECESSARY;
123 WARN_ON(!skb_dst(newskb));
124 skb_dst_force(newskb);
125 netif_rx_ni(newskb);
126 return 0;
127}
128
129static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) 116static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
130{ 117{
131 int ttl = inet->uc_ttl; 118 int ttl = inet->uc_ttl;
@@ -183,6 +170,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
183 struct net_device *dev = dst->dev; 170 struct net_device *dev = dst->dev;
184 unsigned int hh_len = LL_RESERVED_SPACE(dev); 171 unsigned int hh_len = LL_RESERVED_SPACE(dev);
185 struct neighbour *neigh; 172 struct neighbour *neigh;
173 u32 nexthop;
186 174
187 if (rt->rt_type == RTN_MULTICAST) { 175 if (rt->rt_type == RTN_MULTICAST) {
188 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len); 176 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -200,19 +188,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
200 } 188 }
201 if (skb->sk) 189 if (skb->sk)
202 skb_set_owner_w(skb2, skb->sk); 190 skb_set_owner_w(skb2, skb->sk);
203 kfree_skb(skb); 191 consume_skb(skb);
204 skb = skb2; 192 skb = skb2;
205 } 193 }
206 194
207 rcu_read_lock(); 195 rcu_read_lock_bh();
208 neigh = dst_get_neighbour_noref(dst); 196 nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
197 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
198 if (unlikely(!neigh))
199 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
209 if (neigh) { 200 if (neigh) {
210 int res = neigh_output(neigh, skb); 201 int res = dst_neigh_output(dst, neigh, skb);
211 202
212 rcu_read_unlock(); 203 rcu_read_unlock_bh();
213 return res; 204 return res;
214 } 205 }
215 rcu_read_unlock(); 206 rcu_read_unlock_bh();
216 207
217 net_dbg_ratelimited("%s: No header cache and no neighbour!\n", 208 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
218 __func__); 209 __func__);
@@ -281,7 +272,7 @@ int ip_mc_output(struct sk_buff *skb)
281 if (newskb) 272 if (newskb)
282 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, 273 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
283 newskb, NULL, newskb->dev, 274 newskb, NULL, newskb->dev,
284 ip_dev_loopback_xmit); 275 dev_loopback_xmit);
285 } 276 }
286 277
287 /* Multicasts with ttl 0 must not go beyond the host */ 278 /* Multicasts with ttl 0 must not go beyond the host */
@@ -296,7 +287,7 @@ int ip_mc_output(struct sk_buff *skb)
296 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 287 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
297 if (newskb) 288 if (newskb)
298 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, 289 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
299 NULL, newskb->dev, ip_dev_loopback_xmit); 290 NULL, newskb->dev, dev_loopback_xmit);
300 } 291 }
301 292
302 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, 293 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
@@ -380,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
380 skb_dst_set_noref(skb, &rt->dst); 371 skb_dst_set_noref(skb, &rt->dst);
381 372
382packet_routed: 373packet_routed:
383 if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway) 374 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway)
384 goto no_route; 375 goto no_route;
385 376
386 /* OK, we know where to send it, allocate and build IP header. */ 377 /* OK, we know where to send it, allocate and build IP header. */
@@ -709,7 +700,7 @@ slow_path:
709 700
710 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); 701 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
711 } 702 }
712 kfree_skb(skb); 703 consume_skb(skb);
713 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS); 704 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
714 return err; 705 return err;
715 706
@@ -1472,19 +1463,34 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1472 1463
1473/* 1464/*
1474 * Generic function to send a packet as reply to another packet. 1465 * Generic function to send a packet as reply to another packet.
1475 * Used to send TCP resets so far. ICMP should use this function too. 1466 * Used to send some TCP resets/acks so far.
1476 * 1467 *
1477 * Should run single threaded per socket because it uses the sock 1468 * Use a fake percpu inet socket to avoid false sharing and contention.
1478 * structure to pass arguments.
1479 */ 1469 */
1480void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, 1470static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
1481 const struct ip_reply_arg *arg, unsigned int len) 1471 .sk = {
1472 .__sk_common = {
1473 .skc_refcnt = ATOMIC_INIT(1),
1474 },
1475 .sk_wmem_alloc = ATOMIC_INIT(1),
1476 .sk_allocation = GFP_ATOMIC,
1477 .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
1478 },
1479 .pmtudisc = IP_PMTUDISC_WANT,
1480 .uc_ttl = -1,
1481};
1482
1483void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
1484 __be32 saddr, const struct ip_reply_arg *arg,
1485 unsigned int len)
1482{ 1486{
1483 struct inet_sock *inet = inet_sk(sk);
1484 struct ip_options_data replyopts; 1487 struct ip_options_data replyopts;
1485 struct ipcm_cookie ipc; 1488 struct ipcm_cookie ipc;
1486 struct flowi4 fl4; 1489 struct flowi4 fl4;
1487 struct rtable *rt = skb_rtable(skb); 1490 struct rtable *rt = skb_rtable(skb);
1491 struct sk_buff *nskb;
1492 struct sock *sk;
1493 struct inet_sock *inet;
1488 1494
1489 if (ip_options_echo(&replyopts.opt.opt, skb)) 1495 if (ip_options_echo(&replyopts.opt.opt, skb))
1490 return; 1496 return;
@@ -1502,38 +1508,39 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1502 1508
1503 flowi4_init_output(&fl4, arg->bound_dev_if, 0, 1509 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1504 RT_TOS(arg->tos), 1510 RT_TOS(arg->tos),
1505 RT_SCOPE_UNIVERSE, sk->sk_protocol, 1511 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1506 ip_reply_arg_flowi_flags(arg), 1512 ip_reply_arg_flowi_flags(arg),
1507 daddr, rt->rt_spec_dst, 1513 daddr, saddr,
1508 tcp_hdr(skb)->source, tcp_hdr(skb)->dest); 1514 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1509 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1515 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1510 rt = ip_route_output_key(sock_net(sk), &fl4); 1516 rt = ip_route_output_key(net, &fl4);
1511 if (IS_ERR(rt)) 1517 if (IS_ERR(rt))
1512 return; 1518 return;
1513 1519
1514 /* And let IP do all the hard work. 1520 inet = &get_cpu_var(unicast_sock);
1515 1521
1516 This chunk is not reenterable, hence spinlock.
1517 Note that it uses the fact, that this function is called
1518 with locally disabled BH and that sk cannot be already spinlocked.
1519 */
1520 bh_lock_sock(sk);
1521 inet->tos = arg->tos; 1522 inet->tos = arg->tos;
1523 sk = &inet->sk;
1522 sk->sk_priority = skb->priority; 1524 sk->sk_priority = skb->priority;
1523 sk->sk_protocol = ip_hdr(skb)->protocol; 1525 sk->sk_protocol = ip_hdr(skb)->protocol;
1524 sk->sk_bound_dev_if = arg->bound_dev_if; 1526 sk->sk_bound_dev_if = arg->bound_dev_if;
1527 sock_net_set(sk, net);
1528 __skb_queue_head_init(&sk->sk_write_queue);
1529 sk->sk_sndbuf = sysctl_wmem_default;
1525 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, 1530 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1526 &ipc, &rt, MSG_DONTWAIT); 1531 &ipc, &rt, MSG_DONTWAIT);
1527 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 1532 nskb = skb_peek(&sk->sk_write_queue);
1533 if (nskb) {
1528 if (arg->csumoffset >= 0) 1534 if (arg->csumoffset >= 0)
1529 *((__sum16 *)skb_transport_header(skb) + 1535 *((__sum16 *)skb_transport_header(nskb) +
1530 arg->csumoffset) = csum_fold(csum_add(skb->csum, 1536 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1531 arg->csum)); 1537 arg->csum));
1532 skb->ip_summed = CHECKSUM_NONE; 1538 nskb->ip_summed = CHECKSUM_NONE;
1539 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
1533 ip_push_pending_frames(sk, &fl4); 1540 ip_push_pending_frames(sk, &fl4);
1534 } 1541 }
1535 1542
1536 bh_unlock_sock(sk); 1543 put_cpu_var(unicast_sock);
1537 1544
1538 ip_rt_put(rt); 1545 ip_rt_put(rt);
1539} 1546}
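ip_send_unicast_reply() above retires the shared control socket and its bh_lock_sock() in favour of one pre-initialised fake socket per CPU, so reply paths on different CPUs never contend for a lock or false-share a cache line. Userspace has no get_cpu_var(), but C11 thread-local storage gives the analogous shape; the scratch structure here is hypothetical:

#include <stdio.h>
#include <pthread.h>

struct scratch {
	char buf[64];
	unsigned long uses;
};

/* One instance per thread: no lock, no cache-line ping-pong. */
static _Thread_local struct scratch reply_scratch;

static void send_reply(const char *msg)
{
	struct scratch *s = &reply_scratch;	/* this thread's private copy */

	snprintf(s->buf, sizeof(s->buf), "reply: %s", msg);
	s->uses++;
	puts(s->buf);
}

static void *worker(void *arg)
{
	send_reply(arg);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, "from worker");
	send_reply("from main");
	pthread_join(t, NULL);
	return 0;
}

One difference worth noting: the kernel variant also disables preemption via get_cpu_var() so the "current CPU" cannot change mid-use; threads never migrate between TLS instances, so the sketch can skip that.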
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 0d11f234d615..5eea4a811042 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -40,6 +40,7 @@
40#if IS_ENABLED(CONFIG_IPV6) 40#if IS_ENABLED(CONFIG_IPV6)
41#include <net/transp_v6.h> 41#include <net/transp_v6.h>
42#endif 42#endif
43#include <net/ip_fib.h>
43 44
44#include <linux/errqueue.h> 45#include <linux/errqueue.h>
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
@@ -1019,18 +1020,17 @@ e_inval:
1019 * @sk: socket 1020 * @sk: socket
1020 * @skb: buffer 1021 * @skb: buffer
1021 * 1022 *
1022 * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst 1023 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
1023 * in skb->cb[] before dst drop. 1024 * destination in skb->cb[] before dst drop.
1024 * This way, receiver doesn't make cache line misses to read rtable. 1025 * This way, receiver doesn't make cache line misses to read rtable.
1025 */ 1026 */
1026void ipv4_pktinfo_prepare(struct sk_buff *skb) 1027void ipv4_pktinfo_prepare(struct sk_buff *skb)
1027{ 1028{
1028 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); 1029 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1029 const struct rtable *rt = skb_rtable(skb);
1030 1030
1031 if (rt) { 1031 if (skb_rtable(skb)) {
1032 pktinfo->ipi_ifindex = rt->rt_iif; 1032 pktinfo->ipi_ifindex = inet_iif(skb);
1033 pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst; 1033 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1034 } else { 1034 } else {
1035 pktinfo->ipi_ifindex = 0; 1035 pktinfo->ipi_ifindex = 0;
1036 pktinfo->ipi_spec_dst.s_addr = 0; 1036 pktinfo->ipi_spec_dst.s_addr = 0;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
new file mode 100644
index 000000000000..3511ffba7bd4
--- /dev/null
+++ b/net/ipv4/ip_vti.c
@@ -0,0 +1,956 @@
1/*
2 * Linux NET3: IP/IP protocol decoder modified to support
3 * virtual tunnel interface
4 *
5 * Authors:
6 * Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15/*
16 This version of net/ipv4/ip_vti.c is cloned of net/ipv4/ipip.c
17
18 For comments look at net/ipv4/ip_gre.c --ANK
19 */
20
21
22#include <linux/capability.h>
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/uaccess.h>
27#include <linux/skbuff.h>
28#include <linux/netdevice.h>
29#include <linux/in.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/if_arp.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <linux/netfilter_ipv4.h>
36#include <linux/if_ether.h>
37
38#include <net/sock.h>
39#include <net/ip.h>
40#include <net/icmp.h>
41#include <net/ipip.h>
42#include <net/inet_ecn.h>
43#include <net/xfrm.h>
44#include <net/net_namespace.h>
45#include <net/netns/generic.h>
46
47#define HASH_SIZE 16
48#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
49
50static struct rtnl_link_ops vti_link_ops __read_mostly;
51
52static int vti_net_id __read_mostly;
53struct vti_net {
54 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
55 struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
56 struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
57 struct ip_tunnel __rcu *tunnels_wc[1];
58 struct ip_tunnel __rcu **tunnels[4];
59
60 struct net_device *fb_tunnel_dev;
61};
62
63static int vti_fb_tunnel_init(struct net_device *dev);
64static int vti_tunnel_init(struct net_device *dev);
65static void vti_tunnel_setup(struct net_device *dev);
66static void vti_dev_free(struct net_device *dev);
67static int vti_tunnel_bind_dev(struct net_device *dev);
68
69/* Locking : hash tables are protected by RCU and RTNL */
70
71#define for_each_ip_tunnel_rcu(start) \
72 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
73
74/* often modified stats are per cpu, other are shared (netdev->stats) */
75struct pcpu_tstats {
76 u64 rx_packets;
77 u64 rx_bytes;
78 u64 tx_packets;
79 u64 tx_bytes;
80 struct u64_stats_sync syncp;
81};
82
83#define VTI_XMIT(stats1, stats2) do { \
84 int err; \
85 int pkt_len = skb->len; \
86 err = dst_output(skb); \
87 if (net_xmit_eval(err) == 0) { \
88 u64_stats_update_begin(&(stats1)->syncp); \
89 (stats1)->tx_bytes += pkt_len; \
90 (stats1)->tx_packets++; \
91 u64_stats_update_end(&(stats1)->syncp); \
92 } else { \
93 (stats2)->tx_errors++; \
94 (stats2)->tx_aborted_errors++; \
95 } \
96} while (0)
97
98
99static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
100 struct rtnl_link_stats64 *tot)
101{
102 int i;
103
104 for_each_possible_cpu(i) {
105 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
106 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
107 unsigned int start;
108
109 do {
110 start = u64_stats_fetch_begin_bh(&tstats->syncp);
111 rx_packets = tstats->rx_packets;
112 tx_packets = tstats->tx_packets;
113 rx_bytes = tstats->rx_bytes;
114 tx_bytes = tstats->tx_bytes;
115 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
116
117 tot->rx_packets += rx_packets;
118 tot->tx_packets += tx_packets;
119 tot->rx_bytes += rx_bytes;
120 tot->tx_bytes += tx_bytes;
121 }
122
123 tot->multicast = dev->stats.multicast;
124 tot->rx_crc_errors = dev->stats.rx_crc_errors;
125 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
126 tot->rx_length_errors = dev->stats.rx_length_errors;
127 tot->rx_errors = dev->stats.rx_errors;
128 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
129 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
130 tot->tx_dropped = dev->stats.tx_dropped;
131 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
132 tot->tx_errors = dev->stats.tx_errors;
133
134 return tot;
135}
136
137static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
138 __be32 remote, __be32 local)
139{
140 unsigned h0 = HASH(remote);
141 unsigned h1 = HASH(local);
142 struct ip_tunnel *t;
143 struct vti_net *ipn = net_generic(net, vti_net_id);
144
145 for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
146 if (local == t->parms.iph.saddr &&
147 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
148 return t;
149 for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
150 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
151 return t;
152
153 for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
154 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
155 return t;
156
157 for_each_ip_tunnel_rcu(ipn->tunnels_wc[0])
158 if (t && (t->dev->flags&IFF_UP))
159 return t;
160 return NULL;
161}
162
163static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
164 struct ip_tunnel_parm *parms)
165{
166 __be32 remote = parms->iph.daddr;
167 __be32 local = parms->iph.saddr;
168 unsigned h = 0;
169 int prio = 0;
170
171 if (remote) {
172 prio |= 2;
173 h ^= HASH(remote);
174 }
175 if (local) {
176 prio |= 1;
177 h ^= HASH(local);
178 }
179 return &ipn->tunnels[prio][h];
180}
181
182static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
183 struct ip_tunnel *t)
184{
185 return __vti_bucket(ipn, &t->parms);
186}
187
188static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
189{
190 struct ip_tunnel __rcu **tp;
191 struct ip_tunnel *iter;
192
193 for (tp = vti_bucket(ipn, t);
194 (iter = rtnl_dereference(*tp)) != NULL;
195 tp = &iter->next) {
196 if (t == iter) {
197 rcu_assign_pointer(*tp, t->next);
198 break;
199 }
200 }
201}
202
203static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
204{
205 struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
206
207 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
208 rcu_assign_pointer(*tp, t);
209}
210
211static struct ip_tunnel *vti_tunnel_locate(struct net *net,
212 struct ip_tunnel_parm *parms,
213 int create)
214{
215 __be32 remote = parms->iph.daddr;
216 __be32 local = parms->iph.saddr;
217 struct ip_tunnel *t, *nt;
218 struct ip_tunnel __rcu **tp;
219 struct net_device *dev;
220 char name[IFNAMSIZ];
221 struct vti_net *ipn = net_generic(net, vti_net_id);
222
223 for (tp = __vti_bucket(ipn, parms);
224 (t = rtnl_dereference(*tp)) != NULL;
225 tp = &t->next) {
226 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
227 return t;
228 }
229 if (!create)
230 return NULL;
231
232 if (parms->name[0])
233 strlcpy(name, parms->name, IFNAMSIZ);
234 else
235 strcpy(name, "vti%d");
236
237 dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
238 if (dev == NULL)
239 return NULL;
240
241 dev_net_set(dev, net);
242
243 nt = netdev_priv(dev);
244 nt->parms = *parms;
245 dev->rtnl_link_ops = &vti_link_ops;
246
247 vti_tunnel_bind_dev(dev);
248
249 if (register_netdevice(dev) < 0)
250 goto failed_free;
251
252 dev_hold(dev);
253 vti_tunnel_link(ipn, nt);
254 return nt;
255
256failed_free:
257 free_netdev(dev);
258 return NULL;
259}
260
261static void vti_tunnel_uninit(struct net_device *dev)
262{
263 struct net *net = dev_net(dev);
264 struct vti_net *ipn = net_generic(net, vti_net_id);
265
266 vti_tunnel_unlink(ipn, netdev_priv(dev));
267 dev_put(dev);
268}
269
270static int vti_err(struct sk_buff *skb, u32 info)
271{
272
273 /* All the routers (except for Linux) return only
274 * 8 bytes of packet payload. It means that precise relaying of
275 * ICMP in the real Internet is absolutely infeasible.
276 */
277 struct iphdr *iph = (struct iphdr *)skb->data;
278 const int type = icmp_hdr(skb)->type;
279 const int code = icmp_hdr(skb)->code;
280 struct ip_tunnel *t;
281 int err;
282
283 switch (type) {
284 default:
285 case ICMP_PARAMETERPROB:
286 return 0;
287
288 case ICMP_DEST_UNREACH:
289 switch (code) {
290 case ICMP_SR_FAILED:
291 case ICMP_PORT_UNREACH:
292 /* Impossible event. */
293 return 0;
294 default:
295 /* All others are translated to HOST_UNREACH. */
296 break;
297 }
298 break;
299 case ICMP_TIME_EXCEEDED:
300 if (code != ICMP_EXC_TTL)
301 return 0;
302 break;
303 }
304
305 err = -ENOENT;
306
307 rcu_read_lock();
308 t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
309 if (t == NULL)
310 goto out;
311
312 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
313 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
314 t->parms.link, 0, IPPROTO_IPIP, 0);
315 err = 0;
316 goto out;
317 }
318
319 err = 0;
320 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
321 goto out;
322
323 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
324 t->err_count++;
325 else
326 t->err_count = 1;
327 t->err_time = jiffies;
328out:
329 rcu_read_unlock();
330 return err;
331}
332
333/* We don't digest the packet, therefore let the packet pass */
334static int vti_rcv(struct sk_buff *skb)
335{
336 struct ip_tunnel *tunnel;
337 const struct iphdr *iph = ip_hdr(skb);
338
339 rcu_read_lock();
340 tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
341 if (tunnel != NULL) {
342 struct pcpu_tstats *tstats;
343
344 tstats = this_cpu_ptr(tunnel->dev->tstats);
345 u64_stats_update_begin(&tstats->syncp);
346 tstats->rx_packets++;
347 tstats->rx_bytes += skb->len;
348 u64_stats_update_end(&tstats->syncp);
349
350 skb->dev = tunnel->dev;
351 rcu_read_unlock();
352 return 1;
353 }
354 rcu_read_unlock();
355
356 return -1;
357}
358
359/* This function assumes it is being called from dev_queue_xmit()
360 * and that skb is filled properly by that function.
361 */
362
363static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
364{
365 struct ip_tunnel *tunnel = netdev_priv(dev);
366 struct pcpu_tstats *tstats;
367 struct iphdr *tiph = &tunnel->parms.iph;
368 u8 tos;
369 struct rtable *rt; /* Route to the other host */
370 struct net_device *tdev; /* Device to other host */
371 struct iphdr *old_iph = ip_hdr(skb);
372 __be32 dst = tiph->daddr;
373 struct flowi4 fl4;
374
375 if (skb->protocol != htons(ETH_P_IP))
376 goto tx_error;
377
378 tos = old_iph->tos;
379
380 memset(&fl4, 0, sizeof(fl4));
381 flowi4_init_output(&fl4, tunnel->parms.link,
382 htonl(tunnel->parms.i_key), RT_TOS(tos),
383 RT_SCOPE_UNIVERSE,
384 IPPROTO_IPIP, 0,
385 dst, tiph->saddr, 0, 0);
386 rt = ip_route_output_key(dev_net(dev), &fl4);
387 if (IS_ERR(rt)) {
388 dev->stats.tx_carrier_errors++;
389 goto tx_error_icmp;
390 }
391 /* if there is no transform then this tunnel is not functional.
392 * Or if the xfrm is not mode tunnel.
393 */
394 if (!rt->dst.xfrm ||
395 rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
396 dev->stats.tx_carrier_errors++;
397 goto tx_error_icmp;
398 }
399 tdev = rt->dst.dev;
400
401 if (tdev == dev) {
402 ip_rt_put(rt);
403 dev->stats.collisions++;
404 goto tx_error;
405 }
406
407 if (tunnel->err_count > 0) {
408 if (time_before(jiffies,
409 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
410 tunnel->err_count--;
411 dst_link_failure(skb);
412 } else
413 tunnel->err_count = 0;
414 }
415
416 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
417 IPSKB_REROUTED);
418 skb_dst_drop(skb);
419 skb_dst_set(skb, &rt->dst);
420 nf_reset(skb);
421 skb->dev = skb_dst(skb)->dev;
422
423 tstats = this_cpu_ptr(dev->tstats);
424 VTI_XMIT(tstats, &dev->stats);
425 return NETDEV_TX_OK;
426
427tx_error_icmp:
428 dst_link_failure(skb);
429tx_error:
430 dev->stats.tx_errors++;
431 dev_kfree_skb(skb);
432 return NETDEV_TX_OK;
433}
434
435static int vti_tunnel_bind_dev(struct net_device *dev)
436{
437 struct net_device *tdev = NULL;
438 struct ip_tunnel *tunnel;
439 struct iphdr *iph;
440
441 tunnel = netdev_priv(dev);
442 iph = &tunnel->parms.iph;
443
444 if (iph->daddr) {
445 struct rtable *rt;
446 struct flowi4 fl4;
447 memset(&fl4, 0, sizeof(fl4));
448 flowi4_init_output(&fl4, tunnel->parms.link,
449 htonl(tunnel->parms.i_key),
450 RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
451 IPPROTO_IPIP, 0,
452 iph->daddr, iph->saddr, 0, 0);
453 rt = ip_route_output_key(dev_net(dev), &fl4);
454 if (!IS_ERR(rt)) {
455 tdev = rt->dst.dev;
456 ip_rt_put(rt);
457 }
458 dev->flags |= IFF_POINTOPOINT;
459 }
460
461 if (!tdev && tunnel->parms.link)
462 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
463
464 if (tdev) {
465 dev->hard_header_len = tdev->hard_header_len +
466 sizeof(struct iphdr);
467 dev->mtu = tdev->mtu;
468 }
469 dev->iflink = tunnel->parms.link;
470 return dev->mtu;
471}
472
473static int
474vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
475{
476 int err = 0;
477 struct ip_tunnel_parm p;
478 struct ip_tunnel *t;
479 struct net *net = dev_net(dev);
480 struct vti_net *ipn = net_generic(net, vti_net_id);
481
482 switch (cmd) {
483 case SIOCGETTUNNEL:
484 t = NULL;
485 if (dev == ipn->fb_tunnel_dev) {
486 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
487 sizeof(p))) {
488 err = -EFAULT;
489 break;
490 }
491 t = vti_tunnel_locate(net, &p, 0);
492 }
493 if (t == NULL)
494 t = netdev_priv(dev);
495 memcpy(&p, &t->parms, sizeof(p));
496 p.i_flags |= GRE_KEY | VTI_ISVTI;
497 p.o_flags |= GRE_KEY;
498 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
499 err = -EFAULT;
500 break;
501
502 case SIOCADDTUNNEL:
503 case SIOCCHGTUNNEL:
504 err = -EPERM;
505 if (!capable(CAP_NET_ADMIN))
506 goto done;
507
508 err = -EFAULT;
509 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
510 goto done;
511
512 err = -EINVAL;
513 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
514 p.iph.ihl != 5)
515 goto done;
516
517 t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
518
519 if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
520 if (t != NULL) {
521 if (t->dev != dev) {
522 err = -EEXIST;
523 break;
524 }
525 } else {
526 if (((dev->flags&IFF_POINTOPOINT) &&
527 !p.iph.daddr) ||
528 (!(dev->flags&IFF_POINTOPOINT) &&
529 p.iph.daddr)) {
530 err = -EINVAL;
531 break;
532 }
533 t = netdev_priv(dev);
534 vti_tunnel_unlink(ipn, t);
535 synchronize_net();
536 t->parms.iph.saddr = p.iph.saddr;
537 t->parms.iph.daddr = p.iph.daddr;
538 t->parms.i_key = p.i_key;
539 t->parms.o_key = p.o_key;
540 t->parms.iph.protocol = IPPROTO_IPIP;
541 memcpy(dev->dev_addr, &p.iph.saddr, 4);
542 memcpy(dev->broadcast, &p.iph.daddr, 4);
543 vti_tunnel_link(ipn, t);
544 netdev_state_change(dev);
545 }
546 }
547
548 if (t) {
549 err = 0;
550 if (cmd == SIOCCHGTUNNEL) {
551 t->parms.i_key = p.i_key;
552 t->parms.o_key = p.o_key;
553 if (t->parms.link != p.link) {
554 t->parms.link = p.link;
555 vti_tunnel_bind_dev(dev);
556 netdev_state_change(dev);
557 }
558 }
559 p.i_flags |= GRE_KEY | VTI_ISVTI;
560 p.o_flags |= GRE_KEY;
561 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
562 sizeof(p)))
563 err = -EFAULT;
564 } else
565 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
566 break;
567
568 case SIOCDELTUNNEL:
569 err = -EPERM;
570 if (!capable(CAP_NET_ADMIN))
571 goto done;
572
573 if (dev == ipn->fb_tunnel_dev) {
574 err = -EFAULT;
575 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
576 sizeof(p)))
577 goto done;
578 err = -ENOENT;
579
580 t = vti_tunnel_locate(net, &p, 0);
581 if (t == NULL)
582 goto done;
583 err = -EPERM;
584 if (t->dev == ipn->fb_tunnel_dev)
585 goto done;
586 dev = t->dev;
587 }
588 unregister_netdevice(dev);
589 err = 0;
590 break;
591
592 default:
593 err = -EINVAL;
594 }
595
596done:
597 return err;
598}
599
600static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
601{
602 if (new_mtu < 68 || new_mtu > 0xFFF8)
603 return -EINVAL;
604 dev->mtu = new_mtu;
605 return 0;
606}
607
608static const struct net_device_ops vti_netdev_ops = {
609 .ndo_init = vti_tunnel_init,
610 .ndo_uninit = vti_tunnel_uninit,
611 .ndo_start_xmit = vti_tunnel_xmit,
612 .ndo_do_ioctl = vti_tunnel_ioctl,
613 .ndo_change_mtu = vti_tunnel_change_mtu,
614 .ndo_get_stats64 = vti_get_stats64,
615};
616
617static void vti_dev_free(struct net_device *dev)
618{
619 free_percpu(dev->tstats);
620 free_netdev(dev);
621}
622
623static void vti_tunnel_setup(struct net_device *dev)
624{
625 dev->netdev_ops = &vti_netdev_ops;
626 dev->destructor = vti_dev_free;
627
628 dev->type = ARPHRD_TUNNEL;
629 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
630 dev->mtu = ETH_DATA_LEN;
631 dev->flags = IFF_NOARP;
632 dev->iflink = 0;
633 dev->addr_len = 4;
634 dev->features |= NETIF_F_NETNS_LOCAL;
635 dev->features |= NETIF_F_LLTX;
636 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
637}
638
639static int vti_tunnel_init(struct net_device *dev)
640{
641 struct ip_tunnel *tunnel = netdev_priv(dev);
642
643 tunnel->dev = dev;
644 strcpy(tunnel->parms.name, dev->name);
645
646 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
647 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
648
649 dev->tstats = alloc_percpu(struct pcpu_tstats);
650 if (!dev->tstats)
651 return -ENOMEM;
652
653 return 0;
654}
655
656static int __net_init vti_fb_tunnel_init(struct net_device *dev)
657{
658 struct ip_tunnel *tunnel = netdev_priv(dev);
659 struct iphdr *iph = &tunnel->parms.iph;
660 struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
661
662 tunnel->dev = dev;
663 strcpy(tunnel->parms.name, dev->name);
664
665 iph->version = 4;
666 iph->protocol = IPPROTO_IPIP;
667 iph->ihl = 5;
668
669 dev->tstats = alloc_percpu(struct pcpu_tstats);
670 if (!dev->tstats)
671 return -ENOMEM;
672
673 dev_hold(dev);
674 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
675 return 0;
676}
677
678static struct xfrm_tunnel vti_handler __read_mostly = {
679 .handler = vti_rcv,
680 .err_handler = vti_err,
681 .priority = 1,
682};
683
684static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
685{
686 int prio;
687
688 for (prio = 1; prio < 4; prio++) {
689 int h;
690 for (h = 0; h < HASH_SIZE; h++) {
691 struct ip_tunnel *t;
692
693 t = rtnl_dereference(ipn->tunnels[prio][h]);
694 while (t != NULL) {
695 unregister_netdevice_queue(t->dev, head);
696 t = rtnl_dereference(t->next);
697 }
698 }
699 }
700}
701
702static int __net_init vti_init_net(struct net *net)
703{
704 int err;
705 struct vti_net *ipn = net_generic(net, vti_net_id);
706
707 ipn->tunnels[0] = ipn->tunnels_wc;
708 ipn->tunnels[1] = ipn->tunnels_l;
709 ipn->tunnels[2] = ipn->tunnels_r;
710 ipn->tunnels[3] = ipn->tunnels_r_l;
711
712 ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
713 "ip_vti0",
714 vti_tunnel_setup);
715 if (!ipn->fb_tunnel_dev) {
716 err = -ENOMEM;
717 goto err_alloc_dev;
718 }
719 dev_net_set(ipn->fb_tunnel_dev, net);
720
721 err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
722 if (err)
723 goto err_reg_dev;
724 ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
725
726 err = register_netdev(ipn->fb_tunnel_dev);
727 if (err)
728 goto err_reg_dev;
729 return 0;
730
731err_reg_dev:
732 vti_dev_free(ipn->fb_tunnel_dev);
733err_alloc_dev:
734 /* nothing */
735 return err;
736}
737
738static void __net_exit vti_exit_net(struct net *net)
739{
740 struct vti_net *ipn = net_generic(net, vti_net_id);
741 LIST_HEAD(list);
742
743 rtnl_lock();
744 vti_destroy_tunnels(ipn, &list);
745 unregister_netdevice_many(&list);
746 rtnl_unlock();
747}
748
749static struct pernet_operations vti_net_ops = {
750 .init = vti_init_net,
751 .exit = vti_exit_net,
752 .id = &vti_net_id,
753 .size = sizeof(struct vti_net),
754};
755
756static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
757{
758 return 0;
759}
760
761static void vti_netlink_parms(struct nlattr *data[],
762 struct ip_tunnel_parm *parms)
763{
764 memset(parms, 0, sizeof(*parms));
765
766 parms->iph.protocol = IPPROTO_IPIP;
767
768 if (!data)
769 return;
770
771 if (data[IFLA_VTI_LINK])
772 parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
773
774 if (data[IFLA_VTI_IKEY])
775 parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
776
777 if (data[IFLA_VTI_OKEY])
778 parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
779
780 if (data[IFLA_VTI_LOCAL])
781 parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
782
783 if (data[IFLA_VTI_REMOTE])
784 parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
785
786}
787
788static int vti_newlink(struct net *src_net, struct net_device *dev,
789 struct nlattr *tb[], struct nlattr *data[])
790{
791 struct ip_tunnel *nt;
792 struct net *net = dev_net(dev);
793 struct vti_net *ipn = net_generic(net, vti_net_id);
794 int mtu;
795 int err;
796
797 nt = netdev_priv(dev);
798 vti_netlink_parms(data, &nt->parms);
799
800 if (vti_tunnel_locate(net, &nt->parms, 0))
801 return -EEXIST;
802
803 mtu = vti_tunnel_bind_dev(dev);
804 if (!tb[IFLA_MTU])
805 dev->mtu = mtu;
806
807 err = register_netdevice(dev);
808 if (err)
809 goto out;
810
811 dev_hold(dev);
812 vti_tunnel_link(ipn, nt);
813
814out:
815 return err;
816}
817
818static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
819 struct nlattr *data[])
820{
821 struct ip_tunnel *t, *nt;
822 struct net *net = dev_net(dev);
823 struct vti_net *ipn = net_generic(net, vti_net_id);
824 struct ip_tunnel_parm p;
825 int mtu;
826
827 if (dev == ipn->fb_tunnel_dev)
828 return -EINVAL;
829
830 nt = netdev_priv(dev);
831 vti_netlink_parms(data, &p);
832
833 t = vti_tunnel_locate(net, &p, 0);
834
835 if (t) {
836 if (t->dev != dev)
837 return -EEXIST;
838 } else {
839 t = nt;
840
841 vti_tunnel_unlink(ipn, t);
842 t->parms.iph.saddr = p.iph.saddr;
843 t->parms.iph.daddr = p.iph.daddr;
844 t->parms.i_key = p.i_key;
845 t->parms.o_key = p.o_key;
846 if (dev->type != ARPHRD_ETHER) {
847 memcpy(dev->dev_addr, &p.iph.saddr, 4);
848 memcpy(dev->broadcast, &p.iph.daddr, 4);
849 }
850 vti_tunnel_link(ipn, t);
851 netdev_state_change(dev);
852 }
853
854 if (t->parms.link != p.link) {
855 t->parms.link = p.link;
856 mtu = vti_tunnel_bind_dev(dev);
857 if (!tb[IFLA_MTU])
858 dev->mtu = mtu;
859 netdev_state_change(dev);
860 }
861
862 return 0;
863}
864
865static size_t vti_get_size(const struct net_device *dev)
866{
867 return
868 /* IFLA_VTI_LINK */
869 nla_total_size(4) +
870 /* IFLA_VTI_IKEY */
871 nla_total_size(4) +
872 /* IFLA_VTI_OKEY */
873 nla_total_size(4) +
874 /* IFLA_VTI_LOCAL */
875 nla_total_size(4) +
876 /* IFLA_VTI_REMOTE */
877 nla_total_size(4) +
878 0;
879}
880
881static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
882{
883 struct ip_tunnel *t = netdev_priv(dev);
884 struct ip_tunnel_parm *p = &t->parms;
885
886 nla_put_u32(skb, IFLA_VTI_LINK, p->link);
887 nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
888 nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
889 nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
890 nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);
891
892 return 0;
893}
894
895static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
896 [IFLA_VTI_LINK] = { .type = NLA_U32 },
897 [IFLA_VTI_IKEY] = { .type = NLA_U32 },
898 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
899 [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
900 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
901};
902
903static struct rtnl_link_ops vti_link_ops __read_mostly = {
904 .kind = "vti",
905 .maxtype = IFLA_VTI_MAX,
906 .policy = vti_policy,
907 .priv_size = sizeof(struct ip_tunnel),
908 .setup = vti_tunnel_setup,
909 .validate = vti_tunnel_validate,
910 .newlink = vti_newlink,
911 .changelink = vti_changelink,
912 .get_size = vti_get_size,
913 .fill_info = vti_fill_info,
914};
915
916static int __init vti_init(void)
917{
918 int err;
919
920 pr_info("IPv4 over IPSec tunneling driver\n");
921
922 err = register_pernet_device(&vti_net_ops);
923 if (err < 0)
924 return err;
925 err = xfrm4_mode_tunnel_input_register(&vti_handler);
926 if (err < 0) {
927 unregister_pernet_device(&vti_net_ops);
928 pr_info("vti init: can't register tunnel\n");
929 }
930
931 err = rtnl_link_register(&vti_link_ops);
932 if (err < 0)
933 goto rtnl_link_failed;
934
935 return err;
936
937rtnl_link_failed:
938 xfrm4_mode_tunnel_input_deregister(&vti_handler);
939 unregister_pernet_device(&vti_net_ops);
940 return err;
941}
942
943static void __exit vti_fini(void)
944{
945 rtnl_link_unregister(&vti_link_ops);
946 if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
947 pr_info("vti close: can't deregister tunnel\n");
948
949 unregister_pernet_device(&vti_net_ops);
950}
951
952module_init(vti_init);
953module_exit(vti_fini);
954MODULE_LICENSE("GPL");
955MODULE_ALIAS_RTNL_LINK("vti");
956MODULE_ALIAS_NETDEV("ip_vti0");
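__vti_bucket() above encodes which endpoints are known into a two-bit table index (bit 1: remote set, bit 0: local set) and XORs the per-address hashes within the chosen table, which is why vti_tunnel_lookup() can scan from most- to least-specific. The selection logic in isolation, runnable as-is:

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16
#define HASH(addr) ((((uint32_t)(addr)) ^ (((uint32_t)(addr)) >> 4)) & (HASH_SIZE - 1))

/* Returns table index (0..3) and bucket, mirroring __vti_bucket(). */
static void pick_bucket(uint32_t remote, uint32_t local,
			int *prio, unsigned int *h)
{
	*prio = 0;
	*h = 0;
	if (remote) {
		*prio |= 2;
		*h ^= HASH(remote);
	}
	if (local) {
		*prio |= 1;
		*h ^= HASH(local);
	}
}

int main(void)
{
	int prio;
	unsigned int h;

	pick_bucket(0x0a000001, 0x0a000002, &prio, &h);
	printf("table %d, bucket %u\n", prio, h);	/* table 3: remote+local */
	return 0;
}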
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 63b64c45a826..d3ab47e19a89 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -31,17 +31,26 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
32 struct xfrm_state *x; 32 struct xfrm_state *x;
33 33
34 if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || 34 switch (icmp_hdr(skb)->type) {
35 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 35 case ICMP_DEST_UNREACH:
36 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
37 return;
38 case ICMP_REDIRECT:
39 break;
40 default:
36 return; 41 return;
42 }
37 43
38 spi = htonl(ntohs(ipch->cpi)); 44 spi = htonl(ntohs(ipch->cpi));
39 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 45 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
40 spi, IPPROTO_COMP, AF_INET); 46 spi, IPPROTO_COMP, AF_INET);
41 if (!x) 47 if (!x)
42 return; 48 return;
43 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n", 49
44 spi, &iph->daddr); 50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
51 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
52 else
53 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
45 xfrm_state_put(x); 54 xfrm_state_put(x);
46} 55}
47 56
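The IPComp lookup above converts the 16-bit CPI into the 32-bit SPI slot with htonl(ntohs(cpi)): decode to host order, widen, re-encode. A quick check that the round trip preserves the value regardless of host endianness:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cpi = htons(0x1234);		/* CPI as it sits on the wire */
	uint32_t spi = htonl(ntohs(cpi));	/* widened into the SPI field */

	/* Both print 0x1234 on big- and little-endian hosts alike. */
	printf("cpi=%#x spi=%#x\n", (unsigned)ntohs(cpi), (unsigned)ntohl(spi));
	return 0;
}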
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 2d0f99bf61b3..99af1f0cc658 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -348,9 +348,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
348 case ICMP_PORT_UNREACH: 348 case ICMP_PORT_UNREACH:
349 /* Impossible event. */ 349 /* Impossible event. */
350 return 0; 350 return 0;
351 case ICMP_FRAG_NEEDED:
352 /* Soft state for pmtu is maintained by IP core. */
353 return 0;
354 default: 351 default:
355 /* All others are translated to HOST_UNREACH. 352 /* All others are translated to HOST_UNREACH.
356 rfc2003 contains "deep thoughts" about NET_UNREACH, 353 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -363,13 +360,32 @@ static int ipip_err(struct sk_buff *skb, u32 info)
363 if (code != ICMP_EXC_TTL) 360 if (code != ICMP_EXC_TTL)
364 return 0; 361 return 0;
365 break; 362 break;
363 case ICMP_REDIRECT:
364 break;
366 } 365 }
367 366
368 err = -ENOENT; 367 err = -ENOENT;
369 368
370 rcu_read_lock(); 369 rcu_read_lock();
371 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 370 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
372 if (t == NULL || t->parms.iph.daddr == 0) 371 if (t == NULL)
372 goto out;
373
374 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
375 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
376 t->dev->ifindex, 0, IPPROTO_IPIP, 0);
377 err = 0;
378 goto out;
379 }
380
381 if (type == ICMP_REDIRECT) {
382 ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
383 IPPROTO_IPIP, 0);
384 err = 0;
385 goto out;
386 }
387
388 if (t->parms.iph.daddr == 0)
373 goto out; 389 goto out;
374 390
375 err = 0; 391 err = 0;
@@ -471,7 +487,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
471 dev->stats.tx_fifo_errors++; 487 dev->stats.tx_fifo_errors++;
472 goto tx_error; 488 goto tx_error;
473 } 489 }
474 dst = rt->rt_gateway; 490 dst = rt_nexthop(rt, old_iph->daddr);
475 } 491 }
476 492
477 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 493 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -503,7 +519,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
503 } 519 }
504 520
505 if (skb_dst(skb)) 521 if (skb_dst(skb))
506 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 522 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
507 523
508 if ((old_iph->frag_off & htons(IP_DF)) && 524 if ((old_iph->frag_off & htons(IP_DF)) &&
509 mtu < ntohs(old_iph->tot_len)) { 525 mtu < ntohs(old_iph->tot_len)) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c94bbc6f2ba3..8eec8f4a0536 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -524,8 +524,8 @@ failure:
524} 524}
525#endif 525#endif
526 526
527/* 527/**
528 * Delete a VIF entry 528 * vif_delete - Delete a VIF entry
529 * @notify: Set to 1 if the caller is a notifier_call 529 * @notify: Set to 1 if the caller is a notifier_call
530 */ 530 */
531 531
@@ -1795,9 +1795,12 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1795 .daddr = iph->daddr, 1795 .daddr = iph->daddr,
1796 .saddr = iph->saddr, 1796 .saddr = iph->saddr,
1797 .flowi4_tos = RT_TOS(iph->tos), 1797 .flowi4_tos = RT_TOS(iph->tos),
1798 .flowi4_oif = rt->rt_oif, 1798 .flowi4_oif = (rt_is_output_route(rt) ?
1799 .flowi4_iif = rt->rt_iif, 1799 skb->dev->ifindex : 0),
1800 .flowi4_mark = rt->rt_mark, 1800 .flowi4_iif = (rt_is_output_route(rt) ?
1801 net->loopback_dev->ifindex :
1802 skb->dev->ifindex),
1803 .flowi4_mark = skb->mark,
1801 }; 1804 };
1802 struct mr_table *mrt; 1805 struct mr_table *mrt;
1803 int err; 1806 int err;
@@ -2006,37 +2009,37 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2006{ 2009{
2007 int ct; 2010 int ct;
2008 struct rtnexthop *nhp; 2011 struct rtnexthop *nhp;
2009 u8 *b = skb_tail_pointer(skb); 2012 struct nlattr *mp_attr;
2010 struct rtattr *mp_head;
2011 2013
2012 /* If cache is unresolved, don't try to parse IIF and OIF */ 2014 /* If cache is unresolved, don't try to parse IIF and OIF */
2013 if (c->mfc_parent >= MAXVIFS) 2015 if (c->mfc_parent >= MAXVIFS)
2014 return -ENOENT; 2016 return -ENOENT;
2015 2017
2016 if (VIF_EXISTS(mrt, c->mfc_parent)) 2018 if (VIF_EXISTS(mrt, c->mfc_parent) &&
2017 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); 2019 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2020 return -EMSGSIZE;
2018 2021
2019 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2022 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2023 return -EMSGSIZE;
2020 2024
2021 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2025 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2022 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2026 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2023 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 2027 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2024 goto rtattr_failure; 2028 nla_nest_cancel(skb, mp_attr);
2025 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 2029 return -EMSGSIZE;
2030 }
2031
2026 nhp->rtnh_flags = 0; 2032 nhp->rtnh_flags = 0;
2027 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2033 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2028 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; 2034 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2029 nhp->rtnh_len = sizeof(*nhp); 2035 nhp->rtnh_len = sizeof(*nhp);
2030 } 2036 }
2031 } 2037 }
2032 mp_head->rta_type = RTA_MULTIPATH; 2038
2033 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 2039 nla_nest_end(skb, mp_attr);
2040
2034 rtm->rtm_type = RTN_MULTICAST; 2041 rtm->rtm_type = RTN_MULTICAST;
2035 return 1; 2042 return 1;
2036
2037rtattr_failure:
2038 nlmsg_trim(skb, b);
2039 return -EMSGSIZE;
2040} 2043}
2041 2044
2042int ipmr_get_route(struct net *net, struct sk_buff *skb, 2045int ipmr_get_route(struct net *net, struct sk_buff *skb,
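The __ipmr_fill_mroute() rewrite above replaces raw rtattr pointer arithmetic with nla_nest_start()/nla_nest_end(), which emit a nest header first and patch its length once the payload size is known, cancelling cleanly on overflow. The back-patching idea in a toy TLV writer (not the netlink API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv { uint16_t type; uint16_t len; };	/* header precedes payload */

/* Emit a header with len 0; returns the offset where payload starts. */
static size_t nest_start(uint8_t *buf, size_t off, uint16_t type)
{
	struct tlv h = { type, 0 };

	memcpy(buf + off, &h, sizeof(h));
	return off + sizeof(h);
}

/* Patch the header once the payload length is known. */
static void nest_end(uint8_t *buf, size_t payload_start, size_t end)
{
	struct tlv h;

	memcpy(&h, buf + payload_start - sizeof(h), sizeof(h));
	h.len = (uint16_t)(end - payload_start);
	memcpy(buf + payload_start - sizeof(h), &h, sizeof(h));
}

int main(void)
{
	uint8_t buf[64];
	size_t payload = nest_start(buf, 0, 1);	/* 1: toy RTA_MULTIPATH */
	size_t off = payload;
	struct tlv h;

	memcpy(buf + off, "hop", 3);		/* nested payload */
	off += 3;
	nest_end(buf, payload, off);

	memcpy(&h, buf, sizeof(h));
	printf("type=%u len=%u\n", h.type, h.len);	/* type=1 len=3 */
	return 0;
}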
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 2f210c79dc87..cbb6a1a6f6f7 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -52,7 +52,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
52 struct nf_nat_ipv4_range newrange; 52 struct nf_nat_ipv4_range newrange;
53 const struct nf_nat_ipv4_multi_range_compat *mr; 53 const struct nf_nat_ipv4_multi_range_compat *mr;
54 const struct rtable *rt; 54 const struct rtable *rt;
55 __be32 newsrc; 55 __be32 newsrc, nh;
56 56
57 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); 57 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
58 58
@@ -70,7 +70,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
70 70
71 mr = par->targinfo; 71 mr = par->targinfo;
72 rt = skb_rtable(skb); 72 rt = skb_rtable(skb);
73 newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); 73 nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
74 newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE);
74 if (!newsrc) { 75 if (!newsrc) {
75 pr_info("%s ate my IP address\n", par->out->name); 76 pr_info("%s ate my IP address\n", par->out->name);
76 return NF_DROP; 77 return NF_DROP;
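rt_nexthop(), introduced above in place of a bare rt->rt_gateway read, accounts for routes where no gateway is recorded (on-link destinations): the gateway wins when set, otherwise the packet's own destination is the next hop. The selection rule in isolation, with struct rtable reduced to the one field the rule needs:

#include <stdint.h>
#include <stdio.h>

/* Reduced model of struct rtable, for illustration only. */
struct rtable_demo { uint32_t rt_gateway; };

static uint32_t rt_nexthop_demo(const struct rtable_demo *rt, uint32_t daddr)
{
	return rt->rt_gateway ? rt->rt_gateway : daddr;
}

int main(void)
{
	struct rtable_demo via = { 0x0a000001 };	/* gatewayed route */
	struct rtable_demo onlink = { 0 };		/* on-link, no gateway */

	printf("%#x\n", (unsigned)rt_nexthop_demo(&via, 0xc0a80001));	/* gateway wins */
	printf("%#x\n", (unsigned)rt_nexthop_demo(&onlink, 0xc0a80001));/* daddr used */
	return 0;
}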
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ba5756d20165..1109f7f6c254 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -196,12 +196,15 @@ static void ipt_ulog_packet(unsigned int hooknum,
196 196
197 pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold); 197 pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
198 198
199 /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ 199 nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
200 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 200 sizeof(*pm)+copy_len, 0);
201 sizeof(*pm)+copy_len); 201 if (!nlh) {
202 pr_debug("error during nlmsg_put\n");
203 goto out_unlock;
204 }
202 ub->qlen++; 205 ub->qlen++;
203 206
204 pm = NLMSG_DATA(nlh); 207 pm = nlmsg_data(nlh);
205 208
206 /* We might not have a timestamp, get one */ 209 /* We might not have a timestamp, get one */
207 if (skb->tstamp.tv64 == 0) 210 if (skb->tstamp.tv64 == 0)
@@ -261,13 +264,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
261 nlh->nlmsg_type = NLMSG_DONE; 264 nlh->nlmsg_type = NLMSG_DONE;
262 ulog_send(groupnum); 265 ulog_send(groupnum);
263 } 266 }
264 267out_unlock:
265 spin_unlock_bh(&ulog_lock); 268 spin_unlock_bh(&ulog_lock);
266 269
267 return; 270 return;
268 271
269nlmsg_failure:
270 pr_debug("error during NLMSG_PUT\n");
271alloc_failure: 272alloc_failure:
272 pr_debug("Error building netlink message\n"); 273 pr_debug("Error building netlink message\n");
273 spin_unlock_bh(&ulog_lock); 274 spin_unlock_bh(&ulog_lock);
@@ -380,6 +381,9 @@ static struct nf_logger ipt_ulog_logger __read_mostly = {
380static int __init ulog_tg_init(void) 381static int __init ulog_tg_init(void)
381{ 382{
382 int ret, i; 383 int ret, i;
384 struct netlink_kernel_cfg cfg = {
385 .groups = ULOG_MAXNLGROUPS,
386 };
383 387
384 pr_debug("init module\n"); 388 pr_debug("init module\n");
385 389
@@ -392,9 +396,8 @@ static int __init ulog_tg_init(void)
392 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 396 for (i = 0; i < ULOG_MAXNLGROUPS; i++)
393 setup_timer(&ulog_buffers[i].timer, ulog_timer, i); 397 setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
394 398
395 nflognl = netlink_kernel_create(&init_net, 399 nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
396 NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, 400 THIS_MODULE, &cfg);
397 NULL, THIS_MODULE);
398 if (!nflognl) 401 if (!nflognl)
399 return -ENOMEM; 402 return -ENOMEM;
400 403
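The netlink_kernel_create() change above folds a growing positional argument list into struct netlink_kernel_cfg, so callers name only the options they use and every future field defaults to zero under a designated initializer. The pattern in miniature, with demo names rather than the kernel API:

#include <stdio.h>

/* Hypothetical config block in the style of struct netlink_kernel_cfg. */
struct demo_cfg {
	unsigned int groups;
	void (*input)(int unit);	/* optional hooks default to NULL */
};

static int demo_create(int unit, const struct demo_cfg *cfg)
{
	printf("unit=%d groups=%u input=%s\n",
	       unit, cfg->groups, cfg->input ? "set" : "unset");
	return 0;
}

int main(void)
{
	struct demo_cfg cfg = {
		.groups = 32,	/* name only what we care about */
	};

	return demo_create(5, &cfg);	/* 5: NETLINK_NFLOG's value */
}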
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 91747d4ebc26..e7ff2dcab6ce 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -95,11 +95,11 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
95 return NF_ACCEPT; 95 return NF_ACCEPT;
96} 96}
97 97
98static unsigned int ipv4_confirm(unsigned int hooknum, 98static unsigned int ipv4_helper(unsigned int hooknum,
99 struct sk_buff *skb, 99 struct sk_buff *skb,
100 const struct net_device *in, 100 const struct net_device *in,
101 const struct net_device *out, 101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *)) 102 int (*okfn)(struct sk_buff *))
103{ 103{
104 struct nf_conn *ct; 104 struct nf_conn *ct;
105 enum ip_conntrack_info ctinfo; 105 enum ip_conntrack_info ctinfo;
@@ -110,24 +110,38 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
110 /* This is where we call the helper: as the packet goes out. */ 110 /* This is where we call the helper: as the packet goes out. */
111 ct = nf_ct_get(skb, &ctinfo); 111 ct = nf_ct_get(skb, &ctinfo);
112 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 112 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
113 goto out; 113 return NF_ACCEPT;
114 114
115 help = nfct_help(ct); 115 help = nfct_help(ct);
116 if (!help) 116 if (!help)
117 goto out; 117 return NF_ACCEPT;
118 118
119 /* rcu_read_lock()ed by nf_hook_slow */ 119 /* rcu_read_lock()ed by nf_hook_slow */
120 helper = rcu_dereference(help->helper); 120 helper = rcu_dereference(help->helper);
121 if (!helper) 121 if (!helper)
122 goto out; 122 return NF_ACCEPT;
123 123
124 ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), 124 ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
125 ct, ctinfo); 125 ct, ctinfo);
126 if (ret != NF_ACCEPT) { 126 if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
127 nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL, 127 nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
128 "nf_ct_%s: dropping packet", helper->name); 128 "nf_ct_%s: dropping packet", helper->name);
129 return ret;
130 } 129 }
130 return ret;
131}
132
133static unsigned int ipv4_confirm(unsigned int hooknum,
134 struct sk_buff *skb,
135 const struct net_device *in,
136 const struct net_device *out,
137 int (*okfn)(struct sk_buff *))
138{
139 struct nf_conn *ct;
140 enum ip_conntrack_info ctinfo;
141
142 ct = nf_ct_get(skb, &ctinfo);
143 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
144 goto out;
131 145
132 /* adjust seqs for loopback traffic only in outgoing direction */ 146 /* adjust seqs for loopback traffic only in outgoing direction */
133 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 147 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
@@ -185,6 +199,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
185 .priority = NF_IP_PRI_CONNTRACK, 199 .priority = NF_IP_PRI_CONNTRACK,
186 }, 200 },
187 { 201 {
202 .hook = ipv4_helper,
203 .owner = THIS_MODULE,
204 .pf = NFPROTO_IPV4,
205 .hooknum = NF_INET_POST_ROUTING,
206 .priority = NF_IP_PRI_CONNTRACK_HELPER,
207 },
208 {
188 .hook = ipv4_confirm, 209 .hook = ipv4_confirm,
189 .owner = THIS_MODULE, 210 .owner = THIS_MODULE,
190 .pf = NFPROTO_IPV4, 211 .pf = NFPROTO_IPV4,
@@ -192,6 +213,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
192 .priority = NF_IP_PRI_CONNTRACK_CONFIRM, 213 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
193 }, 214 },
194 { 215 {
216 .hook = ipv4_helper,
217 .owner = THIS_MODULE,
218 .pf = NFPROTO_IPV4,
219 .hooknum = NF_INET_LOCAL_IN,
220 .priority = NF_IP_PRI_CONNTRACK_HELPER,
221 },
222 {
195 .hook = ipv4_confirm, 223 .hook = ipv4_confirm,
196 .owner = THIS_MODULE, 224 .owner = THIS_MODULE,
197 .pf = NFPROTO_IPV4, 225 .pf = NFPROTO_IPV4,
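The helper logic split out of ipv4_confirm() gets its own entries in ipv4_conntrack_ops at NF_IP_PRI_CONNTRACK_HELPER, which sorts before NF_IP_PRI_CONNTRACK_CONFIRM, so helpers still run before a connection is confirmed. A reduced sketch of registering one such hook with the 3.6-era five-argument hook signature (my_* names illustrative):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

static unsigned int my_helper_hook(unsigned int hooknum, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops[] __read_mostly = {
	{
		.hook		= my_helper_hook,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		/* HELPER < CONFIRM, so this runs first at the hook point */
		.priority	= NF_IP_PRI_CONNTRACK_HELPER,
	},
};

/* from module init: ret = nf_register_hooks(my_ops, ARRAY_SIZE(my_ops)); */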
@@ -207,35 +235,30 @@ static int log_invalid_proto_max = 255;
207static ctl_table ip_ct_sysctl_table[] = { 235static ctl_table ip_ct_sysctl_table[] = {
208 { 236 {
209 .procname = "ip_conntrack_max", 237 .procname = "ip_conntrack_max",
210 .data = &nf_conntrack_max,
211 .maxlen = sizeof(int), 238 .maxlen = sizeof(int),
212 .mode = 0644, 239 .mode = 0644,
213 .proc_handler = proc_dointvec, 240 .proc_handler = proc_dointvec,
214 }, 241 },
215 { 242 {
216 .procname = "ip_conntrack_count", 243 .procname = "ip_conntrack_count",
217 .data = &init_net.ct.count,
218 .maxlen = sizeof(int), 244 .maxlen = sizeof(int),
219 .mode = 0444, 245 .mode = 0444,
220 .proc_handler = proc_dointvec, 246 .proc_handler = proc_dointvec,
221 }, 247 },
222 { 248 {
223 .procname = "ip_conntrack_buckets", 249 .procname = "ip_conntrack_buckets",
224 .data = &init_net.ct.htable_size,
225 .maxlen = sizeof(unsigned int), 250 .maxlen = sizeof(unsigned int),
226 .mode = 0444, 251 .mode = 0444,
227 .proc_handler = proc_dointvec, 252 .proc_handler = proc_dointvec,
228 }, 253 },
229 { 254 {
230 .procname = "ip_conntrack_checksum", 255 .procname = "ip_conntrack_checksum",
231 .data = &init_net.ct.sysctl_checksum,
232 .maxlen = sizeof(int), 256 .maxlen = sizeof(int),
233 .mode = 0644, 257 .mode = 0644,
234 .proc_handler = proc_dointvec, 258 .proc_handler = proc_dointvec,
235 }, 259 },
236 { 260 {
237 .procname = "ip_conntrack_log_invalid", 261 .procname = "ip_conntrack_log_invalid",
238 .data = &init_net.ct.sysctl_log_invalid,
239 .maxlen = sizeof(unsigned int), 262 .maxlen = sizeof(unsigned int),
240 .mode = 0644, 263 .mode = 0644,
241 .proc_handler = proc_dointvec_minmax, 264 .proc_handler = proc_dointvec_minmax,
@@ -351,6 +374,25 @@ static struct nf_sockopt_ops so_getorigdst = {
351 .owner = THIS_MODULE, 374 .owner = THIS_MODULE,
352}; 375};
353 376
377static int ipv4_init_net(struct net *net)
378{
379#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
380 struct nf_ip_net *in = &net->ct.nf_ct_proto;
381 in->ctl_table = kmemdup(ip_ct_sysctl_table,
382 sizeof(ip_ct_sysctl_table),
383 GFP_KERNEL);
384 if (!in->ctl_table)
385 return -ENOMEM;
386
387 in->ctl_table[0].data = &nf_conntrack_max;
388 in->ctl_table[1].data = &net->ct.count;
389 in->ctl_table[2].data = &net->ct.htable_size;
390 in->ctl_table[3].data = &net->ct.sysctl_checksum;
391 in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
392#endif
393 return 0;
394}
395
354struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { 396struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
355 .l3proto = PF_INET, 397 .l3proto = PF_INET,
356 .name = "ipv4", 398 .name = "ipv4",
@@ -366,8 +408,8 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
366#endif 408#endif
367#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 409#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
368 .ctl_table_path = "net/ipv4/netfilter", 410 .ctl_table_path = "net/ipv4/netfilter",
369 .ctl_table = ip_ct_sysctl_table,
370#endif 411#endif
412 .init_net = ipv4_init_net,
371 .me = THIS_MODULE, 413 .me = THIS_MODULE,
372}; 414};
373 415
@@ -378,6 +420,65 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
378MODULE_ALIAS("ip_conntrack"); 420MODULE_ALIAS("ip_conntrack");
379MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
380 422
423static int ipv4_net_init(struct net *net)
424{
425 int ret = 0;
426
427 ret = nf_conntrack_l4proto_register(net,
428 &nf_conntrack_l4proto_tcp4);
429 if (ret < 0) {
430 pr_err("nf_conntrack_l4proto_tcp4: protocol register failed\n");
431 goto out_tcp;
432 }
433 ret = nf_conntrack_l4proto_register(net,
434 &nf_conntrack_l4proto_udp4);
435 if (ret < 0) {
436 pr_err("nf_conntrack_l4proto_udp4: protocol register failed\n");
437 goto out_udp;
438 }
439 ret = nf_conntrack_l4proto_register(net,
440 &nf_conntrack_l4proto_icmp);
441 if (ret < 0) {
442 pr_err("nf_conntrack_l4proto_icmp4: protocol register failed\n");
443 goto out_icmp;
444 }
445 ret = nf_conntrack_l3proto_register(net,
446 &nf_conntrack_l3proto_ipv4);
447 if (ret < 0) {
448 pr_err("nf_conntrack_l3proto_ipv4: protocol register failed\n");
449 goto out_ipv4;
450 }
451 return 0;
452out_ipv4:
453 nf_conntrack_l4proto_unregister(net,
454 &nf_conntrack_l4proto_icmp);
455out_icmp:
456 nf_conntrack_l4proto_unregister(net,
457 &nf_conntrack_l4proto_udp4);
458out_udp:
459 nf_conntrack_l4proto_unregister(net,
460 &nf_conntrack_l4proto_tcp4);
461out_tcp:
462 return ret;
463}
464
465static void ipv4_net_exit(struct net *net)
466{
467 nf_conntrack_l3proto_unregister(net,
468 &nf_conntrack_l3proto_ipv4);
469 nf_conntrack_l4proto_unregister(net,
470 &nf_conntrack_l4proto_icmp);
471 nf_conntrack_l4proto_unregister(net,
472 &nf_conntrack_l4proto_udp4);
473 nf_conntrack_l4proto_unregister(net,
474 &nf_conntrack_l4proto_tcp4);
475}
476
477static struct pernet_operations ipv4_net_ops = {
478 .init = ipv4_net_init,
479 .exit = ipv4_net_exit,
480};
481
381static int __init nf_conntrack_l3proto_ipv4_init(void) 482static int __init nf_conntrack_l3proto_ipv4_init(void)
382{ 483{
383 int ret = 0; 484 int ret = 0;
@@ -391,35 +492,17 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
391 return ret; 492 return ret;
392 } 493 }
393 494
394 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); 495 ret = register_pernet_subsys(&ipv4_net_ops);
395 if (ret < 0) { 496 if (ret < 0) {
396 pr_err("nf_conntrack_ipv4: can't register tcp.\n"); 497 pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
397 goto cleanup_sockopt; 498 goto cleanup_sockopt;
398 } 499 }
399 500
400 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
401 if (ret < 0) {
402 pr_err("nf_conntrack_ipv4: can't register udp.\n");
403 goto cleanup_tcp;
404 }
405
406 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
407 if (ret < 0) {
408 pr_err("nf_conntrack_ipv4: can't register icmp.\n");
409 goto cleanup_udp;
410 }
411
412 ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
413 if (ret < 0) {
414 pr_err("nf_conntrack_ipv4: can't register ipv4\n");
415 goto cleanup_icmp;
416 }
417
418 ret = nf_register_hooks(ipv4_conntrack_ops, 501 ret = nf_register_hooks(ipv4_conntrack_ops,
419 ARRAY_SIZE(ipv4_conntrack_ops)); 502 ARRAY_SIZE(ipv4_conntrack_ops));
420 if (ret < 0) { 503 if (ret < 0) {
421 pr_err("nf_conntrack_ipv4: can't register hooks.\n"); 504 pr_err("nf_conntrack_ipv4: can't register hooks.\n");
422 goto cleanup_ipv4; 505 goto cleanup_pernet;
423 } 506 }
424#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 507#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
425 ret = nf_conntrack_ipv4_compat_init(); 508 ret = nf_conntrack_ipv4_compat_init();
@@ -431,14 +514,8 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
431 cleanup_hooks: 514 cleanup_hooks:
432 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); 515 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
433#endif 516#endif
434 cleanup_ipv4: 517 cleanup_pernet:
435 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 518 unregister_pernet_subsys(&ipv4_net_ops);
436 cleanup_icmp:
437 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
438 cleanup_udp:
439 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
440 cleanup_tcp:
441 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
442 cleanup_sockopt: 519 cleanup_sockopt:
443 nf_unregister_sockopt(&so_getorigdst); 520 nf_unregister_sockopt(&so_getorigdst);
444 return ret; 521 return ret;
@@ -451,10 +528,7 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
451 nf_conntrack_ipv4_compat_fini(); 528 nf_conntrack_ipv4_compat_fini();
452#endif 529#endif
453 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); 530 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
454 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 531 unregister_pernet_subsys(&ipv4_net_ops);
455 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
456 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
457 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
458 nf_unregister_sockopt(&so_getorigdst); 532 nf_unregister_sockopt(&so_getorigdst);
459} 533}
460 534
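Two building blocks carry the conversion above. register_pernet_subsys() runs an init/exit pair for every network namespace, and the kmemdup() idiom keeps one shared ctl_table template holding only names, modes and handlers while each namespace gets a private copy whose .data pointers are re-aimed at its own fields. A reduced sketch of the pairing; my_* names are illustrative and the per-net field is just any int-sized example:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table my_template[] = {
	{
		.procname	= "my_counter",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
		/* .data deliberately NULL in the shared template */
	},
	{ }
};

static int __net_init my_net_init(struct net *net)
{
	struct ctl_table *t;

	t = kmemdup(my_template, sizeof(my_template), GFP_KERNEL);
	if (!t)
		return -ENOMEM;
	t[0].data = &net->ipv4.sysctl_icmp_ratelimit;	/* this net's copy */
	/* a real user also registers t and stores it for cleanup */
	return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
	/* unregister the table, then kfree() the per-net copy */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
};

/* once, from module init: register_pernet_subsys(&my_net_ops); */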
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 0847e373d33c..5241d997ab75 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -23,6 +23,11 @@
23 23
24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; 24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
25 25
26static inline struct nf_icmp_net *icmp_pernet(struct net *net)
27{
28 return &net->ct.nf_ct_proto.icmp;
29}
30
26static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 31static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
27 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
28{ 33{
@@ -77,7 +82,7 @@ static int icmp_print_tuple(struct seq_file *s,
77 82
78static unsigned int *icmp_get_timeouts(struct net *net) 83static unsigned int *icmp_get_timeouts(struct net *net)
79{ 84{
80 return &nf_ct_icmp_timeout; 85 return &icmp_pernet(net)->timeout;
81} 86}
82 87
83/* Returns verdict for packet, or -1 for invalid. */ 88/* Returns verdict for packet, or -1 for invalid. */
@@ -274,16 +279,18 @@ static int icmp_nlattr_tuple_size(void)
274#include <linux/netfilter/nfnetlink.h> 279#include <linux/netfilter/nfnetlink.h>
275#include <linux/netfilter/nfnetlink_cttimeout.h> 280#include <linux/netfilter/nfnetlink_cttimeout.h>
276 281
277static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 282static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
283 struct net *net, void *data)
278{ 284{
279 unsigned int *timeout = data; 285 unsigned int *timeout = data;
286 struct nf_icmp_net *in = icmp_pernet(net);
280 287
281 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { 288 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
282 *timeout = 289 *timeout =
283 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; 290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
284 } else { 291 } else {
285 /* Set default ICMP timeout. */ 292 /* Set default ICMP timeout. */
286 *timeout = nf_ct_icmp_timeout; 293 *timeout = in->timeout;
287 } 294 }
288 return 0; 295 return 0;
289} 296}
@@ -308,11 +315,9 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
308#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
309 316
310#ifdef CONFIG_SYSCTL 317#ifdef CONFIG_SYSCTL
311static struct ctl_table_header *icmp_sysctl_header;
312static struct ctl_table icmp_sysctl_table[] = { 318static struct ctl_table icmp_sysctl_table[] = {
313 { 319 {
314 .procname = "nf_conntrack_icmp_timeout", 320 .procname = "nf_conntrack_icmp_timeout",
315 .data = &nf_ct_icmp_timeout,
316 .maxlen = sizeof(unsigned int), 321 .maxlen = sizeof(unsigned int),
317 .mode = 0644, 322 .mode = 0644,
318 .proc_handler = proc_dointvec_jiffies, 323 .proc_handler = proc_dointvec_jiffies,
@@ -323,7 +328,6 @@ static struct ctl_table icmp_sysctl_table[] = {
323static struct ctl_table icmp_compat_sysctl_table[] = { 328static struct ctl_table icmp_compat_sysctl_table[] = {
324 { 329 {
325 .procname = "ip_conntrack_icmp_timeout", 330 .procname = "ip_conntrack_icmp_timeout",
326 .data = &nf_ct_icmp_timeout,
327 .maxlen = sizeof(unsigned int), 331 .maxlen = sizeof(unsigned int),
328 .mode = 0644, 332 .mode = 0644,
329 .proc_handler = proc_dointvec_jiffies, 333 .proc_handler = proc_dointvec_jiffies,
@@ -333,6 +337,62 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
333#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 337#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
334#endif /* CONFIG_SYSCTL */ 338#endif /* CONFIG_SYSCTL */
335 339
340static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
341 struct nf_icmp_net *in)
342{
343#ifdef CONFIG_SYSCTL
344 pn->ctl_table = kmemdup(icmp_sysctl_table,
345 sizeof(icmp_sysctl_table),
346 GFP_KERNEL);
347 if (!pn->ctl_table)
348 return -ENOMEM;
349
350 pn->ctl_table[0].data = &in->timeout;
351#endif
352 return 0;
353}
354
355static int icmp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
356 struct nf_icmp_net *in)
357{
358#ifdef CONFIG_SYSCTL
359#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
360 pn->ctl_compat_table = kmemdup(icmp_compat_sysctl_table,
361 sizeof(icmp_compat_sysctl_table),
362 GFP_KERNEL);
363 if (!pn->ctl_compat_table)
364 return -ENOMEM;
365
366 pn->ctl_compat_table[0].data = &in->timeout;
367#endif
368#endif
369 return 0;
370}
371
372static int icmp_init_net(struct net *net, u_int16_t proto)
373{
374 int ret;
375 struct nf_icmp_net *in = icmp_pernet(net);
376 struct nf_proto_net *pn = &in->pn;
377
378 in->timeout = nf_ct_icmp_timeout;
379
380 ret = icmp_kmemdup_compat_sysctl_table(pn, in);
381 if (ret < 0)
382 return ret;
383
384 ret = icmp_kmemdup_sysctl_table(pn, in);
385 if (ret < 0)
386 nf_ct_kfree_compat_sysctl_table(pn);
387
388 return ret;
389}
390
391static struct nf_proto_net *icmp_get_net_proto(struct net *net)
392{
393 return &net->ct.nf_ct_proto.icmp.pn;
394}
395
336struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = 396struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
337{ 397{
338 .l3proto = PF_INET, 398 .l3proto = PF_INET,
@@ -362,11 +422,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
362 .nla_policy = icmp_timeout_nla_policy, 422 .nla_policy = icmp_timeout_nla_policy,
363 }, 423 },
364#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 424#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
365#ifdef CONFIG_SYSCTL 425 .init_net = icmp_init_net,
366 .ctl_table_header = &icmp_sysctl_header, 426 .get_net_proto = icmp_get_net_proto,
367 .ctl_table = icmp_sysctl_table,
368#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
369 .ctl_compat_table = icmp_compat_sysctl_table,
370#endif
371#endif
372}; 427};
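Same move one level down: nf_ct_icmp_timeout survives only as the seed that icmp_init_net() copies into each namespace, and all lookups go through the icmp_pernet() accessor. The shape of that accessor pattern in isolation, with a file-scope stub standing in for the real field inside struct net (my_* names illustrative):

#include <linux/kernel.h>
#include <net/net_namespace.h>

struct my_proto_net {
	unsigned int timeout;		/* in jiffies */
};

/* stand-in for net->ct.nf_ct_proto.icmp */
static struct my_proto_net my_stub = { .timeout = 30 * HZ };

static inline struct my_proto_net *my_pernet(struct net *net)
{
	return &my_stub;
}

static unsigned int *my_get_timeouts(struct net *net)
{
	/* callers see the namespace's value, not a module-wide global */
	return &my_pernet(net)->timeout;
}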
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9bb1b8a37a22..742815518b0f 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -94,14 +94,14 @@ static struct nf_hook_ops ipv4_defrag_ops[] = {
94 { 94 {
95 .hook = ipv4_conntrack_defrag, 95 .hook = ipv4_conntrack_defrag,
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
97 .pf = PF_INET, 97 .pf = NFPROTO_IPV4,
98 .hooknum = NF_INET_PRE_ROUTING, 98 .hooknum = NF_INET_PRE_ROUTING,
99 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 99 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
100 }, 100 },
101 { 101 {
102 .hook = ipv4_conntrack_defrag, 102 .hook = ipv4_conntrack_defrag,
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .pf = PF_INET, 104 .pf = NFPROTO_IPV4,
105 .hooknum = NF_INET_LOCAL_OUT, 105 .hooknum = NF_INET_LOCAL_OUT,
106 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 106 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
107 }, 107 },
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 7b22382ff0e9..3c04d24e2976 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -13,10 +13,10 @@
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/udp.h> 14#include <linux/udp.h>
15 15
16#include <net/netfilter/nf_nat_helper.h>
17#include <net/netfilter/nf_nat_rule.h>
18#include <net/netfilter/nf_conntrack_helper.h> 16#include <net/netfilter/nf_conntrack_helper.h>
19#include <net/netfilter/nf_conntrack_expect.h> 17#include <net/netfilter/nf_conntrack_expect.h>
18#include <net/netfilter/nf_nat_helper.h>
19#include <net/netfilter/nf_nat_rule.h>
20#include <linux/netfilter/nf_conntrack_amanda.h> 20#include <linux/netfilter/nf_conntrack_amanda.h>
21 21
22MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); 22MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index abb52adf5acd..44b082fd48ab 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -691,6 +691,10 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
691 .expectfn = nf_nat_follow_master, 691 .expectfn = nf_nat_follow_master,
692}; 692};
693 693
694static struct nfq_ct_nat_hook nfq_ct_nat = {
695 .seq_adjust = nf_nat_tcp_seq_adjust,
696};
697
694static int __init nf_nat_init(void) 698static int __init nf_nat_init(void)
695{ 699{
696 size_t i; 700 size_t i;
@@ -731,6 +735,7 @@ static int __init nf_nat_init(void)
731 nfnetlink_parse_nat_setup); 735 nfnetlink_parse_nat_setup);
732 BUG_ON(nf_ct_nat_offset != NULL); 736 BUG_ON(nf_ct_nat_offset != NULL);
733 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset); 737 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
738 RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
734 return 0; 739 return 0;
735 740
736 cleanup_extend: 741 cleanup_extend:
@@ -747,6 +752,7 @@ static void __exit nf_nat_cleanup(void)
747 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); 752 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
748 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 753 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
749 RCU_INIT_POINTER(nf_ct_nat_offset, NULL); 754 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
755 RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
750 synchronize_net(); 756 synchronize_net();
751} 757}
752 758
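nfq_ct_nat_hook follows the stock RCU-published hook pattern: the NAT module fills a static ops struct, publishes the pointer at init, clears it at exit, and synchronize_net() guarantees no reader is still running module code before unload. A compact sketch of that life cycle (my_* names illustrative):

#include <linux/cache.h>
#include <linux/netdevice.h>	/* synchronize_net() */
#include <linux/rcupdate.h>

struct my_nat_hook {
	void (*seq_adjust)(int off);
};

static void my_seq_adjust(int off)
{
}

static struct my_nat_hook my_hook_ops = {
	.seq_adjust = my_seq_adjust,
};

static struct my_nat_hook __rcu *my_hook __read_mostly;

static void my_publish(void)			/* module init */
{
	RCU_INIT_POINTER(my_hook, &my_hook_ops);
}

static void my_unpublish(void)			/* module exit */
{
	RCU_INIT_POINTER(my_hook, NULL);
	synchronize_net();	/* wait out readers of the old pointer */
}

static void my_call(int off)
{
	struct my_nat_hook *h;

	rcu_read_lock();
	h = rcu_dereference(my_hook);
	if (h)
		h->seq_adjust(off);
	rcu_read_unlock();
}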
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index cad29c121318..c6784a18c1c4 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -95,7 +95,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
95 unsigned char **data, 95 unsigned char **data,
96 TransportAddress *taddr, int count) 96 TransportAddress *taddr, int count)
97{ 97{
98 const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 98 const struct nf_ct_h323_master *info = nfct_help_data(ct);
99 int dir = CTINFO2DIR(ctinfo); 99 int dir = CTINFO2DIR(ctinfo);
100 int i; 100 int i;
101 __be16 port; 101 __be16 port;
@@ -178,7 +178,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
178 struct nf_conntrack_expect *rtp_exp, 178 struct nf_conntrack_expect *rtp_exp,
179 struct nf_conntrack_expect *rtcp_exp) 179 struct nf_conntrack_expect *rtcp_exp)
180{ 180{
181 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 181 struct nf_ct_h323_master *info = nfct_help_data(ct);
182 int dir = CTINFO2DIR(ctinfo); 182 int dir = CTINFO2DIR(ctinfo);
183 int i; 183 int i;
184 u_int16_t nated_port; 184 u_int16_t nated_port;
@@ -330,7 +330,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
330 TransportAddress *taddr, __be16 port, 330 TransportAddress *taddr, __be16 port,
331 struct nf_conntrack_expect *exp) 331 struct nf_conntrack_expect *exp)
332{ 332{
333 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 333 struct nf_ct_h323_master *info = nfct_help_data(ct);
334 int dir = CTINFO2DIR(ctinfo); 334 int dir = CTINFO2DIR(ctinfo);
335 u_int16_t nated_port = ntohs(port); 335 u_int16_t nated_port = ntohs(port);
336 336
@@ -419,7 +419,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
419 unsigned char **data, TransportAddress *taddr, int idx, 419 unsigned char **data, TransportAddress *taddr, int idx,
420 __be16 port, struct nf_conntrack_expect *exp) 420 __be16 port, struct nf_conntrack_expect *exp)
421{ 421{
422 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 422 struct nf_ct_h323_master *info = nfct_help_data(ct);
423 int dir = CTINFO2DIR(ctinfo); 423 int dir = CTINFO2DIR(ctinfo);
424 u_int16_t nated_port = ntohs(port); 424 u_int16_t nated_port = ntohs(port);
425 union nf_inet_addr addr; 425 union nf_inet_addr addr;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index af65958f6308..2e59ad0b90ca 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,6 +153,19 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
153} 153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); 154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 155
156void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
157 u32 ctinfo, int off)
158{
159 const struct tcphdr *th;
160
161 if (nf_ct_protonum(ct) != IPPROTO_TCP)
162 return;
163
164 th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
165 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
166}
167EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
168
156static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data, 169static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 int datalen, __sum16 *check, int oldlen) 170 int datalen, __sum16 *check, int oldlen)
158{ 171{
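The new nf_nat_tcp_seq_adjust() finds the TCP header the way code at this layer must: from the network header plus ip_hdrlen() (ihl * 4), since skb->data need not point at the transport header when netfilter runs. That arithmetic in isolation (the my_ name is illustrative):

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

static const struct tcphdr *my_l4_hdr(const struct sk_buff *skb)
{
	/* transport header = start of the IP header + its length in bytes */
	return (const struct tcphdr *)(skb_network_header(skb) +
				       ip_hdrlen(skb));
}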
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index c273d58980ae..388140881ebe 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -49,7 +49,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
49 const struct nf_nat_pptp *nat_pptp_info; 49 const struct nf_nat_pptp *nat_pptp_info;
50 struct nf_nat_ipv4_range range; 50 struct nf_nat_ipv4_range range;
51 51
52 ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; 52 ct_pptp_info = nfct_help_data(master);
53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; 53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
54 54
55 /* And here goes the grand finale of corrosion... */ 55 /* And here goes the grand finale of corrosion... */
@@ -123,7 +123,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
123 __be16 new_callid; 123 __be16 new_callid;
124 unsigned int cid_off; 124 unsigned int cid_off;
125 125
126 ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; 126 ct_pptp_info = nfct_help_data(ct);
127 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; 127 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
128 128
129 new_callid = ct_pptp_info->pns_call_id; 129 new_callid = ct_pptp_info->pns_call_id;
@@ -192,7 +192,7 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
192 struct nf_ct_pptp_master *ct_pptp_info; 192 struct nf_ct_pptp_master *ct_pptp_info;
193 struct nf_nat_pptp *nat_pptp_info; 193 struct nf_nat_pptp *nat_pptp_info;
194 194
195 ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; 195 ct_pptp_info = nfct_help_data(ct);
196 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; 196 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
197 197
198 /* save original PAC call ID in nat_info */ 198 /* save original PAC call ID in nat_info */
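Both the H.323 and PPTP hunks replace direct access to a fixed union member, &nfct_help(ct)->help.ct_*_info, with nfct_help_data(ct), which returns the helper's variable-size private area as an opaque pointer. A minimal usage sketch (the my_* type is illustrative):

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>

struct my_master_info {
	__be16 pns_call_id;
};

static void my_touch_help_data(struct nf_conn *ct)
{
	/* helper-owned per-connection storage, sized by the helper */
	struct my_master_info *info = nfct_help_data(ct);

	info->pns_call_id = htons(0);
}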
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 746edec8b86e..bac712293fd6 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -405,7 +405,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
405 405
406 ptr = *octets; 406 ptr = *octets;
407 while (ctx->pointer < eoc) { 407 while (ctx->pointer < eoc) {
408 if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { 408 if (!asn1_octet_decode(ctx, ptr++)) {
409 kfree(*octets); 409 kfree(*octets);
410 *octets = NULL; 410 *octets = NULL;
411 return 0; 411 return 0;
@@ -759,7 +759,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
759 } 759 }
760 break; 760 break;
761 case SNMP_OBJECTID: 761 case SNMP_OBJECTID:
762 if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { 762 if (!asn1_oid_decode(ctx, end, &lp, &len)) {
763 kfree(id); 763 kfree(id);
764 return 0; 764 return 0;
765 } 765 }
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index a2901bf829c0..9dbb8d284f99 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -8,10 +8,10 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/udp.h> 9#include <linux/udp.h>
10 10
11#include <net/netfilter/nf_nat_helper.h>
12#include <net/netfilter/nf_nat_rule.h>
13#include <net/netfilter/nf_conntrack_helper.h> 11#include <net/netfilter/nf_conntrack_helper.h>
14#include <net/netfilter/nf_conntrack_expect.h> 12#include <net/netfilter/nf_conntrack_expect.h>
13#include <net/netfilter/nf_nat_helper.h>
14#include <net/netfilter/nf_nat_rule.h>
15#include <linux/netfilter/nf_conntrack_tftp.h> 15#include <linux/netfilter/nf_conntrack_tftp.h>
16 16
17MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); 17MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2c00e8bf684d..6232d476f37e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -371,6 +371,7 @@ void ping_err(struct sk_buff *skb, u32 info)
371 break; 371 break;
372 case ICMP_DEST_UNREACH: 372 case ICMP_DEST_UNREACH:
373 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 373 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
374 ipv4_sk_update_pmtu(skb, sk, info);
374 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { 375 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
375 err = EMSGSIZE; 376 err = EMSGSIZE;
376 harderr = 1; 377 harderr = 1;
@@ -386,6 +387,7 @@ void ping_err(struct sk_buff *skb, u32 info)
386 break; 387 break;
387 case ICMP_REDIRECT: 388 case ICMP_REDIRECT:
388 /* See ICMP_SOURCE_QUENCH */ 389 /* See ICMP_SOURCE_QUENCH */
390 ipv4_sk_redirect(skb, sk);
389 err = EREMOTEIO; 391 err = EREMOTEIO;
390 break; 392 break;
391 } 393 }
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8af0d44e4e22..957acd12250b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -232,7 +232,6 @@ static const struct snmp_mib snmp4_net_list[] = {
232 SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), 232 SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
233 SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV), 233 SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
234 SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV), 234 SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
235 SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
236 SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA), 235 SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
237 SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE), 236 SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
238 SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY), 237 SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
@@ -258,6 +257,12 @@ static const struct snmp_mib snmp4_net_list[] = {
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 257 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
259 SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL), 258 SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
260 SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE), 259 SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
260 SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE),
261 SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP),
262 SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE),
263 SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
264 SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
265 SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
261 SNMP_MIB_SENTINEL 266 SNMP_MIB_SENTINEL
262}; 267};
263 268
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 9ae5c01cd0b2..8918eff1426d 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -36,9 +36,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
36 36
37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
38{ 38{
39 int hash = protocol & (MAX_INET_PROTOS - 1); 39 return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
40
41 return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
42 NULL, prot) ? 0 : -1; 40 NULL, prot) ? 0 : -1;
43} 41}
44EXPORT_SYMBOL(inet_add_protocol); 42EXPORT_SYMBOL(inet_add_protocol);
@@ -49,9 +47,9 @@ EXPORT_SYMBOL(inet_add_protocol);
49 47
50int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 48int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
51{ 49{
52 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 50 int ret;
53 51
54 ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash], 52 ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],
55 prot, NULL) == prot) ? 0 : -1; 53 prot, NULL) == prot) ? 0 : -1;
56 54
57 synchronize_net(); 55 synchronize_net();
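The dropped masking was never a hash: MAX_INET_PROTOS is 256, so the 8-bit IP protocol number indexes inet_protos[] directly, and cmpxchg() keeps add/del lock-free against concurrent registration. The idiom reduced to a sketch (my_* names illustrative):

#include <linux/atomic.h>
#include <linux/stddef.h>

struct my_proto {
	int id;
};

/* one slot per possible u8 protocol value; no masking needed */
static const struct my_proto *my_protos[256];

static int my_add_protocol(const struct my_proto *prot, unsigned char num)
{
	/* succeeds only if the slot was still empty */
	return !cmpxchg(&my_protos[num], NULL, prot) ? 0 : -1;
}

static int my_del_protocol(const struct my_proto *prot, unsigned char num)
{
	/* succeeds only if we still own the slot */
	return cmpxchg(&my_protos[num], prot, NULL) == prot ? 0 : -1;
}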
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4032b818f3e4..ff0f071969ea 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -216,6 +216,11 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
216 int err = 0; 216 int err = 0;
217 int harderr = 0; 217 int harderr = 0;
218 218
219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
220 ipv4_sk_update_pmtu(skb, sk, info);
221 else if (type == ICMP_REDIRECT)
222 ipv4_sk_redirect(skb, sk);
223
219 /* Report error on raw socket, if: 224 /* Report error on raw socket, if:
220 1. User requested ip_recverr. 225 1. User requested ip_recverr.
221 2. Socket is connected (otherwise the error indication 226 2. Socket is connected (otherwise the error indication
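raw_err() here and ping_err() above gain the same two forwards into the reworked route layer: an ICMP_FRAG_NEEDED unreachable becomes a per-socket PMTU update and ICMP_REDIRECT rewrites the socket's cached next hop. The dispatch skeleton, using the 3.6 entry points from net/route.h (the my_ wrapper is illustrative):

#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/route.h>
#include <net/sock.h>

static void my_icmp_err(struct sock *sk, struct sk_buff *skb,
			int type, int code, u32 info)
{
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_sk_update_pmtu(skb, sk, info);	/* info = new MTU */
	else if (type == ICMP_REDIRECT)
		ipv4_sk_redirect(skb, sk);
	/* other types keep flowing through normal error reporting */
}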
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 98b30d08efe9..c035251beb07 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -133,10 +133,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
133static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 133static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
134static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 134static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
135static int ip_rt_min_advmss __read_mostly = 256; 135static int ip_rt_min_advmss __read_mostly = 256;
136static int rt_chain_length_max __read_mostly = 20;
137
138static struct delayed_work expires_work;
139static unsigned long expires_ljiffies;
140 136
141/* 137/*
142 * Interface to generic destination cache. 138 * Interface to generic destination cache.
@@ -145,11 +141,13 @@ static unsigned long expires_ljiffies;
145static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
146static unsigned int ipv4_default_advmss(const struct dst_entry *dst); 142static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
147static unsigned int ipv4_mtu(const struct dst_entry *dst); 143static unsigned int ipv4_mtu(const struct dst_entry *dst);
148static void ipv4_dst_destroy(struct dst_entry *dst);
149static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 144static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
150static void ipv4_link_failure(struct sk_buff *skb); 145static void ipv4_link_failure(struct sk_buff *skb);
151static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 146static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
152static int rt_garbage_collect(struct dst_ops *ops); 147 struct sk_buff *skb, u32 mtu);
148static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
149 struct sk_buff *skb);
150static void ipv4_dst_destroy(struct dst_entry *dst);
153 151
154static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 152static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
155 int how) 153 int how)
@@ -158,45 +156,17 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
158 156
159static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) 157static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
160{ 158{
161 struct rtable *rt = (struct rtable *) dst; 159 WARN_ON(1);
162 struct inet_peer *peer; 160 return NULL;
163 u32 *p = NULL;
164
165 if (!rt->peer)
166 rt_bind_peer(rt, rt->rt_dst, 1);
167
168 peer = rt->peer;
169 if (peer) {
170 u32 *old_p = __DST_METRICS_PTR(old);
171 unsigned long prev, new;
172
173 p = peer->metrics;
174 if (inet_metrics_new(peer))
175 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
176
177 new = (unsigned long) p;
178 prev = cmpxchg(&dst->_metrics, old, new);
179
180 if (prev != old) {
181 p = __DST_METRICS_PTR(prev);
182 if (prev & DST_METRICS_READ_ONLY)
183 p = NULL;
184 } else {
185 if (rt->fi) {
186 fib_info_put(rt->fi);
187 rt->fi = NULL;
188 }
189 }
190 }
191 return p;
192} 161}
193 162
194static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr); 163static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
164 struct sk_buff *skb,
165 const void *daddr);
195 166
196static struct dst_ops ipv4_dst_ops = { 167static struct dst_ops ipv4_dst_ops = {
197 .family = AF_INET, 168 .family = AF_INET,
198 .protocol = cpu_to_be16(ETH_P_IP), 169 .protocol = cpu_to_be16(ETH_P_IP),
199 .gc = rt_garbage_collect,
200 .check = ipv4_dst_check, 170 .check = ipv4_dst_check,
201 .default_advmss = ipv4_default_advmss, 171 .default_advmss = ipv4_default_advmss,
202 .mtu = ipv4_mtu, 172 .mtu = ipv4_mtu,
@@ -206,6 +176,7 @@ static struct dst_ops ipv4_dst_ops = {
206 .negative_advice = ipv4_negative_advice, 176 .negative_advice = ipv4_negative_advice,
207 .link_failure = ipv4_link_failure, 177 .link_failure = ipv4_link_failure,
208 .update_pmtu = ip_rt_update_pmtu, 178 .update_pmtu = ip_rt_update_pmtu,
179 .redirect = ip_do_redirect,
209 .local_out = __ip_local_out, 180 .local_out = __ip_local_out,
210 .neigh_lookup = ipv4_neigh_lookup, 181 .neigh_lookup = ipv4_neigh_lookup,
211}; 182};
@@ -232,184 +203,30 @@ const __u8 ip_tos2prio[16] = {
232}; 203};
233EXPORT_SYMBOL(ip_tos2prio); 204EXPORT_SYMBOL(ip_tos2prio);
234 205
235/*
236 * Route cache.
237 */
238
239/* The locking scheme is rather straightforward:
240 *
241 * 1) Read-Copy Update protects the buckets of the central route hash.
242 * 2) Only writers remove entries, and they hold the lock
243 * as they look at rtable reference counts.
244 * 3) Only readers acquire references to rtable entries,
245 * they do so with atomic increments and with the
246 * lock held.
247 */
248
249struct rt_hash_bucket {
250 struct rtable __rcu *chain;
251};
252
253#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
254 defined(CONFIG_PROVE_LOCKING)
255/*
256 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
257 * The size of this table is a power of two and depends on the number of CPUS.
258 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
259 */
260#ifdef CONFIG_LOCKDEP
261# define RT_HASH_LOCK_SZ 256
262#else
263# if NR_CPUS >= 32
264# define RT_HASH_LOCK_SZ 4096
265# elif NR_CPUS >= 16
266# define RT_HASH_LOCK_SZ 2048
267# elif NR_CPUS >= 8
268# define RT_HASH_LOCK_SZ 1024
269# elif NR_CPUS >= 4
270# define RT_HASH_LOCK_SZ 512
271# else
272# define RT_HASH_LOCK_SZ 256
273# endif
274#endif
275
276static spinlock_t *rt_hash_locks;
277# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
278
279static __init void rt_hash_lock_init(void)
280{
281 int i;
282
283 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
284 GFP_KERNEL);
285 if (!rt_hash_locks)
286 panic("IP: failed to allocate rt_hash_locks\n");
287
288 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
289 spin_lock_init(&rt_hash_locks[i]);
290}
291#else
292# define rt_hash_lock_addr(slot) NULL
293
294static inline void rt_hash_lock_init(void)
295{
296}
297#endif
298
299static struct rt_hash_bucket *rt_hash_table __read_mostly;
300static unsigned int rt_hash_mask __read_mostly;
301static unsigned int rt_hash_log __read_mostly;
302
303static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 206static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
304#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) 207#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
305 208
306static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
307 int genid)
308{
309 return jhash_3words((__force u32)daddr, (__force u32)saddr,
310 idx, genid)
311 & rt_hash_mask;
312}
313
314static inline int rt_genid(struct net *net) 209static inline int rt_genid(struct net *net)
315{ 210{
316 return atomic_read(&net->ipv4.rt_genid); 211 return atomic_read(&net->ipv4.rt_genid);
317} 212}
318 213
319#ifdef CONFIG_PROC_FS 214#ifdef CONFIG_PROC_FS
320struct rt_cache_iter_state {
321 struct seq_net_private p;
322 int bucket;
323 int genid;
324};
325
326static struct rtable *rt_cache_get_first(struct seq_file *seq)
327{
328 struct rt_cache_iter_state *st = seq->private;
329 struct rtable *r = NULL;
330
331 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
332 if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
333 continue;
334 rcu_read_lock_bh();
335 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
336 while (r) {
337 if (dev_net(r->dst.dev) == seq_file_net(seq) &&
338 r->rt_genid == st->genid)
339 return r;
340 r = rcu_dereference_bh(r->dst.rt_next);
341 }
342 rcu_read_unlock_bh();
343 }
344 return r;
345}
346
347static struct rtable *__rt_cache_get_next(struct seq_file *seq,
348 struct rtable *r)
349{
350 struct rt_cache_iter_state *st = seq->private;
351
352 r = rcu_dereference_bh(r->dst.rt_next);
353 while (!r) {
354 rcu_read_unlock_bh();
355 do {
356 if (--st->bucket < 0)
357 return NULL;
358 } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
359 rcu_read_lock_bh();
360 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
361 }
362 return r;
363}
364
365static struct rtable *rt_cache_get_next(struct seq_file *seq,
366 struct rtable *r)
367{
368 struct rt_cache_iter_state *st = seq->private;
369 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
370 if (dev_net(r->dst.dev) != seq_file_net(seq))
371 continue;
372 if (r->rt_genid == st->genid)
373 break;
374 }
375 return r;
376}
377
378static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
379{
380 struct rtable *r = rt_cache_get_first(seq);
381
382 if (r)
383 while (pos && (r = rt_cache_get_next(seq, r)))
384 --pos;
385 return pos ? NULL : r;
386}
387
388static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 215static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
389{ 216{
390 struct rt_cache_iter_state *st = seq->private;
391 if (*pos) 217 if (*pos)
392 return rt_cache_get_idx(seq, *pos - 1); 218 return NULL;
393 st->genid = rt_genid(seq_file_net(seq));
394 return SEQ_START_TOKEN; 219 return SEQ_START_TOKEN;
395} 220}
396 221
397static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 222static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
398{ 223{
399 struct rtable *r;
400
401 if (v == SEQ_START_TOKEN)
402 r = rt_cache_get_first(seq);
403 else
404 r = rt_cache_get_next(seq, v);
405 ++*pos; 224 ++*pos;
406 return r; 225 return NULL;
407} 226}
408 227
409static void rt_cache_seq_stop(struct seq_file *seq, void *v) 228static void rt_cache_seq_stop(struct seq_file *seq, void *v)
410{ 229{
411 if (v && v != SEQ_START_TOKEN)
412 rcu_read_unlock_bh();
413} 230}
414 231
415static int rt_cache_seq_show(struct seq_file *seq, void *v) 232static int rt_cache_seq_show(struct seq_file *seq, void *v)
@@ -419,34 +236,6 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
419 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" 236 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
420 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" 237 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
421 "HHUptod\tSpecDst"); 238 "HHUptod\tSpecDst");
422 else {
423 struct rtable *r = v;
424 struct neighbour *n;
425 int len, HHUptod;
426
427 rcu_read_lock();
428 n = dst_get_neighbour_noref(&r->dst);
429 HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
430 rcu_read_unlock();
431
432 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
433 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
434 r->dst.dev ? r->dst.dev->name : "*",
435 (__force u32)r->rt_dst,
436 (__force u32)r->rt_gateway,
437 r->rt_flags, atomic_read(&r->dst.__refcnt),
438 r->dst.__use, 0, (__force u32)r->rt_src,
439 dst_metric_advmss(&r->dst) + 40,
440 dst_metric(&r->dst, RTAX_WINDOW),
441 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
442 dst_metric(&r->dst, RTAX_RTTVAR)),
443 r->rt_key_tos,
444 -1,
445 HHUptod,
446 r->rt_spec_dst, &len);
447
448 seq_printf(seq, "%*s\n", 127 - len, "");
449 }
450 return 0; 239 return 0;
451} 240}
452 241
@@ -459,8 +248,7 @@ static const struct seq_operations rt_cache_seq_ops = {
459 248
460static int rt_cache_seq_open(struct inode *inode, struct file *file) 249static int rt_cache_seq_open(struct inode *inode, struct file *file)
461{ 250{
462 return seq_open_net(inode, file, &rt_cache_seq_ops, 251 return seq_open(file, &rt_cache_seq_ops);
463 sizeof(struct rt_cache_iter_state));
464} 252}
465 253
466static const struct file_operations rt_cache_seq_fops = { 254static const struct file_operations rt_cache_seq_fops = {
@@ -468,7 +256,7 @@ static const struct file_operations rt_cache_seq_fops = {
468 .open = rt_cache_seq_open, 256 .open = rt_cache_seq_open,
469 .read = seq_read, 257 .read = seq_read,
470 .llseek = seq_lseek, 258 .llseek = seq_lseek,
471 .release = seq_release_net, 259 .release = seq_release,
472}; 260};
473 261
474 262
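With the cache iterator gone, rt_cache_seq_open() no longer carries per-net iterator state, so seq_open_net()/seq_release_net() drop back to plain seq_open()/seq_release(). The stateless variant in outline (my_* names illustrative; start/next/stop/show as in the hunk above):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static const struct seq_operations my_seq_ops;	/* .start/.next/.stop/.show */

static int my_open(struct inode *inode, struct file *file)
{
	/* no private iterator state, so no seq_open_net()/private size */
	return seq_open(file, &my_seq_ops);
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};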
@@ -658,275 +446,12 @@ static inline int ip_rt_proc_init(void)
658} 446}
659#endif /* CONFIG_PROC_FS */ 447#endif /* CONFIG_PROC_FS */
660 448
661static inline void rt_free(struct rtable *rt) 449static inline bool rt_is_expired(const struct rtable *rth)
662{
663 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
664}
665
666static inline void rt_drop(struct rtable *rt)
667{
668 ip_rt_put(rt);
669 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
670}
671
672static inline int rt_fast_clean(struct rtable *rth)
673{
674 /* Kill broadcast/multicast entries very aggressively, if they
675 collide in hash table with more useful entries */
676 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
677 rt_is_input_route(rth) && rth->dst.rt_next;
678}
679
680static inline int rt_valuable(struct rtable *rth)
681{
682 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
683 (rth->peer && rth->peer->pmtu_expires);
684}
685
686static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
687{
688 unsigned long age;
689 int ret = 0;
690
691 if (atomic_read(&rth->dst.__refcnt))
692 goto out;
693
694 age = jiffies - rth->dst.lastuse;
695 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
696 (age <= tmo2 && rt_valuable(rth)))
697 goto out;
698 ret = 1;
699out: return ret;
700}
701
702/* Bits of score are:
703 * 31: very valuable
704 * 30: not quite useless
705 * 29..0: usage counter
706 */
707static inline u32 rt_score(struct rtable *rt)
708{
709 u32 score = jiffies - rt->dst.lastuse;
710
711 score = ~score & ~(3<<30);
712
713 if (rt_valuable(rt))
714 score |= (1<<31);
715
716 if (rt_is_output_route(rt) ||
717 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
718 score |= (1<<30);
719
720 return score;
721}
722
723static inline bool rt_caching(const struct net *net)
724{
725 return net->ipv4.current_rt_cache_rebuild_count <=
726 net->ipv4.sysctl_rt_cache_rebuild_count;
727}
728
729static inline bool compare_hash_inputs(const struct rtable *rt1,
730 const struct rtable *rt2)
731{
732 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
733 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
734 (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
735}
736
737static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
738{
739 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
740 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
741 (rt1->rt_mark ^ rt2->rt_mark) |
742 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
743 (rt1->rt_route_iif ^ rt2->rt_route_iif) |
744 (rt1->rt_oif ^ rt2->rt_oif)) == 0;
745}
746
747static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
748{
749 return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
750}
751
752static inline int rt_is_expired(struct rtable *rth)
753{ 450{
754 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); 451 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
755} 452}
756 453
757/* 454/*
758 * Perform a full scan of hash table and free all entries.
759 * Can be called by a softirq or a process.
760 * In the latter case, we want to be rescheduled if necessary
761 */
762static void rt_do_flush(struct net *net, int process_context)
763{
764 unsigned int i;
765 struct rtable *rth, *next;
766
767 for (i = 0; i <= rt_hash_mask; i++) {
768 struct rtable __rcu **pprev;
769 struct rtable *list;
770
771 if (process_context && need_resched())
772 cond_resched();
773 rth = rcu_access_pointer(rt_hash_table[i].chain);
774 if (!rth)
775 continue;
776
777 spin_lock_bh(rt_hash_lock_addr(i));
778
779 list = NULL;
780 pprev = &rt_hash_table[i].chain;
781 rth = rcu_dereference_protected(*pprev,
782 lockdep_is_held(rt_hash_lock_addr(i)));
783
784 while (rth) {
785 next = rcu_dereference_protected(rth->dst.rt_next,
786 lockdep_is_held(rt_hash_lock_addr(i)));
787
788 if (!net ||
789 net_eq(dev_net(rth->dst.dev), net)) {
790 rcu_assign_pointer(*pprev, next);
791 rcu_assign_pointer(rth->dst.rt_next, list);
792 list = rth;
793 } else {
794 pprev = &rth->dst.rt_next;
795 }
796 rth = next;
797 }
798
799 spin_unlock_bh(rt_hash_lock_addr(i));
800
801 for (; list; list = next) {
802 next = rcu_dereference_protected(list->dst.rt_next, 1);
803 rt_free(list);
804 }
805 }
806}
807
808/*
809 * While freeing expired entries, we compute average chain length
810 * and standard deviation, using fixed-point arithmetic.
811 * This to have an estimation of rt_chain_length_max
812 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
813 * We use 3 bits for fractional part, and 29 (or 61) for magnitude.
814 */
815
816#define FRACT_BITS 3
817#define ONE (1UL << FRACT_BITS)
818
819/*
820 * Given a hash chain and an item in this hash chain,
821 * find if a previous entry has the same hash_inputs
822 * (but differs on tos, mark or oif)
823 * Returns 0 if an alias is found.
824 * Returns ONE if rth has no alias before itself.
825 */
826static int has_noalias(const struct rtable *head, const struct rtable *rth)
827{
828 const struct rtable *aux = head;
829
830 while (aux != rth) {
831 if (compare_hash_inputs(aux, rth))
832 return 0;
833 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
834 }
835 return ONE;
836}
837
838static void rt_check_expire(void)
839{
840 static unsigned int rover;
841 unsigned int i = rover, goal;
842 struct rtable *rth;
843 struct rtable __rcu **rthp;
844 unsigned long samples = 0;
845 unsigned long sum = 0, sum2 = 0;
846 unsigned long delta;
847 u64 mult;
848
849 delta = jiffies - expires_ljiffies;
850 expires_ljiffies = jiffies;
851 mult = ((u64)delta) << rt_hash_log;
852 if (ip_rt_gc_timeout > 1)
853 do_div(mult, ip_rt_gc_timeout);
854 goal = (unsigned int)mult;
855 if (goal > rt_hash_mask)
856 goal = rt_hash_mask + 1;
857 for (; goal > 0; goal--) {
858 unsigned long tmo = ip_rt_gc_timeout;
859 unsigned long length;
860
861 i = (i + 1) & rt_hash_mask;
862 rthp = &rt_hash_table[i].chain;
863
864 if (need_resched())
865 cond_resched();
866
867 samples++;
868
869 if (rcu_dereference_raw(*rthp) == NULL)
870 continue;
871 length = 0;
872 spin_lock_bh(rt_hash_lock_addr(i));
873 while ((rth = rcu_dereference_protected(*rthp,
874 lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
875 prefetch(rth->dst.rt_next);
876 if (rt_is_expired(rth)) {
877 *rthp = rth->dst.rt_next;
878 rt_free(rth);
879 continue;
880 }
881 if (rth->dst.expires) {
882 /* Entry is expired even if it is in use */
883 if (time_before_eq(jiffies, rth->dst.expires)) {
884nofree:
885 tmo >>= 1;
886 rthp = &rth->dst.rt_next;
887 /*
888 * We only count entries on
889 * a chain with equal hash inputs once
890 * so that entries for different QOS
891 * levels, and other non-hash input
892 * attributes don't unfairly skew
893 * the length computation
894 */
895 length += has_noalias(rt_hash_table[i].chain, rth);
896 continue;
897 }
898 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
899 goto nofree;
900
901 /* Cleanup aged off entries. */
902 *rthp = rth->dst.rt_next;
903 rt_free(rth);
904 }
905 spin_unlock_bh(rt_hash_lock_addr(i));
906 sum += length;
907 sum2 += length*length;
908 }
909 if (samples) {
910 unsigned long avg = sum / samples;
911 unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
912 rt_chain_length_max = max_t(unsigned long,
913 ip_rt_gc_elasticity,
914 (avg + 4*sd) >> FRACT_BITS);
915 }
916 rover = i;
917}
918
919/*
920 * rt_worker_func() is run in process context.
921 * we call rt_check_expire() to scan part of the hash table
922 */
923static void rt_worker_func(struct work_struct *work)
924{
925 rt_check_expire();
926 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
927}
928
929/*
930 * Perturbation of rt_genid by a small quantity [1..256] 455 * Perturbation of rt_genid by a small quantity [1..256]
931 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate() 456
932 * many times (2^24) without giving recent rt_genid. 457 * many times (2^24) without giving recent rt_genid.
@@ -938,7 +463,6 @@ static void rt_cache_invalidate(struct net *net)
938 463
939 get_random_bytes(&shuffle, sizeof(shuffle)); 464 get_random_bytes(&shuffle, sizeof(shuffle));
940 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 465 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
941 inetpeer_invalidate_tree(AF_INET);
942} 466}
943 467
944/* 468/*
@@ -948,183 +472,22 @@ static void rt_cache_invalidate(struct net *net)
948void rt_cache_flush(struct net *net, int delay) 472void rt_cache_flush(struct net *net, int delay)
949{ 473{
950 rt_cache_invalidate(net); 474 rt_cache_invalidate(net);
951 if (delay >= 0)
952 rt_do_flush(net, !in_softirq());
953}
954
955/* Flush previous cache invalidated entries from the cache */
956void rt_cache_flush_batch(struct net *net)
957{
958 rt_do_flush(net, !in_softirq());
959}
960
961static void rt_emergency_hash_rebuild(struct net *net)
962{
963 net_warn_ratelimited("Route hash chain too long!\n");
964 rt_cache_invalidate(net);
965} 475}
966 476
967/* 477static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
968 Short description of GC goals. 478 struct sk_buff *skb,
969 479 const void *daddr)
970 We want to build algorithm, which will keep routing cache
971 at some equilibrium point, when number of aged off entries
972 is kept approximately equal to newly generated ones.
973
974 Current expiration strength is variable "expire".
975 We try to adjust it dynamically, so that if networking
976 is idle expires is large enough to keep enough of warm entries,
977 and when load increases it reduces to limit cache size.
978 */
979
980static int rt_garbage_collect(struct dst_ops *ops)
981{ 480{
982 static unsigned long expire = RT_GC_TIMEOUT;
983 static unsigned long last_gc;
984 static int rover;
985 static int equilibrium;
986 struct rtable *rth;
987 struct rtable __rcu **rthp;
988 unsigned long now = jiffies;
989 int goal;
990 int entries = dst_entries_get_fast(&ipv4_dst_ops);
991
992 /*
993 * Garbage collection is pretty expensive,
994 * do not make it too frequently.
995 */
996
997 RT_CACHE_STAT_INC(gc_total);
998
999 if (now - last_gc < ip_rt_gc_min_interval &&
1000 entries < ip_rt_max_size) {
1001 RT_CACHE_STAT_INC(gc_ignored);
1002 goto out;
1003 }
1004
1005 entries = dst_entries_get_slow(&ipv4_dst_ops);
1006 /* Calculate number of entries, which we want to expire now. */
1007 goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
1008 if (goal <= 0) {
1009 if (equilibrium < ipv4_dst_ops.gc_thresh)
1010 equilibrium = ipv4_dst_ops.gc_thresh;
1011 goal = entries - equilibrium;
1012 if (goal > 0) {
1013 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
1014 goal = entries - equilibrium;
1015 }
1016 } else {
1017 /* We are in dangerous area. Try to reduce cache really
1018 * aggressively.
1019 */
1020 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
1021 equilibrium = entries - goal;
1022 }
1023
1024 if (now - last_gc >= ip_rt_gc_min_interval)
1025 last_gc = now;
1026
1027 if (goal <= 0) {
1028 equilibrium += goal;
1029 goto work_done;
1030 }
1031
1032 do {
1033 int i, k;
1034
1035 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
1036 unsigned long tmo = expire;
1037
1038 k = (k + 1) & rt_hash_mask;
1039 rthp = &rt_hash_table[k].chain;
1040 spin_lock_bh(rt_hash_lock_addr(k));
1041 while ((rth = rcu_dereference_protected(*rthp,
1042 lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
1043 if (!rt_is_expired(rth) &&
1044 !rt_may_expire(rth, tmo, expire)) {
1045 tmo >>= 1;
1046 rthp = &rth->dst.rt_next;
1047 continue;
1048 }
1049 *rthp = rth->dst.rt_next;
1050 rt_free(rth);
1051 goal--;
1052 }
1053 spin_unlock_bh(rt_hash_lock_addr(k));
1054 if (goal <= 0)
1055 break;
1056 }
1057 rover = k;
1058
1059 if (goal <= 0)
1060 goto work_done;
1061
1062 /* Goal is not achieved. We stop process if:
1063
1064 - if expire is reduced to zero. Otherwise, expire is halved.
1065 - if table is not full.
1066 - if we are called from interrupt.
1067 - jiffies check is just fallback/debug loop breaker.
1068 We will not spin here for long time in any case.
1069 */
1070
1071 RT_CACHE_STAT_INC(gc_goal_miss);
1072
1073 if (expire == 0)
1074 break;
1075
1076 expire >>= 1;
1077
1078 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1079 goto out;
1080 } while (!in_softirq() && time_before_eq(jiffies, now));
1081
1082 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1083 goto out;
1084 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1085 goto out;
1086 net_warn_ratelimited("dst cache overflow\n");
1087 RT_CACHE_STAT_INC(gc_dst_overflow);
1088 return 1;
1089
1090work_done:
1091 expire += ip_rt_gc_min_interval;
1092 if (expire > ip_rt_gc_timeout ||
1093 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
1094 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1095 expire = ip_rt_gc_timeout;
1096out: return 0;
1097}
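
The goal/equilibrium arithmetic above is the part most easily misread: "goal" is how many entries to expire on this pass, and "equilibrium" is the steady-state cache size the collector steers toward. A minimal userspace sketch of just that computation, with illustrative stand-in constants (not the kernel's runtime tunables):

#include <stdio.h>

/* Hypothetical stand-ins for the tunables used above. */
#define GC_ELASTICITY	8	/* ip_rt_gc_elasticity */
#define HASH_LOG	10	/* rt_hash_log: 1024 buckets */
#define HASH_MASK	1023	/* rt_hash_mask */
#define GC_THRESH	4096	/* ipv4_dst_ops.gc_thresh */

/* Mirrors the goal/equilibrium logic of rt_garbage_collect(). */
static int gc_goal(int entries, int *equilibrium)
{
	int goal = entries - (GC_ELASTICITY << HASH_LOG);

	if (goal <= 0) {
		if (*equilibrium < GC_THRESH)
			*equilibrium = GC_THRESH;
		goal = entries - *equilibrium;
		if (goal > 0) {
			*equilibrium += goal / 2 < HASH_MASK + 1 ?
					goal / 2 : HASH_MASK + 1;
			goal = entries - *equilibrium;
		}
	} else {
		/* Dangerous area: shrink really aggressively. */
		goal = goal / 2 > HASH_MASK + 1 ? goal / 2 : HASH_MASK + 1;
		*equilibrium = entries - goal;
	}
	return goal;
}

int main(void)
{
	int eq = 0;

	printf("idle:   goal=%d\n", gc_goal(2000, &eq));	/* negative: nothing to expire */
	printf("loaded: goal=%d\n", gc_goal(10000, &eq));	/* positive: expire entries */
	return 0;
}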
1098
1099/*
1100 * Returns number of entries in a hash chain that have different hash_inputs
1101 */
1102static int slow_chain_length(const struct rtable *head)
1103{
1104 int length = 0;
1105 const struct rtable *rth = head;
1106
1107 while (rth) {
1108 length += has_noalias(head, rth);
1109 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
1110 }
1111 return length >> FRACT_BITS;
1112}
1113
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-	static const __be32 inaddr_any = 0;
 	struct net_device *dev = dst->dev;
 	const __be32 *pkey = daddr;
 	const struct rtable *rt;
 	struct neighbour *n;
 
 	rt = (const struct rtable *) dst;
-
-	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
-		pkey = &inaddr_any;
-	else if (rt->rt_gateway)
+	if (rt->rt_gateway)
 		pkey = (const __be32 *) &rt->rt_gateway;
+	else if (skb)
+		pkey = &ip_hdr(skb)->daddr;
 
 	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
 	if (n)
@@ -1132,311 +495,221 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-static int rt_bind_neighbour(struct rtable *rt)
+/*
+ * Peer allocation may fail only in serious out-of-memory conditions.  However
+ * we still can generate some output.
+ * Random ID selection looks a bit dangerous because we have no chances to
+ * select ID being unique in a reasonable period of time.
+ * But broken packet identifier may be better than no packet at all.
+ */
+static void ip_select_fb_ident(struct iphdr *iph)
 {
-	struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n))
-		return PTR_ERR(n);
-	dst_set_neighbour(&rt->dst, n);
+	static DEFINE_SPINLOCK(ip_fb_id_lock);
+	static u32 ip_fallback_id;
+	u32 salt;
 
-	return 0;
+	spin_lock_bh(&ip_fb_id_lock);
+	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
+	iph->id = htons(salt & 0xFFFF);
+	ip_fallback_id = salt;
+	spin_unlock_bh(&ip_fb_id_lock);
 }
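
The fallback path keeps one global, spinlock-protected identifier that is re-salted on every use with the destination address. A rough standalone sketch of the same idea; the mixing function here is a toy stand-in, since secure_ip_id() is a keyed, MD5-based kernel-private helper:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for secure_ip_id(); the kernel uses keyed MD5 here. */
static uint32_t mix(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x45d9f3b;
	x ^= x >> 16;
	return x;
}

static uint32_t ip_fallback_id;	/* the kernel guards this with a spinlock */

static uint16_t select_fb_ident(uint32_t daddr)
{
	uint32_t salt = mix(ip_fallback_id ^ daddr);

	ip_fallback_id = salt;	/* the next call starts from a new state */
	return (uint16_t)(salt & 0xFFFF);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("id=%04x\n", (unsigned)select_fb_ident(0x0a000001));
	return 0;
}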
 
-static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
-				     struct sk_buff *skb, int ifindex)
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
-	struct rtable	*rth, *cand;
-	struct rtable __rcu **rthp, **candp;
-	unsigned long	now;
-	u32		min_score;
-	int		chain_length;
-	int attempts = !in_softirq();
-
-restart:
-	chain_length = 0;
-	min_score = ~(u32)0;
-	cand = NULL;
-	candp = NULL;
-	now = jiffies;
-
-	if (!rt_caching(dev_net(rt->dst.dev))) {
-		/*
-		 * If we're not caching, just tell the caller we
-		 * were successful and don't touch the route.  The
-		 * caller holds the sole reference to the cache entry, and
-		 * it will be released when the caller is done with it.
-		 * If we drop it here, the callers have no way to resolve routes
-		 * when we're not caching.  Instead, just point *rp at rt, so
-		 * the caller gets a single use out of the route.
-		 * Note that we do rt_free on this new route entry, so that
-		 * once its refcount hits zero, we are still able to reap it
-		 * (Thanks Alexey)
-		 * Note: To avoid expensive rcu stuff for this uncached dst,
-		 * we set DST_NOCACHE so that dst_release() can free dst without
-		 * waiting for a grace period.
-		 */
-
-		rt->dst.flags |= DST_NOCACHE;
-		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
-			int err = rt_bind_neighbour(rt);
-			if (err) {
-				net_warn_ratelimited("Neighbour table failure & not caching routes\n");
-				ip_rt_put(rt);
-				return ERR_PTR(err);
-			}
-		}
+	struct net *net = dev_net(dst->dev);
+	struct inet_peer *peer;
 
-		goto skip_hashing;
+	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+	if (peer) {
+		iph->id = htons(inet_getid(peer, more));
+		inet_putpeer(peer);
+		return;
 	}
 
-	rthp = &rt_hash_table[hash].chain;
-
-	spin_lock_bh(rt_hash_lock_addr(hash));
+	ip_select_fb_ident(iph);
+}
+EXPORT_SYMBOL(__ip_select_ident);
1195 while ((rth = rcu_dereference_protected(*rthp,
1196 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1197 if (rt_is_expired(rth)) {
1198 *rthp = rth->dst.rt_next;
1199 rt_free(rth);
1200 continue;
1201 }
1202 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1203 /* Put it first */
1204 *rthp = rth->dst.rt_next;
1205 /*
1206 * Since lookup is lockfree, the deletion
1207 * must be visible to another weakly ordered CPU before
1208 * the insertion at the start of the hash chain.
1209 */
1210 rcu_assign_pointer(rth->dst.rt_next,
1211 rt_hash_table[hash].chain);
1212 /*
1213 * Since lookup is lockfree, the update writes
1214 * must be ordered for consistency on SMP.
1215 */
1216 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1217
1218 dst_use(&rth->dst, now);
1219 spin_unlock_bh(rt_hash_lock_addr(hash));
1220
1221 rt_drop(rt);
1222 if (skb)
1223 skb_dst_set(skb, &rth->dst);
1224 return rth;
1225 }
1226
1227 if (!atomic_read(&rth->dst.__refcnt)) {
1228 u32 score = rt_score(rth);
1229
1230 if (score <= min_score) {
1231 cand = rth;
1232 candp = rthp;
1233 min_score = score;
1234 }
1235 }
1236
1237 chain_length++;
1238
1239 rthp = &rth->dst.rt_next;
1240 }
 
-	if (cand) {
-		/* ip_rt_gc_elasticity used to be average length of chain
-		 * length, when exceeded gc becomes really aggressive.
-		 *
-		 * The second limit is less certain. At the moment it allows
-		 * only 2 entries per bucket. We will see.
-		 */
-		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->dst.rt_next;
-			rt_free(cand);
-		}
-	} else {
-		if (chain_length > rt_chain_length_max &&
-		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
-			struct net *net = dev_net(rt->dst.dev);
-			int num = ++net->ipv4.current_rt_cache_rebuild_count;
-			if (!rt_caching(net)) {
-				pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
-					rt->dst.dev->name, num);
-			}
-			rt_emergency_hash_rebuild(net);
-			spin_unlock_bh(rt_hash_lock_addr(hash));
-
-			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
-					ifindex, rt_genid(net));
-			goto restart;
-		}
-	}
+static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+			     const struct iphdr *iph,
+			     int oif, u8 tos,
+			     u8 prot, u32 mark, int flow_flags)
+{
+	if (sk) {
+		const struct inet_sock *inet = inet_sk(sk);
+
+		oif = sk->sk_bound_dev_if;
+		mark = sk->sk_mark;
+		tos = RT_CONN_FLAGS(sk);
+		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
+	}
+	flowi4_init_output(fl4, oif, mark, tos,
+			   RT_SCOPE_UNIVERSE, prot,
+			   flow_flags,
+			   iph->daddr, iph->saddr, 0, 0);
+}
 
-	/* Try to bind route to arp only if it is output
-	   route or unicast forwarding path.
-	 */
-	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
-		int err = rt_bind_neighbour(rt);
-		if (err) {
-			spin_unlock_bh(rt_hash_lock_addr(hash));
-
-			if (err != -ENOBUFS) {
-				rt_drop(rt);
-				return ERR_PTR(err);
-			}
-
-			/* Neighbour tables are full and nothing
-			   can be released. Try to shrink route cache,
-			   it is most likely it holds some neighbour records.
-			 */
-			if (attempts-- > 0) {
-				int saved_elasticity = ip_rt_gc_elasticity;
-				int saved_int = ip_rt_gc_min_interval;
-				ip_rt_gc_elasticity	= 1;
-				ip_rt_gc_min_interval	= 0;
-				rt_garbage_collect(&ipv4_dst_ops);
-				ip_rt_gc_min_interval	= saved_int;
-				ip_rt_gc_elasticity	= saved_elasticity;
-				goto restart;
-			}
-
-			net_warn_ratelimited("Neighbour table overflow\n");
-			rt_drop(rt);
-			return ERR_PTR(-ENOBUFS);
-		}
-	}
-
-	rt->dst.rt_next = rt_hash_table[hash].chain;
-
-	/*
-	 * Since lookup is lockfree, we must make sure
-	 * previous writes to rt are committed to memory
-	 * before making rt visible to other CPUS.
-	 */
-	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
-
-	spin_unlock_bh(rt_hash_lock_addr(hash));
-
-skip_hashing:
-	if (skb)
-		skb_dst_set(skb, &rt->dst);
-	return rt;
-}
+static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
+			       const struct sock *sk)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	int oif = skb->dev->ifindex;
+	u8 tos = RT_TOS(iph->tos);
+	u8 prot = iph->protocol;
+	u32 mark = skb->mark;
+
+	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
+}
+
+static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+{
+	const struct inet_sock *inet = inet_sk(sk);
+	const struct ip_options_rcu *inet_opt;
+	__be32 daddr = inet->inet_daddr;
+
+	rcu_read_lock();
+	inet_opt = rcu_dereference(inet->inet_opt);
+	if (inet_opt && inet_opt->opt.srr)
+		daddr = inet_opt->opt.faddr;
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+			   inet_sk_flowi_flags(sk),
+			   daddr, inet->inet_saddr, 0, 0);
+	rcu_read_unlock();
+}
+
+static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+				 const struct sk_buff *skb)
+{
+	if (skb)
+		build_skb_flow_key(fl4, skb, sk);
+	else
+		build_sk_flow_key(fl4, sk);
+}
 
-static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt_peer_genid(void)
+static inline void rt_free(struct rtable *rt)
 {
-	return atomic_read(&__rt_peer_genid);
+	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
-	struct inet_peer *peer;
+static DEFINE_SPINLOCK(fnhe_lock);
 
-	peer = inet_getpeer_v4(daddr, create);
+static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
+{
+	struct fib_nh_exception *fnhe, *oldest;
+	struct rtable *orig;
 
-	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
-		inet_putpeer(peer);
-	else
-		rt->rt_peer_genid = rt_peer_genid();
+	oldest = rcu_dereference(hash->chain);
+	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
+	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
+			oldest = fnhe;
+	}
+	orig = rcu_dereference(oldest->fnhe_rth);
+	if (orig) {
+		RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
+		rt_free(orig);
+	}
+	return oldest;
 }
 
-/*
- * Peer allocation may fail only in serious out-of-memory conditions.  However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
- */
-static void ip_select_fb_ident(struct iphdr *iph)
+static inline u32 fnhe_hashfun(__be32 daddr)
 {
-	static DEFINE_SPINLOCK(ip_fb_id_lock);
-	static u32 ip_fallback_id;
-	u32 salt;
+	u32 hval;
 
-	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
-	iph->id = htons(salt & 0xFFFF);
-	ip_fallback_id = salt;
-	spin_unlock_bh(&ip_fb_id_lock);
+	hval = (__force u32) daddr;
+	hval ^= (hval >> 11) ^ (hval >> 22);
+
+	return hval & (FNHE_HASH_SIZE - 1);
 }
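
fnhe_hashfun() above folds the 32-bit destination onto the bucket array with two shift-XORs. A quick standalone check of that fold (FNHE_HASH_SIZE is assumed here to be 2048; any power of two works for the mask, and the actual value lives in the kernel headers):

#include <stdint.h>
#include <stdio.h>

#define FNHE_HASH_SIZE 2048	/* assumed; must be a power of two for the mask */

static uint32_t fnhe_hashfun(uint32_t daddr)
{
	uint32_t hval = daddr;

	hval ^= (hval >> 11) ^ (hval >> 22);	/* fold high bits into the low ones */
	return hval & (FNHE_HASH_SIZE - 1);
}

int main(void)
{
	/* Adjacent addresses should land in different buckets. */
	for (uint32_t a = 0x0a000001; a < 0x0a000005; a++)
		printf("%08x -> bucket %u\n", (unsigned)a, (unsigned)fnhe_hashfun(a));
	return 0;
}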
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
-{
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (rt && !(rt->dst.flags & DST_NOPEER)) {
-		if (rt->peer == NULL)
-			rt_bind_peer(rt, rt->rt_dst, 1);
-
-		/* If peer is attached to destination, it is never detached,
-		   so that we need not to grab a lock to dereference it.
-		 */
-		if (rt->peer) {
-			iph->id = htons(inet_getid(rt->peer, more));
-			return;
-		}
-	} else if (!rt)
-		pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
-
-	ip_select_fb_ident(iph);
-}
-EXPORT_SYMBOL(__ip_select_ident);
-
-static void rt_del(unsigned int hash, struct rtable *rt)
-{
-	struct rtable __rcu **rthp;
-	struct rtable *aux;
-
-	rthp = &rt_hash_table[hash].chain;
-	spin_lock_bh(rt_hash_lock_addr(hash));
-	ip_rt_put(rt);
-	while ((aux = rcu_dereference_protected(*rthp,
-			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
-		if (aux == rt || rt_is_expired(aux)) {
-			*rthp = aux->dst.rt_next;
-			rt_free(aux);
-			continue;
-		}
-		rthp = &aux->dst.rt_next;
-	}
-	spin_unlock_bh(rt_hash_lock_addr(hash));
-}
+static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+				  u32 pmtu, unsigned long expires)
+{
+	struct fnhe_hash_bucket *hash;
+	struct fib_nh_exception *fnhe;
+	int depth;
+	u32 hval = fnhe_hashfun(daddr);
+
+	spin_lock_bh(&fnhe_lock);
+
+	hash = nh->nh_exceptions;
+	if (!hash) {
+		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
+		if (!hash)
+			goto out_unlock;
+		nh->nh_exceptions = hash;
+	}
+
+	hash += hval;
+
+	depth = 0;
+	for (fnhe = rcu_dereference(hash->chain); fnhe;
+	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+		if (fnhe->fnhe_daddr == daddr)
+			break;
+		depth++;
+	}
+
+	if (fnhe) {
+		if (gw)
+			fnhe->fnhe_gw = gw;
+		if (pmtu) {
+			fnhe->fnhe_pmtu = pmtu;
+			fnhe->fnhe_expires = expires;
+		}
+	} else {
+		if (depth > FNHE_RECLAIM_DEPTH)
+			fnhe = fnhe_oldest(hash);
+		else {
+			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
+			if (!fnhe)
+				goto out_unlock;
+
+			fnhe->fnhe_next = hash->chain;
+			rcu_assign_pointer(hash->chain, fnhe);
+		}
+		fnhe->fnhe_daddr = daddr;
+		fnhe->fnhe_gw = gw;
+		fnhe->fnhe_pmtu = pmtu;
+		fnhe->fnhe_expires = expires;
+	}
+
+	fnhe->fnhe_stamp = jiffies;
+
+out_unlock:
+	spin_unlock_bh(&fnhe_lock);
+	return;
+}
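
update_or_create_fnhe() is an instance of a bounded exception cache: walk a short chain, update in place on a hit, and once the chain is deeper than FNHE_RECLAIM_DEPTH recycle the stalest entry instead of growing. A simplified, single-threaded sketch of that pattern (names and the depth bound are illustrative, and the kernel's locking/RCU is omitted):

#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define RECLAIM_DEPTH 5	/* illustrative bound, like FNHE_RECLAIM_DEPTH */

struct exc {
	struct exc *next;
	uint32_t daddr;
	uint32_t pmtu;
	time_t stamp;
};

/* Update the entry for daddr, creating or recycling one if needed. */
static void update_or_create(struct exc **chain, uint32_t daddr, uint32_t pmtu)
{
	struct exc *e, *oldest = *chain;
	int depth = 0;

	for (e = *chain; e; e = e->next) {
		if (e->daddr == daddr)
			break;
		if (oldest && e->stamp < oldest->stamp)
			oldest = e;
		depth++;
	}

	if (!e) {
		if (depth > RECLAIM_DEPTH && oldest) {
			e = oldest;		/* recycle the stalest entry */
		} else {
			e = calloc(1, sizeof(*e));
			if (!e)
				return;
			e->next = *chain;	/* push onto the chain head */
			*chain = e;
		}
		e->daddr = daddr;
	}
	e->pmtu = pmtu;
	e->stamp = time(NULL);
}

int main(void)
{
	struct exc *chain = NULL;

	for (uint32_t a = 1; a <= 10; a++)
		update_or_create(&chain, a, 1400 + a);
	update_or_create(&chain, 3, 1200);	/* hit: updated in place */
	return 0;
}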
1402 686
-static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
+			     bool kill_route)
 {
-	struct rtable *rt = (struct rtable *) dst;
-	__be32 orig_gw = rt->rt_gateway;
-	struct neighbour *n, *old_n;
+	__be32 new_gw = icmp_hdr(skb)->un.gateway;
+	__be32 old_gw = ip_hdr(skb)->saddr;
+	struct net_device *dev = skb->dev;
+	struct in_device *in_dev;
+	struct fib_result res;
+	struct neighbour *n;
+	struct net *net;
 
-	dst_confirm(&rt->dst);
-
-	rt->rt_gateway = peer->redirect_learned.a4;
-
-	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n)) {
-		rt->rt_gateway = orig_gw;
-		return;
-	}
-	old_n = xchg(&rt->dst._neighbour, n);
-	if (old_n)
-		neigh_release(old_n);
-	if (!(n->nud_state & NUD_VALID)) {
-		neigh_event_send(n, NULL);
-	} else {
-		rt->rt_flags |= RTCF_REDIRECTED;
-		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
-	}
-}
+	switch (icmp_hdr(skb)->code & 7) {
+	case ICMP_REDIR_NET:
+	case ICMP_REDIR_NETTOS:
+	case ICMP_REDIR_HOST:
+	case ICMP_REDIR_HOSTTOS:
+		break;
 
-/* called in rcu_read_lock() section */
-void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
-		    __be32 saddr, struct net_device *dev)
-{
-	int s, i;
-	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	__be32 skeys[2] = { saddr, 0 };
-	int ikeys[2] = { dev->ifindex, 0 };
-	struct inet_peer *peer;
-	struct net *net;
+	default:
+		return;
+	}
+
+	if (rt->rt_gateway != old_gw)
+		return;
 
+	in_dev = __in_dev_get_rcu(dev);
 	if (!in_dev)
 		return;
 
@@ -1456,72 +729,50 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		goto reject_redirect;
 	}
 
-	for (s = 0; s < 2; s++) {
-		for (i = 0; i < 2; i++) {
-			unsigned int hash;
-			struct rtable __rcu **rthp;
-			struct rtable *rt;
-
-			hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
-
-			rthp = &rt_hash_table[hash].chain;
-
-			while ((rt = rcu_dereference(*rthp)) != NULL) {
-				rthp = &rt->dst.rt_next;
-
-				if (rt->rt_key_dst != daddr ||
-				    rt->rt_key_src != skeys[s] ||
-				    rt->rt_oif != ikeys[i] ||
-				    rt_is_input_route(rt) ||
-				    rt_is_expired(rt) ||
-				    !net_eq(dev_net(rt->dst.dev), net) ||
-				    rt->dst.error ||
-				    rt->dst.dev != dev ||
-				    rt->rt_gateway != old_gw)
-					continue;
-
-				if (!rt->peer)
-					rt_bind_peer(rt, rt->rt_dst, 1);
-
-				peer = rt->peer;
-				if (peer) {
-					if (peer->redirect_learned.a4 != new_gw) {
-						peer->redirect_learned.a4 = new_gw;
-						atomic_inc(&__rt_peer_genid);
-					}
-					check_peer_redir(&rt->dst, peer);
-				}
-			}
+	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+	if (n) {
+		if (!(n->nud_state & NUD_VALID)) {
+			neigh_event_send(n, NULL);
+		} else {
+			if (fib_lookup(net, fl4, &res) == 0) {
+				struct fib_nh *nh = &FIB_RES_NH(res);
+
+				update_or_create_fnhe(nh, fl4->daddr, new_gw,
+						      0, 0);
+			}
+			if (kill_route)
+				rt->dst.obsolete = DST_OBSOLETE_KILL;
+			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 		}
+		neigh_release(n);
 	}
 	return;
 
 reject_redirect:
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev))
+	if (IN_DEV_LOG_MARTIANS(in_dev)) {
+		const struct iphdr *iph = (const struct iphdr *) skb->data;
+		__be32 daddr = iph->daddr;
+		__be32 saddr = iph->saddr;
+
 		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
 				     "  Advised path = %pI4 -> %pI4\n",
 				     &old_gw, dev->name, &new_gw,
 				     &saddr, &daddr);
+	}
 #endif
 	;
 }
 
-static bool peer_pmtu_expired(struct inet_peer *peer)
+static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 {
-	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+	struct rtable *rt;
+	struct flowi4 fl4;
 
-	return orig &&
-	       time_after_eq(jiffies, orig) &&
-	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
+	rt = (struct rtable *) dst;
 
-static bool peer_pmtu_cleaned(struct inet_peer *peer)
-{
-	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
-	return orig &&
-	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+	ip_rt_build_flow_key(&fl4, sk, skb);
+	__ip_do_redirect(rt, skb, &fl4, true);
 }
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1533,14 +784,10 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 	if (dst->obsolete > 0) {
 		ip_rt_put(rt);
 		ret = NULL;
-	} else if (rt->rt_flags & RTCF_REDIRECTED) {
-		unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
-					    rt->rt_oif,
-					    rt_genid(dev_net(dst->dev)));
-		rt_del(hash, rt);
+	} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+		   rt->dst.expires) {
+		ip_rt_put(rt);
 		ret = NULL;
-	} else if (rt->peer && peer_pmtu_expired(rt->peer)) {
-		dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
 	}
 	}
 	return ret;
@@ -1567,6 +814,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	struct rtable *rt = skb_rtable(skb);
 	struct in_device *in_dev;
 	struct inet_peer *peer;
+	struct net *net;
 	int log_martians;
 
 	rcu_read_lock();
@@ -1578,9 +826,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 	rcu_read_unlock();
 
-	if (!rt->peer)
-		rt_bind_peer(rt, rt->rt_dst, 1);
-	peer = rt->peer;
+	net = dev_net(rt->dst.dev);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
 	if (!peer) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
 		return;
@@ -1597,7 +844,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	 */
 	if (peer->rate_tokens >= ip_rt_redirect_number) {
 		peer->rate_last = jiffies;
-		return;
+		goto out_put_peer;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
@@ -1614,20 +861,38 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	if (log_martians &&
 	    peer->rate_tokens == ip_rt_redirect_number)
 		net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
-				     &ip_hdr(skb)->saddr, rt->rt_iif,
-				     &rt->rt_dst, &rt->rt_gateway);
+				     &ip_hdr(skb)->saddr, inet_iif(skb),
+				     &ip_hdr(skb)->daddr, &rt->rt_gateway);
 #endif
 	}
+out_put_peer:
+	inet_putpeer(peer);
 }
 
 static int ip_error(struct sk_buff *skb)
 {
+	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
 	struct rtable *rt = skb_rtable(skb);
 	struct inet_peer *peer;
 	unsigned long now;
+	struct net *net;
 	bool send;
 	int code;
 
+	net = dev_net(rt->dst.dev);
+	if (!IN_DEV_FORWARD(in_dev)) {
+		switch (rt->dst.error) {
+		case EHOSTUNREACH:
+			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+			break;
+
+		case ENETUNREACH:
+			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+			break;
+		}
+		goto out;
+	}
+
 	switch (rt->dst.error) {
 	case EINVAL:
 	default:
@@ -1637,17 +902,14 @@ static int ip_error(struct sk_buff *skb)
 		break;
 	case ENETUNREACH:
 		code = ICMP_NET_UNREACH;
-		IP_INC_STATS_BH(dev_net(rt->dst.dev),
-				IPSTATS_MIB_INNOROUTES);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
 		break;
 	case EACCES:
 		code = ICMP_PKT_FILTERED;
 		break;
 	}
 
-	if (!rt->peer)
-		rt_bind_peer(rt, rt->rt_dst, 1);
-	peer = rt->peer;
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
 
 	send = true;
 	if (peer) {
@@ -1660,6 +922,7 @@ static int ip_error(struct sk_buff *skb)
 			peer->rate_tokens -= ip_rt_error_cost;
 		else
 			send = false;
+		inet_putpeer(peer);
 	}
 	if (send)
 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1668,163 +931,120 @@ out: kfree_skb(skb);
 	return 0;
 }
 
-/*
- * The last two values are not from the RFC but
- * are needed for AMPRnet AX.25 paths.
- */
-
-static const unsigned short mtu_plateau[] =
-{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
-
-static inline unsigned short guess_mtu(unsigned short old_mtu)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
-		if (old_mtu > mtu_plateau[i])
-			return mtu_plateau[i];
-	return 68;
-}
-
-unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
-				 unsigned short new_mtu,
-				 struct net_device *dev)
-{
-	unsigned short old_mtu = ntohs(iph->tot_len);
-	unsigned short est_mtu = 0;
-	struct inet_peer *peer;
-
-	peer = inet_getpeer_v4(iph->daddr, 1);
-	if (peer) {
-		unsigned short mtu = new_mtu;
-
-		if (new_mtu < 68 || new_mtu >= old_mtu) {
-			/* BSD 4.2 derived systems incorrectly adjust
-			 * tot_len by the IP header length, and report
-			 * a zero MTU in the ICMP message.
-			 */
-			if (mtu == 0 &&
-			    old_mtu >= 68 + (iph->ihl << 2))
-				old_mtu -= iph->ihl << 2;
-			mtu = guess_mtu(old_mtu);
-		}
-
-		if (mtu < ip_rt_min_pmtu)
-			mtu = ip_rt_min_pmtu;
-		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
-			unsigned long pmtu_expires;
-
-			pmtu_expires = jiffies + ip_rt_mtu_expires;
-			if (!pmtu_expires)
-				pmtu_expires = 1UL;
-
-			est_mtu = mtu;
-			peer->pmtu_learned = mtu;
-			peer->pmtu_expires = pmtu_expires;
-			atomic_inc(&__rt_peer_genid);
-		}
-
-		inet_putpeer(peer);
-	}
-	return est_mtu ? : new_mtu;
-}
+static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+{
+	struct fib_result res;
+
+	if (mtu < ip_rt_min_pmtu)
+		mtu = ip_rt_min_pmtu;
+
+	if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
+		struct fib_nh *nh = &FIB_RES_NH(res);
+
+		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+				      jiffies + ip_rt_mtu_expires);
+	}
+	return mtu;
+}
+
+static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+			      struct sk_buff *skb, u32 mtu)
+{
+	struct rtable *rt = (struct rtable *) dst;
+	struct flowi4 fl4;
+
+	ip_rt_build_flow_key(&fl4, sk, skb);
+	mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
+
+	if (!rt->rt_pmtu) {
+		dst->obsolete = DST_OBSOLETE_KILL;
+	} else {
+		rt->rt_pmtu = mtu;
+		dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+	}
+}
 
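The deleted guess_mtu() implements the plateau-table search from RFC 1191, section 7.1: when the router reports a malformed or missing next-hop MTU, fall back to the next-lower "likely" MTU. The table logic transcribes directly to userspace for experimentation:

#include <stdio.h>

/* Plateau table from the deleted code above (last two values are for AX.25). */
static const unsigned short mtu_plateau[] =
	{ 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static unsigned short guess_mtu(unsigned short old_mtu)
{
	unsigned int i;

	for (i = 0; i < sizeof(mtu_plateau) / sizeof(mtu_plateau[0]); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;	/* minimum IPv4 MTU */
}

int main(void)
{
	printf("%u\n", guess_mtu(1500));	/* -> 1492 */
	printf("%u\n", guess_mtu(1492));	/* -> 576 */
	printf("%u\n", guess_mtu(100));		/* -> 68 */
	return 0;
}
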
-static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
-{
-	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
-
-	if (!expires)
-		return;
-	if (time_before(jiffies, expires)) {
-		u32 orig_dst_mtu = dst_mtu(dst);
-		if (peer->pmtu_learned < orig_dst_mtu) {
-			if (!peer->pmtu_orig)
-				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
-			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
-		}
-	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
-		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
-}
-
-static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
-{
-	struct rtable *rt = (struct rtable *) dst;
-	struct inet_peer *peer;
-
-	dst_confirm(dst);
-
-	if (!rt->peer)
-		rt_bind_peer(rt, rt->rt_dst, 1);
-	peer = rt->peer;
-	if (peer) {
-		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
-
-		if (mtu < ip_rt_min_pmtu)
-			mtu = ip_rt_min_pmtu;
-		if (!pmtu_expires || mtu < peer->pmtu_learned) {
-
-			pmtu_expires = jiffies + ip_rt_mtu_expires;
-			if (!pmtu_expires)
-				pmtu_expires = 1UL;
-
-			peer->pmtu_learned = mtu;
-			peer->pmtu_expires = pmtu_expires;
-
-			atomic_inc(&__rt_peer_genid);
-			rt->rt_peer_genid = rt_peer_genid();
-		}
-		check_peer_pmtu(dst, peer);
-	}
-}
-
-
-static void ipv4_validate_peer(struct rtable *rt)
-{
-	if (rt->rt_peer_genid != rt_peer_genid()) {
-		struct inet_peer *peer;
-
-		if (!rt->peer)
-			rt_bind_peer(rt, rt->rt_dst, 0);
-
-		peer = rt->peer;
-		if (peer) {
-			check_peer_pmtu(&rt->dst, peer);
-
-			if (peer->redirect_learned.a4 &&
-			    peer->redirect_learned.a4 != rt->rt_gateway)
-				check_peer_redir(&rt->dst, peer);
-		}
-
-		rt->rt_peer_genid = rt_peer_genid();
-	}
-}
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
+		      int oif, u32 mark, u8 protocol, int flow_flags)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
+
+	__build_flow_key(&fl4, NULL, iph, oif,
+			 RT_TOS(iph->tos), protocol, mark, flow_flags);
+	rt = __ip_route_output_key(net, &fl4);
+	if (!IS_ERR(rt)) {
+		__ip_rt_update_pmtu(rt, &fl4, mtu);
+		ip_rt_put(rt);
+	}
+}
+EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
+
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
+
+	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+	rt = __ip_route_output_key(sock_net(sk), &fl4);
+	if (!IS_ERR(rt)) {
+		__ip_rt_update_pmtu(rt, &fl4, mtu);
+		ip_rt_put(rt);
+	}
+}
+EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
+
+void ipv4_redirect(struct sk_buff *skb, struct net *net,
+		   int oif, u32 mark, u8 protocol, int flow_flags)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
+
+	__build_flow_key(&fl4, NULL, iph, oif,
+			 RT_TOS(iph->tos), protocol, mark, flow_flags);
+	rt = __ip_route_output_key(net, &fl4);
+	if (!IS_ERR(rt)) {
+		__ip_do_redirect(rt, skb, &fl4, false);
+		ip_rt_put(rt);
+	}
+}
+EXPORT_SYMBOL_GPL(ipv4_redirect);
+
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
+
+	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+	rt = __ip_route_output_key(sock_net(sk), &fl4);
+	if (!IS_ERR(rt)) {
+		__ip_do_redirect(rt, skb, &fl4, false);
+		ip_rt_put(rt);
+	}
+}
+EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (rt_is_expired(rt))
+	/* All IPV4 dsts are created with ->obsolete set to the value
+	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+	 * into this function always.
+	 *
+	 * When a PMTU/redirect information update invalidates a
+	 * route, this is indicated by setting obsolete to
+	 * DST_OBSOLETE_KILL.
+	 */
+	if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
 		return NULL;
-	ipv4_validate_peer(rt);
 	return dst;
 }
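
The comment above encodes a small convention: IPv4 dsts are born with obsolete == DST_OBSOLETE_FORCE_CHK so that validation always reaches this function, and PMTU/redirect events flip it to DST_OBSOLETE_KILL. A sketch of that check with assumed constant values mirroring the convention in include/net/dst.h:

#include <stdbool.h>

/* Assumed values following the dst.h convention described above. */
enum {
	DST_OBSOLETE_NONE	= 0,
	DST_OBSOLETE_FORCE_CHK	= -1,
	DST_OBSOLETE_KILL	= -2,
};

/* Is this cached route still usable for the current generation? */
static bool toy_dst_check(int obsolete, bool genid_expired)
{
	if (obsolete == DST_OBSOLETE_KILL || genid_expired)
		return false;	/* caller must look the route up again */
	return true;
}

int main(void)
{
	return toy_dst_check(DST_OBSOLETE_FORCE_CHK, false) ? 0 : 1;
}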
 
1812static void ipv4_dst_destroy(struct dst_entry *dst)
1813{
1814 struct rtable *rt = (struct rtable *) dst;
1815 struct inet_peer *peer = rt->peer;
1816
1817 if (rt->fi) {
1818 fib_info_put(rt->fi);
1819 rt->fi = NULL;
1820 }
1821 if (peer) {
1822 rt->peer = NULL;
1823 inet_putpeer(peer);
1824 }
1825}
1826
1827
 static void ipv4_link_failure(struct sk_buff *skb)
 {
 	struct rtable *rt;
@@ -1832,8 +1052,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
 	rt = skb_rtable(skb);
-	if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
-		dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+	if (rt)
+		dst_set_expires(&rt->dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1880,8 +1100,9 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
 		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
 			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
 		else
-			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
-					RT_SCOPE_UNIVERSE);
+			src = inet_select_addr(rt->dst.dev,
+					       rt_nexthop(rt, iph->daddr),
+					       RT_SCOPE_UNIVERSE);
 		rcu_read_unlock();
 	}
 	memcpy(addr, &src, 4);
@@ -1913,7 +1134,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
 static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
 	const struct rtable *rt = (const struct rtable *) dst;
-	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+	unsigned int mtu = rt->rt_pmtu;
+
+	if (mtu && time_after_eq(jiffies, rt->dst.expires))
+		mtu = 0;
+
+	if (!mtu)
+		mtu = dst_metric_raw(dst, RTAX_MTU);
 
 	if (mtu && rt_is_output_route(rt))
 		return mtu;
@@ -1921,8 +1148,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
 	mtu = dst->dev->mtu;
 
 	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-
-		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
+		if (rt->rt_gateway && mtu > 576)
 			mtu = 576;
 	}
 
@@ -1932,76 +1158,184 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
 	return mtu;
 }
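
The rewritten ipv4_mtu() establishes a precedence order: a still-valid learned PMTU wins, an expired one is ignored, then the RTAX_MTU metric, then the device MTU, clamped to 576 for locked-MTU routes that go via a gateway. A simplified sketch of that decision chain with stand-in fields (the output-route early return is flattened into a comment):

#include <stdbool.h>
#include <stdio.h>

#define IP_MAX_MTU 0xFFF0	/* upper clamp used by the ipv4 code */

/* Stand-ins for the fields ipv4_mtu() consults. */
struct toy_rt {
	unsigned int rt_pmtu;	/* learned PMTU, 0 if none */
	bool pmtu_expired;	/* time_after_eq(jiffies, dst.expires) */
	unsigned int metric_mtu;/* RTAX_MTU metric, 0 if unset */
	unsigned int dev_mtu;	/* underlying device MTU */
	bool mtu_locked;	/* dst_metric_locked(dst, RTAX_MTU) */
	bool has_gateway;	/* rt_gateway != 0 */
};

static unsigned int toy_ipv4_mtu(const struct toy_rt *rt)
{
	unsigned int mtu = rt->rt_pmtu;

	if (mtu && rt->pmtu_expired)
		mtu = 0;		/* stale PMTU: fall through */
	if (!mtu)
		mtu = rt->metric_mtu;
	if (mtu)
		return mtu;		/* (kernel also checks for an output route here) */

	mtu = rt->dev_mtu;
	if (rt->mtu_locked && rt->has_gateway && mtu > 576)
		mtu = 576;
	return mtu < IP_MAX_MTU ? mtu : IP_MAX_MTU;
}

int main(void)
{
	struct toy_rt rt = { .rt_pmtu = 1400, .pmtu_expired = true,
			     .dev_mtu = 1500 };
	printf("%u\n", toy_ipv4_mtu(&rt));	/* 1500: expired PMTU is ignored */
	return 0;
}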
 
-static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
-			    struct fib_info *fi)
-{
-	struct inet_peer *peer;
-	int create = 0;
-
-	/* If a peer entry exists for this destination, we must hook
-	 * it up in order to get at cached metrics.
-	 */
-	if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
-		create = 1;
-
-	rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
-	if (peer) {
-		rt->rt_peer_genid = rt_peer_genid();
-		if (inet_metrics_new(peer))
-			memcpy(peer->metrics, fi->fib_metrics,
-			       sizeof(u32) * RTAX_MAX);
-		dst_init_metrics(&rt->dst, peer->metrics, false);
-
-		check_peer_pmtu(&rt->dst, peer);
-
-		if (peer->redirect_learned.a4 &&
-		    peer->redirect_learned.a4 != rt->rt_gateway) {
-			rt->rt_gateway = peer->redirect_learned.a4;
-			rt->rt_flags |= RTCF_REDIRECTED;
-		}
+static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
+{
+	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+	struct fib_nh_exception *fnhe;
+	u32 hval;
+
+	if (!hash)
+		return NULL;
+
+	hval = fnhe_hashfun(daddr);
+
+	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
+	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+		if (fnhe->fnhe_daddr == daddr)
+			return fnhe;
+	}
+	return NULL;
+}
+
+static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+			      __be32 daddr)
+{
+	bool ret = false;
+
+	spin_lock_bh(&fnhe_lock);
+
+	if (daddr == fnhe->fnhe_daddr) {
+		struct rtable *orig;
+
+		if (fnhe->fnhe_pmtu) {
+			unsigned long expires = fnhe->fnhe_expires;
+			unsigned long diff = expires - jiffies;
+
+			if (time_before(jiffies, expires)) {
+				rt->rt_pmtu = fnhe->fnhe_pmtu;
+				dst_set_expires(&rt->dst, diff);
+			}
+		}
+		if (fnhe->fnhe_gw) {
+			rt->rt_flags |= RTCF_REDIRECTED;
+			rt->rt_gateway = fnhe->fnhe_gw;
+		}
1203
1204 orig = rcu_dereference(fnhe->fnhe_rth);
1205 rcu_assign_pointer(fnhe->fnhe_rth, rt);
1206 if (orig)
1207 rt_free(orig);
1208
1209 fnhe->fnhe_stamp = jiffies;
1210 ret = true;
1211 } else {
1212 /* Routes we intend to cache in nexthop exception have
1213 * the DST_NOCACHE bit clear. However, if we are
1214 * unsuccessful at storing this route into the cache
1215 * we really need to set it.
1216 */
1217 rt->dst.flags |= DST_NOCACHE;
1218 }
1219 spin_unlock_bh(&fnhe_lock);
1220
1221 return ret;
1222}
1223
1224static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1225{
1226 struct rtable *orig, *prev, **p;
1227 bool ret = true;
1228
1229 if (rt_is_input_route(rt)) {
1230 p = (struct rtable **)&nh->nh_rth_input;
1231 } else {
1232 if (!nh->nh_pcpu_rth_output)
1233 goto nocache;
1234 p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
1235 }
1236 orig = *p;
1237
1238 prev = cmpxchg(p, orig, rt);
1239 if (prev == orig) {
1240 if (orig)
1241 rt_free(orig);
-	} else {
-		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
-			rt->fi = fi;
-			atomic_inc(&fi->fib_clntref);
+	} else {
+		/* Routes we intend to cache in the FIB nexthop have
+		 * the DST_NOCACHE bit clear. However, if we are
+		 * unsuccessful at storing this route into the cache
+		 * we really need to set it.
1246 * we really need to set it.
1247 */
1248nocache:
1249 rt->dst.flags |= DST_NOCACHE;
1250 ret = false;
1251 }
1252
1253 return ret;
1254}
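
rt_cache_route() publishes the new route into a single per-nexthop slot with cmpxchg(), so whichever CPU loses the race can still free the displaced entry. The same slot-swap shape in portable C11 atomics — illustrative only, since the kernel additionally defers the free through RCU:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct route { int id; };

static _Atomic(struct route *) slot;	/* one cached route per nexthop */

/* Install rt; report whether we won the race to publish it. */
static bool cache_route(struct route *rt, struct route **displaced)
{
	struct route *orig = atomic_load(&slot);

	*displaced = NULL;
	if (atomic_compare_exchange_strong(&slot, &orig, rt)) {
		*displaced = orig;	/* previous occupant, to be freed (kernel: via RCU) */
		return true;
	}
	return false;			/* somebody else got there first */
}

int main(void)
{
	struct route a = { 1 }, b = { 2 };
	struct route *old;

	cache_route(&a, &old);
	cache_route(&b, &old);
	printf("displaced id=%d\n", old ? old->id : 0);	/* 1 */
	return 0;
}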
1255
1256static DEFINE_SPINLOCK(rt_uncached_lock);
1257static LIST_HEAD(rt_uncached_list);
1258
1259static void rt_add_uncached_list(struct rtable *rt)
1260{
1261 spin_lock_bh(&rt_uncached_lock);
1262 list_add_tail(&rt->rt_uncached, &rt_uncached_list);
1263 spin_unlock_bh(&rt_uncached_lock);
1264}
1265
1266static void ipv4_dst_destroy(struct dst_entry *dst)
1267{
1268 struct rtable *rt = (struct rtable *) dst;
1269
1270 if (dst->flags & DST_NOCACHE) {
1271 spin_lock_bh(&rt_uncached_lock);
1272 list_del(&rt->rt_uncached);
1273 spin_unlock_bh(&rt_uncached_lock);
1274 }
1275}
1276
1277void rt_flush_dev(struct net_device *dev)
1278{
1279 if (!list_empty(&rt_uncached_list)) {
1280 struct net *net = dev_net(dev);
1281 struct rtable *rt;
1282
1283 spin_lock_bh(&rt_uncached_lock);
1284 list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
1285 if (rt->dst.dev != dev)
1286 continue;
1287 rt->dst.dev = net->loopback_dev;
1288 dev_hold(rt->dst.dev);
1289 dev_put(dev);
-		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
-	}
-}
+		}
+		spin_unlock_bh(&rt_uncached_lock);
+	}
+}
 
-static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
+static bool rt_cache_valid(const struct rtable *rt)
+{
+	return	rt &&
+		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+		!rt_is_expired(rt);
+}
+
+static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 			   const struct fib_result *res,
+			   struct fib_nh_exception *fnhe,
 			   struct fib_info *fi, u16 type, u32 itag)
 {
-	struct dst_entry *dst = &rt->dst;
+	bool cached = false;
 
 	if (fi) {
-		if (FIB_RES_GW(*res) &&
-		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
-			rt->rt_gateway = FIB_RES_GW(*res);
-		rt_init_metrics(rt, fl4, fi);
+		struct fib_nh *nh = &FIB_RES_NH(*res);
+
+		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
+			rt->rt_gateway = nh->nh_gw;
+		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
 #ifdef CONFIG_IP_ROUTE_CLASSID
-		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
+		rt->dst.tclassid = nh->nh_tclassid;
 #endif
+		if (unlikely(fnhe))
+			cached = rt_bind_exception(rt, fnhe, daddr);
+		else if (!(rt->dst.flags & DST_NOCACHE))
+			cached = rt_cache_route(nh, rt);
 	}
-
-	if (dst_mtu(dst) > IP_MAX_MTU)
-		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
-	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
-		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
+	if (unlikely(!cached))
+		rt_add_uncached_list(rt);
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-	set_class_tag(rt, fib_rules_tclass(res));
+	set_class_tag(rt, res->tclassid);
 #endif
 	set_class_tag(rt, itag);
 #endif
 }
 
 static struct rtable *rt_dst_alloc(struct net_device *dev,
-				   bool nopolicy, bool noxfrm)
+				   bool nopolicy, bool noxfrm, bool will_cache)
 {
-	return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
-			 DST_HOST |
+	return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+			 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
 			 (nopolicy ? DST_NOPOLICY : 0) |
 			 (noxfrm ? DST_NOXFRM : 0));
 }
@@ -2010,9 +1344,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			     u8 tos, struct net_device *dev, int our)
 {
-	unsigned int hash;
 	struct rtable *rth;
-	__be32 spec_dst;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	u32 itag = 0;
 	int err;
 
 		return -EINVAL;
 
 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
-	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
+	    skb->protocol != htons(ETH_P_IP))
 		goto e_inval;
 
+	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
+		if (ipv4_is_loopback(saddr))
+			goto e_inval;
+
 	if (ipv4_is_zeronet(saddr)) {
 		if (!ipv4_is_local_multicast(daddr))
 			goto e_inval;
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
 	} else {
-		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
+		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+					  in_dev, &itag);
 		if (err < 0)
 			goto e_err;
 	}
 	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
-			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
 	if (!rth)
 		goto e_nobufs;
 
@@ -2046,23 +1381,14 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #endif
 	rth->dst.output = ip_rt_bug;
 
-	rth->rt_key_dst	= daddr;
-	rth->rt_key_src	= saddr;
 	rth->rt_genid	= rt_genid(dev_net(dev));
 	rth->rt_flags	= RTCF_MULTICAST;
 	rth->rt_type	= RTN_MULTICAST;
-	rth->rt_key_tos	= tos;
-	rth->rt_dst	= daddr;
-	rth->rt_src	= saddr;
-	rth->rt_route_iif = dev->ifindex;
-	rth->rt_iif	= dev->ifindex;
-	rth->rt_oif	= 0;
-	rth->rt_mark	= skb->mark;
-	rth->rt_gateway	= daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rth->peer = NULL;
-	rth->fi = NULL;
+	rth->rt_is_input= 1;
+	rth->rt_iif	= 0;
+	rth->rt_pmtu	= 0;
+	rth->rt_gateway	= 0;
+	INIT_LIST_HEAD(&rth->rt_uncached);
 	if (our) {
 		rth->dst.input= ip_local_deliver;
 		rth->rt_flags |= RTCF_LOCAL;
@@ -2074,9 +1400,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #endif
 	RT_CACHE_STAT_INC(in_slow_mc);
 
-	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
-	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
-	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
+	skb_dst_set(skb, &rth->dst);
+	return 0;
 
 e_nobufs:
 	return -ENOBUFS;
@@ -2116,14 +1441,13 @@ static void ip_handle_martian_source(struct net_device *dev,
 static int __mkroute_input(struct sk_buff *skb,
 			   const struct fib_result *res,
 			   struct in_device *in_dev,
-			   __be32 daddr, __be32 saddr, u32 tos,
-			   struct rtable **result)
+			   __be32 daddr, __be32 saddr, u32 tos)
 {
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
 	unsigned int flags = 0;
-	__be32 spec_dst;
+	bool do_cache;
 	u32 itag;
 
 	/* get a working reference to the output device */
@@ -2135,7 +1459,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
 
 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
-				  in_dev->dev, &spec_dst, &itag);
+				  in_dev->dev, in_dev, &itag);
 	if (err < 0) {
 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
 					 saddr);
@@ -2143,9 +1467,6 @@ static int __mkroute_input(struct sk_buff *skb,
 		goto cleanup;
 	}
 
-	if (err)
-		flags |= RTCF_DIRECTSRC;
-
 	if (out_dev == in_dev && err &&
 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
 	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
@@ -2166,38 +1487,41 @@ static int __mkroute_input(struct sk_buff *skb,
 		}
 	}
 
+	do_cache = false;
+	if (res->fi) {
+		if (!itag) {
+			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+			if (rt_cache_valid(rth)) {
+				skb_dst_set_noref(skb, &rth->dst);
+				goto out;
+			}
+			do_cache = true;
+		}
+	}
+
 	rth = rt_dst_alloc(out_dev->dev,
 			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
-			   IN_DEV_CONF_GET(out_dev, NOXFRM));
+			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
 	if (!rth) {
 		err = -ENOBUFS;
 		goto cleanup;
 	}
 
-	rth->rt_key_dst	= daddr;
-	rth->rt_key_src	= saddr;
 	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 	rth->rt_flags = flags;
 	rth->rt_type = res->type;
-	rth->rt_key_tos	= tos;
-	rth->rt_dst	= daddr;
-	rth->rt_src	= saddr;
-	rth->rt_route_iif = in_dev->dev->ifindex;
-	rth->rt_iif	= in_dev->dev->ifindex;
-	rth->rt_oif	= 0;
-	rth->rt_mark	= skb->mark;
-	rth->rt_gateway	= daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rth->peer = NULL;
-	rth->fi = NULL;
+	rth->rt_is_input = 1;
+	rth->rt_iif	= 0;
+	rth->rt_pmtu	= 0;
+	rth->rt_gateway	= 0;
+	INIT_LIST_HEAD(&rth->rt_uncached);
 
 	rth->dst.input = ip_forward;
 	rth->dst.output = ip_output;
 
-	rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
-
-	*result = rth;
+	rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
+	skb_dst_set(skb, &rth->dst);
+out:
 	err = 0;
  cleanup:
 	return err;
@@ -2209,27 +1533,13 @@ static int ip_mkroute_input(struct sk_buff *skb,
 			  struct in_device *in_dev,
 			  __be32 daddr, __be32 saddr, u32 tos)
 {
-	struct rtable *rth = NULL;
-	int err;
-	unsigned int hash;
-
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (res->fi && res->fi->fib_nhs > 1)
 		fib_select_multipath(res);
 #endif
 
 	/* create a routing cache entry */
-	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
-	if (err)
-		return err;
-
-	/* put it into the cache */
-	hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
-		       rt_genid(dev_net(rth->dst.dev)));
-	rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
-	if (IS_ERR(rth))
-		return PTR_ERR(rth);
-	return 0;
+	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
 }
 
2235/* 1545/*
@@ -2252,10 +1562,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2252 unsigned int flags = 0; 1562 unsigned int flags = 0;
2253 u32 itag = 0; 1563 u32 itag = 0;
2254 struct rtable *rth; 1564 struct rtable *rth;
2255 unsigned int hash;
2256 __be32 spec_dst;
2257 int err = -EINVAL; 1565 int err = -EINVAL;
2258 struct net *net = dev_net(dev); 1566 struct net *net = dev_net(dev);
1567 bool do_cache;
2259 1568
2260 /* IP on this device is disabled. */ 1569 /* IP on this device is disabled. */
2261 1570
@@ -2266,10 +1575,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	   by fib_lookup.
 	 */
 
-	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
-	    ipv4_is_loopback(saddr))
+	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
 		goto martian_source;
 
+	res.fi = NULL;
 	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
 		goto brd_input;
 
@@ -2279,9 +1588,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (ipv4_is_zeronet(saddr))
 		goto martian_source;
 
-	if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
+	if (ipv4_is_zeronet(daddr))
 		goto martian_destination;
 
+	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
+		if (ipv4_is_loopback(daddr))
+			goto martian_destination;
+
+		if (ipv4_is_loopback(saddr))
+			goto martian_source;
+	}
+
 	/*
 	 *	Now we are ready to route packet.
 	 */
@@ -2293,11 +1610,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res);
-	if (err != 0) {
-		if (!IN_DEV_FORWARD(in_dev))
-			goto e_hostunreach;
+	if (err != 0)
 		goto no_route;
-	}
 
 	RT_CACHE_STAT_INC(in_slow_tot);
 
@@ -2307,17 +1621,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (res.type == RTN_LOCAL) {
 		err = fib_validate_source(skb, saddr, daddr, tos,
 					  net->loopback_dev->ifindex,
-					  dev, &spec_dst, &itag);
+					  dev, in_dev, &itag);
 		if (err < 0)
 			goto martian_source_keep_err;
-		if (err)
-			flags |= RTCF_DIRECTSRC;
-		spec_dst = daddr;
 		goto local_input;
 	}
 
 	if (!IN_DEV_FORWARD(in_dev))
-		goto e_hostunreach;
+		goto no_route;
 	if (res.type != RTN_UNICAST)
 		goto martian_destination;
 
@@ -2328,23 +1639,32 @@ brd_input:
 	if (skb->protocol != htons(ETH_P_IP))
 		goto e_inval;
 
-	if (ipv4_is_zeronet(saddr))
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	else {
-		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
+	if (!ipv4_is_zeronet(saddr)) {
+		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+					  in_dev, &itag);
 		if (err < 0)
 			goto martian_source_keep_err;
-		if (err)
-			flags |= RTCF_DIRECTSRC;
 	}
 	flags |= RTCF_BROADCAST;
 	res.type = RTN_BROADCAST;
 	RT_CACHE_STAT_INC(in_brd);
 
 local_input:
+	do_cache = false;
+	if (res.fi) {
+		if (!itag) {
+			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
+			if (rt_cache_valid(rth)) {
+				skb_dst_set_noref(skb, &rth->dst);
+				err = 0;
+				goto out;
+			}
+			do_cache = true;
+		}
+	}
+
 	rth = rt_dst_alloc(net->loopback_dev,
-			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
 	if (!rth)
 		goto e_nobufs;
 
@@ -2354,41 +1674,27 @@ local_input:
 	rth->dst.tclassid = itag;
 #endif
 
-	rth->rt_key_dst	= daddr;
-	rth->rt_key_src	= saddr;
 	rth->rt_genid = rt_genid(net);
 	rth->rt_flags 	= flags|RTCF_LOCAL;
 	rth->rt_type	= res.type;
-	rth->rt_key_tos	= tos;
-	rth->rt_dst	= daddr;
-	rth->rt_src	= saddr;
-#ifdef CONFIG_IP_ROUTE_CLASSID
-	rth->dst.tclassid = itag;
-#endif
-	rth->rt_route_iif = dev->ifindex;
-	rth->rt_iif	= dev->ifindex;
-	rth->rt_oif	= 0;
-	rth->rt_mark	= skb->mark;
-	rth->rt_gateway	= daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rth->peer = NULL;
-	rth->fi = NULL;
+	rth->rt_is_input = 1;
+	rth->rt_iif	= 0;
+	rth->rt_pmtu	= 0;
+	rth->rt_gateway	= 0;
+	INIT_LIST_HEAD(&rth->rt_uncached);
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
 		rth->dst.error= -err;
 		rth->rt_flags 	&= ~RTCF_LOCAL;
 	}
-	hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
-	rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
+	if (do_cache)
+		rt_cache_route(&FIB_RES_NH(res), rth);
+	skb_dst_set(skb, &rth->dst);
 	err = 0;
-	if (IS_ERR(rth))
-		err = PTR_ERR(rth);
 	goto out;
 
 no_route:
 	RT_CACHE_STAT_INC(in_no_route);
-	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	res.type = RTN_UNREACHABLE;
 	if (err == -ESRCH)
 		err = -ENETUNREACH;
@@ -2405,10 +1711,6 @@ martian_destination:
 			     &daddr, &saddr, dev->name);
 #endif
 
-e_hostunreach:
-	err = -EHOSTUNREACH;
-	goto out;
-
 e_inval:
 	err = -EINVAL;
 	goto out;
@@ -2424,50 +1726,13 @@ martian_source_keep_err:
 	goto out;
 }
 
-int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
-			  u8 tos, struct net_device *dev, bool noref)
+int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+			 u8 tos, struct net_device *dev)
 {
-	struct rtable	*rth;
-	unsigned int	hash;
-	int iif = dev->ifindex;
-	struct net *net;
 	int res;
 
-	net = dev_net(dev);
-
 	rcu_read_lock();
 
-	if (!rt_caching(net))
-		goto skip_cache;
-
-	tos &= IPTOS_RT_MASK;
-	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
-
-	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->dst.rt_next)) {
-		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
-		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
-		     (rth->rt_route_iif ^ iif) |
-		     (rth->rt_key_tos ^ tos)) == 0 &&
-		    rth->rt_mark == skb->mark &&
-		    net_eq(dev_net(rth->dst.dev), net) &&
-		    !rt_is_expired(rth)) {
-			ipv4_validate_peer(rth);
-			if (noref) {
-				dst_use_noref(&rth->dst, jiffies);
-				skb_dst_set_noref(skb, &rth->dst);
-			} else {
-				dst_use(&rth->dst, jiffies);
-				skb_dst_set(skb, &rth->dst);
-			}
-			RT_CACHE_STAT_INC(in_hit);
-			rcu_read_unlock();
-			return 0;
-		}
-		RT_CACHE_STAT_INC(in_hlist_search);
-	}
-
-skip_cache:
2471 /* Multicast recognition logic is moved from route cache to here. 1736 /* Multicast recognition logic is moved from route cache to here.
2472 The problem was that too many Ethernet cards have broken/missing 1737 The problem was that too many Ethernet cards have broken/missing
 2473 hardware multicast filters :-( As a result the host on multicasting 1738 hardware multicast filters :-( As a result the host on multicasting
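
With the hash-table walk gone, ip_route_input_noref() always runs the full input path under rcu_read_lock(), relying on the per-nexthop cache probed in local_input above. A hedged sketch of the typical caller (the receive path in ip_rcv_finish() does essentially this):

/* Route an incoming skb without taking a dst reference; the route
 * only has to outlive this RCU-protected receive path.
 */
static int route_incoming(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);

	return ip_route_input_noref(skb, iph->daddr, iph->saddr,
				    iph->tos, dev);
}
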
@@ -2505,24 +1770,28 @@ skip_cache:
2505 rcu_read_unlock(); 1770 rcu_read_unlock();
2506 return res; 1771 return res;
2507} 1772}
2508EXPORT_SYMBOL(ip_route_input_common); 1773EXPORT_SYMBOL(ip_route_input_noref);
2509 1774
2510/* called with rcu_read_lock() */ 1775/* called with rcu_read_lock() */
2511static struct rtable *__mkroute_output(const struct fib_result *res, 1776static struct rtable *__mkroute_output(const struct fib_result *res,
2512 const struct flowi4 *fl4, 1777 const struct flowi4 *fl4, int orig_oif,
2513 __be32 orig_daddr, __be32 orig_saddr,
2514 int orig_oif, __u8 orig_rtos,
2515 struct net_device *dev_out, 1778 struct net_device *dev_out,
2516 unsigned int flags) 1779 unsigned int flags)
2517{ 1780{
2518 struct fib_info *fi = res->fi; 1781 struct fib_info *fi = res->fi;
1782 struct fib_nh_exception *fnhe;
2519 struct in_device *in_dev; 1783 struct in_device *in_dev;
2520 u16 type = res->type; 1784 u16 type = res->type;
2521 struct rtable *rth; 1785 struct rtable *rth;
2522 1786
2523 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK)) 1787 in_dev = __in_dev_get_rcu(dev_out);
1788 if (!in_dev)
2524 return ERR_PTR(-EINVAL); 1789 return ERR_PTR(-EINVAL);
2525 1790
1791 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1792 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1793 return ERR_PTR(-EINVAL);
1794
2526 if (ipv4_is_lbcast(fl4->daddr)) 1795 if (ipv4_is_lbcast(fl4->daddr))
2527 type = RTN_BROADCAST; 1796 type = RTN_BROADCAST;
2528 else if (ipv4_is_multicast(fl4->daddr)) 1797 else if (ipv4_is_multicast(fl4->daddr))
@@ -2533,10 +1802,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2533 if (dev_out->flags & IFF_LOOPBACK) 1802 if (dev_out->flags & IFF_LOOPBACK)
2534 flags |= RTCF_LOCAL; 1803 flags |= RTCF_LOCAL;
2535 1804
2536 in_dev = __in_dev_get_rcu(dev_out);
2537 if (!in_dev)
2538 return ERR_PTR(-EINVAL);
2539
2540 if (type == RTN_BROADCAST) { 1805 if (type == RTN_BROADCAST) {
2541 flags |= RTCF_BROADCAST | RTCF_LOCAL; 1806 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2542 fi = NULL; 1807 fi = NULL;
@@ -2553,40 +1818,44 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2553 fi = NULL; 1818 fi = NULL;
2554 } 1819 }
2555 1820
1821 fnhe = NULL;
1822 if (fi) {
1823 struct rtable __rcu **prth;
1824
1825 fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
1826 if (fnhe)
1827 prth = &fnhe->fnhe_rth;
1828 else
1829 prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
1830 rth = rcu_dereference(*prth);
1831 if (rt_cache_valid(rth)) {
1832 dst_hold(&rth->dst);
1833 return rth;
1834 }
1835 }
2556 rth = rt_dst_alloc(dev_out, 1836 rth = rt_dst_alloc(dev_out,
2557 IN_DEV_CONF_GET(in_dev, NOPOLICY), 1837 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2558 IN_DEV_CONF_GET(in_dev, NOXFRM)); 1838 IN_DEV_CONF_GET(in_dev, NOXFRM),
1839 fi);
2559 if (!rth) 1840 if (!rth)
2560 return ERR_PTR(-ENOBUFS); 1841 return ERR_PTR(-ENOBUFS);
2561 1842
2562 rth->dst.output = ip_output; 1843 rth->dst.output = ip_output;
2563 1844
2564 rth->rt_key_dst = orig_daddr;
2565 rth->rt_key_src = orig_saddr;
2566 rth->rt_genid = rt_genid(dev_net(dev_out)); 1845 rth->rt_genid = rt_genid(dev_net(dev_out));
2567 rth->rt_flags = flags; 1846 rth->rt_flags = flags;
2568 rth->rt_type = type; 1847 rth->rt_type = type;
2569 rth->rt_key_tos = orig_rtos; 1848 rth->rt_is_input = 0;
2570 rth->rt_dst = fl4->daddr; 1849 rth->rt_iif = orig_oif ? : 0;
2571 rth->rt_src = fl4->saddr; 1850 rth->rt_pmtu = 0;
2572 rth->rt_route_iif = 0; 1851 rth->rt_gateway = 0;
2573 rth->rt_iif = orig_oif ? : dev_out->ifindex; 1852 INIT_LIST_HEAD(&rth->rt_uncached);
2574 rth->rt_oif = orig_oif;
2575 rth->rt_mark = fl4->flowi4_mark;
2576 rth->rt_gateway = fl4->daddr;
2577 rth->rt_spec_dst= fl4->saddr;
2578 rth->rt_peer_genid = 0;
2579 rth->peer = NULL;
2580 rth->fi = NULL;
2581 1853
2582 RT_CACHE_STAT_INC(out_slow_tot); 1854 RT_CACHE_STAT_INC(out_slow_tot);
2583 1855
2584 if (flags & RTCF_LOCAL) { 1856 if (flags & RTCF_LOCAL)
2585 rth->dst.input = ip_local_deliver; 1857 rth->dst.input = ip_local_deliver;
2586 rth->rt_spec_dst = fl4->daddr;
2587 }
2588 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 1858 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2589 rth->rt_spec_dst = fl4->saddr;
2590 if (flags & RTCF_LOCAL && 1859 if (flags & RTCF_LOCAL &&
2591 !(dev_out->flags & IFF_LOOPBACK)) { 1860 !(dev_out->flags & IFF_LOOPBACK)) {
2592 rth->dst.output = ip_mc_output; 1861 rth->dst.output = ip_mc_output;
@@ -2603,34 +1872,28 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2603#endif 1872#endif
2604 } 1873 }
2605 1874
2606 rt_set_nexthop(rth, fl4, res, fi, type, 0); 1875 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
2607 1876
2608 return rth; 1877 return rth;
2609} 1878}
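
The cache probe at the top of __mkroute_output() prefers a fib_nh_exception, i.e. per-destination state learned from ICMP redirects and PMTU updates, and falls back to the per-CPU output route hung off the nexthop. find_exception() is defined elsewhere in this patch; a hedged sketch of its shape, with helper and field names taken on trust from this series:

/* Walk the nexthop's small exception hash for an entry matching
 * daddr; NULL means no exceptional state for this destination.
 */
static struct fib_nh_exception *find_exception_sketch(struct fib_nh *nh,
						      __be32 daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;

	if (!hash)
		return NULL;
	hash += fnhe_hashfun(daddr);	/* small fixed-size hash */
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next))
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	return NULL;
}
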
2610 1879
2611/* 1880/*
2612 * Major route resolver routine. 1881 * Major route resolver routine.
2613 * called with rcu_read_lock();
2614 */ 1882 */
2615 1883
2616static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4) 1884struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2617{ 1885{
2618 struct net_device *dev_out = NULL; 1886 struct net_device *dev_out = NULL;
2619 __u8 tos = RT_FL_TOS(fl4); 1887 __u8 tos = RT_FL_TOS(fl4);
2620 unsigned int flags = 0; 1888 unsigned int flags = 0;
2621 struct fib_result res; 1889 struct fib_result res;
2622 struct rtable *rth; 1890 struct rtable *rth;
2623 __be32 orig_daddr;
2624 __be32 orig_saddr;
2625 int orig_oif; 1891 int orig_oif;
2626 1892
1893 res.tclassid = 0;
2627 res.fi = NULL; 1894 res.fi = NULL;
2628#ifdef CONFIG_IP_MULTIPLE_TABLES 1895 res.table = NULL;
2629 res.r = NULL;
2630#endif
2631 1896
2632 orig_daddr = fl4->daddr;
2633 orig_saddr = fl4->saddr;
2634 orig_oif = fl4->flowi4_oif; 1897 orig_oif = fl4->flowi4_oif;
2635 1898
2636 fl4->flowi4_iif = net->loopback_dev->ifindex; 1899 fl4->flowi4_iif = net->loopback_dev->ifindex;
@@ -2730,6 +1993,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2730 1993
2731 if (fib_lookup(net, fl4, &res)) { 1994 if (fib_lookup(net, fl4, &res)) {
2732 res.fi = NULL; 1995 res.fi = NULL;
1996 res.table = NULL;
2733 if (fl4->flowi4_oif) { 1997 if (fl4->flowi4_oif) {
2734 /* Apparently, routing tables are wrong. Assume, 1998 /* Apparently, routing tables are wrong. Assume,
2735 that the destination is on link. 1999 that the destination is on link.
@@ -2791,60 +2055,12 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2791 2055
2792 2056
2793make_route: 2057make_route:
2794 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif, 2058 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2795 tos, dev_out, flags);
2796 if (!IS_ERR(rth)) {
2797 unsigned int hash;
2798
2799 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2800 rt_genid(dev_net(dev_out)));
2801 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2802 }
2803 2059
2804out: 2060out:
2805 rcu_read_unlock(); 2061 rcu_read_unlock();
2806 return rth; 2062 return rth;
2807} 2063}
2808
2809struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2810{
2811 struct rtable *rth;
2812 unsigned int hash;
2813
2814 if (!rt_caching(net))
2815 goto slow_output;
2816
2817 hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2818
2819 rcu_read_lock_bh();
2820 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2821 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2822 if (rth->rt_key_dst == flp4->daddr &&
2823 rth->rt_key_src == flp4->saddr &&
2824 rt_is_output_route(rth) &&
2825 rth->rt_oif == flp4->flowi4_oif &&
2826 rth->rt_mark == flp4->flowi4_mark &&
2827 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2828 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2829 net_eq(dev_net(rth->dst.dev), net) &&
2830 !rt_is_expired(rth)) {
2831 ipv4_validate_peer(rth);
2832 dst_use(&rth->dst, jiffies);
2833 RT_CACHE_STAT_INC(out_hit);
2834 rcu_read_unlock_bh();
2835 if (!flp4->saddr)
2836 flp4->saddr = rth->rt_src;
2837 if (!flp4->daddr)
2838 flp4->daddr = rth->rt_dst;
2839 return rth;
2840 }
2841 RT_CACHE_STAT_INC(out_hlist_search);
2842 }
2843 rcu_read_unlock_bh();
2844
2845slow_output:
2846 return ip_route_output_slow(net, flp4);
2847}
2848EXPORT_SYMBOL_GPL(__ip_route_output_key); 2064EXPORT_SYMBOL_GPL(__ip_route_output_key);
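
__ip_route_output_key() is now the entire output lookup: no pre-hash fast path, every call consults the FIB, and reuse happens through the per-cpu and exception dsts inside __mkroute_output(). A hedged caller sketch:

/* Resolve an output route; the returned rtable holds a reference,
 * released with ip_rt_put().  IS_ERR() signals lookup failure.
 */
static struct rtable *lookup_output_route(struct net *net, __be32 daddr,
					  u32 mark)
{
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.flowi4_mark = mark;
	return __ip_route_output_key(net, &fl4);
}
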
2849 2065
2850static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) 2066static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
@@ -2859,7 +2075,13 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2859 return mtu ? : dst->dev->mtu; 2075 return mtu ? : dst->dev->mtu;
2860} 2076}
2861 2077
2862static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 2078static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2079 struct sk_buff *skb, u32 mtu)
2080{
2081}
2082
2083static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2084 struct sk_buff *skb)
2863{ 2085{
2864} 2086}
2865 2087
@@ -2872,53 +2094,42 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2872static struct dst_ops ipv4_dst_blackhole_ops = { 2094static struct dst_ops ipv4_dst_blackhole_ops = {
2873 .family = AF_INET, 2095 .family = AF_INET,
2874 .protocol = cpu_to_be16(ETH_P_IP), 2096 .protocol = cpu_to_be16(ETH_P_IP),
2875 .destroy = ipv4_dst_destroy,
2876 .check = ipv4_blackhole_dst_check, 2097 .check = ipv4_blackhole_dst_check,
2877 .mtu = ipv4_blackhole_mtu, 2098 .mtu = ipv4_blackhole_mtu,
2878 .default_advmss = ipv4_default_advmss, 2099 .default_advmss = ipv4_default_advmss,
2879 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2100 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2101 .redirect = ipv4_rt_blackhole_redirect,
2880 .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2102 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2881 .neigh_lookup = ipv4_neigh_lookup, 2103 .neigh_lookup = ipv4_neigh_lookup,
2882}; 2104};
2883 2105
2884struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2106struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2885{ 2107{
2886 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2887 struct rtable *ort = (struct rtable *) dst_orig; 2108 struct rtable *ort = (struct rtable *) dst_orig;
2109 struct rtable *rt;
2888 2110
2111 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2889 if (rt) { 2112 if (rt) {
2890 struct dst_entry *new = &rt->dst; 2113 struct dst_entry *new = &rt->dst;
2891 2114
2892 new->__use = 1; 2115 new->__use = 1;
2893 new->input = dst_discard; 2116 new->input = dst_discard;
2894 new->output = dst_discard; 2117 new->output = dst_discard;
2895 dst_copy_metrics(new, &ort->dst);
2896 2118
2897 new->dev = ort->dst.dev; 2119 new->dev = ort->dst.dev;
2898 if (new->dev) 2120 if (new->dev)
2899 dev_hold(new->dev); 2121 dev_hold(new->dev);
2900 2122
2901 rt->rt_key_dst = ort->rt_key_dst; 2123 rt->rt_is_input = ort->rt_is_input;
2902 rt->rt_key_src = ort->rt_key_src;
2903 rt->rt_key_tos = ort->rt_key_tos;
2904 rt->rt_route_iif = ort->rt_route_iif;
2905 rt->rt_iif = ort->rt_iif; 2124 rt->rt_iif = ort->rt_iif;
2906 rt->rt_oif = ort->rt_oif; 2125 rt->rt_pmtu = ort->rt_pmtu;
2907 rt->rt_mark = ort->rt_mark;
2908 2126
2909 rt->rt_genid = rt_genid(net); 2127 rt->rt_genid = rt_genid(net);
2910 rt->rt_flags = ort->rt_flags; 2128 rt->rt_flags = ort->rt_flags;
2911 rt->rt_type = ort->rt_type; 2129 rt->rt_type = ort->rt_type;
2912 rt->rt_dst = ort->rt_dst;
2913 rt->rt_src = ort->rt_src;
2914 rt->rt_gateway = ort->rt_gateway; 2130 rt->rt_gateway = ort->rt_gateway;
2915 rt->rt_spec_dst = ort->rt_spec_dst; 2131
2916 rt->peer = ort->peer; 2132 INIT_LIST_HEAD(&rt->rt_uncached);
2917 if (rt->peer)
2918 atomic_inc(&rt->peer->refcnt);
2919 rt->fi = ort->fi;
2920 if (rt->fi)
2921 atomic_inc(&rt->fi->fib_clntref);
2922 2133
2923 dst_free(new); 2134 dst_free(new);
2924 } 2135 }
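
ipv4_blackhole_route() clones a live route into a dst that accepts but never delivers traffic; with rt_pmtu now stored in the rtable itself, the clone copies it field by field instead of sharing a metrics block. What makes the clone a blackhole is the pair of discard handlers; a sketch of their effect:

/* dst_discard() (sketch): any packet routed through the blackhole
 * is simply freed, in either direction.
 */
static int dst_discard_sketch(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
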
@@ -2945,16 +2156,16 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2945} 2156}
2946EXPORT_SYMBOL_GPL(ip_route_output_flow); 2157EXPORT_SYMBOL_GPL(ip_route_output_flow);
2947 2158
2948static int rt_fill_info(struct net *net, 2159static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2949 struct sk_buff *skb, u32 pid, u32 seq, int event, 2160 struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
2950 int nowait, unsigned int flags) 2161 u32 seq, int event, int nowait, unsigned int flags)
2951{ 2162{
2952 struct rtable *rt = skb_rtable(skb); 2163 struct rtable *rt = skb_rtable(skb);
2953 struct rtmsg *r; 2164 struct rtmsg *r;
2954 struct nlmsghdr *nlh; 2165 struct nlmsghdr *nlh;
2955 unsigned long expires = 0; 2166 unsigned long expires = 0;
2956 const struct inet_peer *peer = rt->peer; 2167 u32 error;
2957 u32 id = 0, ts = 0, tsage = 0, error; 2168 u32 metrics[RTAX_MAX];
2958 2169
2959 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2170 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2960 if (nlh == NULL) 2171 if (nlh == NULL)
@@ -2964,7 +2175,7 @@ static int rt_fill_info(struct net *net,
2964 r->rtm_family = AF_INET; 2175 r->rtm_family = AF_INET;
2965 r->rtm_dst_len = 32; 2176 r->rtm_dst_len = 32;
2966 r->rtm_src_len = 0; 2177 r->rtm_src_len = 0;
2967 r->rtm_tos = rt->rt_key_tos; 2178 r->rtm_tos = fl4->flowi4_tos;
2968 r->rtm_table = RT_TABLE_MAIN; 2179 r->rtm_table = RT_TABLE_MAIN;
2969 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN)) 2180 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2970 goto nla_put_failure; 2181 goto nla_put_failure;
@@ -2975,11 +2186,11 @@ static int rt_fill_info(struct net *net,
2975 if (rt->rt_flags & RTCF_NOTIFY) 2186 if (rt->rt_flags & RTCF_NOTIFY)
2976 r->rtm_flags |= RTM_F_NOTIFY; 2187 r->rtm_flags |= RTM_F_NOTIFY;
2977 2188
2978 if (nla_put_be32(skb, RTA_DST, rt->rt_dst)) 2189 if (nla_put_be32(skb, RTA_DST, dst))
2979 goto nla_put_failure; 2190 goto nla_put_failure;
2980 if (rt->rt_key_src) { 2191 if (src) {
2981 r->rtm_src_len = 32; 2192 r->rtm_src_len = 32;
2982 if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src)) 2193 if (nla_put_be32(skb, RTA_SRC, src))
2983 goto nla_put_failure; 2194 goto nla_put_failure;
2984 } 2195 }
2985 if (rt->dst.dev && 2196 if (rt->dst.dev &&
@@ -2990,69 +2201,40 @@ static int rt_fill_info(struct net *net,
2990 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) 2201 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2991 goto nla_put_failure; 2202 goto nla_put_failure;
2992#endif 2203#endif
2993 if (rt_is_input_route(rt)) { 2204 if (!rt_is_input_route(rt) &&
2994 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst)) 2205 fl4->saddr != src) {
2995 goto nla_put_failure; 2206 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
2996 } else if (rt->rt_src != rt->rt_key_src) {
2997 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
2998 goto nla_put_failure; 2207 goto nla_put_failure;
2999 } 2208 }
3000 if (rt->rt_dst != rt->rt_gateway && 2209 if (rt->rt_gateway &&
3001 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) 2210 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
3002 goto nla_put_failure; 2211 goto nla_put_failure;
3003 2212
3004 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2213 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2214 if (rt->rt_pmtu)
2215 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2216 if (rtnetlink_put_metrics(skb, metrics) < 0)
3005 goto nla_put_failure; 2217 goto nla_put_failure;
3006 2218
3007 if (rt->rt_mark && 2219 if (fl4->flowi4_mark &&
3008 nla_put_be32(skb, RTA_MARK, rt->rt_mark)) 2220 nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
3009 goto nla_put_failure; 2221 goto nla_put_failure;
3010 2222
3011 error = rt->dst.error; 2223 error = rt->dst.error;
3012 if (peer) { 2224 expires = rt->dst.expires;
3013 inet_peer_refcheck(rt->peer); 2225 if (expires) {
3014 id = atomic_read(&peer->ip_id_count) & 0xffff; 2226 if (time_before(jiffies, expires))
3015 if (peer->tcp_ts_stamp) { 2227 expires -= jiffies;
3016 ts = peer->tcp_ts; 2228 else
3017 tsage = get_seconds() - peer->tcp_ts_stamp; 2229 expires = 0;
3018 }
3019 expires = ACCESS_ONCE(peer->pmtu_expires);
3020 if (expires) {
3021 if (time_before(jiffies, expires))
3022 expires -= jiffies;
3023 else
3024 expires = 0;
3025 }
3026 } 2230 }
3027 2231
3028 if (rt_is_input_route(rt)) { 2232 if (rt_is_input_route(rt)) {
3029#ifdef CONFIG_IP_MROUTE 2233 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
3030 __be32 dst = rt->rt_dst; 2234 goto nla_put_failure;
3031
3032 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
3033 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3034 int err = ipmr_get_route(net, skb,
3035 rt->rt_src, rt->rt_dst,
3036 r, nowait);
3037 if (err <= 0) {
3038 if (!nowait) {
3039 if (err == 0)
3040 return 0;
3041 goto nla_put_failure;
3042 } else {
3043 if (err == -EMSGSIZE)
3044 goto nla_put_failure;
3045 error = err;
3046 }
3047 }
3048 } else
3049#endif
3050 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
3051 goto nla_put_failure;
3052 } 2235 }
3053 2236
3054 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, 2237 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3055 expires, error) < 0)
3056 goto nla_put_failure; 2238 goto nla_put_failure;
3057 2239
3058 return nlmsg_end(skb, nlh); 2240 return nlmsg_end(skb, nlh);
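
Because the learned path MTU now lives in rt->rt_pmtu rather than in the shared metrics block, rt_fill_info() copies the metrics into a scratch array and patches the MTU slot before emitting it. Note the RTAX_MTU - 1 indexing: rtnetlink metric types are 1-based while the array is 0-based. The pattern in isolation:

	u32 metrics[RTAX_MAX];

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu)		/* per-route override wins */
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	/* then rtnetlink_put_metrics(skb, metrics) emits RTA_METRICS */
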
@@ -3068,6 +2250,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
3068 struct rtmsg *rtm; 2250 struct rtmsg *rtm;
3069 struct nlattr *tb[RTA_MAX+1]; 2251 struct nlattr *tb[RTA_MAX+1];
3070 struct rtable *rt = NULL; 2252 struct rtable *rt = NULL;
2253 struct flowi4 fl4;
3071 __be32 dst = 0; 2254 __be32 dst = 0;
3072 __be32 src = 0; 2255 __be32 src = 0;
3073 u32 iif; 2256 u32 iif;
@@ -3102,6 +2285,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
3102 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 2285 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3103 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; 2286 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3104 2287
2288 memset(&fl4, 0, sizeof(fl4));
2289 fl4.daddr = dst;
2290 fl4.saddr = src;
2291 fl4.flowi4_tos = rtm->rtm_tos;
2292 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2293 fl4.flowi4_mark = mark;
2294
3105 if (iif) { 2295 if (iif) {
3106 struct net_device *dev; 2296 struct net_device *dev;
3107 2297
@@ -3122,13 +2312,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
3122 if (err == 0 && rt->dst.error) 2312 if (err == 0 && rt->dst.error)
3123 err = -rt->dst.error; 2313 err = -rt->dst.error;
3124 } else { 2314 } else {
3125 struct flowi4 fl4 = {
3126 .daddr = dst,
3127 .saddr = src,
3128 .flowi4_tos = rtm->rtm_tos,
3129 .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
3130 .flowi4_mark = mark,
3131 };
3132 rt = ip_route_output_key(net, &fl4); 2315 rt = ip_route_output_key(net, &fl4);
3133 2316
3134 err = 0; 2317 err = 0;
@@ -3143,7 +2326,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
3143 if (rtm->rtm_flags & RTM_F_NOTIFY) 2326 if (rtm->rtm_flags & RTM_F_NOTIFY)
3144 rt->rt_flags |= RTCF_NOTIFY; 2327 rt->rt_flags |= RTCF_NOTIFY;
3145 2328
3146 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 2329 err = rt_fill_info(net, dst, src, &fl4, skb,
2330 NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
3147 RTM_NEWROUTE, 0, 0); 2331 RTM_NEWROUTE, 0, 0);
3148 if (err <= 0) 2332 if (err <= 0)
3149 goto errout_free; 2333 goto errout_free;
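
Since the rtable no longer records its lookup key (rt_key_dst, rt_key_src, rt_mark and friends are gone), inet_rtm_getroute() builds the flowi4 before branching, uses it for both the input and output lookups, and hands the same structure to rt_fill_info() so the reply can echo the request's tos, mark and addresses. In outline:

	struct flowi4 fl4 = {
		.daddr       = dst,
		.saddr       = src,
		.flowi4_tos  = rtm->rtm_tos,
		.flowi4_oif  = oif,	/* 0 when RTA_OIF is absent */
		.flowi4_mark = mark,
	};
	/* ... ip_route_input_noref() or ip_route_output_key(&fl4) ... */
	err = rt_fill_info(net, dst, src, &fl4, skb, pid, seq,
			   RTM_NEWROUTE, 0, 0);
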
@@ -3159,43 +2343,6 @@ errout_free:
3159 2343
3160int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) 2344int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3161{ 2345{
3162 struct rtable *rt;
3163 int h, s_h;
3164 int idx, s_idx;
3165 struct net *net;
3166
3167 net = sock_net(skb->sk);
3168
3169 s_h = cb->args[0];
3170 if (s_h < 0)
3171 s_h = 0;
3172 s_idx = idx = cb->args[1];
3173 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3174 if (!rt_hash_table[h].chain)
3175 continue;
3176 rcu_read_lock_bh();
3177 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3178 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3179 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
3180 continue;
3181 if (rt_is_expired(rt))
3182 continue;
3183 skb_dst_set_noref(skb, &rt->dst);
3184 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3185 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3186 1, NLM_F_MULTI) <= 0) {
3187 skb_dst_drop(skb);
3188 rcu_read_unlock_bh();
3189 goto done;
3190 }
3191 skb_dst_drop(skb);
3192 }
3193 rcu_read_unlock_bh();
3194 }
3195
3196done:
3197 cb->args[0] = h;
3198 cb->args[1] = idx;
3199 return skb->len; 2346 return skb->len;
3200} 2347}
3201 2348
@@ -3400,26 +2547,34 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
3400 .init = rt_genid_init, 2547 .init = rt_genid_init,
3401}; 2548};
3402 2549
2550static int __net_init ipv4_inetpeer_init(struct net *net)
2551{
2552 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3403 2553
3404#ifdef CONFIG_IP_ROUTE_CLASSID 2554 if (!bp)
3405struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 2555 return -ENOMEM;
3406#endif /* CONFIG_IP_ROUTE_CLASSID */ 2556 inet_peer_base_init(bp);
2557 net->ipv4.peers = bp;
2558 return 0;
2559}
3407 2560
3408static __initdata unsigned long rhash_entries; 2561static void __net_exit ipv4_inetpeer_exit(struct net *net)
3409static int __init set_rhash_entries(char *str)
3410{ 2562{
3411 ssize_t ret; 2563 struct inet_peer_base *bp = net->ipv4.peers;
3412 2564
3413 if (!str) 2565 net->ipv4.peers = NULL;
3414 return 0; 2566 inetpeer_invalidate_tree(bp);
2567 kfree(bp);
2568}
3415 2569
3416 ret = kstrtoul(str, 0, &rhash_entries); 2570static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3417 if (ret) 2571 .init = ipv4_inetpeer_init,
3418 return 0; 2572 .exit = ipv4_inetpeer_exit,
2573};
3419 2574
3420 return 1; 2575#ifdef CONFIG_IP_ROUTE_CLASSID
3421} 2576struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3422__setup("rhash_entries=", set_rhash_entries); 2577#endif /* CONFIG_IP_ROUTE_CLASSID */
3423 2578
3424int __init ip_rt_init(void) 2579int __init ip_rt_init(void)
3425{ 2580{
@@ -3443,31 +2598,12 @@ int __init ip_rt_init(void)
3443 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0) 2598 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3444 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n"); 2599 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3445 2600
3446 rt_hash_table = (struct rt_hash_bucket *) 2601 ipv4_dst_ops.gc_thresh = ~0;
3447 alloc_large_system_hash("IP route cache", 2602 ip_rt_max_size = INT_MAX;
3448 sizeof(struct rt_hash_bucket),
3449 rhash_entries,
3450 (totalram_pages >= 128 * 1024) ?
3451 15 : 17,
3452 0,
3453 &rt_hash_log,
3454 &rt_hash_mask,
3455 0,
3456 rhash_entries ? 0 : 512 * 1024);
3457 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3458 rt_hash_lock_init();
3459
3460 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3461 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3462 2603
3463 devinet_init(); 2604 devinet_init();
3464 ip_fib_init(); 2605 ip_fib_init();
3465 2606
3466 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3467 expires_ljiffies = jiffies;
3468 schedule_delayed_work(&expires_work,
3469 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3470
3471 if (ip_rt_proc_init()) 2607 if (ip_rt_proc_init())
3472 pr_err("Unable to create route proc files\n"); 2608 pr_err("Unable to create route proc files\n");
3473#ifdef CONFIG_XFRM 2609#ifdef CONFIG_XFRM
@@ -3480,6 +2616,7 @@ int __init ip_rt_init(void)
3480 register_pernet_subsys(&sysctl_route_ops); 2616 register_pernet_subsys(&sysctl_route_ops);
3481#endif 2617#endif
3482 register_pernet_subsys(&rt_genid_ops); 2618 register_pernet_subsys(&rt_genid_ops);
2619 register_pernet_subsys(&ipv4_inetpeer_ops);
3483 return rc; 2620 return rc;
3484} 2621}
3485 2622
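
The inetpeer tree also stops being global: each network namespace gets its own inet_peer_base, created and torn down through the standard pernet_operations mechanism registered at the end of ip_rt_init(). The same pattern in miniature, with hypothetical names:

/* Minimal pernet skeleton mirroring ipv4_inetpeer_ops above;
 * "my_feature" is a placeholder, not part of this patch.
 */
static int __net_init my_feature_init(struct net *net)
{
	/* allocate per-namespace state, return -ENOMEM on failure */
	return 0;
}

static void __net_exit my_feature_exit(struct net *net)
{
	/* free that state; runs on namespace teardown */
}

static struct pernet_operations my_feature_ops = {
	.init = my_feature_init,
	.exit = my_feature_exit,
};

/* boot-time wiring: register_pernet_subsys(&my_feature_ops); */
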
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index eab2a7fb15d1..650e1528e1e6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -293,7 +293,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
293 293
294 /* check for timestamp cookie support */ 294 /* check for timestamp cookie support */
295 memset(&tcp_opt, 0, sizeof(tcp_opt)); 295 memset(&tcp_opt, 0, sizeof(tcp_opt));
296 tcp_parse_options(skb, &tcp_opt, &hash_location, 0); 296 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
297 297
298 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) 298 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
299 goto out; 299 goto out;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ef32956ed655..1b5ce96707a3 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -184,7 +184,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
184 int ret; 184 int ret;
185 unsigned long vec[3]; 185 unsigned long vec[3];
186 struct net *net = current->nsproxy->net_ns; 186 struct net *net = current->nsproxy->net_ns;
187#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 187#ifdef CONFIG_MEMCG_KMEM
188 struct mem_cgroup *memcg; 188 struct mem_cgroup *memcg;
189#endif 189#endif
190 190
@@ -203,7 +203,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
203 if (ret) 203 if (ret)
204 return ret; 204 return ret;
205 205
206#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 206#ifdef CONFIG_MEMCG_KMEM
207 rcu_read_lock(); 207 rcu_read_lock();
208 memcg = mem_cgroup_from_task(current); 208 memcg = mem_cgroup_from_task(current);
209 209
@@ -301,6 +301,13 @@ static struct ctl_table ipv4_table[] = {
301 .proc_handler = proc_dointvec 301 .proc_handler = proc_dointvec
302 }, 302 },
303 { 303 {
304 .procname = "ip_early_demux",
305 .data = &sysctl_ip_early_demux,
306 .maxlen = sizeof(int),
307 .mode = 0644,
308 .proc_handler = proc_dointvec
309 },
310 {
304 .procname = "ip_dynaddr", 311 .procname = "ip_dynaddr",
305 .data = &sysctl_ip_dynaddr, 312 .data = &sysctl_ip_dynaddr,
306 .maxlen = sizeof(int), 313 .maxlen = sizeof(int),
@@ -360,6 +367,13 @@ static struct ctl_table ipv4_table[] = {
360 }, 367 },
361#endif 368#endif
362 { 369 {
370 .procname = "tcp_fastopen",
371 .data = &sysctl_tcp_fastopen,
372 .maxlen = sizeof(int),
373 .mode = 0644,
374 .proc_handler = proc_dointvec,
375 },
376 {
363 .procname = "tcp_tw_recycle", 377 .procname = "tcp_tw_recycle",
364 .data = &tcp_death_row.sysctl_tw_recycle, 378 .data = &tcp_death_row.sysctl_tw_recycle,
365 .maxlen = sizeof(int), 379 .maxlen = sizeof(int),
@@ -591,6 +605,20 @@ static struct ctl_table ipv4_table[] = {
591 .mode = 0644, 605 .mode = 0644,
592 .proc_handler = proc_dointvec 606 .proc_handler = proc_dointvec
593 }, 607 },
608 {
609 .procname = "tcp_limit_output_bytes",
610 .data = &sysctl_tcp_limit_output_bytes,
611 .maxlen = sizeof(int),
612 .mode = 0644,
613 .proc_handler = proc_dointvec
614 },
615 {
616 .procname = "tcp_challenge_ack_limit",
617 .data = &sysctl_tcp_challenge_ack_limit,
618 .maxlen = sizeof(int),
619 .mode = 0644,
620 .proc_handler = proc_dointvec
621 },
594#ifdef CONFIG_NET_DMA 622#ifdef CONFIG_NET_DMA
595 { 623 {
596 .procname = "tcp_dma_copybreak", 624 .procname = "tcp_dma_copybreak",
@@ -756,13 +784,6 @@ static struct ctl_table ipv4_net_table[] = {
756 .proc_handler = proc_dointvec 784 .proc_handler = proc_dointvec
757 }, 785 },
758 { 786 {
759 .procname = "rt_cache_rebuild_count",
760 .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count,
761 .maxlen = sizeof(int),
762 .mode = 0644,
763 .proc_handler = proc_dointvec
764 },
765 {
766 .procname = "ping_group_range", 787 .procname = "ping_group_range",
767 .data = &init_net.ipv4.sysctl_ping_group_range, 788 .data = &init_net.ipv4.sysctl_ping_group_range,
768 .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range), 789 .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
@@ -801,8 +822,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
801 table[5].data = 822 table[5].data =
802 &net->ipv4.sysctl_icmp_ratemask; 823 &net->ipv4.sysctl_icmp_ratemask;
803 table[6].data = 824 table[6].data =
804 &net->ipv4.sysctl_rt_cache_rebuild_count;
805 table[7].data =
806 &net->ipv4.sysctl_ping_group_range; 825 &net->ipv4.sysctl_ping_group_range;
807 826
808 } 827 }
@@ -814,8 +833,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
814 net->ipv4.sysctl_ping_group_range[0] = 1; 833 net->ipv4.sysctl_ping_group_range[0] = 1;
815 net->ipv4.sysctl_ping_group_range[1] = 0; 834 net->ipv4.sysctl_ping_group_range[1] = 0;
816 835
817 net->ipv4.sysctl_rt_cache_rebuild_count = 4;
818
819 tcp_init_mem(net); 836 tcp_init_mem(net);
820 837
821 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); 838 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ba605f60e4e..e7e6eeae49c0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -270,6 +270,7 @@
270#include <linux/slab.h> 270#include <linux/slab.h>
271 271
272#include <net/icmp.h> 272#include <net/icmp.h>
273#include <net/inet_common.h>
273#include <net/tcp.h> 274#include <net/tcp.h>
274#include <net/xfrm.h> 275#include <net/xfrm.h>
275#include <net/ip.h> 276#include <net/ip.h>
@@ -376,6 +377,7 @@ void tcp_init_sock(struct sock *sk)
376 skb_queue_head_init(&tp->out_of_order_queue); 377 skb_queue_head_init(&tp->out_of_order_queue);
377 tcp_init_xmit_timers(sk); 378 tcp_init_xmit_timers(sk);
378 tcp_prequeue_init(tp); 379 tcp_prequeue_init(tp);
380 INIT_LIST_HEAD(&tp->tsq_node);
379 381
380 icsk->icsk_rto = TCP_TIMEOUT_INIT; 382 icsk->icsk_rto = TCP_TIMEOUT_INIT;
381 tp->mdev = TCP_TIMEOUT_INIT; 383 tp->mdev = TCP_TIMEOUT_INIT;
@@ -796,6 +798,10 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
796 inet_csk(sk)->icsk_ext_hdr_len - 798 inet_csk(sk)->icsk_ext_hdr_len -
797 tp->tcp_header_len); 799 tp->tcp_header_len);
798 800
801 /* TSQ : try to have two TSO segments in flight */
802 xmit_size_goal = min_t(u32, xmit_size_goal,
803 sysctl_tcp_limit_output_bytes >> 1);
804
799 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); 805 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
800 806
801 /* We try hard to avoid divides here */ 807 /* We try hard to avoid divides here */
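
The new clamp couples the autosized goal to TCP Small Queues: assuming the default sysctl_tcp_limit_output_bytes of 131072 from this series, the goal is capped at 65536 bytes, roughly one 64KB TSO frame, so about two such segments can be in flight per socket. As a worked fragment:

/* Model of the cap; tsq_capped_goal(1U << 20, 131072) == 65536. */
static u32 tsq_capped_goal(u32 xmit_size_goal, int limit_output_bytes)
{
	return min_t(u32, xmit_size_goal, limit_output_bytes >> 1);
}
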
@@ -977,26 +983,67 @@ static inline int select_size(const struct sock *sk, bool sg)
977 return tmp; 983 return tmp;
978} 984}
979 985
986void tcp_free_fastopen_req(struct tcp_sock *tp)
987{
988 if (tp->fastopen_req != NULL) {
989 kfree(tp->fastopen_req);
990 tp->fastopen_req = NULL;
991 }
992}
993
994static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
995{
996 struct tcp_sock *tp = tcp_sk(sk);
997 int err, flags;
998
999 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1000 return -EOPNOTSUPP;
1001 if (tp->fastopen_req != NULL)
1002 return -EALREADY; /* Another Fast Open is in progress */
1003
1004 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1005 sk->sk_allocation);
1006 if (unlikely(tp->fastopen_req == NULL))
1007 return -ENOBUFS;
1008 tp->fastopen_req->data = msg;
1009
1010 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1011 err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1012 msg->msg_namelen, flags);
1013 *size = tp->fastopen_req->copied;
1014 tcp_free_fastopen_req(tp);
1015 return err;
1016}
1017
980int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1018int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
981 size_t size) 1019 size_t size)
982{ 1020{
983 struct iovec *iov; 1021 struct iovec *iov;
984 struct tcp_sock *tp = tcp_sk(sk); 1022 struct tcp_sock *tp = tcp_sk(sk);
985 struct sk_buff *skb; 1023 struct sk_buff *skb;
986 int iovlen, flags, err, copied; 1024 int iovlen, flags, err, copied = 0;
987 int mss_now = 0, size_goal; 1025 int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
988 bool sg; 1026 bool sg;
989 long timeo; 1027 long timeo;
990 1028
991 lock_sock(sk); 1029 lock_sock(sk);
992 1030
993 flags = msg->msg_flags; 1031 flags = msg->msg_flags;
1032 if (flags & MSG_FASTOPEN) {
1033 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
1034 if (err == -EINPROGRESS && copied_syn > 0)
1035 goto out;
1036 else if (err)
1037 goto out_err;
1038 offset = copied_syn;
1039 }
1040
994 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1041 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
995 1042
996 /* Wait for a connection to finish. */ 1043 /* Wait for a connection to finish. */
997 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 1044 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
998 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1045 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
999 goto out_err; 1046 goto do_error;
1000 1047
1001 if (unlikely(tp->repair)) { 1048 if (unlikely(tp->repair)) {
1002 if (tp->repair_queue == TCP_RECV_QUEUE) { 1049 if (tp->repair_queue == TCP_RECV_QUEUE) {
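
From userspace the client side of Fast Open rides the normal send path: no prior connect(); the application passes MSG_FASTOPEN and tcp_sendmsg_fastopen() above drives the handshake, carrying the payload with the SYN. A hedged userspace sketch (MSG_FASTOPEN's value is assumed here for older headers):

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

/* Send the first bytes of a connection inside the SYN; requires
 * the tcp_fastopen sysctl's client bit to be enabled.
 */
static ssize_t tfo_send(int fd, const struct sockaddr_in *dst,
			const void *buf, size_t len)
{
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)dst, sizeof(*dst));
}
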
@@ -1032,6 +1079,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1032 unsigned char __user *from = iov->iov_base; 1079 unsigned char __user *from = iov->iov_base;
1033 1080
1034 iov++; 1081 iov++;
1082 if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
1083 if (offset >= seglen) {
1084 offset -= seglen;
1085 continue;
1086 }
1087 seglen -= offset;
1088 from += offset;
1089 offset = 0;
1090 }
1035 1091
1036 while (seglen > 0) { 1092 while (seglen > 0) {
1037 int copy = 0; 1093 int copy = 0;
@@ -1194,7 +1250,7 @@ out:
1194 if (copied && likely(!tp->repair)) 1250 if (copied && likely(!tp->repair))
1195 tcp_push(sk, flags, mss_now, tp->nonagle); 1251 tcp_push(sk, flags, mss_now, tp->nonagle);
1196 release_sock(sk); 1252 release_sock(sk);
1197 return copied; 1253 return copied + copied_syn;
1198 1254
1199do_fault: 1255do_fault:
1200 if (!skb->len) { 1256 if (!skb->len) {
@@ -1207,7 +1263,7 @@ do_fault:
1207 } 1263 }
1208 1264
1209do_error: 1265do_error:
1210 if (copied) 1266 if (copied + copied_syn)
1211 goto out; 1267 goto out;
1212out_err: 1268out_err:
1213 err = sk_stream_error(sk, flags, err); 1269 err = sk_stream_error(sk, flags, err);
@@ -2625,7 +2681,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2625 /* Cap the max timeout in ms TCP will retry/retrans 2681 /* Cap the max timeout in ms TCP will retry/retrans
2626 * before giving up and aborting (ETIMEDOUT) a connection. 2682 * before giving up and aborting (ETIMEDOUT) a connection.
2627 */ 2683 */
2628 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2684 if (val < 0)
2685 err = -EINVAL;
2686 else
2687 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2629 break; 2688 break;
2630 default: 2689 default:
2631 err = -ENOPROTOOPT; 2690 err = -ENOPROTOOPT;
@@ -3310,8 +3369,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
3310 3369
3311#endif 3370#endif
3312 3371
3313/** 3372/* Each Responder maintains up to two secret values concurrently for
3314 * Each Responder maintains up to two secret values concurrently for
3315 * efficient secret rollover. Each secret value has 4 states: 3373 * efficient secret rollover. Each secret value has 4 states:
3316 * 3374 *
3317 * Generating. (tcp_secret_generating != tcp_secret_primary) 3375 * Generating. (tcp_secret_generating != tcp_secret_primary)
@@ -3563,6 +3621,8 @@ void __init tcp_init(void)
3563 pr_info("Hash tables configured (established %u bind %u)\n", 3621 pr_info("Hash tables configured (established %u bind %u)\n",
3564 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3622 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3565 3623
3624 tcp_metrics_init();
3625
3566 tcp_register_congestion_control(&tcp_reno); 3626 tcp_register_congestion_control(&tcp_reno);
3567 3627
3568 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); 3628 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
@@ -3573,4 +3633,5 @@ void __init tcp_init(void)
3573 tcp_secret_primary = &tcp_secret_one; 3633 tcp_secret_primary = &tcp_secret_one;
3574 tcp_secret_retiring = &tcp_secret_two; 3634 tcp_secret_retiring = &tcp_secret_two;
3575 tcp_secret_secondary = &tcp_secret_two; 3635 tcp_secret_secondary = &tcp_secret_two;
3636 tcp_tasklet_init();
3576} 3637}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 04dbd7ae7c62..4d4db16e336e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
307void tcp_slow_start(struct tcp_sock *tp) 307void tcp_slow_start(struct tcp_sock *tp)
308{ 308{
309 int cnt; /* increase in packets */ 309 int cnt; /* increase in packets */
310 unsigned int delta = 0;
310 311
311 /* RFC3465: ABC Slow start 312 /* RFC3465: ABC Slow start
312 * Increase only after a full MSS of bytes is acked 313 * Increase only after a full MSS of bytes is acked
@@ -333,9 +334,9 @@ void tcp_slow_start(struct tcp_sock *tp)
333 tp->snd_cwnd_cnt += cnt; 334 tp->snd_cwnd_cnt += cnt;
334 while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 335 while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
335 tp->snd_cwnd_cnt -= tp->snd_cwnd; 336 tp->snd_cwnd_cnt -= tp->snd_cwnd;
336 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 337 delta++;
337 tp->snd_cwnd++;
338 } 338 }
339 tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
339} 340}
340EXPORT_SYMBOL_GPL(tcp_slow_start); 341EXPORT_SYMBOL_GPL(tcp_slow_start);
341 342
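
The tcp_slow_start() rewrite accumulates the would-be growth in delta and clamps once at the end, instead of testing snd_cwnd_clamp on every loop iteration. A standalone model of the new logic:

/* slow_start_step(10, 35, 12) == 12: delta reaches 3, then the
 * single min() against the clamp caps the result.
 */
static unsigned int slow_start_step(unsigned int cwnd, unsigned int cnt,
				    unsigned int clamp)
{
	unsigned int delta = 0;

	while (cnt >= cwnd) {	/* one cwnd's worth of ACKed segments */
		cnt -= cwnd;
		delta++;
	}
	return cwnd + delta < clamp ? cwnd + delta : clamp;
}
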
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
new file mode 100644
index 000000000000..a7f729c409d7
--- /dev/null
+++ b/net/ipv4/tcp_fastopen.c
@@ -0,0 +1,11 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3
4int sysctl_tcp_fastopen;
5
6static int __init tcp_fastopen_init(void)
7{
8 return 0;
9}
10
11late_initcall(tcp_fastopen_init);
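
For now the new file only hosts the tcp_fastopen sysctl; the value acts as a bitmask, and tcp_sendmsg_fastopen() above refuses MSG_FASTOPEN unless the client bit is set. Assuming the flag layout from this series:

/* Assumed from the TFO series: bit 0 gates the client path. */
#define TFO_CLIENT_ENABLE	1
/* runtime enable: sysctl -w net.ipv4.tcp_fastopen=1 */
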
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8b..2fd2bc9e3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 1; 88int sysctl_tcp_adv_win_scale __read_mostly = 1;
89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
90 90
91/* rfc5961 challenge ack rate limiting */
92int sysctl_tcp_challenge_ack_limit = 100;
93
91int sysctl_tcp_stdurg __read_mostly; 94int sysctl_tcp_stdurg __read_mostly;
92int sysctl_tcp_rfc1337 __read_mostly; 95int sysctl_tcp_rfc1337 __read_mostly;
93int sysctl_tcp_max_orphans __read_mostly = NR_FILE; 96int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
94int sysctl_tcp_frto __read_mostly = 2; 97int sysctl_tcp_frto __read_mostly = 2;
95int sysctl_tcp_frto_response __read_mostly; 98int sysctl_tcp_frto_response __read_mostly;
96int sysctl_tcp_nometrics_save __read_mostly;
97 99
98int sysctl_tcp_thin_dupack __read_mostly; 100int sysctl_tcp_thin_dupack __read_mostly;
99 101
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
701/* Calculate rto without backoff. This is the second half of Van Jacobson's 703/* Calculate rto without backoff. This is the second half of Van Jacobson's
702 * routine referred to above. 704 * routine referred to above.
703 */ 705 */
704static inline void tcp_set_rto(struct sock *sk) 706void tcp_set_rto(struct sock *sk)
705{ 707{
706 const struct tcp_sock *tp = tcp_sk(sk); 708 const struct tcp_sock *tp = tcp_sk(sk);
707 /* Old crap is replaced with new one. 8) 709 /* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
728 tcp_bound_rto(sk); 730 tcp_bound_rto(sk);
729} 731}
730 732
731/* Save metrics learned by this TCP session.
732 This function is called only, when TCP finishes successfully
733 i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
734 */
735void tcp_update_metrics(struct sock *sk)
736{
737 struct tcp_sock *tp = tcp_sk(sk);
738 struct dst_entry *dst = __sk_dst_get(sk);
739
740 if (sysctl_tcp_nometrics_save)
741 return;
742
743 dst_confirm(dst);
744
745 if (dst && (dst->flags & DST_HOST)) {
746 const struct inet_connection_sock *icsk = inet_csk(sk);
747 int m;
748 unsigned long rtt;
749
750 if (icsk->icsk_backoff || !tp->srtt) {
751 /* This session failed to estimate rtt. Why?
752 * Probably, no packets returned in time.
753 * Reset our results.
754 */
755 if (!(dst_metric_locked(dst, RTAX_RTT)))
756 dst_metric_set(dst, RTAX_RTT, 0);
757 return;
758 }
759
760 rtt = dst_metric_rtt(dst, RTAX_RTT);
761 m = rtt - tp->srtt;
762
763 /* If newly calculated rtt larger than stored one,
764 * store new one. Otherwise, use EWMA. Remember,
765 * rtt overestimation is always better than underestimation.
766 */
767 if (!(dst_metric_locked(dst, RTAX_RTT))) {
768 if (m <= 0)
769 set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
770 else
771 set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
772 }
773
774 if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
775 unsigned long var;
776 if (m < 0)
777 m = -m;
778
779 /* Scale deviation to rttvar fixed point */
780 m >>= 1;
781 if (m < tp->mdev)
782 m = tp->mdev;
783
784 var = dst_metric_rtt(dst, RTAX_RTTVAR);
785 if (m >= var)
786 var = m;
787 else
788 var -= (var - m) >> 2;
789
790 set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
791 }
792
793 if (tcp_in_initial_slowstart(tp)) {
794 /* Slow start still did not finish. */
795 if (dst_metric(dst, RTAX_SSTHRESH) &&
796 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
797 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
798 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
799 if (!dst_metric_locked(dst, RTAX_CWND) &&
800 tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
801 dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
802 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
803 icsk->icsk_ca_state == TCP_CA_Open) {
804 /* Cong. avoidance phase, cwnd is reliable. */
805 if (!dst_metric_locked(dst, RTAX_SSTHRESH))
806 dst_metric_set(dst, RTAX_SSTHRESH,
807 max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
808 if (!dst_metric_locked(dst, RTAX_CWND))
809 dst_metric_set(dst, RTAX_CWND,
810 (dst_metric(dst, RTAX_CWND) +
811 tp->snd_cwnd) >> 1);
812 } else {
813 /* Else slow start did not finish, cwnd is non-sense,
814 ssthresh may be also invalid.
815 */
816 if (!dst_metric_locked(dst, RTAX_CWND))
817 dst_metric_set(dst, RTAX_CWND,
818 (dst_metric(dst, RTAX_CWND) +
819 tp->snd_ssthresh) >> 1);
820 if (dst_metric(dst, RTAX_SSTHRESH) &&
821 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
822 tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
823 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
824 }
825
826 if (!dst_metric_locked(dst, RTAX_REORDERING)) {
827 if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
828 tp->reordering != sysctl_tcp_reordering)
829 dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
830 }
831 }
832}
833
834__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) 733__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
835{ 734{
836 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 735 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
867 * Packet counting of FACK is based on in-order assumptions, therefore TCP 766 * Packet counting of FACK is based on in-order assumptions, therefore TCP
868 * disables it when reordering is detected 767 * disables it when reordering is detected
869 */ 768 */
870static void tcp_disable_fack(struct tcp_sock *tp) 769void tcp_disable_fack(struct tcp_sock *tp)
871{ 770{
872 /* RFC3517 uses different metric in lost marker => reset on change */ 771 /* RFC3517 uses different metric in lost marker => reset on change */
873 if (tcp_is_fack(tp)) 772 if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
881 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; 780 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
882} 781}
883 782
884/* Initialize metrics on socket. */
885
886static void tcp_init_metrics(struct sock *sk)
887{
888 struct tcp_sock *tp = tcp_sk(sk);
889 struct dst_entry *dst = __sk_dst_get(sk);
890
891 if (dst == NULL)
892 goto reset;
893
894 dst_confirm(dst);
895
896 if (dst_metric_locked(dst, RTAX_CWND))
897 tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
898 if (dst_metric(dst, RTAX_SSTHRESH)) {
899 tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
900 if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
901 tp->snd_ssthresh = tp->snd_cwnd_clamp;
902 } else {
903 /* ssthresh may have been reduced unnecessarily during.
904 * 3WHS. Restore it back to its initial default.
905 */
906 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
907 }
908 if (dst_metric(dst, RTAX_REORDERING) &&
909 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
910 tcp_disable_fack(tp);
911 tcp_disable_early_retrans(tp);
912 tp->reordering = dst_metric(dst, RTAX_REORDERING);
913 }
914
915 if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
916 goto reset;
917
918 /* Initial rtt is determined from SYN,SYN-ACK.
919 * The segment is small and rtt may appear much
920 * less than real one. Use per-dst memory
921 * to make it more realistic.
922 *
923 * A bit of theory. RTT is time passed after "normal" sized packet
924 * is sent until it is ACKed. In normal circumstances sending small
925 * packets force peer to delay ACKs and calculation is correct too.
926 * The algorithm is adaptive and, provided we follow specs, it
927 * NEVER underestimate RTT. BUT! If peer tries to make some clever
928 * tricks sort of "quick acks" for time long enough to decrease RTT
929 * to low value, and then abruptly stops to do it and starts to delay
930 * ACKs, wait for troubles.
931 */
932 if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
933 tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
934 tp->rtt_seq = tp->snd_nxt;
935 }
936 if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
937 tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
938 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
939 }
940 tcp_set_rto(sk);
941reset:
942 if (tp->srtt == 0) {
943 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
944 * 3WHS. This is most likely due to retransmission,
945 * including spurious one. Reset the RTO back to 3secs
946 * from the more aggressive 1sec to avoid more spurious
947 * retransmission.
948 */
949 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
950 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
951 }
952 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
953 * retransmitted. In light of RFC6298 more aggressive 1sec
954 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
955 * retransmission has occurred.
956 */
957 if (tp->total_retrans > 1)
958 tp->snd_cwnd = 1;
959 else
960 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
961 tp->snd_cwnd_stamp = tcp_time_stamp;
962}
963
964static void tcp_update_reordering(struct sock *sk, const int metric, 783static void tcp_update_reordering(struct sock *sk, const int metric,
965 const int ts) 784 const int ts)
966{ 785{
@@ -2702,7 +2521,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
2702/* Nothing was retransmitted or returned timestamp is less 2521/* Nothing was retransmitted or returned timestamp is less
2703 * than timestamp of the first retransmission. 2522 * than timestamp of the first retransmission.
2704 */ 2523 */
2705static inline int tcp_packet_delayed(const struct tcp_sock *tp) 2524static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2706{ 2525{
2707 return !tp->retrans_stamp || 2526 return !tp->retrans_stamp ||
2708 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2527 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2763,7 +2582,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
2763 tp->snd_cwnd_stamp = tcp_time_stamp; 2582 tp->snd_cwnd_stamp = tcp_time_stamp;
2764} 2583}
2765 2584
2766static inline int tcp_may_undo(const struct tcp_sock *tp) 2585static inline bool tcp_may_undo(const struct tcp_sock *tp)
2767{ 2586{
2768 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2587 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2769} 2588}
@@ -3552,13 +3371,13 @@ static void tcp_ack_probe(struct sock *sk)
3552 } 3371 }
3553} 3372}
3554 3373
3555static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3374static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3556{ 3375{
3557 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3376 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3558 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; 3377 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3559} 3378}
3560 3379
3561static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3380static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3562{ 3381{
3563 const struct tcp_sock *tp = tcp_sk(sk); 3382 const struct tcp_sock *tp = tcp_sk(sk);
3564 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3383 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
@@ -3568,7 +3387,7 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3568/* Check that window update is acceptable. 3387/* Check that window update is acceptable.
3569 * The function assumes that snd_una<=ack<=snd_next. 3388 * The function assumes that snd_una<=ack<=snd_next.
3570 */ 3389 */
3571static inline int tcp_may_update_window(const struct tcp_sock *tp, 3390static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3572 const u32 ack, const u32 ack_seq, 3391 const u32 ack, const u32 ack_seq,
3573 const u32 nwin) 3392 const u32 nwin)
3574{ 3393{
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3869 tcp_cong_avoid(sk, ack, prior_in_flight); 3688 tcp_cong_avoid(sk, ack, prior_in_flight);
3870 } 3689 }
3871 3690
3872 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3691 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
3873 dst_confirm(__sk_dst_get(sk)); 3692 struct dst_entry *dst = __sk_dst_get(sk);
3874 3693 if (dst)
3694 dst_confirm(dst);
3695 }
3875 return 1; 3696 return 1;
3876 3697
3877no_queue: 3698no_queue:
@@ -3911,7 +3732,8 @@ old_ack:
3911 * the fast version below fails. 3732 * the fast version below fails.
3912 */ 3733 */
3913void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, 3734void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
3914 const u8 **hvpp, int estab) 3735 const u8 **hvpp, int estab,
3736 struct tcp_fastopen_cookie *foc)
3915{ 3737{
3916 const unsigned char *ptr; 3738 const unsigned char *ptr;
3917 const struct tcphdr *th = tcp_hdr(skb); 3739 const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
4018 break; 3840 break;
4019 } 3841 }
4020 break; 3842 break;
4021 }
4022 3843
3844 case TCPOPT_EXP:
3845 /* Fast Open option shares code 254 using a
3846 * 16 bits magic number. It's valid only in
3847 * SYN or SYN-ACK with an even size.
3848 */
3849 if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
3850 get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
3851 foc == NULL || !th->syn || (opsize & 1))
3852 break;
3853 foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
3854 if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
3855 foc->len <= TCP_FASTOPEN_COOKIE_MAX)
3856 memcpy(foc->val, ptr + 2, foc->len);
3857 else if (foc->len != 0)
3858 foc->len = -1;
3859 break;
3860
3861 }
4023 ptr += opsize-2; 3862 ptr += opsize-2;
4024 length -= opsize; 3863 length -= opsize;
4025 } 3864 }
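
The parser accepts Fast Open as an experimental option: kind 254 (TCPOPT_EXP), a length byte, the 16-bit magic number, then a 4 to 16 byte cookie, total length even, SYN segments only. Laid out as bytes, with the 0xF989 magic taken from this series' definition of TCPOPT_FASTOPEN_MAGIC:

/* A SYN option carrying an 8-byte Fast Open cookie:
 *
 *   kind  len  magic       cookie (8 bytes)
 *   254   12   0xf9 0x89   c0 c1 c2 c3 c4 c5 c6 c7
 *
 * opsize (12) - TCPOLEN_EXP_FASTOPEN_BASE (4) gives foc->len = 8,
 * inside the accepted [4, 16] range.
 */
static const unsigned char tfo_opt_example[] = {
	254, 12, 0xf9, 0x89,
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
};
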
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
4061 if (tcp_parse_aligned_timestamp(tp, th)) 3900 if (tcp_parse_aligned_timestamp(tp, th))
4062 return true; 3901 return true;
4063 } 3902 }
4064 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); 3903 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
4065 return true; 3904 return true;
4066} 3905}
4067 3906
@@ -4167,7 +4006,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
4167 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 4006 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
4168} 4007}
4169 4008
4170static inline int tcp_paws_discard(const struct sock *sk, 4009static inline bool tcp_paws_discard(const struct sock *sk,
4171 const struct sk_buff *skb) 4010 const struct sk_buff *skb)
4172{ 4011{
4173 const struct tcp_sock *tp = tcp_sk(sk); 4012 const struct tcp_sock *tp = tcp_sk(sk);
@@ -4189,7 +4028,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
4189 * (borrowed from freebsd) 4028 * (borrowed from freebsd)
4190 */ 4029 */
4191 4030
4192static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 4031static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
4193{ 4032{
4194 return !before(end_seq, tp->rcv_wup) && 4033 return !before(end_seq, tp->rcv_wup) &&
4195 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 4034 !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4512,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
4512static bool tcp_prune_ofo_queue(struct sock *sk); 4351static bool tcp_prune_ofo_queue(struct sock *sk);
4513static int tcp_prune_queue(struct sock *sk); 4352static int tcp_prune_queue(struct sock *sk);
4514 4353
4515static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) 4354static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
4355 unsigned int size)
4516{ 4356{
4517 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4357 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4518 !sk_rmem_schedule(sk, size)) { 4358 !sk_rmem_schedule(sk, skb, size)) {
4519 4359
4520 if (tcp_prune_queue(sk) < 0) 4360 if (tcp_prune_queue(sk) < 0)
4521 return -1; 4361 return -1;
4522 4362
4523 if (!sk_rmem_schedule(sk, size)) { 4363 if (!sk_rmem_schedule(sk, skb, size)) {
4524 if (!tcp_prune_ofo_queue(sk)) 4364 if (!tcp_prune_ofo_queue(sk))
4525 return -1; 4365 return -1;
4526 4366
4527 if (!sk_rmem_schedule(sk, size)) 4367 if (!sk_rmem_schedule(sk, skb, size))
4528 return -1; 4368 return -1;
4529 } 4369 }
4530 } 4370 }
@@ -4579,8 +4419,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4579 4419
4580 TCP_ECN_check_ce(tp, skb); 4420 TCP_ECN_check_ce(tp, skb);
4581 4421
4582 if (tcp_try_rmem_schedule(sk, skb->truesize)) { 4422 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
4583 /* TODO: should increment a counter */ 4423 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
4584 __kfree_skb(skb); 4424 __kfree_skb(skb);
4585 return; 4425 return;
4586 } 4426 }
@@ -4589,6 +4429,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4589 tp->pred_flags = 0; 4429 tp->pred_flags = 0;
4590 inet_csk_schedule_ack(sk); 4430 inet_csk_schedule_ack(sk);
4591 4431
4432 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
4592 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4433 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4593 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4434 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4594 4435
@@ -4642,6 +4483,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4642 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4483 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4643 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4484 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4644 /* All the bits are present. Drop. */ 4485 /* All the bits are present. Drop. */
4486 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
4645 __kfree_skb(skb); 4487 __kfree_skb(skb);
4646 skb = NULL; 4488 skb = NULL;
4647 tcp_dsack_set(sk, seq, end_seq); 4489 tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4522,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4680 __skb_unlink(skb1, &tp->out_of_order_queue); 4522 __skb_unlink(skb1, &tp->out_of_order_queue);
4681 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4523 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4682 TCP_SKB_CB(skb1)->end_seq); 4524 TCP_SKB_CB(skb1)->end_seq);
4525 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
4683 __kfree_skb(skb1); 4526 __kfree_skb(skb1);
4684 } 4527 }
4685 4528
@@ -4710,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
4710 4553
4711int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4554int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4712{ 4555{
4713 struct sk_buff *skb; 4556 struct sk_buff *skb = NULL;
4714 struct tcphdr *th; 4557 struct tcphdr *th;
4715 bool fragstolen; 4558 bool fragstolen;
4716 4559
4717 if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
4718 goto err;
4719
4720 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); 4560 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
4721 if (!skb) 4561 if (!skb)
4722 goto err; 4562 goto err;
4723 4563
4564 if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
4565 goto err_free;
4566
4724 th = (struct tcphdr *)skb_put(skb, sizeof(*th)); 4567 th = (struct tcphdr *)skb_put(skb, sizeof(*th));
4725 skb_reset_transport_header(skb); 4568 skb_reset_transport_header(skb);
4726 memset(th, 0, sizeof(*th)); 4569 memset(th, 0, sizeof(*th));
@@ -4791,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4791 if (eaten <= 0) { 4634 if (eaten <= 0) {
4792queue_and_out: 4635queue_and_out:
4793 if (eaten < 0 && 4636 if (eaten < 0 &&
4794 tcp_try_rmem_schedule(sk, skb->truesize)) 4637 tcp_try_rmem_schedule(sk, skb, skb->truesize))
4795 goto drop; 4638 goto drop;
4796 4639
4797 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4640 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
@@ -5372,7 +5215,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk,
5372 return result; 5215 return result;
5373} 5216}
5374 5217
5375static inline int tcp_checksum_complete_user(struct sock *sk, 5218static inline bool tcp_checksum_complete_user(struct sock *sk,
5376 struct sk_buff *skb) 5219 struct sk_buff *skb)
5377{ 5220{
5378 return !skb_csum_unnecessary(skb) && 5221 return !skb_csum_unnecessary(skb) &&
@@ -5426,11 +5269,28 @@ out:
5426} 5269}
5427#endif /* CONFIG_NET_DMA */ 5270#endif /* CONFIG_NET_DMA */
5428 5271
5272static void tcp_send_challenge_ack(struct sock *sk)
5273{
5274 /* unprotected vars, we don't care about overwrites */
5275 static u32 challenge_timestamp;
5276 static unsigned int challenge_count;
5277 u32 now = jiffies / HZ;
5278
5279 if (now != challenge_timestamp) {
5280 challenge_timestamp = now;
5281 challenge_count = 0;
5282 }
5283 if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
5284 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
5285 tcp_send_ack(sk);
5286 }
5287}
5288
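The new helper above rate-limits challenge ACKs with deliberately unlocked static state; precision is not required, only a rough per-second budget. A standalone sketch of the same scheme, with the limit value assumed (the sysctl default at the time was on the order of 100):

#include <stdbool.h>
#include <time.h>

#define CHALLENGE_ACK_LIMIT 100		/* assumed per-second budget */

static bool challenge_ack_allowed(void)
{
	static time_t window;		/* unlocked on purpose: a race only
					 * skews the count slightly */
	static unsigned int count;
	time_t now = time(NULL);

	if (now != window) {		/* entered a new one-second window */
		window = now;
		count = 0;
	}
	return ++count <= CHALLENGE_ACK_LIMIT;
}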
5429/* Does PAWS and seqno based validation of an incoming segment, flags will 5289/* Does PAWS and seqno based validation of an incoming segment, flags will
5430 * play significant role here. 5290 * play significant role here.
5431 */ 5291 */
5432static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5292static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5433 const struct tcphdr *th, int syn_inerr) 5293 const struct tcphdr *th, int syn_inerr)
5434{ 5294{
5435 const u8 *hash_location; 5295 const u8 *hash_location;
5436 struct tcp_sock *tp = tcp_sk(sk); 5296 struct tcp_sock *tp = tcp_sk(sk);
@@ -5455,14 +5315,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5455 * an acknowledgment should be sent in reply (unless the RST 5315 * an acknowledgment should be sent in reply (unless the RST
5456 * bit is set, if so drop the segment and return)". 5316 * bit is set, if so drop the segment and return)".
5457 */ 5317 */
5458 if (!th->rst) 5318 if (!th->rst) {
5319 if (th->syn)
5320 goto syn_challenge;
5459 tcp_send_dupack(sk, skb); 5321 tcp_send_dupack(sk, skb);
5322 }
5460 goto discard; 5323 goto discard;
5461 } 5324 }
5462 5325
5463 /* Step 2: check RST bit */ 5326 /* Step 2: check RST bit */
5464 if (th->rst) { 5327 if (th->rst) {
5465 tcp_reset(sk); 5328 /* RFC 5961 3.2 :
5329 * If sequence number exactly matches RCV.NXT, then
5330 * RESET the connection
5331 * else
5332 * Send a challenge ACK
5333 */
5334 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
5335 tcp_reset(sk);
5336 else
5337 tcp_send_challenge_ack(sk);
5466 goto discard; 5338 goto discard;
5467 } 5339 }
5468 5340
@@ -5473,20 +5345,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5473 5345
5474 /* step 3: check security and precedence [ignored] */ 5346 /* step 3: check security and precedence [ignored] */
5475 5347
5476 /* step 4: Check for a SYN in window. */ 5348 /* step 4: Check for a SYN
5477 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5349 * RFC 5961 4.2 : Send a challenge ack
5350 */
5351 if (th->syn) {
5352syn_challenge:
5478 if (syn_inerr) 5353 if (syn_inerr)
5479 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5354 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
5480 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); 5355 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
5481 tcp_reset(sk); 5356 tcp_send_challenge_ack(sk);
5482 return -1; 5357 goto discard;
5483 } 5358 }
5484 5359
5485 return 1; 5360 return true;
5486 5361
5487discard: 5362discard:
5488 __kfree_skb(skb); 5363 __kfree_skb(skb);
5489 return 0; 5364 return false;
5490} 5365}
5491 5366
5492/* 5367/*
@@ -5516,7 +5391,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5516 const struct tcphdr *th, unsigned int len) 5391 const struct tcphdr *th, unsigned int len)
5517{ 5392{
5518 struct tcp_sock *tp = tcp_sk(sk); 5393 struct tcp_sock *tp = tcp_sk(sk);
5519 int res;
5520 5394
5521 /* 5395 /*
5522 * Header prediction. 5396 * Header prediction.
@@ -5602,7 +5476,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5602 if (tp->copied_seq == tp->rcv_nxt && 5476 if (tp->copied_seq == tp->rcv_nxt &&
5603 len - tcp_header_len <= tp->ucopy.len) { 5477 len - tcp_header_len <= tp->ucopy.len) {
5604#ifdef CONFIG_NET_DMA 5478#ifdef CONFIG_NET_DMA
5605 if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 5479 if (tp->ucopy.task == current &&
5480 sock_owned_by_user(sk) &&
5481 tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
5606 copied_early = 1; 5482 copied_early = 1;
5607 eaten = 1; 5483 eaten = 1;
5608 } 5484 }
@@ -5693,9 +5569,8 @@ slow_path:
5693 * Standard slow path. 5569 * Standard slow path.
5694 */ 5570 */
5695 5571
5696 res = tcp_validate_incoming(sk, skb, th, 1); 5572 if (!tcp_validate_incoming(sk, skb, th, 1))
5697 if (res <= 0) 5573 return 0;
5698 return -res;
5699 5574
5700step5: 5575step5:
5701 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5576 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5604,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5729 5604
5730 tcp_set_state(sk, TCP_ESTABLISHED); 5605 tcp_set_state(sk, TCP_ESTABLISHED);
5731 5606
5732 if (skb != NULL) 5607 if (skb != NULL) {
5608 inet_sk_rx_dst_set(sk, skb);
5733 security_inet_conn_established(sk, skb); 5609 security_inet_conn_established(sk, skb);
5610 }
5734 5611
5735 /* Make sure socket is routed, for correct metrics. */ 5612 /* Make sure socket is routed, for correct metrics. */
5736 icsk->icsk_af_ops->rebuild_header(sk); 5613 icsk->icsk_af_ops->rebuild_header(sk);
@@ -5760,6 +5637,45 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5760 } 5637 }
5761} 5638}
5762 5639
5640static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5641 struct tcp_fastopen_cookie *cookie)
5642{
5643 struct tcp_sock *tp = tcp_sk(sk);
5644 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
5645 u16 mss = tp->rx_opt.mss_clamp;
5646 bool syn_drop;
5647
5648 if (mss == tp->rx_opt.user_mss) {
5649 struct tcp_options_received opt;
5650 const u8 *hash_location;
5651
5652 /* Get original SYNACK MSS value if user MSS sets mss_clamp */
5653 tcp_clear_options(&opt);
5654 opt.user_mss = opt.mss_clamp = 0;
5655 tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
5656 mss = opt.mss_clamp;
5657 }
5658
5659 if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */
5660 cookie->len = -1;
5661
5662 /* The SYN-ACK neither has a cookie nor acknowledges the data. Presumably
5663 * the remote receives only the retransmitted (regular) SYNs: either
5664 * the original SYN-data or the corresponding SYN-ACK is lost.
5665 */
5666 syn_drop = (cookie->len <= 0 && data &&
5667 inet_csk(sk)->icsk_retransmits);
5668
5669 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5670
5671 if (data) { /* Retransmit unacked data in SYN */
5672 tcp_retransmit_skb(sk, data);
5673 tcp_rearm_rto(sk);
5674 return true;
5675 }
5676 return false;
5677}
5678
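tcp_rcv_fastopen_synack() is the receive half of the client-side Fast Open work in this merge; the send half is driven from userspace by sendto() with MSG_FASTOPEN, which carries a cookie request (and, once cached, cookie plus data) on the SYN. A hedged sketch against a hypothetical 127.0.0.1:80 listener, error handling trimmed:

#include <netinet/in.h>
#include <stddef.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

static int tfo_connect_send(const void *buf, size_t len)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(80),	/* hypothetical service */
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* First use requests a cookie on the SYN; subsequent calls send
	 * the cached cookie plus this payload in the SYN itself.
	 */
	if (sendto(fd, buf, len, MSG_FASTOPEN,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}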
5763static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5679static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5764 const struct tcphdr *th, unsigned int len) 5680 const struct tcphdr *th, unsigned int len)
5765{ 5681{
@@ -5767,9 +5683,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5767 struct inet_connection_sock *icsk = inet_csk(sk); 5683 struct inet_connection_sock *icsk = inet_csk(sk);
5768 struct tcp_sock *tp = tcp_sk(sk); 5684 struct tcp_sock *tp = tcp_sk(sk);
5769 struct tcp_cookie_values *cvp = tp->cookie_values; 5685 struct tcp_cookie_values *cvp = tp->cookie_values;
5686 struct tcp_fastopen_cookie foc = { .len = -1 };
5770 int saved_clamp = tp->rx_opt.mss_clamp; 5687 int saved_clamp = tp->rx_opt.mss_clamp;
5771 5688
5772 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); 5689 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
5773 5690
5774 if (th->ack) { 5691 if (th->ack) {
5775 /* rfc793: 5692 /* rfc793:
@@ -5779,11 +5696,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5779 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 5696 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
5780 * a reset (unless the RST bit is set, if so drop 5697 * a reset (unless the RST bit is set, if so drop
5781 * the segment and return)" 5698 * the segment and return)"
5782 *
5783 * We do not send data with SYN, so that RFC-correct
5784 * test reduces to:
5785 */ 5699 */
5786 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) 5700 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
5701 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
5787 goto reset_and_undo; 5702 goto reset_and_undo;
5788 5703
5789 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 5704 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5810,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5895 5810
5896 tcp_finish_connect(sk, skb); 5811 tcp_finish_connect(sk, skb);
5897 5812
5813 if ((tp->syn_fastopen || tp->syn_data) &&
5814 tcp_rcv_fastopen_synack(sk, skb, &foc))
5815 return -1;
5816
5898 if (sk->sk_write_pending || 5817 if (sk->sk_write_pending ||
5899 icsk->icsk_accept_queue.rskq_defer_accept || 5818 icsk->icsk_accept_queue.rskq_defer_accept ||
5900 icsk->icsk_ack.pingpong) { 5819 icsk->icsk_ack.pingpong) {
@@ -6013,7 +5932,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6013 struct tcp_sock *tp = tcp_sk(sk); 5932 struct tcp_sock *tp = tcp_sk(sk);
6014 struct inet_connection_sock *icsk = inet_csk(sk); 5933 struct inet_connection_sock *icsk = inet_csk(sk);
6015 int queued = 0; 5934 int queued = 0;
6016 int res;
6017 5935
6018 tp->rx_opt.saw_tstamp = 0; 5936 tp->rx_opt.saw_tstamp = 0;
6019 5937
@@ -6068,9 +5986,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6068 return 0; 5986 return 0;
6069 } 5987 }
6070 5988
6071 res = tcp_validate_incoming(sk, skb, th, 0); 5989 if (!tcp_validate_incoming(sk, skb, th, 0))
6072 if (res <= 0) 5990 return 0;
6073 return -res;
6074 5991
6075 /* step 5: check the ACK field */ 5992 /* step 5: check the ACK field */
6076 if (th->ack) { 5993 if (th->ack) {
@@ -6126,9 +6043,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6126 6043
6127 case TCP_FIN_WAIT1: 6044 case TCP_FIN_WAIT1:
6128 if (tp->snd_una == tp->write_seq) { 6045 if (tp->snd_una == tp->write_seq) {
6046 struct dst_entry *dst;
6047
6129 tcp_set_state(sk, TCP_FIN_WAIT2); 6048 tcp_set_state(sk, TCP_FIN_WAIT2);
6130 sk->sk_shutdown |= SEND_SHUTDOWN; 6049 sk->sk_shutdown |= SEND_SHUTDOWN;
6131 dst_confirm(__sk_dst_get(sk)); 6050
6051 dst = __sk_dst_get(sk);
6052 if (dst)
6053 dst_confirm(dst);
6132 6054
6133 if (!sock_flag(sk, SOCK_DEAD)) 6055 if (!sock_flag(sk, SOCK_DEAD))
6134 /* Wake up lingering close() */ 6056 /* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c8d28c433b2b..42b2a6a73092 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -209,22 +209,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209 } 209 }
210 210
211 if (tcp_death_row.sysctl_tw_recycle && 211 if (tcp_death_row.sysctl_tw_recycle &&
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) { 212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr); 213 tcp_fetch_timewait_stamp(sk, &rt->dst);
214 /*
215 * VJ's idea. We save last timestamp seen from
216 * the destination in peer table, when entering state
217 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
218 * when trying new connection.
219 */
220 if (peer) {
221 inet_peer_refcheck(peer);
222 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
223 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
224 tp->rx_opt.ts_recent = peer->tcp_ts;
225 }
226 }
227 }
228 214
229 inet->inet_dport = usin->sin_port; 215 inet->inet_dport = usin->sin_port;
230 inet->inet_daddr = daddr; 216 inet->inet_daddr = daddr;
@@ -289,12 +275,15 @@ failure:
289EXPORT_SYMBOL(tcp_v4_connect); 275EXPORT_SYMBOL(tcp_v4_connect);
290 276
291/* 277/*
292 * This routine does path mtu discovery as defined in RFC1191. 278 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
279 * It can be called through tcp_release_cb() if socket was owned by user
280 * at the time tcp_v4_err() was called to handle ICMP message.
293 */ 281 */
294static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu) 282static void tcp_v4_mtu_reduced(struct sock *sk)
295{ 283{
296 struct dst_entry *dst; 284 struct dst_entry *dst;
297 struct inet_sock *inet = inet_sk(sk); 285 struct inet_sock *inet = inet_sk(sk);
286 u32 mtu = tcp_sk(sk)->mtu_info;
298 287
299 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs 288 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
300 * send out by Linux are always <576bytes so they should go through 289 * send out by Linux are always <576bytes so they should go through
@@ -303,17 +292,10 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
303 if (sk->sk_state == TCP_LISTEN) 292 if (sk->sk_state == TCP_LISTEN)
304 return; 293 return;
305 294
306 /* We don't check in the destentry if pmtu discovery is forbidden 295 dst = inet_csk_update_pmtu(sk, mtu);
307 * on this route. We just assume that no packet-too-big packets 296 if (!dst)
308 * are sent back when pmtu discovery is not active.
309 * There is a small race when the user changes this flag in the
310 * route, but I think that's acceptable.
311 */
312 if ((dst = __sk_dst_check(sk, 0)) == NULL)
313 return; 297 return;
314 298
315 dst->ops->update_pmtu(dst, mtu);
316
317 /* Something is about to be wrong... Remember soft error 299 /* Something is about to be wrong... Remember soft error
318 * for the case, if this connection will not able to recover. 300 * for the case, if this connection will not able to recover.
319 */ 301 */
@@ -335,6 +317,14 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
335 } /* else let the usual retransmit timer handle it */ 317 } /* else let the usual retransmit timer handle it */
336} 318}
337 319
320static void do_redirect(struct sk_buff *skb, struct sock *sk)
321{
322 struct dst_entry *dst = __sk_dst_check(sk, 0);
323
324 if (dst)
325 dst->ops->redirect(dst, sk, skb);
326}
327
338/* 328/*
339 * This routine is called by the ICMP module when it gets some 329 * This routine is called by the ICMP module when it gets some
340 * sort of error condition. If err < 0 then the socket should 330 * sort of error condition. If err < 0 then the socket should
@@ -386,8 +376,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
386 bh_lock_sock(sk); 376 bh_lock_sock(sk);
387 /* If too many ICMPs get dropped on busy 377 /* If too many ICMPs get dropped on busy
388 * servers this needs to be solved differently. 378 * servers this needs to be solved differently.
379 * We do take care of PMTU discovery (RFC1191) special case :
380 * we can receive locally generated ICMP messages while socket is held.
389 */ 381 */
390 if (sock_owned_by_user(sk)) 382 if (sock_owned_by_user(sk) &&
383 type != ICMP_DEST_UNREACH &&
384 code != ICMP_FRAG_NEEDED)
391 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 385 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
392 386
393 if (sk->sk_state == TCP_CLOSE) 387 if (sk->sk_state == TCP_CLOSE)
@@ -408,6 +402,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
408 } 402 }
409 403
410 switch (type) { 404 switch (type) {
405 case ICMP_REDIRECT:
406 do_redirect(icmp_skb, sk);
407 goto out;
411 case ICMP_SOURCE_QUENCH: 408 case ICMP_SOURCE_QUENCH:
412 /* Just silently ignore these. */ 409 /* Just silently ignore these. */
413 goto out; 410 goto out;
@@ -419,8 +416,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
419 goto out; 416 goto out;
420 417
421 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ 418 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
419 tp->mtu_info = info;
422 if (!sock_owned_by_user(sk)) 420 if (!sock_owned_by_user(sk))
423 do_pmtu_discovery(sk, iph, info); 421 tcp_v4_mtu_reduced(sk);
422 else
423 set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
424 goto out; 424 goto out;
425 } 425 }
426 426
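The handler now stashes the new MTU in tp->mtu_info and, if the socket is owned by user context, defers the work to tcp_release_cb() via a tsq_flags bit. A minimal userspace sketch of that set-bit-then-replay pattern (struct and names invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

enum { MTU_REDUCED_DEFERRED = 1u << 0 };

struct mini_sock {
	atomic_uint deferred;		/* events queued for lock release */
	unsigned int mtu_info;
	bool owned_by_user;
};

static void mtu_reduced(struct mini_sock *sk) { (void)sk; /* shrink path MTU */ }

static void icmp_frag_needed(struct mini_sock *sk, unsigned int mtu)
{
	sk->mtu_info = mtu;
	if (!sk->owned_by_user)
		mtu_reduced(sk);	/* safe to handle immediately */
	else
		atomic_fetch_or(&sk->deferred, MTU_REDUCED_DEFERRED);
}

static void release_cb(struct mini_sock *sk)
{
	if (atomic_exchange(&sk->deferred, 0) & MTU_REDUCED_DEFERRED)
		mtu_reduced(sk);	/* replay the deferred event */
}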
@@ -698,8 +698,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
698 698
699 net = dev_net(skb_dst(skb)->dev); 699 net = dev_net(skb_dst(skb)->dev);
700 arg.tos = ip_hdr(skb)->tos; 700 arg.tos = ip_hdr(skb)->tos;
701 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 701 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
702 &arg, arg.iov[0].iov_len); 702 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
703 703
704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -781,8 +781,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
781 if (oif) 781 if (oif)
782 arg.bound_dev_if = oif; 782 arg.bound_dev_if = oif;
783 arg.tos = tos; 783 arg.tos = tos;
784 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 784 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
785 &arg, arg.iov[0].iov_len); 785 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
786 786
787 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 787 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
788} 788}
@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 struct request_sock *req, 826 struct request_sock *req,
827 struct request_values *rvp, 827 struct request_values *rvp,
828 u16 queue_mapping) 828 u16 queue_mapping,
829 bool nocache)
829{ 830{
830 const struct inet_request_sock *ireq = inet_rsk(req); 831 const struct inet_request_sock *ireq = inet_rsk(req);
831 struct flowi4 fl4; 832 struct flowi4 fl4;
@@ -848,7 +849,6 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
848 err = net_xmit_eval(err); 849 err = net_xmit_eval(err);
849 } 850 }
850 851
851 dst_release(dst);
852 return err; 852 return err;
853} 853}
854 854
@@ -856,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
856 struct request_values *rvp) 856 struct request_values *rvp)
857{ 857{
858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0); 859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
860} 860}
861 861
862/* 862/*
@@ -1317,7 +1317,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1317 tcp_clear_options(&tmp_opt); 1317 tcp_clear_options(&tmp_opt);
1318 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1318 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1319 tmp_opt.user_mss = tp->rx_opt.user_mss; 1319 tmp_opt.user_mss = tp->rx_opt.user_mss;
1320 tcp_parse_options(skb, &tmp_opt, &hash_location, 0); 1320 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
1321 1321
1322 if (tmp_opt.cookie_plus > 0 && 1322 if (tmp_opt.cookie_plus > 0 &&
1323 tmp_opt.saw_tstamp && 1323 tmp_opt.saw_tstamp &&
@@ -1375,7 +1375,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1375 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1375 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376 req->cookie_ts = tmp_opt.tstamp_ok; 1376 req->cookie_ts = tmp_opt.tstamp_ok;
1377 } else if (!isn) { 1377 } else if (!isn) {
1378 struct inet_peer *peer = NULL;
1379 struct flowi4 fl4; 1378 struct flowi4 fl4;
1380 1379
1381 /* VJ's idea. We save last timestamp seen 1380 /* VJ's idea. We save last timestamp seen
@@ -1390,12 +1389,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1390 if (tmp_opt.saw_tstamp && 1389 if (tmp_opt.saw_tstamp &&
1391 tcp_death_row.sysctl_tw_recycle && 1390 tcp_death_row.sysctl_tw_recycle &&
1392 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && 1391 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1393 fl4.daddr == saddr && 1392 fl4.daddr == saddr) {
1394 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) { 1393 if (!tcp_peer_is_proven(req, dst, true)) {
1395 inet_peer_refcheck(peer);
1396 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1397 (s32)(peer->tcp_ts - req->ts_recent) >
1398 TCP_PAWS_WINDOW) {
1399 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1394 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1400 goto drop_and_release; 1395 goto drop_and_release;
1401 } 1396 }
@@ -1404,8 +1399,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1404 else if (!sysctl_tcp_syncookies && 1399 else if (!sysctl_tcp_syncookies &&
1405 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1400 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1406 (sysctl_max_syn_backlog >> 2)) && 1401 (sysctl_max_syn_backlog >> 2)) &&
1407 (!peer || !peer->tcp_ts_stamp) && 1402 !tcp_peer_is_proven(req, dst, false)) {
1408 (!dst || !dst_metric(dst, RTAX_RTT))) {
1409 /* Without syncookies last quarter of 1403 /* Without syncookies last quarter of
1410 * backlog is filled with destinations, 1404 * backlog is filled with destinations,
1411 * proven to be alive. 1405 * proven to be alive.
@@ -1425,7 +1419,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1425 1419
1426 if (tcp_v4_send_synack(sk, dst, req, 1420 if (tcp_v4_send_synack(sk, dst, req,
1427 (struct request_values *)&tmp_ext, 1421 (struct request_values *)&tmp_ext,
1428 skb_get_queue_mapping(skb)) || 1422 skb_get_queue_mapping(skb),
1423 want_cookie) ||
1429 want_cookie) 1424 want_cookie)
1430 goto drop_and_free; 1425 goto drop_and_free;
1431 1426
@@ -1622,7 +1617,19 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1622#endif 1617#endif
1623 1618
1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1619 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1620 struct dst_entry *dst = sk->sk_rx_dst;
1621
1625 sock_rps_save_rxhash(sk, skb); 1622 sock_rps_save_rxhash(sk, skb);
1623 if (dst) {
1624 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1625 dst->ops->check(dst, 0) == NULL) {
1626 dst_release(dst);
1627 sk->sk_rx_dst = NULL;
1628 }
1629 }
1630 if (unlikely(sk->sk_rx_dst == NULL))
1631 inet_sk_rx_dst_set(sk, skb);
1632
1626 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { 1633 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1627 rsk = sk; 1634 rsk = sk;
1628 goto reset; 1635 goto reset;
@@ -1672,6 +1679,44 @@ csum_err:
1672} 1679}
1673EXPORT_SYMBOL(tcp_v4_do_rcv); 1680EXPORT_SYMBOL(tcp_v4_do_rcv);
1674 1681
1682void tcp_v4_early_demux(struct sk_buff *skb)
1683{
1684 struct net *net = dev_net(skb->dev);
1685 const struct iphdr *iph;
1686 const struct tcphdr *th;
1687 struct sock *sk;
1688
1689 if (skb->pkt_type != PACKET_HOST)
1690 return;
1691
1692 if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1693 return;
1694
1695 iph = ip_hdr(skb);
1696 th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1697
1698 if (th->doff < sizeof(struct tcphdr) / 4)
1699 return;
1700
1701 sk = __inet_lookup_established(net, &tcp_hashinfo,
1702 iph->saddr, th->source,
1703 iph->daddr, ntohs(th->dest),
1704 skb->skb_iif);
1705 if (sk) {
1706 skb->sk = sk;
1707 skb->destructor = sock_edemux;
1708 if (sk->sk_state != TCP_TIME_WAIT) {
1709 struct dst_entry *dst = sk->sk_rx_dst;
1710
1711 if (dst)
1712 dst = dst_check(dst, 0);
1713 if (dst &&
1714 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1715 skb_dst_set_noref(skb, dst);
1716 }
1717 }
1718}
1719
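tcp_v4_early_demux() spends one established-hash lookup at ingress so both the owning socket and its cached input route are known before the stack would otherwise consult the routing tables. A toy userspace analogue of that single 4-tuple lookup (hypothetical structures, not the kernel's):

#include <stddef.h>
#include <stdint.h>

struct flow_key { uint32_t saddr, daddr; uint16_t sport, dport; };
struct flow {
	struct flow_key key;
	void *cached_dst;		/* stands in for sk->sk_rx_dst */
	struct flow *next;
};

#define FLOW_BUCKETS 1024
static struct flow *flow_tab[FLOW_BUCKETS];

static struct flow *early_demux(const struct flow_key *k)
{
	uint32_t h = (k->saddr ^ k->daddr ^
		      ((uint32_t)k->sport << 16 | k->dport)) % FLOW_BUCKETS;
	struct flow *f;

	for (f = flow_tab[h]; f; f = f->next)
		if (f->key.saddr == k->saddr && f->key.daddr == k->daddr &&
		    f->key.sport == k->sport && f->key.dport == k->dport)
			return f;	/* reuse f->cached_dst downstream */
	return NULL;
}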
1675/* 1720/*
1676 * From tcp_input.c 1721 * From tcp_input.c
1677 */ 1722 */
@@ -1821,40 +1866,10 @@ do_time_wait:
1821 goto discard_it; 1866 goto discard_it;
1822} 1867}
1823 1868
1824struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1825{
1826 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1827 struct inet_sock *inet = inet_sk(sk);
1828 struct inet_peer *peer;
1829
1830 if (!rt ||
1831 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1832 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1833 *release_it = true;
1834 } else {
1835 if (!rt->peer)
1836 rt_bind_peer(rt, inet->inet_daddr, 1);
1837 peer = rt->peer;
1838 *release_it = false;
1839 }
1840
1841 return peer;
1842}
1843EXPORT_SYMBOL(tcp_v4_get_peer);
1844
1845void *tcp_v4_tw_get_peer(struct sock *sk)
1846{
1847 const struct inet_timewait_sock *tw = inet_twsk(sk);
1848
1849 return inet_getpeer_v4(tw->tw_daddr, 1);
1850}
1851EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1852
1853static struct timewait_sock_ops tcp_timewait_sock_ops = { 1869static struct timewait_sock_ops tcp_timewait_sock_ops = {
1854 .twsk_obj_size = sizeof(struct tcp_timewait_sock), 1870 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1855 .twsk_unique = tcp_twsk_unique, 1871 .twsk_unique = tcp_twsk_unique,
1856 .twsk_destructor= tcp_twsk_destructor, 1872 .twsk_destructor= tcp_twsk_destructor,
1857 .twsk_getpeer = tcp_v4_tw_get_peer,
1858}; 1873};
1859 1874
1860const struct inet_connection_sock_af_ops ipv4_specific = { 1875const struct inet_connection_sock_af_ops ipv4_specific = {
@@ -1863,7 +1878,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
1863 .rebuild_header = inet_sk_rebuild_header, 1878 .rebuild_header = inet_sk_rebuild_header,
1864 .conn_request = tcp_v4_conn_request, 1879 .conn_request = tcp_v4_conn_request,
1865 .syn_recv_sock = tcp_v4_syn_recv_sock, 1880 .syn_recv_sock = tcp_v4_syn_recv_sock,
1866 .get_peer = tcp_v4_get_peer,
1867 .net_header_len = sizeof(struct iphdr), 1881 .net_header_len = sizeof(struct iphdr),
1868 .setsockopt = ip_setsockopt, 1882 .setsockopt = ip_setsockopt,
1869 .getsockopt = ip_getsockopt, 1883 .getsockopt = ip_getsockopt,
@@ -1953,6 +1967,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
1953 tp->cookie_values = NULL; 1967 tp->cookie_values = NULL;
1954 } 1968 }
1955 1969
1970 /* If socket is aborted during connect operation */
1971 tcp_free_fastopen_req(tp);
1972
1956 sk_sockets_allocated_dec(sk); 1973 sk_sockets_allocated_dec(sk);
1957 sock_release_memcg(sk); 1974 sock_release_memcg(sk);
1958} 1975}
@@ -2593,6 +2610,8 @@ struct proto tcp_prot = {
2593 .sendmsg = tcp_sendmsg, 2610 .sendmsg = tcp_sendmsg,
2594 .sendpage = tcp_sendpage, 2611 .sendpage = tcp_sendpage,
2595 .backlog_rcv = tcp_v4_do_rcv, 2612 .backlog_rcv = tcp_v4_do_rcv,
2613 .release_cb = tcp_release_cb,
2614 .mtu_reduced = tcp_v4_mtu_reduced,
2596 .hash = inet_hash, 2615 .hash = inet_hash,
2597 .unhash = inet_unhash, 2616 .unhash = inet_unhash,
2598 .get_port = inet_csk_get_port, 2617 .get_port = inet_csk_get_port,
@@ -2614,7 +2633,7 @@ struct proto tcp_prot = {
2614 .compat_setsockopt = compat_tcp_setsockopt, 2633 .compat_setsockopt = compat_tcp_setsockopt,
2615 .compat_getsockopt = compat_tcp_getsockopt, 2634 .compat_getsockopt = compat_tcp_getsockopt,
2616#endif 2635#endif
2617#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 2636#ifdef CONFIG_MEMCG_KMEM
2618 .init_cgroup = tcp_init_cgroup, 2637 .init_cgroup = tcp_init_cgroup,
2619 .destroy_cgroup = tcp_destroy_cgroup, 2638 .destroy_cgroup = tcp_destroy_cgroup,
2620 .proto_cgroup = tcp_proto_cgroup, 2639 .proto_cgroup = tcp_proto_cgroup,
@@ -2624,13 +2643,11 @@ EXPORT_SYMBOL(tcp_prot);
2624 2643
2625static int __net_init tcp_sk_init(struct net *net) 2644static int __net_init tcp_sk_init(struct net *net)
2626{ 2645{
2627 return inet_ctl_sock_create(&net->ipv4.tcp_sock, 2646 return 0;
2628 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2629} 2647}
2630 2648
2631static void __net_exit tcp_sk_exit(struct net *net) 2649static void __net_exit tcp_sk_exit(struct net *net)
2632{ 2650{
2633 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2634} 2651}
2635 2652
2636static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) 2653static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
new file mode 100644
index 000000000000..2288a6399e1e
--- /dev/null
+++ b/net/ipv4/tcp_metrics.c
@@ -0,0 +1,745 @@
1#include <linux/rcupdate.h>
2#include <linux/spinlock.h>
3#include <linux/jiffies.h>
4#include <linux/bootmem.h>
5#include <linux/module.h>
6#include <linux/cache.h>
7#include <linux/slab.h>
8#include <linux/init.h>
9#include <linux/tcp.h>
10#include <linux/hash.h>
11
12#include <net/inet_connection_sock.h>
13#include <net/net_namespace.h>
14#include <net/request_sock.h>
15#include <net/inetpeer.h>
16#include <net/sock.h>
17#include <net/ipv6.h>
18#include <net/dst.h>
19#include <net/tcp.h>
20
21int sysctl_tcp_nometrics_save __read_mostly;
22
23enum tcp_metric_index {
24 TCP_METRIC_RTT,
25 TCP_METRIC_RTTVAR,
26 TCP_METRIC_SSTHRESH,
27 TCP_METRIC_CWND,
28 TCP_METRIC_REORDERING,
29
30 /* Always last. */
31 TCP_METRIC_MAX,
32};
33
34struct tcp_fastopen_metrics {
35 u16 mss;
36 u16 syn_loss:10; /* Recurring Fast Open SYN losses */
37 unsigned long last_syn_loss; /* Last Fast Open SYN loss */
38 struct tcp_fastopen_cookie cookie;
39};
40
41struct tcp_metrics_block {
42 struct tcp_metrics_block __rcu *tcpm_next;
43 struct inetpeer_addr tcpm_addr;
44 unsigned long tcpm_stamp;
45 u32 tcpm_ts;
46 u32 tcpm_ts_stamp;
47 u32 tcpm_lock;
48 u32 tcpm_vals[TCP_METRIC_MAX];
49 struct tcp_fastopen_metrics tcpm_fastopen;
50};
51
52static bool tcp_metric_locked(struct tcp_metrics_block *tm,
53 enum tcp_metric_index idx)
54{
55 return tm->tcpm_lock & (1 << idx);
56}
57
58static u32 tcp_metric_get(struct tcp_metrics_block *tm,
59 enum tcp_metric_index idx)
60{
61 return tm->tcpm_vals[idx];
62}
63
64static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
65 enum tcp_metric_index idx)
66{
67 return msecs_to_jiffies(tm->tcpm_vals[idx]);
68}
69
70static void tcp_metric_set(struct tcp_metrics_block *tm,
71 enum tcp_metric_index idx,
72 u32 val)
73{
74 tm->tcpm_vals[idx] = val;
75}
76
77static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
78 enum tcp_metric_index idx,
79 u32 val)
80{
81 tm->tcpm_vals[idx] = jiffies_to_msecs(val);
82}
83
84static bool addr_same(const struct inetpeer_addr *a,
85 const struct inetpeer_addr *b)
86{
87 const struct in6_addr *a6, *b6;
88
89 if (a->family != b->family)
90 return false;
91 if (a->family == AF_INET)
92 return a->addr.a4 == b->addr.a4;
93
94 a6 = (const struct in6_addr *) &a->addr.a6[0];
95 b6 = (const struct in6_addr *) &b->addr.a6[0];
96
97 return ipv6_addr_equal(a6, b6);
98}
99
100struct tcpm_hash_bucket {
101 struct tcp_metrics_block __rcu *chain;
102};
103
104static DEFINE_SPINLOCK(tcp_metrics_lock);
105
106static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
107{
108 u32 val;
109
110 tm->tcpm_stamp = jiffies;
111
112 val = 0;
113 if (dst_metric_locked(dst, RTAX_RTT))
114 val |= 1 << TCP_METRIC_RTT;
115 if (dst_metric_locked(dst, RTAX_RTTVAR))
116 val |= 1 << TCP_METRIC_RTTVAR;
117 if (dst_metric_locked(dst, RTAX_SSTHRESH))
118 val |= 1 << TCP_METRIC_SSTHRESH;
119 if (dst_metric_locked(dst, RTAX_CWND))
120 val |= 1 << TCP_METRIC_CWND;
121 if (dst_metric_locked(dst, RTAX_REORDERING))
122 val |= 1 << TCP_METRIC_REORDERING;
123 tm->tcpm_lock = val;
124
125 tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
126 tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
127 tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
128 tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
129 tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
130 tm->tcpm_ts = 0;
131 tm->tcpm_ts_stamp = 0;
132 tm->tcpm_fastopen.mss = 0;
133 tm->tcpm_fastopen.syn_loss = 0;
134 tm->tcpm_fastopen.cookie.len = 0;
135}
136
137static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
138 struct inetpeer_addr *addr,
139 unsigned int hash,
140 bool reclaim)
141{
142 struct tcp_metrics_block *tm;
143 struct net *net;
144
145 spin_lock_bh(&tcp_metrics_lock);
146 net = dev_net(dst->dev);
147 if (unlikely(reclaim)) {
148 struct tcp_metrics_block *oldest;
149
150 oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
151 for (tm = rcu_dereference(oldest->tcpm_next); tm;
152 tm = rcu_dereference(tm->tcpm_next)) {
153 if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
154 oldest = tm;
155 }
156 tm = oldest;
157 } else {
158 tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
159 if (!tm)
160 goto out_unlock;
161 }
162 tm->tcpm_addr = *addr;
163
164 tcpm_suck_dst(tm, dst);
165
166 if (likely(!reclaim)) {
167 tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
168 rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
169 }
170
171out_unlock:
172 spin_unlock_bh(&tcp_metrics_lock);
173 return tm;
174}
175
176#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
177
178static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
179{
180 if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
181 tcpm_suck_dst(tm, dst);
182}
183
184#define TCP_METRICS_RECLAIM_DEPTH 5
185#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
186
187static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
188{
189 if (tm)
190 return tm;
191 if (depth > TCP_METRICS_RECLAIM_DEPTH)
192 return TCP_METRICS_RECLAIM_PTR;
193 return NULL;
194}
195
196static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
197 struct net *net, unsigned int hash)
198{
199 struct tcp_metrics_block *tm;
200 int depth = 0;
201
202 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
203 tm = rcu_dereference(tm->tcpm_next)) {
204 if (addr_same(&tm->tcpm_addr, addr))
205 break;
206 depth++;
207 }
208 return tcp_get_encode(tm, depth);
209}
210
211static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
212 struct dst_entry *dst)
213{
214 struct tcp_metrics_block *tm;
215 struct inetpeer_addr addr;
216 unsigned int hash;
217 struct net *net;
218
219 addr.family = req->rsk_ops->family;
220 switch (addr.family) {
221 case AF_INET:
222 addr.addr.a4 = inet_rsk(req)->rmt_addr;
223 hash = (__force unsigned int) addr.addr.a4;
224 break;
225 case AF_INET6:
226 *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
227 hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
228 break;
229 default:
230 return NULL;
231 }
232
233 net = dev_net(dst->dev);
234 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
235
236 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
237 tm = rcu_dereference(tm->tcpm_next)) {
238 if (addr_same(&tm->tcpm_addr, &addr))
239 break;
240 }
241 tcpm_check_stamp(tm, dst);
242 return tm;
243}
244
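Both the request and socket lookups fold the peer address to a u32 and spread it with hash_32(), multiplicative hashing that keeps the top tcp_metrics_hash_log bits. Roughly as follows (the golden-ratio prime below is the assumed kernel constant of this era):

#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001u	/* assumed, cf. linux/hash.h */

static inline uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiply by the prime, keep the high-order 'bits' bits */
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}
/* bucket = hash_32(folded_addr, net->ipv4.tcp_metrics_hash_log) */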
245static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
246{
247 struct inet6_timewait_sock *tw6;
248 struct tcp_metrics_block *tm;
249 struct inetpeer_addr addr;
250 unsigned int hash;
251 struct net *net;
252
253 addr.family = tw->tw_family;
254 switch (addr.family) {
255 case AF_INET:
256 addr.addr.a4 = tw->tw_daddr;
257 hash = (__force unsigned int) addr.addr.a4;
258 break;
259 case AF_INET6:
260 tw6 = inet6_twsk((struct sock *)tw);
261 *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
262 hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
263 break;
264 default:
265 return NULL;
266 }
267
268 net = twsk_net(tw);
269 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
270
271 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
272 tm = rcu_dereference(tm->tcpm_next)) {
273 if (addr_same(&tm->tcpm_addr, &addr))
274 break;
275 }
276 return tm;
277}
278
279static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
280 struct dst_entry *dst,
281 bool create)
282{
283 struct tcp_metrics_block *tm;
284 struct inetpeer_addr addr;
285 unsigned int hash;
286 struct net *net;
287 bool reclaim;
288
289 addr.family = sk->sk_family;
290 switch (addr.family) {
291 case AF_INET:
292 addr.addr.a4 = inet_sk(sk)->inet_daddr;
293 hash = (__force unsigned int) addr.addr.a4;
294 break;
295 case AF_INET6:
296 *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
297 hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
298 break;
299 default:
300 return NULL;
301 }
302
303 net = dev_net(dst->dev);
304 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
305
306 tm = __tcp_get_metrics(&addr, net, hash);
307 reclaim = false;
308 if (tm == TCP_METRICS_RECLAIM_PTR) {
309 reclaim = true;
310 tm = NULL;
311 }
312 if (!tm && create)
313 tm = tcpm_new(dst, &addr, hash, reclaim);
314 else
315 tcpm_check_stamp(tm, dst);
316
317 return tm;
318}
319
320/* Save metrics learned by this TCP session. This function is called
321 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
322 * or goes from LAST-ACK to CLOSE.
323 */
324void tcp_update_metrics(struct sock *sk)
325{
326 const struct inet_connection_sock *icsk = inet_csk(sk);
327 struct dst_entry *dst = __sk_dst_get(sk);
328 struct tcp_sock *tp = tcp_sk(sk);
329 struct tcp_metrics_block *tm;
330 unsigned long rtt;
331 u32 val;
332 int m;
333
334 if (sysctl_tcp_nometrics_save || !dst)
335 return;
336
337 if (dst->flags & DST_HOST)
338 dst_confirm(dst);
339
340 rcu_read_lock();
341 if (icsk->icsk_backoff || !tp->srtt) {
342 /* This session failed to estimate rtt. Why?
343 * Probably, no packets returned in time. Reset our
344 * results.
345 */
346 tm = tcp_get_metrics(sk, dst, false);
347 if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
348 tcp_metric_set(tm, TCP_METRIC_RTT, 0);
349 goto out_unlock;
350 } else
351 tm = tcp_get_metrics(sk, dst, true);
352
353 if (!tm)
354 goto out_unlock;
355
356 rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
357 m = rtt - tp->srtt;
358
359 /* If the newly calculated rtt is larger than the stored one, store the new
360 * one. Otherwise, use EWMA. Remember, rtt overestimation is
361 * always better than underestimation.
362 */
363 if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
364 if (m <= 0)
365 rtt = tp->srtt;
366 else
367 rtt -= (m >> 3);
368 tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
369 }
370
371 if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
372 unsigned long var;
373
374 if (m < 0)
375 m = -m;
376
377 /* Scale deviation to rttvar fixed point */
378 m >>= 1;
379 if (m < tp->mdev)
380 m = tp->mdev;
381
382 var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
383 if (m >= var)
384 var = m;
385 else
386 var -= (var - m) >> 2;
387
388 tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
389 }
390
391 if (tcp_in_initial_slowstart(tp)) {
392 /* Slow start still did not finish. */
393 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
394 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
395 if (val && (tp->snd_cwnd >> 1) > val)
396 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
397 tp->snd_cwnd >> 1);
398 }
399 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
400 val = tcp_metric_get(tm, TCP_METRIC_CWND);
401 if (tp->snd_cwnd > val)
402 tcp_metric_set(tm, TCP_METRIC_CWND,
403 tp->snd_cwnd);
404 }
405 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
406 icsk->icsk_ca_state == TCP_CA_Open) {
407 /* Cong. avoidance phase, cwnd is reliable. */
408 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
409 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
410 max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
411 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
412 val = tcp_metric_get(tm, TCP_METRIC_CWND);
413 tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
414 }
415 } else {
416 /* Else slow start did not finish, cwnd is nonsense and
417 * ssthresh may also be invalid.
418 */
419 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
420 val = tcp_metric_get(tm, TCP_METRIC_CWND);
421 tcp_metric_set(tm, TCP_METRIC_CWND,
422 (val + tp->snd_ssthresh) >> 1);
423 }
424 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
425 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
426 if (val && tp->snd_ssthresh > val)
427 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
428 tp->snd_ssthresh);
429 }
430 if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
431 val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
432 if (val < tp->reordering &&
433 tp->reordering != sysctl_tcp_reordering)
434 tcp_metric_set(tm, TCP_METRIC_REORDERING,
435 tp->reordering);
436 }
437 }
438 tm->tcpm_stamp = jiffies;
439out_unlock:
440 rcu_read_unlock();
441}
442
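The RTT branch above prefers overestimation: a fresh srtt that exceeds the cached value replaces it outright, while a smaller one only pulls the cache down by 1/8 of the gap. The rule in isolation (milliseconds, as the metric is stored):

#include <stdint.h>

static uint32_t update_cached_rtt(uint32_t cached, uint32_t srtt)
{
	int32_t m = (int32_t)(cached - srtt);

	if (m <= 0)
		return srtt;		/* new estimate is larger: take it */
	return cached - (uint32_t)(m >> 3);	/* decay 1/8 toward srtt */
}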
443/* Initialize metrics on socket. */
444
445void tcp_init_metrics(struct sock *sk)
446{
447 struct dst_entry *dst = __sk_dst_get(sk);
448 struct tcp_sock *tp = tcp_sk(sk);
449 struct tcp_metrics_block *tm;
450 u32 val;
451
452 if (dst == NULL)
453 goto reset;
454
455 dst_confirm(dst);
456
457 rcu_read_lock();
458 tm = tcp_get_metrics(sk, dst, true);
459 if (!tm) {
460 rcu_read_unlock();
461 goto reset;
462 }
463
464 if (tcp_metric_locked(tm, TCP_METRIC_CWND))
465 tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
466
467 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
468 if (val) {
469 tp->snd_ssthresh = val;
470 if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
471 tp->snd_ssthresh = tp->snd_cwnd_clamp;
472 } else {
473 /* ssthresh may have been reduced unnecessarily during the
474 * 3WHS. Restore it to its initial default.
475 */
476 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
477 }
478 val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
479 if (val && tp->reordering != val) {
480 tcp_disable_fack(tp);
481 tcp_disable_early_retrans(tp);
482 tp->reordering = val;
483 }
484
485 val = tcp_metric_get(tm, TCP_METRIC_RTT);
486 if (val == 0 || tp->srtt == 0) {
487 rcu_read_unlock();
488 goto reset;
489 }
490 /* Initial rtt is determined from SYN,SYN-ACK.
491 * The segment is small and rtt may appear much
492 * less than real one. Use per-dst memory
493 * to make it more realistic.
494 *
495 * A bit of theory. RTT is time passed after "normal" sized packet
496 * is sent until it is ACKed. In normal circumstances sending small
497 * packets force peer to delay ACKs and calculation is correct too.
498 * The algorithm is adaptive and, provided we follow specs, it
499 * NEVER underestimate RTT. BUT! If peer tries to make some clever
500 * tricks sort of "quick acks" for time long enough to decrease RTT
501 * to a low value, and then abruptly stops doing it and starts delaying
502 * ACKs, expect trouble.
503 */
504 val = msecs_to_jiffies(val);
505 if (val > tp->srtt) {
506 tp->srtt = val;
507 tp->rtt_seq = tp->snd_nxt;
508 }
509 val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
510 if (val > tp->mdev) {
511 tp->mdev = val;
512 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
513 }
514 rcu_read_unlock();
515
516 tcp_set_rto(sk);
517reset:
518 if (tp->srtt == 0) {
519 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
520 * 3WHS. This is most likely due to retransmission,
521 * including spurious one. Reset the RTO back to 3secs
522 * from the more aggressive 1sec to avoid more spurious
523 * retransmission.
524 */
525 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
526 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
527 }
528 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
529 * retransmitted. In light of RFC6298 more aggressive 1sec
530 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
531 * retransmission has occurred.
532 */
533 if (tp->total_retrans > 1)
534 tp->snd_cwnd = 1;
535 else
536 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
537 tp->snd_cwnd_stamp = tcp_time_stamp;
538}
539
540bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
541{
542 struct tcp_metrics_block *tm;
543 bool ret;
544
545 if (!dst)
546 return false;
547
548 rcu_read_lock();
549 tm = __tcp_get_metrics_req(req, dst);
550 if (paws_check) {
551 if (tm &&
552 (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
553 (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
554 ret = false;
555 else
556 ret = true;
557 } else {
558 if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
559 ret = true;
560 else
561 ret = false;
562 }
563 rcu_read_unlock();
564
565 return ret;
566}
567EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
568
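The paws_check branch of tcp_peer_is_proven() refuses a new connection when a recent, larger timestamp is on record for the peer. The core test lifted into standalone form, with TCP_PAWS_MSL and TCP_PAWS_WINDOW assumed to be 60 seconds and 1 respectively:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define PAWS_MSL	60	/* seconds a cached stamp stays authoritative */
#define PAWS_WINDOW	1	/* tolerated timestamp slack */

static bool paws_reject(uint32_t cached_ts, uint32_t cached_ts_stamp,
			uint32_t req_ts_recent)
{
	uint32_t now = (uint32_t)time(NULL);

	return (now - cached_ts_stamp) < PAWS_MSL &&
	       (int32_t)(cached_ts - req_ts_recent) > PAWS_WINDOW;
}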
569void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
570{
571 struct tcp_metrics_block *tm;
572
573 rcu_read_lock();
574 tm = tcp_get_metrics(sk, dst, true);
575 if (tm) {
576 struct tcp_sock *tp = tcp_sk(sk);
577
578 if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
579 tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
580 tp->rx_opt.ts_recent = tm->tcpm_ts;
581 }
582 }
583 rcu_read_unlock();
584}
585EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
586
587/* VJ's idea. Save last timestamp seen from this destination and hold
588 * it at least for normal timewait interval to use for duplicate
589 * segment detection in subsequent connections, before they enter
590 * synchronized state.
591 */
592bool tcp_remember_stamp(struct sock *sk)
593{
594 struct dst_entry *dst = __sk_dst_get(sk);
595 bool ret = false;
596
597 if (dst) {
598 struct tcp_metrics_block *tm;
599
600 rcu_read_lock();
601 tm = tcp_get_metrics(sk, dst, true);
602 if (tm) {
603 struct tcp_sock *tp = tcp_sk(sk);
604
605 if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
606 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
607 tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
608 tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
609 tm->tcpm_ts = tp->rx_opt.ts_recent;
610 }
611 ret = true;
612 }
613 rcu_read_unlock();
614 }
615 return ret;
616}
617
618bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
619{
620 struct tcp_metrics_block *tm;
621 bool ret = false;
622
623 rcu_read_lock();
624 tm = __tcp_get_metrics_tw(tw);
625 if (tm) {
626 const struct tcp_timewait_sock *tcptw;
627 struct sock *sk = (struct sock *) tw;
628
629 tcptw = tcp_twsk(sk);
630 if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
631 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
632 tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
633 tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
634 tm->tcpm_ts = tcptw->tw_ts_recent;
635 }
636 ret = true;
637 }
638 rcu_read_unlock();
639
640 return ret;
641}
642
643static DEFINE_SEQLOCK(fastopen_seqlock);
644
645void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
646 struct tcp_fastopen_cookie *cookie,
647 int *syn_loss, unsigned long *last_syn_loss)
648{
649 struct tcp_metrics_block *tm;
650
651 rcu_read_lock();
652 tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
653 if (tm) {
654 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
655 unsigned int seq;
656
657 do {
658 seq = read_seqbegin(&fastopen_seqlock);
659 if (tfom->mss)
660 *mss = tfom->mss;
661 *cookie = tfom->cookie;
662 *syn_loss = tfom->syn_loss;
663 *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
664 } while (read_seqretry(&fastopen_seqlock, seq));
665 }
666 rcu_read_unlock();
667}
668
669void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
670 struct tcp_fastopen_cookie *cookie, bool syn_lost)
671{
672 struct tcp_metrics_block *tm;
673
674 rcu_read_lock();
675 tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
676 if (tm) {
677 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
678
679 write_seqlock_bh(&fastopen_seqlock);
680 tfom->mss = mss;
681 if (cookie->len > 0)
682 tfom->cookie = *cookie;
683 if (syn_lost) {
684 ++tfom->syn_loss;
685 tfom->last_syn_loss = jiffies;
686 } else
687 tfom->syn_loss = 0;
688 write_sequnlock_bh(&fastopen_seqlock);
689 }
690 rcu_read_unlock();
691}
692
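fastopen_seqlock pairs the retry loop in tcp_fastopen_cache_get() with a writer that moves the sequence to odd before touching the fields and back to even afterwards. A simplified userspace rendition of that pairing (the kernel primitives also supply barriers this sketch only approximates with C11 acquire/release):

#include <stdatomic.h>
#include <stdint.h>

struct fo_cache {
	atomic_uint seq;	/* even = stable, odd = write in flight */
	uint16_t mss;
};

static void fo_write(struct fo_cache *c, uint16_t mss)
{
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_acq_rel);
	c->mss = mss;		/* ... cookie and loss fields likewise ... */
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_release);
}

static uint16_t fo_read_mss(struct fo_cache *c)
{
	unsigned int s;
	uint16_t mss;

	do {
		s = atomic_load_explicit(&c->seq, memory_order_acquire);
		mss = c->mss;
	} while ((s & 1) ||
		 s != atomic_load_explicit(&c->seq, memory_order_acquire));
	return mss;
}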
693static unsigned int tcpmhash_entries;
694static int __init set_tcpmhash_entries(char *str)
695{
696 ssize_t ret;
697
698 if (!str)
699 return 0;
700
701 ret = kstrtouint(str, 0, &tcpmhash_entries);
702 if (ret)
703 return 0;
704
705 return 1;
706}
707__setup("tcpmhash_entries=", set_tcpmhash_entries);
708
709static int __net_init tcp_net_metrics_init(struct net *net)
710{
711 size_t size;
712 unsigned int slots;
713
714 slots = tcpmhash_entries;
715 if (!slots) {
716 if (totalram_pages >= 128 * 1024)
717 slots = 16 * 1024;
718 else
719 slots = 8 * 1024;
720 }
721
722 net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
723 size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
724
725 net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
726 if (!net->ipv4.tcp_metrics_hash)
727 return -ENOMEM;
728
729 return 0;
730}
731
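Sizing picks 8K or 16K slots from total memory (overridable at boot with tcpmhash_entries=) and rounds up to a power of two so a lookup can take hash_32() output directly. The arithmetic in isolation:

#include <stdio.h>

static unsigned int order_base_2(unsigned int n)	/* ceil(log2(n)) */
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int slots = 16 * 1024;		/* large-memory default */
	unsigned int log2sz = order_base_2(slots);

	printf("hash_log=%u buckets=%u\n", log2sz, 1u << log2sz);
	return 0;
}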
732static void __net_exit tcp_net_metrics_exit(struct net *net)
733{
734 kfree(net->ipv4.tcp_metrics_hash);
735}
736
737static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
738 .init = tcp_net_metrics_init,
739 .exit = tcp_net_metrics_exit,
740};
741
742void __init tcp_metrics_init(void)
743{
744 register_pernet_subsys(&tcp_net_metrics_ops);
745}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b85d9fe7d663..232a90c3ec86 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,56 +49,6 @@ struct inet_timewait_death_row tcp_death_row = {
49}; 49};
50EXPORT_SYMBOL_GPL(tcp_death_row); 50EXPORT_SYMBOL_GPL(tcp_death_row);
51 51
52/* VJ's idea. Save last timestamp seen from this destination
53 * and hold it at least for normal timewait interval to use for duplicate
54 * segment detection in subsequent connections, before they enter synchronized
55 * state.
56 */
57
58static bool tcp_remember_stamp(struct sock *sk)
59{
60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct tcp_sock *tp = tcp_sk(sk);
62 struct inet_peer *peer;
63 bool release_it;
64
65 peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
66 if (peer) {
67 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
68 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
69 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
70 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
71 peer->tcp_ts = tp->rx_opt.ts_recent;
72 }
73 if (release_it)
74 inet_putpeer(peer);
75 return true;
76 }
77
78 return false;
79}
80
81static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
82{
83 struct sock *sk = (struct sock *) tw;
84 struct inet_peer *peer;
85
86 peer = twsk_getpeer(sk);
87 if (peer) {
88 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
89
90 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
91 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
92 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
93 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
94 peer->tcp_ts = tcptw->tw_ts_recent;
95 }
96 inet_putpeer(peer);
97 return true;
98 }
99 return false;
100}
101
102static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 52static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
103{ 53{
104 if (seq == s_win) 54 if (seq == s_win)
@@ -147,7 +97,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
147 97
148 tmp_opt.saw_tstamp = 0; 98 tmp_opt.saw_tstamp = 0;
149 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 99 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
150 tcp_parse_options(skb, &tmp_opt, &hash_location, 0); 100 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
151 101
152 if (tmp_opt.saw_tstamp) { 102 if (tmp_opt.saw_tstamp) {
153 tmp_opt.ts_recent = tcptw->tw_ts_recent; 103 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -327,8 +277,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
327 if (tw != NULL) { 277 if (tw != NULL) {
328 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 278 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
329 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 279 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
280 struct inet_sock *inet = inet_sk(sk);
330 281
331 tw->tw_transparent = inet_sk(sk)->transparent; 282 tw->tw_transparent = inet->transparent;
332 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; 283 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
333 tcptw->tw_rcv_nxt = tp->rcv_nxt; 284 tcptw->tw_rcv_nxt = tp->rcv_nxt;
334 tcptw->tw_snd_nxt = tp->snd_nxt; 285 tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -403,6 +354,7 @@ void tcp_twsk_destructor(struct sock *sk)
403{ 354{
404#ifdef CONFIG_TCP_MD5SIG 355#ifdef CONFIG_TCP_MD5SIG
405 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 356 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
357
406 if (twsk->tw_md5_key) { 358 if (twsk->tw_md5_key) {
407 tcp_free_md5sig_pool(); 359 tcp_free_md5sig_pool();
408 kfree_rcu(twsk->tw_md5_key, rcu); 360 kfree_rcu(twsk->tw_md5_key, rcu);
@@ -435,6 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
435 struct tcp_sock *oldtp = tcp_sk(sk); 387 struct tcp_sock *oldtp = tcp_sk(sk);
436 struct tcp_cookie_values *oldcvp = oldtp->cookie_values; 388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
437 389
390 inet_sk_rx_dst_set(newsk, skb);
391
438 /* TCP Cookie Transactions require space for the cookie pair, 392 /* TCP Cookie Transactions require space for the cookie pair,
439 * as it differs for each connection. There is no need to 393 * as it differs for each connection. There is no need to
440 * copy any s_data_payload stored at the original socket. 394 * copy any s_data_payload stored at the original socket.
@@ -470,6 +424,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
470 treq->snt_isn + 1 + tcp_s_data_size(oldtp); 424 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
471 425
472 tcp_prequeue_init(newtp); 426 tcp_prequeue_init(newtp);
427 INIT_LIST_HEAD(&newtp->tsq_node);
473 428
474 tcp_init_wl(newtp, treq->rcv_isn); 429 tcp_init_wl(newtp, treq->rcv_isn);
475 430
@@ -579,7 +534,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
579 534
580 tmp_opt.saw_tstamp = 0; 535 tmp_opt.saw_tstamp = 0;
581 if (th->doff > (sizeof(struct tcphdr)>>2)) { 536 if (th->doff > (sizeof(struct tcphdr)>>2)) {
582 tcp_parse_options(skb, &tmp_opt, &hash_location, 0); 537 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
583 538
584 if (tmp_opt.saw_tstamp) { 539 if (tmp_opt.saw_tstamp) {
585 tmp_opt.ts_recent = req->ts_recent; 540 tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 803cbfe82fbc..3f1bcff0b10b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -50,6 +50,9 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1;
50 */ 50 */
51int sysctl_tcp_workaround_signed_windows __read_mostly = 0; 51int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
52 52
53/* Default TSQ limit of two TSO segments */
54int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
55
53/* This limits the percentage of the congestion window which we 56/* This limits the percentage of the congestion window which we
54 * will allow a single TSO frame to consume. Building TSO frames 57 * will allow a single TSO frame to consume. Building TSO frames
55 * which are too large can cause TCP streams to be bursty. 58 * which are too large can cause TCP streams to be bursty.
@@ -65,6 +68,8 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
65int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */ 68int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
66EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size); 69EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
67 70
71static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
72 int push_one, gfp_t gfp);
68 73
69/* Account for new data that has been sent to the network. */ 74/* Account for new data that has been sent to the network. */
70static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) 75static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
@@ -380,15 +385,17 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
380#define OPTION_MD5 (1 << 2) 385#define OPTION_MD5 (1 << 2)
381#define OPTION_WSCALE (1 << 3) 386#define OPTION_WSCALE (1 << 3)
382#define OPTION_COOKIE_EXTENSION (1 << 4) 387#define OPTION_COOKIE_EXTENSION (1 << 4)
388#define OPTION_FAST_OPEN_COOKIE (1 << 8)
383 389
384struct tcp_out_options { 390struct tcp_out_options {
385 u8 options; /* bit field of OPTION_* */ 391 u16 options; /* bit field of OPTION_* */
392 u16 mss; /* 0 to disable */
386 u8 ws; /* window scale, 0 to disable */ 393 u8 ws; /* window scale, 0 to disable */
387 u8 num_sack_blocks; /* number of SACK blocks to include */ 394 u8 num_sack_blocks; /* number of SACK blocks to include */
388 u8 hash_size; /* bytes in hash_location */ 395 u8 hash_size; /* bytes in hash_location */
389 u16 mss; /* 0 to disable */
390 __u32 tsval, tsecr; /* need to include OPTION_TS */
391 __u8 *hash_location; /* temporary pointer, overloaded */ 396 __u8 *hash_location; /* temporary pointer, overloaded */
397 __u32 tsval, tsecr; /* need to include OPTION_TS */
398 struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
392}; 399};
393 400
394/* The sysctl int routines are generic, so check consistency here. 401/* The sysctl int routines are generic, so check consistency here.
@@ -437,7 +444,7 @@ static u8 tcp_cookie_size_check(u8 desired)
437static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 444static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
438 struct tcp_out_options *opts) 445 struct tcp_out_options *opts)
439{ 446{
440 u8 options = opts->options; /* mungable copy */ 447 u16 options = opts->options; /* mungable copy */
441 448
442 /* Having both authentication and cookies for security is redundant, 449 /* Having both authentication and cookies for security is redundant,
443 * and there's certainly not enough room. Instead, the cookie-less 450 * and there's certainly not enough room. Instead, the cookie-less
@@ -559,6 +566,21 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
559 566
560 tp->rx_opt.dsack = 0; 567 tp->rx_opt.dsack = 0;
561 } 568 }
569
570 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
571 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
572
573 *ptr++ = htonl((TCPOPT_EXP << 24) |
574 ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
575 TCPOPT_FASTOPEN_MAGIC);
576
577 memcpy(ptr, foc->val, foc->len);
578 if ((foc->len & 3) == 2) {
579 u8 *align = ((u8 *)ptr) + foc->len;
580 align[0] = align[1] = TCPOPT_NOP;
581 }
582 ptr += (foc->len + 3) >> 2;
583 }
562} 584}
563 585
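
For reference, the experimental Fast Open option emitted above is laid out as: kind TCPOPT_EXP (254), a length byte covering the two-byte magic 0xF989 plus the cookie, and two NOPs whenever the cookie length leaves a 2-byte remainder (the length byte does not count the padding). A minimal user-space sketch of that encoding; the constants are assumed to match the kernel headers, so treat this as an illustration rather than the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

#define TCPOPT_NOP                1
#define TCPOPT_EXP                254	/* experimental option kind */
#define TCPOPT_FASTOPEN_MAGIC     0xF989
#define TCPOLEN_EXP_FASTOPEN_BASE 4	/* kind + len + 2-byte magic */

/* Write the experimental Fast Open option into buf, mirroring the
 * padding rule above: cookie lengths that leave a 2-byte remainder
 * get two trailing NOPs.  Returns the number of 32-bit words used. */
static int write_fastopen_option(uint32_t *buf, const uint8_t *cookie, int len)
{
	uint8_t *pad;

	buf[0] = htonl((TCPOPT_EXP << 24) |
		       ((TCPOLEN_EXP_FASTOPEN_BASE + len) << 16) |
		       TCPOPT_FASTOPEN_MAGIC);
	memcpy(buf + 1, cookie, len);
	if ((len & 3) == 2) {		/* pad out to a 32-bit boundary */
		pad = (uint8_t *)(buf + 1) + len;
		pad[0] = pad[1] = TCPOPT_NOP;
	}
	return 1 + ((len + 3) >> 2);
}

int main(void)
{
	uint8_t cookie[8] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };
	uint32_t buf[4];
	int words = write_fastopen_option(buf, cookie, sizeof(cookie));
	const uint8_t *b = (const uint8_t *)buf;

	for (int i = 0; i < words * 4; i++)
		printf("%02x ", b[i]);
	printf("\n");	/* fe 0c f9 89 de ad be ef 01 02 03 04 */
	return 0;
}
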
564/* Compute TCP options for SYN packets. This is not the final 586/* Compute TCP options for SYN packets. This is not the final
@@ -574,6 +596,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
574 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 596 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
575 tcp_cookie_size_check(cvp->cookie_desired) : 597 tcp_cookie_size_check(cvp->cookie_desired) :
576 0; 598 0;
599 struct tcp_fastopen_request *fastopen = tp->fastopen_req;
577 600
578#ifdef CONFIG_TCP_MD5SIG 601#ifdef CONFIG_TCP_MD5SIG
579 *md5 = tp->af_specific->md5_lookup(sk, sk); 602 *md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -614,6 +637,16 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
614 remaining -= TCPOLEN_SACKPERM_ALIGNED; 637 remaining -= TCPOLEN_SACKPERM_ALIGNED;
615 } 638 }
616 639
640 if (fastopen && fastopen->cookie.len >= 0) {
641 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
642 need = (need + 3) & ~3U; /* Align to 32 bits */
643 if (remaining >= need) {
644 opts->options |= OPTION_FAST_OPEN_COOKIE;
645 opts->fastopen_cookie = &fastopen->cookie;
646 remaining -= need;
647 tp->syn_fastopen = 1;
648 }
649 }
617 /* Note that timestamps are required by the specification. 650 /* Note that timestamps are required by the specification.
618 * 651 *
619 * Odd numbers of bytes are prohibited by the specification, ensuring 652 * Odd numbers of bytes are prohibited by the specification, ensuring
@@ -783,6 +816,156 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
783 return size; 816 return size;
784} 817}
785 818
819
820/* TCP SMALL QUEUES (TSQ)
821 *
 822 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
823 * to reduce RTT and bufferbloat.
824 * We do this using a special skb destructor (tcp_wfree).
825 *
 826 * It's important that tcp_wfree() can be replaced by sock_wfree() in the event the skb
 827 * needs to be reallocated in a driver.
 828 * The invariant is that skb->truesize has been subtracted from sk->sk_wmem_alloc
829 *
830 * Since transmit from skb destructor is forbidden, we use a tasklet
831 * to process all sockets that eventually need to send more skbs.
832 * We use one tasklet per cpu, with its own queue of sockets.
833 */
834struct tsq_tasklet {
835 struct tasklet_struct tasklet;
836 struct list_head head; /* queue of tcp sockets */
837};
838static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
839
840static void tcp_tsq_handler(struct sock *sk)
841{
842 if ((1 << sk->sk_state) &
843 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
844 TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
845 tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
846}
847/*
 848 * One tasklet per cpu tries to send more skbs.
 849 * We run in tasklet context but need to disable irqs when
 850 * transferring tsq->head because tcp_wfree() might
 851 * interrupt us (non-NAPI drivers)
852 */
853static void tcp_tasklet_func(unsigned long data)
854{
855 struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
856 LIST_HEAD(list);
857 unsigned long flags;
858 struct list_head *q, *n;
859 struct tcp_sock *tp;
860 struct sock *sk;
861
862 local_irq_save(flags);
863 list_splice_init(&tsq->head, &list);
864 local_irq_restore(flags);
865
866 list_for_each_safe(q, n, &list) {
867 tp = list_entry(q, struct tcp_sock, tsq_node);
868 list_del(&tp->tsq_node);
869
870 sk = (struct sock *)tp;
871 bh_lock_sock(sk);
872
873 if (!sock_owned_by_user(sk)) {
874 tcp_tsq_handler(sk);
875 } else {
876 /* defer the work to tcp_release_cb() */
877 set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
878 }
879 bh_unlock_sock(sk);
880
881 clear_bit(TSQ_QUEUED, &tp->tsq_flags);
882 sk_free(sk);
883 }
884}
885
886#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
887 (1UL << TCP_WRITE_TIMER_DEFERRED) | \
888 (1UL << TCP_DELACK_TIMER_DEFERRED) | \
889 (1UL << TCP_MTU_REDUCED_DEFERRED))
890/**
891 * tcp_release_cb - tcp release_sock() callback
892 * @sk: socket
893 *
 894 * called from release_sock() to perform protocol-dependent
 895 * actions before socket release.
896 */
897void tcp_release_cb(struct sock *sk)
898{
899 struct tcp_sock *tp = tcp_sk(sk);
900 unsigned long flags, nflags;
901
902 /* perform an atomic operation only if at least one flag is set */
903 do {
904 flags = tp->tsq_flags;
905 if (!(flags & TCP_DEFERRED_ALL))
906 return;
907 nflags = flags & ~TCP_DEFERRED_ALL;
908 } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
909
910 if (flags & (1UL << TCP_TSQ_DEFERRED))
911 tcp_tsq_handler(sk);
912
913 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
914 tcp_write_timer_handler(sk);
915
916 if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
917 tcp_delack_timer_handler(sk);
918
919 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
920 sk->sk_prot->mtu_reduced(sk);
921}
922EXPORT_SYMBOL(tcp_release_cb);
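
The cmpxchg() loop in tcp_release_cb() above claims every pending deferred bit in one atomic step, so concurrent timer handlers can keep setting bits without any being lost. Here is the same claim pattern in portable C11 atomics, as a user-space analogue rather than the kernel primitive:

#include <stdatomic.h>
#include <stdio.h>

#define DEFERRED_ALL 0xfUL	/* analogue of TCP_DEFERRED_ALL */

static atomic_ulong tsq_flags;

/* Atomically grab-and-clear every deferred bit; returns the claimed set.
 * On CAS failure another context changed the flags, and the freshly
 * observed value is retried (C11 updates 'flags' for us). */
static unsigned long claim_deferred(void)
{
	unsigned long flags = atomic_load(&tsq_flags);

	do {
		if (!(flags & DEFERRED_ALL))
			return 0;	/* nothing pending, no atomic write */
	} while (!atomic_compare_exchange_weak(&tsq_flags, &flags,
					       flags & ~DEFERRED_ALL));
	return flags & DEFERRED_ALL;
}

int main(void)
{
	atomic_fetch_or(&tsq_flags, 0x5UL);	/* two timers set their bits */
	printf("claimed 0x%lx\n", claim_deferred());	/* 0x5 */
	printf("claimed 0x%lx\n", claim_deferred());	/* 0x0 */
	return 0;
}
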
923
924void __init tcp_tasklet_init(void)
925{
926 int i;
927
928 for_each_possible_cpu(i) {
929 struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
930
931 INIT_LIST_HEAD(&tsq->head);
932 tasklet_init(&tsq->tasklet,
933 tcp_tasklet_func,
934 (unsigned long)tsq);
935 }
936}
937
938/*
939 * Write buffer destructor automatically called from kfree_skb.
 940 * We can't xmit new skbs from this context, as we might already
 941 * hold the qdisc lock.
942 */
943void tcp_wfree(struct sk_buff *skb)
944{
945 struct sock *sk = skb->sk;
946 struct tcp_sock *tp = tcp_sk(sk);
947
948 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
949 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
950 unsigned long flags;
951 struct tsq_tasklet *tsq;
952
953 /* Keep a ref on socket.
954 * This last ref will be released in tcp_tasklet_func()
955 */
956 atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
957
958 /* queue this socket to tasklet queue */
959 local_irq_save(flags);
960 tsq = &__get_cpu_var(tsq_tasklet);
961 list_add(&tp->tsq_node, &tsq->head);
962 tasklet_schedule(&tsq->tasklet);
963 local_irq_restore(flags);
964 } else {
965 sock_wfree(skb);
966 }
967}
968
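
tcp_wfree() pairs with the sk_wmem_alloc check in tcp_write_xmit() further down to form a per-socket byte budget: transmission stops once roughly sysctl_tcp_limit_output_bytes are sitting in the qdisc/device queues, and the destructor restarts it as those bytes drain. A stripped-down, single-threaded model of that flow control; every name here is illustrative, not a kernel API:

#include <stdio.h>

#define LIMIT_OUTPUT_BYTES 131072	/* mirrors the default above */

static long wmem_alloc;		/* bytes owned by queued skbs */
static int throttled;		/* analogue of TSQ_THROTTLED */

/* Queue one segment unless the in-flight budget is exhausted. */
static int xmit(int truesize)
{
	if (wmem_alloc >= LIMIT_OUTPUT_BYTES) {
		throttled = 1;		/* remember someone wants to send */
		return 0;
	}
	wmem_alloc += truesize;
	return 1;
}

/* Destructor analogue: a queued segment was freed by the qdisc/driver. */
static void wfree(int truesize)
{
	wmem_alloc -= truesize;
	if (throttled) {
		throttled = 0;
		/* kernel: schedule the per-cpu tasklet; here: just report */
		printf("budget freed at %ld bytes, resume xmit\n", wmem_alloc);
	}
}

int main(void)
{
	int sent = 0;

	while (xmit(4096))		/* fill the budget with 4 KB skbs */
		sent++;
	printf("throttled after %d skbs (%ld bytes)\n", sent, wmem_alloc);
	wfree(4096);			/* one completion restarts the flow */
	return 0;
}
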
786/* This routine actually transmits TCP packets queued in by 969/* This routine actually transmits TCP packets queued in by
787 * tcp_do_sendmsg(). This is used by both the initial 970 * tcp_do_sendmsg(). This is used by both the initial
788 * transmission and possible later retransmissions. 971 * transmission and possible later retransmissions.
@@ -844,7 +1027,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
844 1027
845 skb_push(skb, tcp_header_size); 1028 skb_push(skb, tcp_header_size);
846 skb_reset_transport_header(skb); 1029 skb_reset_transport_header(skb);
847 skb_set_owner_w(skb, sk); 1030
1031 skb_orphan(skb);
1032 skb->sk = sk;
1033 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
1034 tcp_wfree : sock_wfree;
1035 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
848 1036
849 /* Build TCP header and checksum it. */ 1037 /* Build TCP header and checksum it. */
850 th = tcp_hdr(skb); 1038 th = tcp_hdr(skb);
@@ -1780,6 +1968,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1780 while ((skb = tcp_send_head(sk))) { 1968 while ((skb = tcp_send_head(sk))) {
1781 unsigned int limit; 1969 unsigned int limit;
1782 1970
1783 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1972 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1784 BUG_ON(!tso_segs); 1973 BUG_ON(!tso_segs);
1785 1974
@@ -1800,6 +1989,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1800 break; 1989 break;
1801 } 1990 }
1802 1991
 1992 /* TSQ: sk_wmem_alloc accounts for skb truesize,
 1993 * including skb overhead. But that's OK.
1994 */
1995 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
1996 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1997 break;
1998 }
1803 limit = mss_now; 1999 limit = mss_now;
1804 if (tso_segs > 1 && !tcp_urg_mode(tp)) 2000 if (tso_segs > 1 && !tcp_urg_mode(tp))
1805 limit = tcp_mss_split_point(sk, skb, mss_now, 2001 limit = tcp_mss_split_point(sk, skb, mss_now,
@@ -1849,7 +2045,8 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1849 if (unlikely(sk->sk_state == TCP_CLOSE)) 2045 if (unlikely(sk->sk_state == TCP_CLOSE))
1850 return; 2046 return;
1851 2047
1852 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC)) 2048 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2049 sk_gfp_atomic(sk, GFP_ATOMIC)))
1853 tcp_check_probe_timer(sk); 2050 tcp_check_probe_timer(sk);
1854} 2051}
1855 2052
@@ -2442,7 +2639,16 @@ int tcp_send_synack(struct sock *sk)
2442 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2639 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2443} 2640}
2444 2641
2445/* Prepare a SYN-ACK. */ 2642/**
2643 * tcp_make_synack - Prepare a SYN-ACK.
 2644 * @sk: listener socket
 2645 * @dst: dst entry attached to the SYNACK
 2646 * @req: request_sock pointer
 2647 * @rvp: request_values pointer
 2648 *
 2649 * Allocate one skb and build a SYNACK packet.
 2650 * @dst is consumed: caller should not use it again.
2651 */
2446struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2652struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2447 struct request_sock *req, 2653 struct request_sock *req,
2448 struct request_values *rvp) 2654 struct request_values *rvp)
@@ -2461,14 +2667,16 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2461 2667
2462 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) 2668 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2463 s_data_desired = cvp->s_data_desired; 2669 s_data_desired = cvp->s_data_desired;
2464 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); 2670 skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
2465 if (skb == NULL) 2671 sk_gfp_atomic(sk, GFP_ATOMIC));
2672 if (unlikely(!skb)) {
2673 dst_release(dst);
2466 return NULL; 2674 return NULL;
2467 2675 }
2468 /* Reserve space for headers. */ 2676 /* Reserve space for headers. */
2469 skb_reserve(skb, MAX_TCP_HEADER); 2677 skb_reserve(skb, MAX_TCP_HEADER);
2470 2678
2471 skb_dst_set(skb, dst_clone(dst)); 2679 skb_dst_set(skb, dst);
2472 2680
2473 mss = dst_metric_advmss(dst); 2681 mss = dst_metric_advmss(dst);
2474 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2682 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
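
This hunk changes the ownership contract of tcp_make_synack(): @dst is now consumed on every path, including the allocation-failure path that does its own dst_release(), so callers never pair the call with a release. A hedged sketch of the consume-on-all-paths pattern using a generic refcounted object (the names are hypothetical, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct ref { int count; };

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		printf("object freed\n");
}

/* Consumes the caller's reference on every path, success or failure,
 * as tcp_make_synack() now does with @dst. */
static void *make_thing(struct ref *r, int fail)
{
	void *obj = fail ? NULL : malloc(16);

	if (!obj) {
		ref_put(r);	/* error path still drops the reference */
		return NULL;
	}
	/* success path: ownership moves into the returned object */
	printf("reference moved into result\n");
	return obj;
}

int main(void)
{
	struct ref r = { .count = 1 };

	if (!make_thing(&r, 1))	/* failure: no leak, no double put needed */
		printf("caller does nothing, reference already dropped\n");
	return 0;
}
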
@@ -2645,6 +2853,109 @@ void tcp_connect_init(struct sock *sk)
2645 tcp_clear_retrans(tp); 2853 tcp_clear_retrans(tp);
2646} 2854}
2647 2855
2856static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2857{
2858 struct tcp_sock *tp = tcp_sk(sk);
2859 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2860
2861 tcb->end_seq += skb->len;
2862 skb_header_release(skb);
2863 __tcp_add_write_queue_tail(sk, skb);
2864 sk->sk_wmem_queued += skb->truesize;
2865 sk_mem_charge(sk, skb->truesize);
2866 tp->write_seq = tcb->end_seq;
2867 tp->packets_out += tcp_skb_pcount(skb);
2868}
2869
2870/* Build and send a SYN with data and (cached) Fast Open cookie. However,
2871 * queue a data-only packet after the regular SYN, such that regular SYNs
 2872 * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
 2873 * only the SYN sequence, the data are retransmitted in the first ACK.
 2874 * If the cookie is not cached or another error occurs, fall back to sending a
 2875 * regular SYN with the Fast Open cookie request option.
2876 */
2877static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2878{
2879 struct tcp_sock *tp = tcp_sk(sk);
2880 struct tcp_fastopen_request *fo = tp->fastopen_req;
2881 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2882 struct sk_buff *syn_data = NULL, *data;
2883 unsigned long last_syn_loss = 0;
2884
2885 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
2886 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2887 &syn_loss, &last_syn_loss);
2888 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2889 if (syn_loss > 1 &&
2890 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2891 fo->cookie.len = -1;
2892 goto fallback;
2893 }
2894
2895 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2896 fo->cookie.len = -1;
2897 else if (fo->cookie.len <= 0)
2898 goto fallback;
2899
2900 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2901 * user-MSS. Reserve maximum option space for middleboxes that add
2902 * private TCP options. The cost is reduced data space in SYN :(
2903 */
2904 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2905 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2906 space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2907 MAX_TCP_OPTION_SPACE;
2908
2909 syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
2910 sk->sk_allocation);
2911 if (syn_data == NULL)
2912 goto fallback;
2913
2914 for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2915 struct iovec *iov = &fo->data->msg_iov[i];
2916 unsigned char __user *from = iov->iov_base;
2917 int len = iov->iov_len;
2918
2919 if (syn_data->len + len > space)
2920 len = space - syn_data->len;
2921 else if (i + 1 == iovlen)
2922 /* No more data pending in inet_wait_for_connect() */
2923 fo->data = NULL;
2924
2925 if (skb_add_data(syn_data, from, len))
2926 goto fallback;
2927 }
2928
2929 /* Queue a data-only packet after the regular SYN for retransmission */
2930 data = pskb_copy(syn_data, sk->sk_allocation);
2931 if (data == NULL)
2932 goto fallback;
2933 TCP_SKB_CB(data)->seq++;
 2934 TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
 2935 TCP_SKB_CB(data)->tcp_flags |= (TCPHDR_ACK|TCPHDR_PSH);
2936 tcp_connect_queue_skb(sk, data);
2937 fo->copied = data->len;
2938
2939 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
2940 tp->syn_data = (fo->copied > 0);
2941 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2942 goto done;
2943 }
2944 syn_data = NULL;
2945
2946fallback:
2947 /* Send a regular SYN with Fast Open cookie request option */
2948 if (fo->cookie.len > 0)
2949 fo->cookie.len = 0;
2950 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2951 if (err)
2952 tp->syn_fastopen = 0;
2953 kfree_skb(syn_data);
2954done:
2955 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
2956 return err;
2957}
2958
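
The space computation in tcp_send_syn_data() above bounds the SYN payload: the MSS derived from the cached path MTU, minus the full 40 bytes of TCP option space reserved for middlebox-added options. A quick stand-alone check of that arithmetic, with header sizes assumed for plain IPv4+TCP:

#include <stdio.h>

#define MAX_TCP_OPTION_SPACE 40	/* worst-case TCP option bytes */

/* Roughly tcp_mtu_to_mss() for plain IPv4+TCP: strip both 20-byte
 * headers from the path MTU (timestamps etc. ignored for brevity). */
static int mtu_to_mss(int mtu)
{
	return mtu - 20 /* IPv4 */ - 20 /* TCP */;
}

int main(void)
{
	int mtu = 1500;	/* typical Ethernet path MTU */
	int space = mtu_to_mss(mtu) - MAX_TCP_OPTION_SPACE;

	/* 1500 - 40 - 40 = 1420 bytes of payload may ride on the SYN */
	printf("SYN-data budget for MTU %d: %d bytes\n", mtu, space);
	return 0;
}
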
2648/* Build a SYN and send it off. */ 2959/* Build a SYN and send it off. */
2649int tcp_connect(struct sock *sk) 2960int tcp_connect(struct sock *sk)
2650{ 2961{
@@ -2662,17 +2973,13 @@ int tcp_connect(struct sock *sk)
2662 skb_reserve(buff, MAX_TCP_HEADER); 2973 skb_reserve(buff, MAX_TCP_HEADER);
2663 2974
2664 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2975 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2976 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2977 tcp_connect_queue_skb(sk, buff);
2665 TCP_ECN_send_syn(sk, buff); 2978 TCP_ECN_send_syn(sk, buff);
2666 2979
2667 /* Send it off. */ 2980 /* Send off SYN; include data in Fast Open. */
2668 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2981 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2669 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2982 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2670 skb_header_release(buff);
2671 __tcp_add_write_queue_tail(sk, buff);
2672 sk->sk_wmem_queued += buff->truesize;
2673 sk_mem_charge(sk, buff->truesize);
2674 tp->packets_out += tcp_skb_pcount(buff);
2675 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2676 if (err == -ECONNREFUSED) 2983 if (err == -ECONNREFUSED)
2677 return err; 2984 return err;
2678 2985
@@ -2759,7 +3066,7 @@ void tcp_send_ack(struct sock *sk)
2759 * tcp_transmit_skb() will set the ownership to this 3066 * tcp_transmit_skb() will set the ownership to this
2760 * sock. 3067 * sock.
2761 */ 3068 */
2762 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 3069 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
2763 if (buff == NULL) { 3070 if (buff == NULL) {
2764 inet_csk_schedule_ack(sk); 3071 inet_csk_schedule_ack(sk);
2765 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 3072 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -2774,7 +3081,7 @@ void tcp_send_ack(struct sock *sk)
2774 3081
2775 /* Send it off, this clears delayed acks for us. */ 3082 /* Send it off, this clears delayed acks for us. */
2776 TCP_SKB_CB(buff)->when = tcp_time_stamp; 3083 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2777 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 3084 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
2778} 3085}
2779 3086
2780/* This routine sends a packet with an out of date sequence 3087/* This routine sends a packet with an out of date sequence
@@ -2794,7 +3101,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2794 struct sk_buff *skb; 3101 struct sk_buff *skb;
2795 3102
2796 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 3103 /* We don't queue it, tcp_transmit_skb() sets ownership. */
2797 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 3104 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
2798 if (skb == NULL) 3105 if (skb == NULL)
2799 return -1; 3106 return -1;
2800 3107
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index e911e6c523ec..6df36ad55a38 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
32int sysctl_tcp_orphan_retries __read_mostly; 32int sysctl_tcp_orphan_retries __read_mostly;
33int sysctl_tcp_thin_linear_timeouts __read_mostly; 33int sysctl_tcp_thin_linear_timeouts __read_mostly;
34 34
35static void tcp_write_timer(unsigned long);
36static void tcp_delack_timer(unsigned long);
37static void tcp_keepalive_timer (unsigned long data);
38
39void tcp_init_xmit_timers(struct sock *sk)
40{
41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
42 &tcp_keepalive_timer);
43}
44EXPORT_SYMBOL(tcp_init_xmit_timers);
45
46static void tcp_write_err(struct sock *sk) 35static void tcp_write_err(struct sock *sk)
47{ 36{
48 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 37 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
205 return 0; 194 return 0;
206} 195}
207 196
208static void tcp_delack_timer(unsigned long data) 197void tcp_delack_timer_handler(struct sock *sk)
209{ 198{
210 struct sock *sk = (struct sock *)data;
211 struct tcp_sock *tp = tcp_sk(sk); 199 struct tcp_sock *tp = tcp_sk(sk);
212 struct inet_connection_sock *icsk = inet_csk(sk); 200 struct inet_connection_sock *icsk = inet_csk(sk);
213 201
214 bh_lock_sock(sk);
215 if (sock_owned_by_user(sk)) {
216 /* Try again later. */
217 icsk->icsk_ack.blocked = 1;
218 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
219 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
220 goto out_unlock;
221 }
222
223 sk_mem_reclaim_partial(sk); 202 sk_mem_reclaim_partial(sk);
224 203
225 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 204 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
@@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
260out: 239out:
261 if (sk_under_memory_pressure(sk)) 240 if (sk_under_memory_pressure(sk))
262 sk_mem_reclaim(sk); 241 sk_mem_reclaim(sk);
263out_unlock: 242}
243
244static void tcp_delack_timer(unsigned long data)
245{
246 struct sock *sk = (struct sock *)data;
247
248 bh_lock_sock(sk);
249 if (!sock_owned_by_user(sk)) {
250 tcp_delack_timer_handler(sk);
251 } else {
252 inet_csk(sk)->icsk_ack.blocked = 1;
253 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 254 /* delegate our work to tcp_release_cb() */
 255 set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
256 }
264 bh_unlock_sock(sk); 257 bh_unlock_sock(sk);
265 sock_put(sk); 258 sock_put(sk);
266} 259}
@@ -450,19 +443,11 @@ out_reset_timer:
450out:; 443out:;
451} 444}
452 445
453static void tcp_write_timer(unsigned long data) 446void tcp_write_timer_handler(struct sock *sk)
454{ 447{
455 struct sock *sk = (struct sock *)data;
456 struct inet_connection_sock *icsk = inet_csk(sk); 448 struct inet_connection_sock *icsk = inet_csk(sk);
457 int event; 449 int event;
458 450
459 bh_lock_sock(sk);
460 if (sock_owned_by_user(sk)) {
461 /* Try again later */
462 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
463 goto out_unlock;
464 }
465
466 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 451 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
467 goto out; 452 goto out;
468 453
@@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
485 470
486out: 471out:
487 sk_mem_reclaim(sk); 472 sk_mem_reclaim(sk);
488out_unlock: 473}
474
475static void tcp_write_timer(unsigned long data)
476{
477 struct sock *sk = (struct sock *)data;
478
479 bh_lock_sock(sk);
480 if (!sock_owned_by_user(sk)) {
481 tcp_write_timer_handler(sk);
482 } else {
 483 /* delegate our work to tcp_release_cb() */
484 set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
485 }
489 bh_unlock_sock(sk); 486 bh_unlock_sock(sk);
490 sock_put(sk); 487 sock_put(sk);
491} 488}
@@ -602,3 +599,10 @@ out:
602 bh_unlock_sock(sk); 599 bh_unlock_sock(sk);
603 sock_put(sk); 600 sock_put(sk);
604} 601}
602
603void tcp_init_xmit_timers(struct sock *sk)
604{
605 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
606 &tcp_keepalive_timer);
607}
608EXPORT_SYMBOL(tcp_init_xmit_timers);
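
Both timer conversions in this file follow one pattern: the body becomes a bare handler, and the timer callback shrinks to "run the handler unless the socket is owned, otherwise set a deferred bit for tcp_release_cb()". A compact user-space model of that split, with names invented for illustration:

#include <stdio.h>

static int sock_owned;		/* analogue of sock_owned_by_user() */
static unsigned long deferred;	/* analogue of tp->tsq_flags */

#define WRITE_TIMER_DEFERRED (1UL << 0)

static void write_timer_handler(void)
{
	printf("retransmit work runs\n");
}

/* Timer callback: never blocks on the owner, just records the work. */
static void write_timer(void)
{
	if (!sock_owned)
		write_timer_handler();
	else
		deferred |= WRITE_TIMER_DEFERRED;
}

/* release_sock() analogue: replay whatever fired while the lock was held. */
static void release_cb(void)
{
	if (deferred & WRITE_TIMER_DEFERRED) {
		deferred &= ~WRITE_TIMER_DEFERRED;
		write_timer_handler();
	}
}

int main(void)
{
	sock_owned = 1;
	write_timer();		/* fires while the user holds the socket */
	printf("timer deferred: %lu\n", deferred);
	sock_owned = 0;
	release_cb();		/* deferred work runs at release time */
	return 0;
}
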
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eaca73644e79..b4c3582a991f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -108,6 +108,7 @@
108#include <net/xfrm.h> 108#include <net/xfrm.h>
109#include <trace/events/udp.h> 109#include <trace/events/udp.h>
110#include <linux/static_key.h> 110#include <linux/static_key.h>
111#include <trace/events/skb.h>
111#include "udp_impl.h" 112#include "udp_impl.h"
112 113
113struct udp_table udp_table __read_mostly; 114struct udp_table udp_table __read_mostly;
@@ -615,6 +616,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
615 break; 616 break;
616 case ICMP_DEST_UNREACH: 617 case ICMP_DEST_UNREACH:
617 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 618 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
619 ipv4_sk_update_pmtu(skb, sk, info);
618 if (inet->pmtudisc != IP_PMTUDISC_DONT) { 620 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
619 err = EMSGSIZE; 621 err = EMSGSIZE;
620 harderr = 1; 622 harderr = 1;
@@ -628,6 +630,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
628 err = icmp_err_convert[code].errno; 630 err = icmp_err_convert[code].errno;
629 } 631 }
630 break; 632 break;
633 case ICMP_REDIRECT:
634 ipv4_sk_redirect(skb, sk);
635 break;
631 } 636 }
632 637
633 /* 638 /*
@@ -1219,8 +1224,10 @@ try_again:
1219 goto csum_copy_err; 1224 goto csum_copy_err;
1220 } 1225 }
1221 1226
1222 if (err) 1227 if (unlikely(err)) {
1228 trace_kfree_skb(skb, udp_recvmsg);
1223 goto out_free; 1229 goto out_free;
1230 }
1224 1231
1225 if (!peeked) 1232 if (!peeked)
1226 UDP_INC_STATS_USER(sock_net(sk), 1233 UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index a7f86a3cd502..16d0960062be 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -34,15 +34,16 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
34 int err = -EINVAL; 34 int err = -EINVAL;
35 struct sock *sk; 35 struct sock *sk;
36 struct sk_buff *rep; 36 struct sk_buff *rep;
37 struct net *net = sock_net(in_skb->sk);
37 38
38 if (req->sdiag_family == AF_INET) 39 if (req->sdiag_family == AF_INET)
39 sk = __udp4_lib_lookup(&init_net, 40 sk = __udp4_lib_lookup(net,
40 req->id.idiag_src[0], req->id.idiag_sport, 41 req->id.idiag_src[0], req->id.idiag_sport,
41 req->id.idiag_dst[0], req->id.idiag_dport, 42 req->id.idiag_dst[0], req->id.idiag_dport,
42 req->id.idiag_if, tbl); 43 req->id.idiag_if, tbl);
43#if IS_ENABLED(CONFIG_IPV6) 44#if IS_ENABLED(CONFIG_IPV6)
44 else if (req->sdiag_family == AF_INET6) 45 else if (req->sdiag_family == AF_INET6)
45 sk = __udp6_lib_lookup(&init_net, 46 sk = __udp6_lib_lookup(net,
46 (struct in6_addr *)req->id.idiag_src, 47 (struct in6_addr *)req->id.idiag_src,
47 req->id.idiag_sport, 48 req->id.idiag_sport,
48 (struct in6_addr *)req->id.idiag_dst, 49 (struct in6_addr *)req->id.idiag_dst,
@@ -75,7 +76,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
75 kfree_skb(rep); 76 kfree_skb(rep);
76 goto out; 77 goto out;
77 } 78 }
78 err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid, 79 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
79 MSG_DONTWAIT); 80 MSG_DONTWAIT);
80 if (err > 0) 81 if (err > 0)
81 err = 0; 82 err = 0;
@@ -90,6 +91,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
90 struct inet_diag_req_v2 *r, struct nlattr *bc) 91 struct inet_diag_req_v2 *r, struct nlattr *bc)
91{ 92{
92 int num, s_num, slot, s_slot; 93 int num, s_num, slot, s_slot;
94 struct net *net = sock_net(skb->sk);
93 95
94 s_slot = cb->args[0]; 96 s_slot = cb->args[0];
95 num = s_num = cb->args[1]; 97 num = s_num = cb->args[1];
@@ -106,6 +108,8 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
106 sk_nulls_for_each(sk, node, &hslot->head) { 108 sk_nulls_for_each(sk, node, &hslot->head) {
107 struct inet_sock *inet = inet_sk(sk); 109 struct inet_sock *inet = inet_sk(sk);
108 110
111 if (!net_eq(sock_net(sk), net))
112 continue;
109 if (num < s_num) 113 if (num < s_num)
110 goto next; 114 goto next;
111 if (!(r->idiag_states & (1 << sk->sk_state))) 115 if (!(r->idiag_states & (1 << sk->sk_state)))
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index ed4bf11ef9f4..ddee0a099a2c 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -15,6 +15,65 @@
15#include <net/ip.h> 15#include <net/ip.h>
16#include <net/xfrm.h> 16#include <net/xfrm.h>
17 17
18/* Informational hook. The decap is still done here. */
19static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
20static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
21
22int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
23{
24 struct xfrm_tunnel __rcu **pprev;
25 struct xfrm_tunnel *t;
26 int ret = -EEXIST;
27 int priority = handler->priority;
28
29 mutex_lock(&xfrm4_mode_tunnel_input_mutex);
30
31 for (pprev = &rcv_notify_handlers;
32 (t = rcu_dereference_protected(*pprev,
33 lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
34 pprev = &t->next) {
35 if (t->priority > priority)
36 break;
37 if (t->priority == priority)
38 goto err;
39
40 }
41
42 handler->next = *pprev;
43 rcu_assign_pointer(*pprev, handler);
44
45 ret = 0;
46
47err:
48 mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
49 return ret;
50}
51EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
52
53int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
54{
55 struct xfrm_tunnel __rcu **pprev;
56 struct xfrm_tunnel *t;
57 int ret = -ENOENT;
58
59 mutex_lock(&xfrm4_mode_tunnel_input_mutex);
60 for (pprev = &rcv_notify_handlers;
61 (t = rcu_dereference_protected(*pprev,
62 lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
63 pprev = &t->next) {
64 if (t == handler) {
65 *pprev = handler->next;
66 ret = 0;
67 break;
68 }
69 }
70 mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
71 synchronize_net();
72
73 return ret;
74}
75EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_deregister);
76
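
The registration above walks the handler list with a pointer-to-pointer, so priority-ordered insertion needs no special head case and a duplicate priority is rejected with -EEXIST. The same traversal in plain C, minus the mutex and the rcu_assign_pointer() publication:

#include <stdio.h>
#include <errno.h>

struct handler {
	struct handler *next;
	int priority;
};

/* Insert in ascending priority order; -EEXIST on a duplicate priority.
 * pprev always points at the link to rewrite, so inserting at the head
 * is no different from inserting in the middle. */
static int handler_register(struct handler **head, struct handler *h)
{
	struct handler **pprev, *t;

	for (pprev = head; (t = *pprev) != NULL; pprev = &t->next) {
		if (t->priority > h->priority)
			break;
		if (t->priority == h->priority)
			return -EEXIST;
	}
	h->next = *pprev;
	*pprev = h;
	return 0;
}

int main(void)
{
	struct handler *head = NULL;
	struct handler a = { .priority = 2 }, b = { .priority = 1 };

	handler_register(&head, &a);
	handler_register(&head, &b);	/* lands ahead of a */
	for (struct handler *t = head; t; t = t->next)
		printf("priority %d\n", t->priority);
	printf("dup -> %d\n", handler_register(&head, &a));	/* -EEXIST */
	return 0;
}
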
18static inline void ipip_ecn_decapsulate(struct sk_buff *skb) 77static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
19{ 78{
20 struct iphdr *inner_iph = ipip_hdr(skb); 79 struct iphdr *inner_iph = ipip_hdr(skb);
@@ -64,8 +123,14 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
64 return 0; 123 return 0;
65} 124}
66 125
126#define for_each_input_rcu(head, handler) \
127 for (handler = rcu_dereference(head); \
128 handler != NULL; \
129 handler = rcu_dereference(handler->next))
130
67static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 131static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
68{ 132{
133 struct xfrm_tunnel *handler;
69 int err = -EINVAL; 134 int err = -EINVAL;
70 135
71 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) 136 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -74,6 +139,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
74 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 139 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
75 goto out; 140 goto out;
76 141
142 for_each_input_rcu(rcv_notify_handlers, handler)
143 handler->handler(skb);
144
77 if (skb_cloned(skb) && 145 if (skb_cloned(skb) &&
78 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 146 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
79 goto out; 147 goto out;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0d3426cb5c4f..681ea2f413e2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,30 +79,20 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
79 struct rtable *rt = (struct rtable *)xdst->route; 79 struct rtable *rt = (struct rtable *)xdst->route;
80 const struct flowi4 *fl4 = &fl->u.ip4; 80 const struct flowi4 *fl4 = &fl->u.ip4;
81 81
82 xdst->u.rt.rt_key_dst = fl4->daddr;
83 xdst->u.rt.rt_key_src = fl4->saddr;
84 xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
85 xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
86 xdst->u.rt.rt_iif = fl4->flowi4_iif; 82 xdst->u.rt.rt_iif = fl4->flowi4_iif;
87 xdst->u.rt.rt_oif = fl4->flowi4_oif;
88 xdst->u.rt.rt_mark = fl4->flowi4_mark;
89 83
90 xdst->u.dst.dev = dev; 84 xdst->u.dst.dev = dev;
91 dev_hold(dev); 85 dev_hold(dev);
92 86
93 xdst->u.rt.peer = rt->peer;
94 if (rt->peer)
95 atomic_inc(&rt->peer->refcnt);
96
97 /* Sheit... I remember I did this right. Apparently, 87 /* Sheit... I remember I did this right. Apparently,
98 * it was magically lost, so this code needs audit */ 88 * it was magically lost, so this code needs audit */
89 xdst->u.rt.rt_is_input = rt->rt_is_input;
99 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | 90 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
100 RTCF_LOCAL); 91 RTCF_LOCAL);
101 xdst->u.rt.rt_type = rt->rt_type; 92 xdst->u.rt.rt_type = rt->rt_type;
102 xdst->u.rt.rt_src = rt->rt_src;
103 xdst->u.rt.rt_dst = rt->rt_dst;
104 xdst->u.rt.rt_gateway = rt->rt_gateway; 93 xdst->u.rt.rt_gateway = rt->rt_gateway;
105 xdst->u.rt.rt_spec_dst = rt->rt_spec_dst; 94 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
95 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
106 96
107 return 0; 97 return 0;
108} 98}
@@ -198,12 +188,22 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops)
198 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2); 188 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
199} 189}
200 190
201static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) 191static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
192 struct sk_buff *skb, u32 mtu)
193{
194 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
195 struct dst_entry *path = xdst->route;
196
197 path->ops->update_pmtu(path, sk, skb, mtu);
198}
199
200static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
201 struct sk_buff *skb)
202{ 202{
203 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 203 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
204 struct dst_entry *path = xdst->route; 204 struct dst_entry *path = xdst->route;
205 205
206 path->ops->update_pmtu(path, mtu); 206 path->ops->redirect(path, sk, skb);
207} 207}
208 208
209static void xfrm4_dst_destroy(struct dst_entry *dst) 209static void xfrm4_dst_destroy(struct dst_entry *dst)
@@ -212,9 +212,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
212 212
213 dst_destroy_metrics_generic(dst); 213 dst_destroy_metrics_generic(dst);
214 214
215 if (likely(xdst->u.rt.peer))
216 inet_putpeer(xdst->u.rt.peer);
217
218 xfrm_dst_destroy(xdst); 215 xfrm_dst_destroy(xdst);
219} 216}
220 217
@@ -232,6 +229,7 @@ static struct dst_ops xfrm4_dst_ops = {
232 .protocol = cpu_to_be16(ETH_P_IP), 229 .protocol = cpu_to_be16(ETH_P_IP),
233 .gc = xfrm4_garbage_collect, 230 .gc = xfrm4_garbage_collect,
234 .update_pmtu = xfrm4_update_pmtu, 231 .update_pmtu = xfrm4_update_pmtu,
232 .redirect = xfrm4_redirect,
235 .cow_metrics = dst_cow_metrics_generic, 233 .cow_metrics = dst_cow_metrics_generic,
236 .destroy = xfrm4_dst_destroy, 234 .destroy = xfrm4_dst_destroy,
237 .ifdown = xfrm4_dst_ifdown, 235 .ifdown = xfrm4_dst_ifdown,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8f6411c97189..79181819a24f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -63,6 +63,7 @@
63#include <linux/delay.h> 63#include <linux/delay.h>
64#include <linux/notifier.h> 64#include <linux/notifier.h>
65#include <linux/string.h> 65#include <linux/string.h>
66#include <linux/hash.h>
66 67
67#include <net/net_namespace.h> 68#include <net/net_namespace.h>
68#include <net/sock.h> 69#include <net/sock.h>
@@ -579,15 +580,9 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
579 list_add_tail(&ifp->if_list, p); 580 list_add_tail(&ifp->if_list, p);
580} 581}
581 582
582static u32 ipv6_addr_hash(const struct in6_addr *addr) 583static u32 inet6_addr_hash(const struct in6_addr *addr)
583{ 584{
584 /* 585 return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
585 * We perform the hash function over the last 64 bits of the address
586 * This will include the IEEE address token on links that support it.
587 */
588 return jhash_2words((__force u32)addr->s6_addr32[2],
589 (__force u32)addr->s6_addr32[3], 0)
590 & (IN6_ADDR_HSIZE - 1);
591} 586}
592 587
593/* On success it returns ifp with increased reference count */ 588/* On success it returns ifp with increased reference count */
@@ -662,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
662 in6_ifa_hold(ifa); 657 in6_ifa_hold(ifa);
663 658
664 /* Add to big hash table */ 659 /* Add to big hash table */
665 hash = ipv6_addr_hash(addr); 660 hash = inet6_addr_hash(addr);
666 661
667 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]); 662 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
668 spin_unlock(&addrconf_hash_lock); 663 spin_unlock(&addrconf_hash_lock);
@@ -1270,7 +1265,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1270{ 1265{
1271 struct inet6_ifaddr *ifp; 1266 struct inet6_ifaddr *ifp;
1272 struct hlist_node *node; 1267 struct hlist_node *node;
1273 unsigned int hash = ipv6_addr_hash(addr); 1268 unsigned int hash = inet6_addr_hash(addr);
1274 1269
1275 rcu_read_lock_bh(); 1270 rcu_read_lock_bh();
1276 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { 1271 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
@@ -1293,7 +1288,7 @@ EXPORT_SYMBOL(ipv6_chk_addr);
1293static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 1288static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1294 struct net_device *dev) 1289 struct net_device *dev)
1295{ 1290{
1296 unsigned int hash = ipv6_addr_hash(addr); 1291 unsigned int hash = inet6_addr_hash(addr);
1297 struct inet6_ifaddr *ifp; 1292 struct inet6_ifaddr *ifp;
1298 struct hlist_node *node; 1293 struct hlist_node *node;
1299 1294
@@ -1336,7 +1331,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
1336 struct net_device *dev, int strict) 1331 struct net_device *dev, int strict)
1337{ 1332{
1338 struct inet6_ifaddr *ifp, *result = NULL; 1333 struct inet6_ifaddr *ifp, *result = NULL;
1339 unsigned int hash = ipv6_addr_hash(addr); 1334 unsigned int hash = inet6_addr_hash(addr);
1340 struct hlist_node *node; 1335 struct hlist_node *node;
1341 1336
1342 rcu_read_lock_bh(); 1337 rcu_read_lock_bh();
@@ -3223,7 +3218,7 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3223 int ret = 0; 3218 int ret = 0;
3224 struct inet6_ifaddr *ifp = NULL; 3219 struct inet6_ifaddr *ifp = NULL;
3225 struct hlist_node *n; 3220 struct hlist_node *n;
3226 unsigned int hash = ipv6_addr_hash(addr); 3221 unsigned int hash = inet6_addr_hash(addr);
3227 3222
3228 rcu_read_lock_bh(); 3223 rcu_read_lock_bh();
3229 hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) { 3224 hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f1a4a2c28ed3..7e6139508ee7 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,6 +35,7 @@
35#include <linux/pfkeyv2.h> 35#include <linux/pfkeyv2.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
38#include <net/ip6_route.h>
38#include <net/icmp.h> 39#include <net/icmp.h>
39#include <net/ipv6.h> 40#include <net/ipv6.h>
40#include <net/protocol.h> 41#include <net/protocol.h>
@@ -612,16 +613,18 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
612 struct xfrm_state *x; 613 struct xfrm_state *x;
613 614
614 if (type != ICMPV6_DEST_UNREACH && 615 if (type != ICMPV6_DEST_UNREACH &&
615 type != ICMPV6_PKT_TOOBIG) 616 type != ICMPV6_PKT_TOOBIG &&
617 type != NDISC_REDIRECT)
616 return; 618 return;
617 619
618 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6); 620 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
619 if (!x) 621 if (!x)
620 return; 622 return;
621 623
622 NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n", 624 if (type == NDISC_REDIRECT)
623 ntohl(ah->spi), &iph->daddr); 625 ip6_redirect(skb, net, 0, 0);
624 626 else
627 ip6_update_pmtu(skb, net, info, 0, 0);
625 xfrm_state_put(x); 628 xfrm_state_put(x);
626} 629}
627 630
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index db1521fcda5b..6dc7fd353ef5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -39,6 +39,7 @@
39#include <linux/random.h> 39#include <linux/random.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/spinlock.h> 41#include <linux/spinlock.h>
42#include <net/ip6_route.h>
42#include <net/icmp.h> 43#include <net/icmp.h>
43#include <net/ipv6.h> 44#include <net/ipv6.h>
44#include <net/protocol.h> 45#include <net/protocol.h>
@@ -433,15 +434,19 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
433 struct xfrm_state *x; 434 struct xfrm_state *x;
434 435
435 if (type != ICMPV6_DEST_UNREACH && 436 if (type != ICMPV6_DEST_UNREACH &&
436 type != ICMPV6_PKT_TOOBIG) 437 type != ICMPV6_PKT_TOOBIG &&
438 type != NDISC_REDIRECT)
437 return; 439 return;
438 440
439 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 441 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
440 esph->spi, IPPROTO_ESP, AF_INET6); 442 esph->spi, IPPROTO_ESP, AF_INET6);
441 if (!x) 443 if (!x)
442 return; 444 return;
443 pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n", 445
444 ntohl(esph->spi), &iph->daddr); 446 if (type == NDISC_REDIRECT)
447 ip6_redirect(skb, net, 0, 0);
448 else
449 ip6_update_pmtu(skb, net, info, 0, 0);
445 xfrm_state_put(x); 450 xfrm_state_put(x);
446} 451}
447 452
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6447dc49429f..fa3d9c328092 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -791,14 +791,14 @@ static int ipv6_renew_option(void *ohdr,
791 if (ohdr) { 791 if (ohdr) {
792 memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); 792 memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
793 *hdr = (struct ipv6_opt_hdr *)*p; 793 *hdr = (struct ipv6_opt_hdr *)*p;
794 *p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr)); 794 *p += CMSG_ALIGN(ipv6_optlen(*hdr));
795 } 795 }
796 } else { 796 } else {
797 if (newopt) { 797 if (newopt) {
798 if (copy_from_user(*p, newopt, newoptlen)) 798 if (copy_from_user(*p, newopt, newoptlen))
799 return -EFAULT; 799 return -EFAULT;
800 *hdr = (struct ipv6_opt_hdr *)*p; 800 *hdr = (struct ipv6_opt_hdr *)*p;
801 if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen) 801 if (ipv6_optlen(*hdr) > newoptlen)
802 return -EINVAL; 802 return -EINVAL;
803 *p += CMSG_ALIGN(newoptlen); 803 *p += CMSG_ALIGN(newoptlen);
804 } 804 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 091a2971c7b7..24d69dbca4d6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -188,14 +188,16 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
188 } else { 188 } else {
189 struct rt6_info *rt = (struct rt6_info *)dst; 189 struct rt6_info *rt = (struct rt6_info *)dst;
190 int tmo = net->ipv6.sysctl.icmpv6_time; 190 int tmo = net->ipv6.sysctl.icmpv6_time;
191 struct inet_peer *peer;
191 192
192 /* Give more bandwidth to wider prefixes. */ 193 /* Give more bandwidth to wider prefixes. */
193 if (rt->rt6i_dst.plen < 128) 194 if (rt->rt6i_dst.plen < 128)
194 tmo >>= ((128 - rt->rt6i_dst.plen)>>5); 195 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
195 196
196 if (!rt->rt6i_peer) 197 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
197 rt6_bind_peer(rt, 1); 198 res = inet_peer_xrlim_allow(peer, tmo);
198 res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo); 199 if (peer)
200 inet_putpeer(peer);
199 } 201 }
200 dst_release(dst); 202 dst_release(dst);
201 return res; 203 return res;
@@ -596,13 +598,12 @@ out:
596 icmpv6_xmit_unlock(sk); 598 icmpv6_xmit_unlock(sk);
597} 599}
598 600
599static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) 601void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
600{ 602{
601 const struct inet6_protocol *ipprot; 603 const struct inet6_protocol *ipprot;
602 int inner_offset; 604 int inner_offset;
603 int hash;
604 u8 nexthdr;
605 __be16 frag_off; 605 __be16 frag_off;
606 u8 nexthdr;
606 607
607 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 608 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
608 return; 609 return;
@@ -629,10 +630,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
629 --ANK (980726) 630 --ANK (980726)
630 */ 631 */
631 632
632 hash = nexthdr & (MAX_INET_PROTOS - 1);
633
634 rcu_read_lock(); 633 rcu_read_lock();
635 ipprot = rcu_dereference(inet6_protos[hash]); 634 ipprot = rcu_dereference(inet6_protos[nexthdr]);
636 if (ipprot && ipprot->err_handler) 635 if (ipprot && ipprot->err_handler)
637 ipprot->err_handler(skb, NULL, type, code, inner_offset, info); 636 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
638 rcu_read_unlock(); 637 rcu_read_unlock();
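
With inet6_protos[] grown to a full 256 entries, the u8 next-header value indexes the table directly and the old "nexthdr & (MAX_INET_PROTOS - 1)" masking disappears here and in ip6_input.c. A toy version of direct protocol dispatch (a sketch, not the kernel table, which holds struct inet6_protocol pointers):

#include <stdio.h>

typedef void (*proto_handler)(void);

static proto_handler inet6_protos[256];	/* one slot per possible u8 value */

static void icmpv6_handler(void) { printf("ICMPv6 handled\n"); }

int main(void)
{
	unsigned char nexthdr = 58;	/* IPPROTO_ICMPV6 */

	inet6_protos[58] = icmpv6_handler;
	if (inet6_protos[nexthdr])
		inet6_protos[nexthdr]();	/* no masking needed */
	return 0;
}
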
@@ -649,7 +648,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
649 struct net_device *dev = skb->dev; 648 struct net_device *dev = skb->dev;
650 struct inet6_dev *idev = __in6_dev_get(dev); 649 struct inet6_dev *idev = __in6_dev_get(dev);
651 const struct in6_addr *saddr, *daddr; 650 const struct in6_addr *saddr, *daddr;
652 const struct ipv6hdr *orig_hdr;
653 struct icmp6hdr *hdr; 651 struct icmp6hdr *hdr;
654 u8 type; 652 u8 type;
655 653
@@ -661,7 +659,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
661 XFRM_STATE_ICMP)) 659 XFRM_STATE_ICMP))
662 goto drop_no_count; 660 goto drop_no_count;
663 661
664 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr))) 662 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
665 goto drop_no_count; 663 goto drop_no_count;
666 664
667 nh = skb_network_offset(skb); 665 nh = skb_network_offset(skb);
@@ -722,9 +720,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
722 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 720 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
723 goto discard_it; 721 goto discard_it;
724 hdr = icmp6_hdr(skb); 722 hdr = icmp6_hdr(skb);
725 orig_hdr = (struct ipv6hdr *) (hdr + 1);
726 rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
727 ntohl(hdr->icmp6_mtu));
728 723
729 /* 724 /*
730 * Drop through to notify 725 * Drop through to notify
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e6cee5292a0b..0251a6005be8 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -55,26 +55,26 @@ int inet6_csk_bind_conflict(const struct sock *sk,
55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); 55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
56 56
57struct dst_entry *inet6_csk_route_req(struct sock *sk, 57struct dst_entry *inet6_csk_route_req(struct sock *sk,
58 struct flowi6 *fl6,
58 const struct request_sock *req) 59 const struct request_sock *req)
59{ 60{
60 struct inet6_request_sock *treq = inet6_rsk(req); 61 struct inet6_request_sock *treq = inet6_rsk(req);
61 struct ipv6_pinfo *np = inet6_sk(sk); 62 struct ipv6_pinfo *np = inet6_sk(sk);
62 struct in6_addr *final_p, final; 63 struct in6_addr *final_p, final;
63 struct dst_entry *dst; 64 struct dst_entry *dst;
64 struct flowi6 fl6;
65 65
66 memset(&fl6, 0, sizeof(fl6)); 66 memset(fl6, 0, sizeof(*fl6));
67 fl6.flowi6_proto = IPPROTO_TCP; 67 fl6->flowi6_proto = IPPROTO_TCP;
68 fl6.daddr = treq->rmt_addr; 68 fl6->daddr = treq->rmt_addr;
69 final_p = fl6_update_dst(&fl6, np->opt, &final); 69 final_p = fl6_update_dst(fl6, np->opt, &final);
70 fl6.saddr = treq->loc_addr; 70 fl6->saddr = treq->loc_addr;
71 fl6.flowi6_oif = sk->sk_bound_dev_if; 71 fl6->flowi6_oif = treq->iif;
72 fl6.flowi6_mark = sk->sk_mark; 72 fl6->flowi6_mark = sk->sk_mark;
73 fl6.fl6_dport = inet_rsk(req)->rmt_port; 73 fl6->fl6_dport = inet_rsk(req)->rmt_port;
74 fl6.fl6_sport = inet_rsk(req)->loc_port; 74 fl6->fl6_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 75 security_req_classify_flow(req, flowi6_to_flowi(fl6));
76 76
77 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); 77 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
78 if (IS_ERR(dst)) 78 if (IS_ERR(dst))
79 return NULL; 79 return NULL;
80 80
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
171 171
172static inline 172static inline
173void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, 173void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
174 struct in6_addr *daddr, struct in6_addr *saddr) 174 const struct in6_addr *daddr,
175 const struct in6_addr *saddr)
175{ 176{
176 __ip6_dst_store(sk, dst, daddr, saddr); 177 __ip6_dst_store(sk, dst, daddr, saddr);
177 178
@@ -203,43 +204,52 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
203 return dst; 204 return dst;
204} 205}
205 206
206int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) 207static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
208 struct flowi6 *fl6)
207{ 209{
208 struct sock *sk = skb->sk;
209 struct inet_sock *inet = inet_sk(sk); 210 struct inet_sock *inet = inet_sk(sk);
210 struct ipv6_pinfo *np = inet6_sk(sk); 211 struct ipv6_pinfo *np = inet6_sk(sk);
211 struct flowi6 fl6;
212 struct dst_entry *dst;
213 struct in6_addr *final_p, final; 212 struct in6_addr *final_p, final;
214 int res; 213 struct dst_entry *dst;
215 214
216 memset(&fl6, 0, sizeof(fl6)); 215 memset(fl6, 0, sizeof(*fl6));
217 fl6.flowi6_proto = sk->sk_protocol; 216 fl6->flowi6_proto = sk->sk_protocol;
218 fl6.daddr = np->daddr; 217 fl6->daddr = np->daddr;
219 fl6.saddr = np->saddr; 218 fl6->saddr = np->saddr;
220 fl6.flowlabel = np->flow_label; 219 fl6->flowlabel = np->flow_label;
221 IP6_ECN_flow_xmit(sk, fl6.flowlabel); 220 IP6_ECN_flow_xmit(sk, fl6->flowlabel);
222 fl6.flowi6_oif = sk->sk_bound_dev_if; 221 fl6->flowi6_oif = sk->sk_bound_dev_if;
223 fl6.flowi6_mark = sk->sk_mark; 222 fl6->flowi6_mark = sk->sk_mark;
224 fl6.fl6_sport = inet->inet_sport; 223 fl6->fl6_sport = inet->inet_sport;
225 fl6.fl6_dport = inet->inet_dport; 224 fl6->fl6_dport = inet->inet_dport;
226 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 225 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
227 226
228 final_p = fl6_update_dst(&fl6, np->opt, &final); 227 final_p = fl6_update_dst(fl6, np->opt, &final);
229 228
230 dst = __inet6_csk_dst_check(sk, np->dst_cookie); 229 dst = __inet6_csk_dst_check(sk, np->dst_cookie);
230 if (!dst) {
231 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
231 232
232 if (dst == NULL) { 233 if (!IS_ERR(dst))
233 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); 234 __inet6_csk_dst_store(sk, dst, NULL, NULL);
235 }
236 return dst;
237}
234 238
235 if (IS_ERR(dst)) { 239int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
236 sk->sk_err_soft = -PTR_ERR(dst); 240{
237 sk->sk_route_caps = 0; 241 struct sock *sk = skb->sk;
238 kfree_skb(skb); 242 struct ipv6_pinfo *np = inet6_sk(sk);
239 return PTR_ERR(dst); 243 struct flowi6 fl6;
240 } 244 struct dst_entry *dst;
245 int res;
241 246
242 __inet6_csk_dst_store(sk, dst, NULL, NULL); 247 dst = inet6_csk_route_socket(sk, &fl6);
248 if (IS_ERR(dst)) {
249 sk->sk_err_soft = -PTR_ERR(dst);
250 sk->sk_route_caps = 0;
251 kfree_skb(skb);
252 return PTR_ERR(dst);
243 } 253 }
244 254
245 rcu_read_lock(); 255 rcu_read_lock();
@@ -253,3 +263,16 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
253 return res; 263 return res;
254} 264}
255EXPORT_SYMBOL_GPL(inet6_csk_xmit); 265EXPORT_SYMBOL_GPL(inet6_csk_xmit);
266
267struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
268{
269 struct flowi6 fl6;
270 struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
271
272 if (IS_ERR(dst))
273 return NULL;
274 dst->ops->update_pmtu(dst, sk, NULL, mtu);
275
276 return inet6_csk_route_socket(sk, &fl6);
277}
278EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 74c21b924a79..13690d650c3e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -197,6 +197,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
197 table->tb6_id = id; 197 table->tb6_id = id;
198 table->tb6_root.leaf = net->ipv6.ip6_null_entry; 198 table->tb6_root.leaf = net->ipv6.ip6_null_entry;
199 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 199 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
200 inet_peer_base_init(&table->tb6_peers);
200 } 201 }
201 202
202 return table; 203 return table;
@@ -1349,8 +1350,8 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1349 if (w->leaf && fn->fn_flags & RTN_RTINFO) { 1350 if (w->leaf && fn->fn_flags & RTN_RTINFO) {
1350 int err; 1351 int err;
1351 1352
1352 if (w->count < w->skip) { 1353 if (w->skip) {
1353 w->count++; 1354 w->skip--;
1354 continue; 1355 continue;
1355 } 1356 }
1356 1357
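
The walker fix above makes resumed dumps consume w->skip directly, decrementing it once per already-delivered leaf instead of racing a separate counter against it. A small model of resumable iteration with a skip budget, illustrative only:

#include <stdio.h>

/* Resumable dump in the style of the fixed walker: each entry already
 * delivered in an earlier round consumes one unit of the skip budget,
 * and newly emitted entries are counted separately. */
static int dump(const int *items, int n, int *skip, int budget)
{
	int emitted = 0;

	for (int i = 0; i < n; i++) {
		if (*skip) {			/* seen in a previous round */
			(*skip)--;
			continue;
		}
		if (emitted == budget)		/* out of room; resume later */
			break;
		printf("emit %d\n", items[i]);
		emitted++;
	}
	return emitted;
}

int main(void)
{
	int items[] = { 10, 20, 30, 40, 50 };
	int total = 0, skip;

	skip = total;
	total += dump(items, 5, &skip, 2);	/* first round: 10, 20 */
	skip = total;
	total += dump(items, 5, &skip, 2);	/* resume:      30, 40 */
	return 0;
}
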
@@ -1633,6 +1634,7 @@ static int __net_init fib6_net_init(struct net *net)
1633 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; 1634 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1634 net->ipv6.fib6_main_tbl->tb6_root.fn_flags = 1635 net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1635 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 1636 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1637 inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
1636 1638
1637#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1639#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1638 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl), 1640 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -1643,6 +1645,7 @@ static int __net_init fib6_net_init(struct net *net)
1643 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; 1645 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1644 net->ipv6.fib6_local_tbl->tb6_root.fn_flags = 1646 net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1645 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 1647 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1648 inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
1646#endif 1649#endif
1647 fib6_tables_init(net); 1650 fib6_tables_init(net);
1648 1651
@@ -1666,8 +1669,10 @@ static void fib6_net_exit(struct net *net)
1666 del_timer_sync(&net->ipv6.ip6_fib_timer); 1669 del_timer_sync(&net->ipv6.ip6_fib_timer);
1667 1670
1668#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1671#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1672 inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
1669 kfree(net->ipv6.fib6_local_tbl); 1673 kfree(net->ipv6.fib6_local_tbl);
1670#endif 1674#endif
1675 inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
1671 kfree(net->ipv6.fib6_main_tbl); 1676 kfree(net->ipv6.fib6_main_tbl);
1672 kfree(net->ipv6.fib_table_hash); 1677 kfree(net->ipv6.fib_table_hash);
1673 kfree(net->ipv6.rt6_stats); 1678 kfree(net->ipv6.rt6_stats);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 21a15dfe4a9e..a52d864d562b 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -47,9 +47,16 @@
47 47
48 48
49 49
50inline int ip6_rcv_finish( struct sk_buff *skb) 50int ip6_rcv_finish(struct sk_buff *skb)
51{ 51{
52 if (skb_dst(skb) == NULL) 52 if (sysctl_ip_early_demux && !skb_dst(skb)) {
53 const struct inet6_protocol *ipprot;
54
55 ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
56 if (ipprot && ipprot->early_demux)
57 ipprot->early_demux(skb);
58 }
59 if (!skb_dst(skb))
53 ip6_route_input(skb); 60 ip6_route_input(skb);
54 61
55 return dst_input(skb); 62 return dst_input(skb);
@@ -168,13 +175,12 @@ drop:
168 175
169static int ip6_input_finish(struct sk_buff *skb) 176static int ip6_input_finish(struct sk_buff *skb)
170{ 177{
178 struct net *net = dev_net(skb_dst(skb)->dev);
171 const struct inet6_protocol *ipprot; 179 const struct inet6_protocol *ipprot;
180 struct inet6_dev *idev;
172 unsigned int nhoff; 181 unsigned int nhoff;
173 int nexthdr; 182 int nexthdr;
174 bool raw; 183 bool raw;
175 u8 hash;
176 struct inet6_dev *idev;
177 struct net *net = dev_net(skb_dst(skb)->dev);
178 184
179 /* 185 /*
180 * Parse extension headers 186 * Parse extension headers
@@ -189,9 +195,7 @@ resubmit:
189 nexthdr = skb_network_header(skb)[nhoff]; 195 nexthdr = skb_network_header(skb)[nhoff];
190 196
191 raw = raw6_local_deliver(skb, nexthdr); 197 raw = raw6_local_deliver(skb, nexthdr);
192 198 if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
193 hash = nexthdr & (MAX_INET_PROTOS - 1);
194 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
195 int ret; 199 int ret;
196 200
197 if (ipprot->flags & INET6_PROTO_FINAL) { 201 if (ipprot->flags & INET6_PROTO_FINAL) {
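
ip6_rcv_finish() now mirrors the IPv4 early-demux path: before paying for a routing lookup, it lets the L4 protocol try to match an established socket and attach that socket's cached dst. A hedged sketch of a protocol entry wired for early demux; the my_* names are hypothetical, tcp_v6_early_demux() is the in-tree user:

#include <linux/skbuff.h>
#include <net/protocol.h>

/* Runs from ip6_rcv_finish() under RCU, before ip6_route_input().
 * On a socket hit, a real implementation attaches the socket's cached
 * route, e.g. via skb_dst_set_noref(), so the lookup is skipped. */
static void my_early_demux(struct sk_buff *skb)
{
	/* look up an established socket for this flow here */
}

static int my_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder receive handler */
	return 0;
}

static const struct inet6_protocol my_protocol = {
	.early_demux	= my_early_demux,
	.handler	= my_rcv,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
/* registered once with inet6_add_protocol(&my_protocol, proto) */
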
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d19c53..5b2d63ed793e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -83,24 +83,12 @@ int ip6_local_out(struct sk_buff *skb)
83} 83}
84EXPORT_SYMBOL_GPL(ip6_local_out); 84EXPORT_SYMBOL_GPL(ip6_local_out);
85 85
86/* dev_loopback_xmit for use with netfilter. */
87static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
88{
89 skb_reset_mac_header(newskb);
90 __skb_pull(newskb, skb_network_offset(newskb));
91 newskb->pkt_type = PACKET_LOOPBACK;
92 newskb->ip_summed = CHECKSUM_UNNECESSARY;
93 WARN_ON(!skb_dst(newskb));
94
95 netif_rx_ni(newskb);
96 return 0;
97}
98
99static int ip6_finish_output2(struct sk_buff *skb) 86static int ip6_finish_output2(struct sk_buff *skb)
100{ 87{
101 struct dst_entry *dst = skb_dst(skb); 88 struct dst_entry *dst = skb_dst(skb);
102 struct net_device *dev = dst->dev; 89 struct net_device *dev = dst->dev;
103 struct neighbour *neigh; 90 struct neighbour *neigh;
91 struct rt6_info *rt;
104 92
105 skb->protocol = htons(ETH_P_IPV6); 93 skb->protocol = htons(ETH_P_IPV6);
106 skb->dev = dev; 94 skb->dev = dev;
@@ -121,7 +109,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
121 if (newskb) 109 if (newskb)
122 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, 110 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
123 newskb, NULL, newskb->dev, 111 newskb, NULL, newskb->dev,
124 ip6_dev_loopback_xmit); 112 dev_loopback_xmit);
125 113
126 if (ipv6_hdr(skb)->hop_limit == 0) { 114 if (ipv6_hdr(skb)->hop_limit == 0) {
127 IP6_INC_STATS(dev_net(dev), idev, 115 IP6_INC_STATS(dev_net(dev), idev,
@@ -136,9 +124,10 @@ static int ip6_finish_output2(struct sk_buff *skb)
136 } 124 }
137 125
138 rcu_read_lock(); 126 rcu_read_lock();
139 neigh = dst_get_neighbour_noref(dst); 127 rt = (struct rt6_info *) dst;
128 neigh = rt->n;
140 if (neigh) { 129 if (neigh) {
141 int res = neigh_output(neigh, skb); 130 int res = dst_neigh_output(dst, neigh, skb);
142 131
143 rcu_read_unlock(); 132 rcu_read_unlock();
144 return res; 133 return res;
@@ -463,6 +452,7 @@ int ip6_forward(struct sk_buff *skb)
463 */ 452 */
464 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { 453 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
465 struct in6_addr *target = NULL; 454 struct in6_addr *target = NULL;
455 struct inet_peer *peer;
466 struct rt6_info *rt; 456 struct rt6_info *rt;
467 457
468 /* 458 /*
@@ -476,14 +466,15 @@ int ip6_forward(struct sk_buff *skb)
476 else 466 else
477 target = &hdr->daddr; 467 target = &hdr->daddr;
478 468
479 if (!rt->rt6i_peer) 469 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
480 rt6_bind_peer(rt, 1);
481 470
482 /* Limit redirects both by destination (here) 471 /* Limit redirects both by destination (here)
483 and by source (inside ndisc_send_redirect) 472 and by source (inside ndisc_send_redirect)
484 */ 473 */
485 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 474 if (inet_peer_xrlim_allow(peer, 1*HZ))
486 ndisc_send_redirect(skb, target); 475 ndisc_send_redirect(skb, target);
476 if (peer)
477 inet_putpeer(peer);
487 } else { 478 } else {
488 int addrtype = ipv6_addr_type(&hdr->saddr); 479 int addrtype = ipv6_addr_type(&hdr->saddr);
489 480
@@ -604,12 +595,13 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
604 595
605 if (rt && !(rt->dst.flags & DST_NOPEER)) { 596 if (rt && !(rt->dst.flags & DST_NOPEER)) {
606 struct inet_peer *peer; 597 struct inet_peer *peer;
598 struct net *net;
607 599
608 if (!rt->rt6i_peer) 600 net = dev_net(rt->dst.dev);
609 rt6_bind_peer(rt, 1); 601 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
610 peer = rt->rt6i_peer;
611 if (peer) { 602 if (peer) {
612 fhdr->identification = htonl(inet_getid(peer, 0)); 603 fhdr->identification = htonl(inet_getid(peer, 0));
604 inet_putpeer(peer);
613 return; 605 return;
614 } 606 }
615 } 607 }
@@ -960,6 +952,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
960 struct net *net = sock_net(sk); 952 struct net *net = sock_net(sk);
961#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 953#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
962 struct neighbour *n; 954 struct neighbour *n;
955 struct rt6_info *rt;
963#endif 956#endif
964 int err; 957 int err;
965 958
@@ -988,7 +981,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
988 * dst entry of the nexthop router 981 * dst entry of the nexthop router
989 */ 982 */
990 rcu_read_lock(); 983 rcu_read_lock();
991 n = dst_get_neighbour_noref(*dst); 984 rt = (struct rt6_info *) *dst;
985 n = rt->n;
992 if (n && !(n->nud_state & NUD_VALID)) { 986 if (n && !(n->nud_state & NUD_VALID)) {
993 struct inet6_ifaddr *ifp; 987 struct inet6_ifaddr *ifp;
994 struct flowi6 fl_gw6; 988 struct flowi6 fl_gw6;
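
The rt6i_peer field is gone from the fast paths above; peers are now fetched on demand from the per-namespace base and released immediately after use. The lookup/use/put shape, extracted from the ip6_forward() hunk as a sketch:

#include <net/inetpeer.h>

/* inet_peer_xrlim_allow() tolerates a NULL peer (allocation failure
 * simply means "allow"), so only the put needs a NULL check. */
static bool redirect_allowed(struct net *net, struct rt6_info *rt)
{
	struct inet_peer *peer;
	bool ok;

	peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
	ok = inet_peer_xrlim_allow(peer, 1 * HZ);
	if (peer)
		inet_putpeer(peer);
	return ok;
}
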
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9015fad8d65..9a1d5fe6aef8 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -40,6 +40,7 @@
40#include <linux/rtnetlink.h> 40#include <linux/rtnetlink.h>
41#include <linux/netfilter_ipv6.h> 41#include <linux/netfilter_ipv6.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/hash.h>
43 44
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
45#include <linux/atomic.h> 46#include <linux/atomic.h>
@@ -70,11 +71,15 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
70#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) 71#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
71#define IPV6_TCLASS_SHIFT 20 72#define IPV6_TCLASS_SHIFT 20
72 73
73#define HASH_SIZE 32 74#define HASH_SIZE_SHIFT 5
75#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
74 76
75#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ 77static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 78{
77 (HASH_SIZE - 1)) 79 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
80
81 return hash_32(hash, HASH_SIZE_SHIFT);
82}
78 83
79static int ip6_tnl_dev_init(struct net_device *dev); 84static int ip6_tnl_dev_init(struct net_device *dev);
80static void ip6_tnl_dev_setup(struct net_device *dev); 85static void ip6_tnl_dev_setup(struct net_device *dev);
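
The old macro masked each XOR fold directly with HASH_SIZE - 1, so all entropy outside the low five bits of each address was discarded before the two results were combined; routing the combined fold through hash_32() mixes all 32 bits into the bucket index. A runnable user-space rendition of the new arithmetic, assuming the kernel's 32-bit golden-ratio prime of this era as the multiplier:

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE_SHIFT	5		/* 32 buckets, as above */

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash: golden-ratio prime, keep the top bits */
	return (val * 0x9e370001u) >> (32 - bits);
}

/* ipv6_addr_hash(): XOR fold of the four 32-bit address words */
static uint32_t addr_hash(const uint32_t a[4])
{
	return a[0] ^ a[1] ^ a[2] ^ a[3];
}

int main(void)
{
	uint32_t local[4]  = { 0xfe800000u, 0, 0, 1 };
	uint32_t remote[4] = { 0xfe800000u, 0, 0, 2 };
	uint32_t h = hash_32(addr_hash(local) ^ addr_hash(remote),
			     HASH_SIZE_SHIFT);

	printf("bucket %u of %u\n", h, 1u << HASH_SIZE_SHIFT);
	return 0;
}
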
@@ -166,12 +171,11 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
166static struct ip6_tnl * 171static struct ip6_tnl *
167ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local) 172ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
168{ 173{
169 unsigned int h0 = HASH(remote); 174 unsigned int hash = HASH(remote, local);
170 unsigned int h1 = HASH(local);
171 struct ip6_tnl *t; 175 struct ip6_tnl *t;
172 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 176 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
173 177
174 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) { 178 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
175 if (ipv6_addr_equal(local, &t->parms.laddr) && 179 if (ipv6_addr_equal(local, &t->parms.laddr) &&
176 ipv6_addr_equal(remote, &t->parms.raddr) && 180 ipv6_addr_equal(remote, &t->parms.raddr) &&
177 (t->dev->flags & IFF_UP)) 181 (t->dev->flags & IFF_UP))
@@ -205,7 +209,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
205 209
206 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { 210 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
207 prio = 1; 211 prio = 1;
208 h = HASH(remote) ^ HASH(local); 212 h = HASH(remote, local);
209 } 213 }
210 return &ip6n->tnls[prio][h]; 214 return &ip6n->tnls[prio][h];
211} 215}
@@ -252,7 +256,7 @@ static void ip6_dev_free(struct net_device *dev)
252} 256}
253 257
254/** 258/**
255 * ip6_tnl_create() - create a new tunnel 259 * ip6_tnl_create - create a new tunnel
256 * @p: tunnel parameters 260 * @p: tunnel parameters
257 * @pt: pointer to new tunnel 261 * @pt: pointer to new tunnel
258 * 262 *
@@ -550,6 +554,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
550 rel_type = ICMP_DEST_UNREACH; 554 rel_type = ICMP_DEST_UNREACH;
551 rel_code = ICMP_FRAG_NEEDED; 555 rel_code = ICMP_FRAG_NEEDED;
552 break; 556 break;
557 case NDISC_REDIRECT:
558 rel_type = ICMP_REDIRECT;
559 rel_code = ICMP_REDIR_HOST;
553 default: 560 default:
554 return 0; 561 return 0;
555 } 562 }
@@ -606,8 +613,10 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
606 if (rel_info > dst_mtu(skb_dst(skb2))) 613 if (rel_info > dst_mtu(skb_dst(skb2)))
607 goto out; 614 goto out;
608 615
609 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info); 616 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
610 } 617 }
618 if (rel_type == ICMP_REDIRECT)
619 skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
611 620
612 icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); 621 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
613 622
@@ -684,24 +693,50 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
684 IP6_ECN_set_ce(ipv6_hdr(skb)); 693 IP6_ECN_set_ce(ipv6_hdr(skb));
685} 694}
686 695
696static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
697 const struct in6_addr *laddr,
698 const struct in6_addr *raddr)
699{
700 struct ip6_tnl_parm *p = &t->parms;
701 int ltype = ipv6_addr_type(laddr);
702 int rtype = ipv6_addr_type(raddr);
703 __u32 flags = 0;
704
705 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
706 flags = IP6_TNL_F_CAP_PER_PACKET;
707 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
708 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
709 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
710 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
711 if (ltype&IPV6_ADDR_UNICAST)
712 flags |= IP6_TNL_F_CAP_XMIT;
713 if (rtype&IPV6_ADDR_UNICAST)
714 flags |= IP6_TNL_F_CAP_RCV;
715 }
716 return flags;
717}
718
687/* called with rcu_read_lock() */ 719/* called with rcu_read_lock() */
688static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) 720static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
721 const struct in6_addr *laddr,
722 const struct in6_addr *raddr)
689{ 723{
690 struct ip6_tnl_parm *p = &t->parms; 724 struct ip6_tnl_parm *p = &t->parms;
691 int ret = 0; 725 int ret = 0;
692 struct net *net = dev_net(t->dev); 726 struct net *net = dev_net(t->dev);
693 727
694 if (p->flags & IP6_TNL_F_CAP_RCV) { 728 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
729 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
730 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
695 struct net_device *ldev = NULL; 731 struct net_device *ldev = NULL;
696 732
697 if (p->link) 733 if (p->link)
698 ldev = dev_get_by_index_rcu(net, p->link); 734 ldev = dev_get_by_index_rcu(net, p->link);
699 735
700 if ((ipv6_addr_is_multicast(&p->laddr) || 736 if ((ipv6_addr_is_multicast(laddr) ||
701 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && 737 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
702 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) 738 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
703 ret = 1; 739 ret = 1;
704
705 } 740 }
706 return ret; 741 return ret;
707} 742}
@@ -740,7 +775,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
740 goto discard; 775 goto discard;
741 } 776 }
742 777
743 if (!ip6_tnl_rcv_ctl(t)) { 778 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
744 t->dev->stats.rx_dropped++; 779 t->dev->stats.rx_dropped++;
745 rcu_read_unlock(); 780 rcu_read_unlock();
746 goto discard; 781 goto discard;
@@ -921,7 +956,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
921 if (mtu < IPV6_MIN_MTU) 956 if (mtu < IPV6_MIN_MTU)
922 mtu = IPV6_MIN_MTU; 957 mtu = IPV6_MIN_MTU;
923 if (skb_dst(skb)) 958 if (skb_dst(skb))
924 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 959 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
925 if (skb->len > mtu) { 960 if (skb->len > mtu) {
926 *pmtu = mtu; 961 *pmtu = mtu;
927 err = -EMSGSIZE; 962 err = -EMSGSIZE;
@@ -1114,25 +1149,6 @@ tx_err:
1114 return NETDEV_TX_OK; 1149 return NETDEV_TX_OK;
1115} 1150}
1116 1151
1117static void ip6_tnl_set_cap(struct ip6_tnl *t)
1118{
1119 struct ip6_tnl_parm *p = &t->parms;
1120 int ltype = ipv6_addr_type(&p->laddr);
1121 int rtype = ipv6_addr_type(&p->raddr);
1122
1123 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
1124
1125 if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1126 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1127 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
1128 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
1129 if (ltype&IPV6_ADDR_UNICAST)
1130 p->flags |= IP6_TNL_F_CAP_XMIT;
1131 if (rtype&IPV6_ADDR_UNICAST)
1132 p->flags |= IP6_TNL_F_CAP_RCV;
1133 }
1134}
1135
1136static void ip6_tnl_link_config(struct ip6_tnl *t) 1152static void ip6_tnl_link_config(struct ip6_tnl *t)
1137{ 1153{
1138 struct net_device *dev = t->dev; 1154 struct net_device *dev = t->dev;
@@ -1153,7 +1169,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1153 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) 1169 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1154 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; 1170 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1155 1171
1156 ip6_tnl_set_cap(t); 1172 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1173 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1157 1174
1158 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) 1175 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1159 dev->flags |= IFF_POINTOPOINT; 1176 dev->flags |= IFF_POINTOPOINT;
@@ -1438,6 +1455,9 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1438 1455
1439 t->parms.proto = IPPROTO_IPV6; 1456 t->parms.proto = IPPROTO_IPV6;
1440 dev_hold(dev); 1457 dev_hold(dev);
1458
1459 ip6_tnl_link_config(t);
1460
1441 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1461 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1442 return 0; 1462 return 0;
1443} 1463}
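
Replacing ip6_tnl_set_cap() with ip6_tnl_get_cap() turns the capability check into a pure function of an address pair, so wildcard tunnels (IP6_TNL_F_CAP_PER_PACKET) can defer validation to receive time using the packet's own addresses. A condensed sketch of the in-file receive-side decision:

#include <net/ip6_tunnel.h>

static bool can_rcv(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct ip6_tnl_parm *p = &t->parms;

	/* fixed tunnels keep the flag cached at link-config time;
	 * wildcard tunnels recompute it per packet */
	return (p->flags & IP6_TNL_F_CAP_RCV) ||
	       ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
		(ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV));
}
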
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 461e47c8e956..4532973f0dd4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2104,8 +2104,9 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2104 if (c->mf6c_parent >= MAXMIFS) 2104 if (c->mf6c_parent >= MAXMIFS)
2105 return -ENOENT; 2105 return -ENOENT;
2106 2106
2107 if (MIF_EXISTS(mrt, c->mf6c_parent)) 2107 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2108 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); 2108 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2109 return -EMSGSIZE;
2109 2110
2110 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2111 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2111 2112
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5cb75bfe45b1..7af5aee75d98 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -46,6 +46,7 @@
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/vmalloc.h> 47#include <linux/vmalloc.h>
48#include <linux/rtnetlink.h> 48#include <linux/rtnetlink.h>
49#include <net/ip6_route.h>
49#include <net/icmp.h> 50#include <net/icmp.h>
50#include <net/ipv6.h> 51#include <net/ipv6.h>
51#include <net/protocol.h> 52#include <net/protocol.h>
@@ -63,7 +64,9 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
63 (struct ip_comp_hdr *)(skb->data + offset); 64 (struct ip_comp_hdr *)(skb->data + offset);
64 struct xfrm_state *x; 65 struct xfrm_state *x;
65 66
66 if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG) 67 if (type != ICMPV6_DEST_UNREACH &&
68 type != ICMPV6_PKT_TOOBIG &&
69 type != NDISC_REDIRECT)
67 return; 70 return;
68 71
69 spi = htonl(ntohs(ipcomph->cpi)); 72 spi = htonl(ntohs(ipcomph->cpi));
@@ -72,8 +75,10 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
72 if (!x) 75 if (!x)
73 return; 76 return;
74 77
75 pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n", 78 if (type == NDISC_REDIRECT)
76 spi, &iph->daddr); 79 ip6_redirect(skb, net, 0, 0);
80 else
81 ip6_update_pmtu(skb, net, info, 0, 0);
77 xfrm_state_put(x); 82 xfrm_state_put(x);
78} 83}
79 84
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6d0f5dc8e3a6..92f8e48e4ba4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -211,6 +211,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
211 struct ipv6_mc_socklist __rcu **lnk; 211 struct ipv6_mc_socklist __rcu **lnk;
212 struct net *net = sock_net(sk); 212 struct net *net = sock_net(sk);
213 213
214 if (!ipv6_addr_is_multicast(addr))
215 return -EINVAL;
216
214 spin_lock(&ipv6_sk_mc_lock); 217 spin_lock(&ipv6_sk_mc_lock);
215 for (lnk = &np->ipv6_mc_list; 218 for (lnk = &np->ipv6_mc_list;
216 (mc_lst = rcu_dereference_protected(*lnk, 219 (mc_lst = rcu_dereference_protected(*lnk,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 54f62d3b8dd6..ff36194a71aa 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -143,40 +143,6 @@ struct neigh_table nd_tbl = {
143 .gc_thresh3 = 1024, 143 .gc_thresh3 = 1024,
144}; 144};
145 145
146/* ND options */
147struct ndisc_options {
148 struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
149#ifdef CONFIG_IPV6_ROUTE_INFO
150 struct nd_opt_hdr *nd_opts_ri;
151 struct nd_opt_hdr *nd_opts_ri_end;
152#endif
153 struct nd_opt_hdr *nd_useropts;
154 struct nd_opt_hdr *nd_useropts_end;
155};
156
157#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
158#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
159#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
160#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
161#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
162#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
163
164#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
165
166/*
167 * Return the padding between the option length and the start of the
168 * link addr. Currently only IP-over-InfiniBand needs this, although
169 * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
170 * also need a pad of 2.
171 */
172static int ndisc_addr_option_pad(unsigned short type)
173{
174 switch (type) {
175 case ARPHRD_INFINIBAND: return 2;
176 default: return 0;
177 }
178}
179
180static inline int ndisc_opt_addr_space(struct net_device *dev) 146static inline int ndisc_opt_addr_space(struct net_device *dev)
181{ 147{
182 return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type)); 148 return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type));
@@ -233,8 +199,8 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
233 return cur <= end && ndisc_is_useropt(cur) ? cur : NULL; 199 return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
234} 200}
235 201
236static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, 202struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
237 struct ndisc_options *ndopts) 203 struct ndisc_options *ndopts)
238{ 204{
239 struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt; 205 struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt;
240 206
@@ -297,17 +263,6 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
297 return ndopts; 263 return ndopts;
298} 264}
299 265
300static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
301 struct net_device *dev)
302{
303 u8 *lladdr = (u8 *)(p + 1);
304 int lladdrlen = p->nd_opt_len << 3;
305 int prepad = ndisc_addr_option_pad(dev->type);
306 if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
307 return NULL;
308 return lladdr + prepad;
309}
310
311int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir) 266int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
312{ 267{
313 switch (dev->type) { 268 switch (dev->type) {
@@ -1379,16 +1334,6 @@ out:
1379 1334
1380static void ndisc_redirect_rcv(struct sk_buff *skb) 1335static void ndisc_redirect_rcv(struct sk_buff *skb)
1381{ 1336{
1382 struct inet6_dev *in6_dev;
1383 struct icmp6hdr *icmph;
1384 const struct in6_addr *dest;
1385 const struct in6_addr *target; /* new first hop to destination */
1386 struct neighbour *neigh;
1387 int on_link = 0;
1388 struct ndisc_options ndopts;
1389 int optlen;
1390 u8 *lladdr = NULL;
1391
1392#ifdef CONFIG_IPV6_NDISC_NODETYPE 1337#ifdef CONFIG_IPV6_NDISC_NODETYPE
1393 switch (skb->ndisc_nodetype) { 1338 switch (skb->ndisc_nodetype) {
1394 case NDISC_NODETYPE_HOST: 1339 case NDISC_NODETYPE_HOST:
@@ -1405,65 +1350,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1405 return; 1350 return;
1406 } 1351 }
1407 1352
1408 optlen = skb->tail - skb->transport_header; 1353 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
1409 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1410
1411 if (optlen < 0) {
1412 ND_PRINTK(2, warn, "Redirect: packet too short\n");
1413 return;
1414 }
1415
1416 icmph = icmp6_hdr(skb);
1417 target = (const struct in6_addr *) (icmph + 1);
1418 dest = target + 1;
1419
1420 if (ipv6_addr_is_multicast(dest)) {
1421 ND_PRINTK(2, warn,
1422 "Redirect: destination address is multicast\n");
1423 return;
1424 }
1425
1426 if (ipv6_addr_equal(dest, target)) {
1427 on_link = 1;
1428 } else if (ipv6_addr_type(target) !=
1429 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1430 ND_PRINTK(2, warn,
1431 "Redirect: target address is not link-local unicast\n");
1432 return;
1433 }
1434
1435 in6_dev = __in6_dev_get(skb->dev);
1436 if (!in6_dev)
1437 return;
1438 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1439 return;
1440
1441 /* RFC2461 8.1:
1442 * The IP source address of the Redirect MUST be the same as the current
1443 * first-hop router for the specified ICMP Destination Address.
1444 */
1445
1446 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
1447 ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
1448 return;
1449 }
1450 if (ndopts.nd_opts_tgt_lladdr) {
1451 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1452 skb->dev);
1453 if (!lladdr) {
1454 ND_PRINTK(2, warn,
1455 "Redirect: invalid link-layer address length\n");
1456 return;
1457 }
1458 }
1459
1460 neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
1461 if (neigh) {
1462 rt6_redirect(dest, &ipv6_hdr(skb)->daddr,
1463 &ipv6_hdr(skb)->saddr, neigh, lladdr,
1464 on_link);
1465 neigh_release(neigh);
1466 }
1467} 1354}
1468 1355
1469void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1356void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1472,6 +1359,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1472 struct net *net = dev_net(dev); 1359 struct net *net = dev_net(dev);
1473 struct sock *sk = net->ipv6.ndisc_sk; 1360 struct sock *sk = net->ipv6.ndisc_sk;
1474 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1361 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1362 struct inet_peer *peer;
1475 struct sk_buff *buff; 1363 struct sk_buff *buff;
1476 struct icmp6hdr *icmph; 1364 struct icmp6hdr *icmph;
1477 struct in6_addr saddr_buf; 1365 struct in6_addr saddr_buf;
@@ -1485,6 +1373,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1485 int rd_len; 1373 int rd_len;
1486 int err; 1374 int err;
1487 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1375 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
1376 bool ret;
1488 1377
1489 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { 1378 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
1490 ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n", 1379 ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
@@ -1518,9 +1407,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1518 "Redirect: destination is not a neighbour\n"); 1407 "Redirect: destination is not a neighbour\n");
1519 goto release; 1408 goto release;
1520 } 1409 }
1521 if (!rt->rt6i_peer) 1410 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
1522 rt6_bind_peer(rt, 1); 1411 ret = inet_peer_xrlim_allow(peer, 1*HZ);
1523 if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 1412 if (peer)
1413 inet_putpeer(peer);
1414 if (!ret)
1524 goto release; 1415 goto release;
1525 1416
1526 if (dev->addr_len) { 1417 if (dev->addr_len) {
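
ndisc_redirect_rcv() keeps only the node-type sanity checks and forwards the packet to icmpv6_notify(), which runs the registered protocol error handlers; the actual route update moves to rt6_do_redirect() in route.c, which is why ndisc_parse_options() loses its static and the option helpers leave this file. A hedged sketch of the consumer side, assuming the helpers are now visible from net/ndisc.h:

#include <net/ndisc.h>

static void consume_redirect_opts(struct sk_buff *skb, u8 *opt, int optlen)
{
	struct ndisc_options ndopts;
	u8 *lladdr = NULL;

	if (!ndisc_parse_options(opt, optlen, &ndopts))
		return;			/* malformed ND options: drop */

	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr)
			return;		/* wrong length for this link type */
	}
	/* ... neigh_update() with lladdr, then rewrite the route ... */
}
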
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3224ef90a21a..4794f96cf2e0 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -143,11 +143,11 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
143 return NF_ACCEPT; 143 return NF_ACCEPT;
144} 144}
145 145
146static unsigned int ipv6_confirm(unsigned int hooknum, 146static unsigned int ipv6_helper(unsigned int hooknum,
147 struct sk_buff *skb, 147 struct sk_buff *skb,
148 const struct net_device *in, 148 const struct net_device *in,
149 const struct net_device *out, 149 const struct net_device *out,
150 int (*okfn)(struct sk_buff *)) 150 int (*okfn)(struct sk_buff *))
151{ 151{
152 struct nf_conn *ct; 152 struct nf_conn *ct;
153 const struct nf_conn_help *help; 153 const struct nf_conn_help *help;
@@ -161,15 +161,15 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
161 /* This is where we call the helper: as the packet goes out. */ 161 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 162 ct = nf_ct_get(skb, &ctinfo);
163 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 163 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
164 goto out; 164 return NF_ACCEPT;
165 165
166 help = nfct_help(ct); 166 help = nfct_help(ct);
167 if (!help) 167 if (!help)
168 goto out; 168 return NF_ACCEPT;
169 /* rcu_read_lock()ed by nf_hook_slow */ 169 /* rcu_read_lock()ed by nf_hook_slow */
170 helper = rcu_dereference(help->helper); 170 helper = rcu_dereference(help->helper);
171 if (!helper) 171 if (!helper)
172 goto out; 172 return NF_ACCEPT;
173 173
174 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, 174 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
175 skb->len - extoff); 175 skb->len - extoff);
@@ -179,12 +179,19 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
179 } 179 }
180 180
181 ret = helper->help(skb, protoff, ct, ctinfo); 181 ret = helper->help(skb, protoff, ct, ctinfo);
182 if (ret != NF_ACCEPT) { 182 if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
183 nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL, 183 nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
184 "nf_ct_%s: dropping packet", helper->name); 184 "nf_ct_%s: dropping packet", helper->name);
185 return ret;
186 } 185 }
187out: 186 return ret;
187}
188
189static unsigned int ipv6_confirm(unsigned int hooknum,
190 struct sk_buff *skb,
191 const struct net_device *in,
192 const struct net_device *out,
193 int (*okfn)(struct sk_buff *))
194{
188 /* We've seen it coming out the other side: confirm it */ 195 /* We've seen it coming out the other side: confirm it */
189 return nf_conntrack_confirm(skb); 196 return nf_conntrack_confirm(skb);
190} 197}
@@ -254,6 +261,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
254 .priority = NF_IP6_PRI_CONNTRACK, 261 .priority = NF_IP6_PRI_CONNTRACK,
255 }, 262 },
256 { 263 {
264 .hook = ipv6_helper,
265 .owner = THIS_MODULE,
266 .pf = NFPROTO_IPV6,
267 .hooknum = NF_INET_POST_ROUTING,
268 .priority = NF_IP6_PRI_CONNTRACK_HELPER,
269 },
270 {
257 .hook = ipv6_confirm, 271 .hook = ipv6_confirm,
258 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
259 .pf = NFPROTO_IPV6, 273 .pf = NFPROTO_IPV6,
@@ -261,6 +275,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
261 .priority = NF_IP6_PRI_LAST, 275 .priority = NF_IP6_PRI_LAST,
262 }, 276 },
263 { 277 {
278 .hook = ipv6_helper,
279 .owner = THIS_MODULE,
280 .pf = NFPROTO_IPV6,
281 .hooknum = NF_INET_LOCAL_IN,
282 .priority = NF_IP6_PRI_CONNTRACK_HELPER,
283 },
284 {
264 .hook = ipv6_confirm, 285 .hook = ipv6_confirm,
265 .owner = THIS_MODULE, 286 .owner = THIS_MODULE,
266 .pf = NFPROTO_IPV6, 287 .pf = NFPROTO_IPV6,
@@ -333,37 +354,75 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
333MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
334MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); 355MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
335 356
336static int __init nf_conntrack_l3proto_ipv6_init(void) 357static int ipv6_net_init(struct net *net)
337{ 358{
338 int ret = 0; 359 int ret = 0;
339 360
340 need_conntrack(); 361 ret = nf_conntrack_l4proto_register(net,
341 nf_defrag_ipv6_enable(); 362 &nf_conntrack_l4proto_tcp6);
342
343 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
344 if (ret < 0) { 363 if (ret < 0) {
345 pr_err("nf_conntrack_ipv6: can't register tcp.\n"); 364 printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n");
346 return ret; 365 goto out;
347 } 366 }
348 367 ret = nf_conntrack_l4proto_register(net,
349 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6); 368 &nf_conntrack_l4proto_udp6);
350 if (ret < 0) { 369 if (ret < 0) {
351 pr_err("nf_conntrack_ipv6: can't register udp.\n"); 370 printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n");
352 goto cleanup_tcp; 371 goto cleanup_tcp6;
353 } 372 }
354 373 ret = nf_conntrack_l4proto_register(net,
355 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6); 374 &nf_conntrack_l4proto_icmpv6);
356 if (ret < 0) { 375 if (ret < 0) {
357 pr_err("nf_conntrack_ipv6: can't register icmpv6.\n"); 376 printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n");
358 goto cleanup_udp; 377 goto cleanup_udp6;
359 } 378 }
360 379 ret = nf_conntrack_l3proto_register(net,
361 ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6); 380 &nf_conntrack_l3proto_ipv6);
362 if (ret < 0) { 381 if (ret < 0) {
363 pr_err("nf_conntrack_ipv6: can't register ipv6\n"); 382 printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n");
364 goto cleanup_icmpv6; 383 goto cleanup_icmpv6;
365 } 384 }
385 return 0;
386 cleanup_icmpv6:
387 nf_conntrack_l4proto_unregister(net,
388 &nf_conntrack_l4proto_icmpv6);
389 cleanup_udp6:
390 nf_conntrack_l4proto_unregister(net,
391 &nf_conntrack_l4proto_udp6);
392 cleanup_tcp6:
393 nf_conntrack_l4proto_unregister(net,
394 &nf_conntrack_l4proto_tcp6);
395 out:
396 return ret;
397}
366 398
399static void ipv6_net_exit(struct net *net)
400{
401 nf_conntrack_l3proto_unregister(net,
402 &nf_conntrack_l3proto_ipv6);
403 nf_conntrack_l4proto_unregister(net,
404 &nf_conntrack_l4proto_icmpv6);
405 nf_conntrack_l4proto_unregister(net,
406 &nf_conntrack_l4proto_udp6);
407 nf_conntrack_l4proto_unregister(net,
408 &nf_conntrack_l4proto_tcp6);
409}
410
411static struct pernet_operations ipv6_net_ops = {
412 .init = ipv6_net_init,
413 .exit = ipv6_net_exit,
414};
415
416static int __init nf_conntrack_l3proto_ipv6_init(void)
417{
418 int ret = 0;
419
420 need_conntrack();
421 nf_defrag_ipv6_enable();
422
423 ret = register_pernet_subsys(&ipv6_net_ops);
424 if (ret < 0)
425 goto cleanup_pernet;
367 ret = nf_register_hooks(ipv6_conntrack_ops, 426 ret = nf_register_hooks(ipv6_conntrack_ops,
368 ARRAY_SIZE(ipv6_conntrack_ops)); 427 ARRAY_SIZE(ipv6_conntrack_ops));
369 if (ret < 0) { 428 if (ret < 0) {
@@ -374,13 +433,8 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
374 return ret; 433 return ret;
375 434
376 cleanup_ipv6: 435 cleanup_ipv6:
377 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); 436 unregister_pernet_subsys(&ipv6_net_ops);
378 cleanup_icmpv6: 437 cleanup_pernet:
379 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
380 cleanup_udp:
381 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
382 cleanup_tcp:
383 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
384 return ret; 438 return ret;
385} 439}
386 440
@@ -388,10 +442,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void)
388{ 442{
389 synchronize_net(); 443 synchronize_net();
390 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); 444 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
391 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); 445 unregister_pernet_subsys(&ipv6_net_ops);
392 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
393 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
394 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
395} 446}
396 447
397module_init(nf_conntrack_l3proto_ipv6_init); 448module_init(nf_conntrack_l3proto_ipv6_init);
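
Registering the conntrack protocols per network namespace is what lets each netns carry its own timeouts and sysctl tables. The generic shape of the pernet pattern the module now follows, as a sketch:

#include <net/net_namespace.h>

static int __net_init my_net_init(struct net *net)
{
	/* allocate and register this namespace's state */
	return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
	/* tear the namespace's state back down */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
};

static int __init my_module_init(void)
{
	/* .init runs for every existing and future namespace */
	return register_pernet_subsys(&my_net_ops);
}
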
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3e81904fbbcd..2d54b2061d68 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -29,6 +29,11 @@
29 29
30static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ; 30static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
31 31
32static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
33{
34 return &net->ct.nf_ct_proto.icmpv6;
35}
36
32static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 37static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
33 unsigned int dataoff, 38 unsigned int dataoff,
34 struct nf_conntrack_tuple *tuple) 39 struct nf_conntrack_tuple *tuple)
@@ -90,7 +95,7 @@ static int icmpv6_print_tuple(struct seq_file *s,
90 95
91static unsigned int *icmpv6_get_timeouts(struct net *net) 96static unsigned int *icmpv6_get_timeouts(struct net *net)
92{ 97{
93 return &nf_ct_icmpv6_timeout; 98 return &icmpv6_pernet(net)->timeout;
94} 99}
95 100
96/* Returns verdict for packet, or -1 for invalid. */ 101/* Returns verdict for packet, or -1 for invalid. */
@@ -281,16 +286,18 @@ static int icmpv6_nlattr_tuple_size(void)
281#include <linux/netfilter/nfnetlink.h> 286#include <linux/netfilter/nfnetlink.h>
282#include <linux/netfilter/nfnetlink_cttimeout.h> 287#include <linux/netfilter/nfnetlink_cttimeout.h>
283 288
284static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 289static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
290 struct net *net, void *data)
285{ 291{
286 unsigned int *timeout = data; 292 unsigned int *timeout = data;
293 struct nf_icmp_net *in = icmpv6_pernet(net);
287 294
288 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { 295 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
289 *timeout = 296 *timeout =
290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; 297 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
291 } else { 298 } else {
292 /* Set default ICMPv6 timeout. */ 299 /* Set default ICMPv6 timeout. */
293 *timeout = nf_ct_icmpv6_timeout; 300 *timeout = in->timeout;
294 } 301 }
295 return 0; 302 return 0;
296} 303}
@@ -315,11 +322,9 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 322#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
316 323
317#ifdef CONFIG_SYSCTL 324#ifdef CONFIG_SYSCTL
318static struct ctl_table_header *icmpv6_sysctl_header;
319static struct ctl_table icmpv6_sysctl_table[] = { 325static struct ctl_table icmpv6_sysctl_table[] = {
320 { 326 {
321 .procname = "nf_conntrack_icmpv6_timeout", 327 .procname = "nf_conntrack_icmpv6_timeout",
322 .data = &nf_ct_icmpv6_timeout,
323 .maxlen = sizeof(unsigned int), 328 .maxlen = sizeof(unsigned int),
324 .mode = 0644, 329 .mode = 0644,
325 .proc_handler = proc_dointvec_jiffies, 330 .proc_handler = proc_dointvec_jiffies,
@@ -328,6 +333,36 @@ static struct ctl_table icmpv6_sysctl_table[] = {
328}; 333};
329#endif /* CONFIG_SYSCTL */ 334#endif /* CONFIG_SYSCTL */
330 335
336static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
337 struct nf_icmp_net *in)
338{
339#ifdef CONFIG_SYSCTL
340 pn->ctl_table = kmemdup(icmpv6_sysctl_table,
341 sizeof(icmpv6_sysctl_table),
342 GFP_KERNEL);
343 if (!pn->ctl_table)
344 return -ENOMEM;
345
346 pn->ctl_table[0].data = &in->timeout;
347#endif
348 return 0;
349}
350
351static int icmpv6_init_net(struct net *net, u_int16_t proto)
352{
353 struct nf_icmp_net *in = icmpv6_pernet(net);
354 struct nf_proto_net *pn = &in->pn;
355
356 in->timeout = nf_ct_icmpv6_timeout;
357
358 return icmpv6_kmemdup_sysctl_table(pn, in);
359}
360
361static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
362{
363 return &net->ct.nf_ct_proto.icmpv6.pn;
364}
365
331struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = 366struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
332{ 367{
333 .l3proto = PF_INET6, 368 .l3proto = PF_INET6,
@@ -355,8 +390,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
355 .nla_policy = icmpv6_timeout_nla_policy, 390 .nla_policy = icmpv6_timeout_nla_policy,
356 }, 391 },
357#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 392#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
358#ifdef CONFIG_SYSCTL 393 .init_net = icmpv6_init_net,
359 .ctl_table_header = &icmpv6_sysctl_header, 394 .get_net_proto = icmpv6_get_net_proto,
360 .ctl_table = icmpv6_sysctl_table,
361#endif
362}; 395};
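
A single static ctl_table has exactly one .data pointer, which is why the old global nf_ct_icmpv6_timeout could never be namespaced; kmemdup()ing the table per netns lets each copy point .data at that namespace's nf_icmp_net. The pattern in generic form, error handling as in the patch:

#include <linux/slab.h>
#include <linux/sysctl.h>

static struct ctl_table my_sysctl_table[] = {
	{
		.procname	= "my_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int my_dup_sysctl(struct ctl_table **slot, unsigned int *timeout)
{
	*slot = kmemdup(my_sysctl_table, sizeof(my_sysctl_table),
			GFP_KERNEL);
	if (!*slot)
		return -ENOMEM;
	(*slot)[0].data = timeout;	/* per-netns backing store */
	return 0;
}
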
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9a7978fdc02a..053082dfc93e 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -29,9 +29,7 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
29 29
30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) 30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
31{ 31{
32 int hash = protocol & (MAX_INET_PROTOS - 1); 32 return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
33
34 return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
35 NULL, prot) ? 0 : -1; 33 NULL, prot) ? 0 : -1;
36} 34}
37EXPORT_SYMBOL(inet6_add_protocol); 35EXPORT_SYMBOL(inet6_add_protocol);
@@ -42,9 +40,9 @@ EXPORT_SYMBOL(inet6_add_protocol);
42 40
43int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) 41int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
44{ 42{
45 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 43 int ret;
46 44
47 ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], 45 ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
48 prot, NULL) == prot) ? 0 : -1; 46 prot, NULL) == prot) ? 0 : -1;
49 47
50 synchronize_net(); 48 synchronize_net();
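
With inet6_protos[] holding all 256 slots, the protocol number indexes the array directly and the & (MAX_INET_PROTOS - 1) masking disappears; registration stays lock-free because cmpxchg() only claims a NULL slot and only releases a slot that still holds our pointer. The same discipline as a runnable C11 demo:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_PROTOS 256

static _Atomic(const void *) protos[MAX_PROTOS];

static int add_protocol(const void *prot, unsigned char num)
{
	const void *expected = NULL;

	/* claim only an empty slot */
	return atomic_compare_exchange_strong(&protos[num], &expected,
					      prot) ? 0 : -1;
}

static int del_protocol(const void *prot, unsigned char num)
{
	const void *expected = prot;

	/* release only if we still own the slot */
	return atomic_compare_exchange_strong(&protos[num], &expected,
					      NULL) ? 0 : -1;
}

int main(void)
{
	int p;

	printf("add: %d\n", add_protocol(&p, 6));	/* 0  */
	printf("dup: %d\n", add_protocol(&p, 6));	/* -1 */
	printf("del: %d\n", del_protocol(&p, 6));	/* 0  */
	return 0;
}
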
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 93d69836fded..ef0579d5bca6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
165 saddr = &ipv6_hdr(skb)->saddr; 165 saddr = &ipv6_hdr(skb)->saddr;
166 daddr = saddr + 1; 166 daddr = saddr + 1;
167 167
168 hash = nexthdr & (MAX_INET_PROTOS - 1); 168 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
169 169
170 read_lock(&raw_v6_hashinfo.lock); 170 read_lock(&raw_v6_hashinfo.lock);
171 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 171 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
@@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
229{ 229{
230 struct sock *raw_sk; 230 struct sock *raw_sk;
231 231
232 raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); 232 raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
233 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) 233 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
234 raw_sk = NULL; 234 raw_sk = NULL;
235 235
@@ -328,9 +328,12 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
328 return; 328 return;
329 329
330 harderr = icmpv6_err_convert(type, code, &err); 330 harderr = icmpv6_err_convert(type, code, &err);
331 if (type == ICMPV6_PKT_TOOBIG) 331 if (type == ICMPV6_PKT_TOOBIG) {
332 ip6_sk_update_pmtu(skb, sk, info);
332 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); 333 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
333 334 }
335 if (type == NDISC_REDIRECT)
336 ip6_sk_redirect(skb, sk);
334 if (np->recverr) { 337 if (np->recverr) {
335 u8 *payload = skb->data; 338 u8 *payload = skb->data;
336 if (!inet->hdrincl) 339 if (!inet->hdrincl)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 999a982ad3fd..8e80fd279100 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -78,7 +78,10 @@ static int ip6_dst_gc(struct dst_ops *ops);
78static int ip6_pkt_discard(struct sk_buff *skb); 78static int ip6_pkt_discard(struct sk_buff *skb);
79static int ip6_pkt_discard_out(struct sk_buff *skb); 79static int ip6_pkt_discard_out(struct sk_buff *skb);
80static void ip6_link_failure(struct sk_buff *skb); 80static void ip6_link_failure(struct sk_buff *skb);
81static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 81static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
82 struct sk_buff *skb, u32 mtu);
83static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
84 struct sk_buff *skb);
82 85
83#ifdef CONFIG_IPV6_ROUTE_INFO 86#ifdef CONFIG_IPV6_ROUTE_INFO
84static struct rt6_info *rt6_add_route_info(struct net *net, 87static struct rt6_info *rt6_add_route_info(struct net *net,
@@ -99,10 +102,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
99 if (!(rt->dst.flags & DST_HOST)) 102 if (!(rt->dst.flags & DST_HOST))
100 return NULL; 103 return NULL;
101 104
102 if (!rt->rt6i_peer) 105 peer = rt6_get_peer_create(rt);
103 rt6_bind_peer(rt, 1);
104
105 peer = rt->rt6i_peer;
106 if (peer) { 106 if (peer) {
107 u32 *old_p = __DST_METRICS_PTR(old); 107 u32 *old_p = __DST_METRICS_PTR(old);
108 unsigned long prev, new; 108 unsigned long prev, new;
@@ -123,21 +123,27 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
123 return p; 123 return p;
124} 124}
125 125
126static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr) 126static inline const void *choose_neigh_daddr(struct rt6_info *rt,
127 struct sk_buff *skb,
128 const void *daddr)
127{ 129{
128 struct in6_addr *p = &rt->rt6i_gateway; 130 struct in6_addr *p = &rt->rt6i_gateway;
129 131
130 if (!ipv6_addr_any(p)) 132 if (!ipv6_addr_any(p))
131 return (const void *) p; 133 return (const void *) p;
134 else if (skb)
135 return &ipv6_hdr(skb)->daddr;
132 return daddr; 136 return daddr;
133} 137}
134 138
135static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) 139static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
140 struct sk_buff *skb,
141 const void *daddr)
136{ 142{
137 struct rt6_info *rt = (struct rt6_info *) dst; 143 struct rt6_info *rt = (struct rt6_info *) dst;
138 struct neighbour *n; 144 struct neighbour *n;
139 145
140 daddr = choose_neigh_daddr(rt, daddr); 146 daddr = choose_neigh_daddr(rt, skb, daddr);
141 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 147 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
142 if (n) 148 if (n)
143 return n; 149 return n;
@@ -152,7 +158,7 @@ static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
152 if (IS_ERR(n)) 158 if (IS_ERR(n))
153 return PTR_ERR(n); 159 return PTR_ERR(n);
154 } 160 }
155 dst_set_neighbour(&rt->dst, n); 161 rt->n = n;
156 162
157 return 0; 163 return 0;
158} 164}
@@ -171,6 +177,7 @@ static struct dst_ops ip6_dst_ops_template = {
171 .negative_advice = ip6_negative_advice, 177 .negative_advice = ip6_negative_advice,
172 .link_failure = ip6_link_failure, 178 .link_failure = ip6_link_failure,
173 .update_pmtu = ip6_rt_update_pmtu, 179 .update_pmtu = ip6_rt_update_pmtu,
180 .redirect = rt6_do_redirect,
174 .local_out = __ip6_local_out, 181 .local_out = __ip6_local_out,
175 .neigh_lookup = ip6_neigh_lookup, 182 .neigh_lookup = ip6_neigh_lookup,
176}; 183};
@@ -182,7 +189,13 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
182 return mtu ? : dst->dev->mtu; 189 return mtu ? : dst->dev->mtu;
183} 190}
184 191
185static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 192static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
193 struct sk_buff *skb, u32 mtu)
194{
195}
196
197static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
198 struct sk_buff *skb)
186{ 199{
187} 200}
188 201
@@ -200,6 +213,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
200 .mtu = ip6_blackhole_mtu, 213 .mtu = ip6_blackhole_mtu,
201 .default_advmss = ip6_default_advmss, 214 .default_advmss = ip6_default_advmss,
202 .update_pmtu = ip6_rt_blackhole_update_pmtu, 215 .update_pmtu = ip6_rt_blackhole_update_pmtu,
216 .redirect = ip6_rt_blackhole_redirect,
203 .cow_metrics = ip6_rt_blackhole_cow_metrics, 217 .cow_metrics = ip6_rt_blackhole_cow_metrics,
204 .neigh_lookup = ip6_neigh_lookup, 218 .neigh_lookup = ip6_neigh_lookup,
205}; 219};
@@ -261,16 +275,20 @@ static struct rt6_info ip6_blk_hole_entry_template = {
261#endif 275#endif
262 276
263/* allocate dst with ip6_dst_ops */ 277/* allocate dst with ip6_dst_ops */
264static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, 278static inline struct rt6_info *ip6_dst_alloc(struct net *net,
265 struct net_device *dev, 279 struct net_device *dev,
266 int flags) 280 int flags,
281 struct fib6_table *table)
267{ 282{
268 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); 283 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
284 0, DST_OBSOLETE_NONE, flags);
269 285
270 if (rt) 286 if (rt) {
271 memset(&rt->rt6i_table, 0, 287 struct dst_entry *dst = &rt->dst;
272 sizeof(*rt) - sizeof(struct dst_entry));
273 288
289 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
290 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
291 }
274 return rt; 292 return rt;
275} 293}
276 294
@@ -278,7 +296,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
278{ 296{
279 struct rt6_info *rt = (struct rt6_info *)dst; 297 struct rt6_info *rt = (struct rt6_info *)dst;
280 struct inet6_dev *idev = rt->rt6i_idev; 298 struct inet6_dev *idev = rt->rt6i_idev;
281 struct inet_peer *peer = rt->rt6i_peer; 299
300 if (rt->n)
301 neigh_release(rt->n);
282 302
283 if (!(rt->dst.flags & DST_HOST)) 303 if (!(rt->dst.flags & DST_HOST))
284 dst_destroy_metrics_generic(dst); 304 dst_destroy_metrics_generic(dst);
@@ -291,8 +311,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
291 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) 311 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
292 dst_release(dst->from); 312 dst_release(dst->from);
293 313
294 if (peer) { 314 if (rt6_has_peer(rt)) {
295 rt->rt6i_peer = NULL; 315 struct inet_peer *peer = rt6_peer_ptr(rt);
296 inet_putpeer(peer); 316 inet_putpeer(peer);
297 } 317 }
298} 318}
@@ -306,13 +326,20 @@ static u32 rt6_peer_genid(void)
306 326
307void rt6_bind_peer(struct rt6_info *rt, int create) 327void rt6_bind_peer(struct rt6_info *rt, int create)
308{ 328{
329 struct inet_peer_base *base;
309 struct inet_peer *peer; 330 struct inet_peer *peer;
310 331
311 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create); 332 base = inetpeer_base_ptr(rt->_rt6i_peer);
312 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL) 333 if (!base)
313 inet_putpeer(peer); 334 return;
314 else 335
315 rt->rt6i_peer_genid = rt6_peer_genid(); 336 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
337 if (peer) {
338 if (!rt6_set_peer(rt, peer))
339 inet_putpeer(peer);
340 else
341 rt->rt6i_peer_genid = rt6_peer_genid();
342 }
316} 343}
317 344
318static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 345static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -323,12 +350,19 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
323 struct net_device *loopback_dev = 350 struct net_device *loopback_dev =
324 dev_net(dev)->loopback_dev; 351 dev_net(dev)->loopback_dev;
325 352
326 if (dev != loopback_dev && idev && idev->dev == dev) { 353 if (dev != loopback_dev) {
327 struct inet6_dev *loopback_idev = 354 if (idev && idev->dev == dev) {
328 in6_dev_get(loopback_dev); 355 struct inet6_dev *loopback_idev =
329 if (loopback_idev) { 356 in6_dev_get(loopback_dev);
330 rt->rt6i_idev = loopback_idev; 357 if (loopback_idev) {
331 in6_dev_put(idev); 358 rt->rt6i_idev = loopback_idev;
359 in6_dev_put(idev);
360 }
361 }
362 if (rt->n && rt->n->dev == dev) {
363 rt->n->dev = loopback_dev;
364 dev_hold(loopback_dev);
365 dev_put(dev);
332 } 366 }
333 } 367 }
334} 368}
@@ -418,7 +452,7 @@ static void rt6_probe(struct rt6_info *rt)
418 * to no more than one per minute. 452 * to no more than one per minute.
419 */ 453 */
420 rcu_read_lock(); 454 rcu_read_lock();
421 neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL; 455 neigh = rt ? rt->n : NULL;
422 if (!neigh || (neigh->nud_state & NUD_VALID)) 456 if (!neigh || (neigh->nud_state & NUD_VALID))
423 goto out; 457 goto out;
424 read_lock_bh(&neigh->lock); 458 read_lock_bh(&neigh->lock);
@@ -465,7 +499,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
465 int m; 499 int m;
466 500
467 rcu_read_lock(); 501 rcu_read_lock();
468 neigh = dst_get_neighbour_noref(&rt->dst); 502 neigh = rt->n;
469 if (rt->rt6i_flags & RTF_NONEXTHOP || 503 if (rt->rt6i_flags & RTF_NONEXTHOP ||
470 !(rt->rt6i_flags & RTF_GATEWAY)) 504 !(rt->rt6i_flags & RTF_GATEWAY))
471 m = 1; 505 m = 1;
@@ -812,7 +846,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
812 846
813 if (rt) { 847 if (rt) {
814 rt->rt6i_flags |= RTF_CACHE; 848 rt->rt6i_flags |= RTF_CACHE;
815 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst))); 849 rt->n = neigh_clone(ort->n);
816 } 850 }
817 return rt; 851 return rt;
818} 852}
@@ -846,7 +880,7 @@ restart:
846 dst_hold(&rt->dst); 880 dst_hold(&rt->dst);
847 read_unlock_bh(&table->tb6_lock); 881 read_unlock_bh(&table->tb6_lock);
848 882
849 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) 883 if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
850 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 884 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
851 else if (!(rt->dst.flags & DST_HOST)) 885 else if (!(rt->dst.flags & DST_HOST))
852 nrt = rt6_alloc_clone(rt, &fl6->daddr); 886 nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -931,6 +965,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
931{ 965{
932 int flags = 0; 966 int flags = 0;
933 967
968 fl6->flowi6_iif = net->loopback_dev->ifindex;
969
934 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) 970 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
935 flags |= RT6_LOOKUP_F_IFACE; 971 flags |= RT6_LOOKUP_F_IFACE;
936 972
@@ -949,12 +985,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
949 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; 985 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
950 struct dst_entry *new = NULL; 986 struct dst_entry *new = NULL;
951 987
952 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0); 988 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
953 if (rt) { 989 if (rt) {
954 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
955
956 new = &rt->dst; 990 new = &rt->dst;
957 991
992 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
993 rt6_init_peer(rt, net->ipv6.peers);
994
958 new->__use = 1; 995 new->__use = 1;
959 new->input = dst_discard; 996 new->input = dst_discard;
960 new->output = dst_discard; 997 new->output = dst_discard;
@@ -996,7 +1033,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
996 1033
997 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { 1034 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
998 if (rt->rt6i_peer_genid != rt6_peer_genid()) { 1035 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
999 if (!rt->rt6i_peer) 1036 if (!rt6_has_peer(rt))
1000 rt6_bind_peer(rt, 0); 1037 rt6_bind_peer(rt, 0);
1001 rt->rt6i_peer_genid = rt6_peer_genid(); 1038 rt->rt6i_peer_genid = rt6_peer_genid();
1002 } 1039 }
@@ -1038,11 +1075,15 @@ static void ip6_link_failure(struct sk_buff *skb)
1038 } 1075 }
1039} 1076}
1040 1077
1041static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu) 1078static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1079 struct sk_buff *skb, u32 mtu)
1042{ 1080{
1043 struct rt6_info *rt6 = (struct rt6_info*)dst; 1081 struct rt6_info *rt6 = (struct rt6_info*)dst;
1044 1082
1083 dst_confirm(dst);
1045 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { 1084 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1085 struct net *net = dev_net(dst->dev);
1086
1046 rt6->rt6i_flags |= RTF_MODIFIED; 1087 rt6->rt6i_flags |= RTF_MODIFIED;
1047 if (mtu < IPV6_MIN_MTU) { 1088 if (mtu < IPV6_MIN_MTU) {
1048 u32 features = dst_metric(dst, RTAX_FEATURES); 1089 u32 features = dst_metric(dst, RTAX_FEATURES);
@@ -1051,9 +1092,66 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1051 dst_metric_set(dst, RTAX_FEATURES, features); 1092 dst_metric_set(dst, RTAX_FEATURES, features);
1052 } 1093 }
1053 dst_metric_set(dst, RTAX_MTU, mtu); 1094 dst_metric_set(dst, RTAX_MTU, mtu);
1095 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1054 } 1096 }
1055} 1097}
1056 1098
1099void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1100 int oif, u32 mark)
1101{
1102 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1103 struct dst_entry *dst;
1104 struct flowi6 fl6;
1105
1106 memset(&fl6, 0, sizeof(fl6));
1107 fl6.flowi6_oif = oif;
1108 fl6.flowi6_mark = mark;
1109 fl6.flowi6_flags = 0;
1110 fl6.daddr = iph->daddr;
1111 fl6.saddr = iph->saddr;
1112 fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
1113
1114 dst = ip6_route_output(net, NULL, &fl6);
1115 if (!dst->error)
1116 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1117 dst_release(dst);
1118}
1119EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1120
1121void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1122{
1123 ip6_update_pmtu(skb, sock_net(sk), mtu,
1124 sk->sk_bound_dev_if, sk->sk_mark);
1125}
1126EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1127
1128void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1129{
1130 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1131 struct dst_entry *dst;
1132 struct flowi6 fl6;
1133
1134 memset(&fl6, 0, sizeof(fl6));
1135 fl6.flowi6_oif = oif;
1136 fl6.flowi6_mark = mark;
1137 fl6.flowi6_flags = 0;
1138 fl6.daddr = iph->daddr;
1139 fl6.saddr = iph->saddr;
1140 fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
1141
1142 dst = ip6_route_output(net, NULL, &fl6);
1143 if (!dst->error)
1144 rt6_do_redirect(dst, NULL, skb);
1145 dst_release(dst);
1146}
1147EXPORT_SYMBOL_GPL(ip6_redirect);
1148
1149void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1150{
1151 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1152}
1153EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1154
1057static unsigned int ip6_default_advmss(const struct dst_entry *dst) 1155static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1058{ 1156{
1059 struct net_device *dev = dst->dev; 1157 struct net_device *dev = dst->dev;
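
The four exported helpers above give every protocol error handler the same two-line recipe; the _sk variants reuse the socket's device binding and mark instead of taking them as parameters. The socket-scoped dispatch as rawv6_err() now uses it, sketched:

static void my_sk_err(struct sock *sk, struct sk_buff *skb,
		      u8 type, __be32 info)
{
	if (type == ICMPV6_PKT_TOOBIG)
		ip6_sk_update_pmtu(skb, sk, info);	/* info = new MTU */
	else if (type == NDISC_REDIRECT)
		ip6_sk_redirect(skb, sk);
}
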
@@ -1110,7 +1208,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1110 if (unlikely(!idev)) 1208 if (unlikely(!idev))
1111 return ERR_PTR(-ENODEV); 1209 return ERR_PTR(-ENODEV);
1112 1210
1113 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0); 1211 rt = ip6_dst_alloc(net, dev, 0, NULL);
1114 if (unlikely(!rt)) { 1212 if (unlikely(!rt)) {
1115 in6_dev_put(idev); 1213 in6_dev_put(idev);
1116 dst = ERR_PTR(-ENOMEM); 1214 dst = ERR_PTR(-ENOMEM);
@@ -1120,7 +1218,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1120 if (neigh) 1218 if (neigh)
1121 neigh_hold(neigh); 1219 neigh_hold(neigh);
1122 else { 1220 else {
1123 neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr); 1221 neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr);
1124 if (IS_ERR(neigh)) { 1222 if (IS_ERR(neigh)) {
1125 in6_dev_put(idev); 1223 in6_dev_put(idev);
1126 dst_free(&rt->dst); 1224 dst_free(&rt->dst);
@@ -1130,7 +1228,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1130 1228
1131 rt->dst.flags |= DST_HOST; 1229 rt->dst.flags |= DST_HOST;
1132 rt->dst.output = ip6_output; 1230 rt->dst.output = ip6_output;
1133 dst_set_neighbour(&rt->dst, neigh); 1231 rt->n = neigh;
1134 atomic_set(&rt->dst.__refcnt, 1); 1232 atomic_set(&rt->dst.__refcnt, 1);
1135 rt->rt6i_dst.addr = fl6->daddr; 1233 rt->rt6i_dst.addr = fl6->daddr;
1136 rt->rt6i_dst.plen = 128; 1234 rt->rt6i_dst.plen = 128;
@@ -1292,7 +1390,7 @@ int ip6_route_add(struct fib6_config *cfg)
1292 if (!table) 1390 if (!table)
1293 goto out; 1391 goto out;
1294 1392
1295 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT); 1393 rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
1296 1394
1297 if (!rt) { 1395 if (!rt) {
1298 err = -ENOMEM; 1396 err = -ENOMEM;
@@ -1546,107 +1644,94 @@ static int ip6_route_del(struct fib6_config *cfg)
1546 return err; 1644 return err;
1547} 1645}
1548 1646
1549/* 1647static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1550 * Handle redirects
1551 */
1552struct ip6rd_flowi {
1553 struct flowi6 fl6;
1554 struct in6_addr gateway;
1555};
1556
1557static struct rt6_info *__ip6_route_redirect(struct net *net,
1558 struct fib6_table *table,
1559 struct flowi6 *fl6,
1560 int flags)
1561{ 1648{
1562 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; 1649 struct net *net = dev_net(skb->dev);
1563 struct rt6_info *rt; 1650 struct netevent_redirect netevent;
1564 struct fib6_node *fn; 1651 struct rt6_info *rt, *nrt = NULL;
1652 const struct in6_addr *target;
1653 struct ndisc_options ndopts;
1654 const struct in6_addr *dest;
1655 struct neighbour *old_neigh;
1656 struct inet6_dev *in6_dev;
1657 struct neighbour *neigh;
1658 struct icmp6hdr *icmph;
1659 int optlen, on_link;
1660 u8 *lladdr;
1565 1661
1566 /* 1662 optlen = skb->tail - skb->transport_header;
1567 * Get the "current" route for this destination and 1663 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1568 * check if the redirect has come from the appropriate router.
1569 *
1570 * RFC 2461 specifies that redirects should only be
1571 * accepted if they come from the nexthop to the target.
1572 * Due to the way the routes are chosen, this notion
1573 * is a bit fuzzy and one might need to check all possible
1574 * routes.
1575 */
1576 1664
1577 read_lock_bh(&table->tb6_lock); 1665 if (optlen < 0) {
1578 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1666 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1579restart: 1667 return;
1580 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1581 /*
1582 * Current route is on-link; redirect is always invalid.
1583 *
1584 * Seems, previous statement is not true. It could
1585 * be node, which looks for us as on-link (f.e. proxy ndisc)
1586 * But then router serving it might decide, that we should
1587 * know truth 8)8) --ANK (980726).
1588 */
1589 if (rt6_check_expired(rt))
1590 continue;
1591 if (!(rt->rt6i_flags & RTF_GATEWAY))
1592 continue;
1593 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1594 continue;
1595 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1596 continue;
1597 break;
1598 } 1668 }
1599 1669
1600 if (!rt) 1670 icmph = icmp6_hdr(skb);
1601 rt = net->ipv6.ip6_null_entry; 1671 target = (const struct in6_addr *) (icmph + 1);
1602 BACKTRACK(net, &fl6->saddr); 1672 dest = target + 1;
1603out:
1604 dst_hold(&rt->dst);
1605 1673
1606 read_unlock_bh(&table->tb6_lock); 1674 if (ipv6_addr_is_multicast(dest)) {
1607 1675 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1608 return rt; 1676 return;
1609}; 1677 }
1610
1611static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1612 const struct in6_addr *src,
1613 const struct in6_addr *gateway,
1614 struct net_device *dev)
1615{
1616 int flags = RT6_LOOKUP_F_HAS_SADDR;
1617 struct net *net = dev_net(dev);
1618 struct ip6rd_flowi rdfl = {
1619 .fl6 = {
1620 .flowi6_oif = dev->ifindex,
1621 .daddr = *dest,
1622 .saddr = *src,
1623 },
1624 };
1625 1678
1626 rdfl.gateway = *gateway; 1679 on_link = 0;
1680 if (ipv6_addr_equal(dest, target)) {
1681 on_link = 1;
1682 } else if (ipv6_addr_type(target) !=
1683 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1684 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1685 return;
1686 }
1627 1687
1628 if (rt6_need_strict(dest)) 1688 in6_dev = __in6_dev_get(skb->dev);
1629 flags |= RT6_LOOKUP_F_IFACE; 1689 if (!in6_dev)
1690 return;
1691 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1692 return;
1630 1693
1631 return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6, 1694 /* RFC2461 8.1:
1632 flags, __ip6_route_redirect); 1695 * The IP source address of the Redirect MUST be the same as the current
1633} 1696 * first-hop router for the specified ICMP Destination Address.
1697 */
1634 1698
1635void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, 1699 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
1636 const struct in6_addr *saddr, 1700 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1637 struct neighbour *neigh, u8 *lladdr, int on_link) 1701 return;
1638{ 1702 }
1639 struct rt6_info *rt, *nrt = NULL;
1640 struct netevent_redirect netevent;
1641 struct net *net = dev_net(neigh->dev);
1642 1703
1643 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1704 lladdr = NULL;
1705 if (ndopts.nd_opts_tgt_lladdr) {
1706 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1707 skb->dev);
1708 if (!lladdr) {
1709 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1710 return;
1711 }
1712 }
1644 1713
1714 rt = (struct rt6_info *) dst;
1645 if (rt == net->ipv6.ip6_null_entry) { 1715 if (rt == net->ipv6.ip6_null_entry) {
1646 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); 1716 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1647 goto out; 1717 return;
1648 } 1718 }
1649 1719
1720 /* Redirect received -> path was valid.
1721 * Look, redirects are sent only in response to data packets,
1722 * so that this nexthop apparently is reachable. --ANK
1723 */
1724 dst_confirm(&rt->dst);
1725
1726 neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
1727 if (!neigh)
1728 return;
1729
1730 /* Duplicate redirect: silently ignore. */
1731 old_neigh = rt->n;
1732 if (neigh == old_neigh)
1733 goto out;
1734
1650 /* 1735 /*
1651 * We have finally decided to accept it. 1736 * We have finally decided to accept it.
1652 */ 1737 */
@@ -1658,17 +1743,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1658 NEIGH_UPDATE_F_ISROUTER)) 1743 NEIGH_UPDATE_F_ISROUTER))
1659 ); 1744 );
1660 1745
1661 /*
1662 * Redirect received -> path was valid.
1663 * Look, redirects are sent only in response to data packets,
1664 * so that this nexthop apparently is reachable. --ANK
1665 */
1666 dst_confirm(&rt->dst);
1667
1668 /* Duplicate redirect: silently ignore. */
1669 if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
1670 goto out;
1671
1672 nrt = ip6_rt_copy(rt, dest); 1746 nrt = ip6_rt_copy(rt, dest);
1673 if (!nrt) 1747 if (!nrt)
1674 goto out; 1748 goto out;
@@ -1678,132 +1752,25 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1678 nrt->rt6i_flags &= ~RTF_GATEWAY; 1752 nrt->rt6i_flags &= ~RTF_GATEWAY;
1679 1753
1680 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 1754 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1681 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1755 nrt->n = neigh_clone(neigh);
1682 1756
1683 if (ip6_ins_rt(nrt)) 1757 if (ip6_ins_rt(nrt))
1684 goto out; 1758 goto out;
1685 1759
1686 netevent.old = &rt->dst; 1760 netevent.old = &rt->dst;
1761 netevent.old_neigh = old_neigh;
1687 netevent.new = &nrt->dst; 1762 netevent.new = &nrt->dst;
1763 netevent.new_neigh = neigh;
1764 netevent.daddr = dest;
1688 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1765 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1689 1766
1690 if (rt->rt6i_flags & RTF_CACHE) { 1767 if (rt->rt6i_flags & RTF_CACHE) {
1768 rt = (struct rt6_info *) dst_clone(&rt->dst);
1691 ip6_del_rt(rt); 1769 ip6_del_rt(rt);
1692 return;
1693 }
1694
1695out:
1696 dst_release(&rt->dst);
1697}
1698
1699/*
1700 * Handle ICMP "packet too big" messages
1701 * i.e. Path MTU discovery
1702 */
1703
1704static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1705 struct net *net, u32 pmtu, int ifindex)
1706{
1707 struct rt6_info *rt, *nrt;
1708 int allfrag = 0;
1709again:
1710 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1711 if (!rt)
1712 return;
1713
1714 if (rt6_check_expired(rt)) {
1715 ip6_del_rt(rt);
1716 goto again;
1717 } 1770 }
1718 1771
1719 if (pmtu >= dst_mtu(&rt->dst))
1720 goto out;
1721
1722 if (pmtu < IPV6_MIN_MTU) {
1723 /*
1724 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1725 * MTU (1280) and a fragment header should always be included
1726 * after a node receives a Too Big message reporting that the PMTU is
1727 * less than the IPv6 Minimum Link MTU.
1728 */
1729 pmtu = IPV6_MIN_MTU;
1730 allfrag = 1;
1731 }
1732
1733 /* New mtu received -> path was valid.
1734 They are sent only in response to data packets,
1735 so that this nexthop apparently is reachable. --ANK
1736 */
1737 dst_confirm(&rt->dst);
1738
1739 /* Host route. If it is static, it would be better
1740 not to override it, but to add a new one, so that
1741 when the cache entry expires the old pmtu
1742 returns automatically.
1743 */
1744 if (rt->rt6i_flags & RTF_CACHE) {
1745 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1746 if (allfrag) {
1747 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1748 features |= RTAX_FEATURE_ALLFRAG;
1749 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1750 }
1751 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1752 rt->rt6i_flags |= RTF_MODIFIED;
1753 goto out;
1754 }
1755
1756 /* Network route.
1757 Two cases are possible:
1758 1. It is connected route. Action: COW
1759 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1760 */
1761 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1762 nrt = rt6_alloc_cow(rt, daddr, saddr);
1763 else
1764 nrt = rt6_alloc_clone(rt, daddr);
1765
1766 if (nrt) {
1767 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1768 if (allfrag) {
1769 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1770 features |= RTAX_FEATURE_ALLFRAG;
1771 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1772 }
1773
1774 /* According to RFC 1981, a PMTU increase shouldn't be detected
1775 * within 5 mins; the recommended timer is 10 mins.
1776 * Here this route expiration time is set to ip6_rt_mtu_expires,
1777 * which is 10 mins. After 10 mins the decreased pmtu expires
1778 * and detection of a PMTU increase happens automatically.
1779 */
1780 rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1781 nrt->rt6i_flags |= RTF_DYNAMIC;
1782 ip6_ins_rt(nrt);
1783 }
1784out: 1772out:
1785 dst_release(&rt->dst); 1773 neigh_release(neigh);
1786}
1787
1788void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1789 struct net_device *dev, u32 pmtu)
1790{
1791 struct net *net = dev_net(dev);
1792
1793 /*
1794 * RFC 1981 states that a node "MUST reduce the size of the packets it
1795 * is sending along the path" that caused the Packet Too Big message.
1796 * Since it's not possible in the general case to determine which
1797 * interface was used to send the original packet, we update the MTU
1798 * on the interface that will be used to send future packets. We also
1799 * update the MTU on the interface that received the Packet Too Big in
1800 * case the original packet was forced out that interface with
1801 * SO_BINDTODEVICE or similar. This is the next best thing to the
1802 * correct behaviour, which would be to update the MTU on all
1803 * interfaces.
1804 */
1805 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1806 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1807} 1774}
1808 1775
1809/* 1776/*
@@ -1814,8 +1781,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1814 const struct in6_addr *dest) 1781 const struct in6_addr *dest)
1815{ 1782{
1816 struct net *net = dev_net(ort->dst.dev); 1783 struct net *net = dev_net(ort->dst.dev);
1817 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, 1784 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1818 ort->dst.dev, 0); 1785 ort->rt6i_table);
1819 1786
1820 if (rt) { 1787 if (rt) {
1821 rt->dst.input = ort->dst.input; 1788 rt->dst.input = ort->dst.input;
@@ -2099,8 +2066,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2099 bool anycast) 2066 bool anycast)
2100{ 2067{
2101 struct net *net = dev_net(idev->dev); 2068 struct net *net = dev_net(idev->dev);
2102 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, 2069 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2103 net->loopback_dev, 0);
2104 int err; 2070 int err;
2105 2071
2106 if (!rt) { 2072 if (!rt) {
@@ -2396,13 +2362,11 @@ static int rt6_fill_node(struct net *net,
2396 int iif, int type, u32 pid, u32 seq, 2362 int iif, int type, u32 pid, u32 seq,
2397 int prefix, int nowait, unsigned int flags) 2363 int prefix, int nowait, unsigned int flags)
2398{ 2364{
2399 const struct inet_peer *peer;
2400 struct rtmsg *rtm; 2365 struct rtmsg *rtm;
2401 struct nlmsghdr *nlh; 2366 struct nlmsghdr *nlh;
2402 long expires; 2367 long expires;
2403 u32 table; 2368 u32 table;
2404 struct neighbour *n; 2369 struct neighbour *n;
2405 u32 ts, tsage;
2406 2370
2407 if (prefix) { /* user wants prefix routes only */ 2371 if (prefix) { /* user wants prefix routes only */
2408 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { 2372 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2440,10 +2404,12 @@ static int rt6_fill_node(struct net *net,
2440 rtm->rtm_protocol = rt->rt6i_protocol; 2404 rtm->rtm_protocol = rt->rt6i_protocol;
2441 if (rt->rt6i_flags & RTF_DYNAMIC) 2405 if (rt->rt6i_flags & RTF_DYNAMIC)
2442 rtm->rtm_protocol = RTPROT_REDIRECT; 2406 rtm->rtm_protocol = RTPROT_REDIRECT;
2443 else if (rt->rt6i_flags & RTF_ADDRCONF) 2407 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2444 rtm->rtm_protocol = RTPROT_KERNEL; 2408 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2445 else if (rt->rt6i_flags & RTF_DEFAULT) 2409 rtm->rtm_protocol = RTPROT_RA;
2446 rtm->rtm_protocol = RTPROT_RA; 2410 else
2411 rtm->rtm_protocol = RTPROT_KERNEL;
2412 }
2447 2413
2448 if (rt->rt6i_flags & RTF_CACHE) 2414 if (rt->rt6i_flags & RTF_CACHE)
2449 rtm->rtm_flags |= RTM_F_CLONED; 2415 rtm->rtm_flags |= RTM_F_CLONED;
@@ -2500,7 +2466,7 @@ static int rt6_fill_node(struct net *net,
2500 goto nla_put_failure; 2466 goto nla_put_failure;
2501 2467
2502 rcu_read_lock(); 2468 rcu_read_lock();
2503 n = dst_get_neighbour_noref(&rt->dst); 2469 n = rt->n;
2504 if (n) { 2470 if (n) {
2505 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { 2471 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
2506 rcu_read_unlock(); 2472 rcu_read_unlock();
@@ -2514,22 +2480,10 @@ static int rt6_fill_node(struct net *net,
2514 goto nla_put_failure; 2480 goto nla_put_failure;
2515 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) 2481 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2516 goto nla_put_failure; 2482 goto nla_put_failure;
2517 if (!(rt->rt6i_flags & RTF_EXPIRES))
2518 expires = 0;
2519 else if (rt->dst.expires - jiffies < INT_MAX)
2520 expires = rt->dst.expires - jiffies;
2521 else
2522 expires = INT_MAX;
2523 2483
2524 peer = rt->rt6i_peer; 2484 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2525 ts = tsage = 0;
2526 if (peer && peer->tcp_ts_stamp) {
2527 ts = peer->tcp_ts;
2528 tsage = get_seconds() - peer->tcp_ts_stamp;
2529 }
2530 2485
2531 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage, 2486 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2532 expires, rt->dst.error) < 0)
2533 goto nla_put_failure; 2487 goto nla_put_failure;
2534 2488
2535 return nlmsg_end(skb, nlh); 2489 return nlmsg_end(skb, nlh);
@@ -2722,7 +2676,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2722 seq_puts(m, "00000000000000000000000000000000 00 "); 2676 seq_puts(m, "00000000000000000000000000000000 00 ");
2723#endif 2677#endif
2724 rcu_read_lock(); 2678 rcu_read_lock();
2725 n = dst_get_neighbour_noref(&rt->dst); 2679 n = rt->n;
2726 if (n) { 2680 if (n) {
2727 seq_printf(m, "%pi6", n->primary_key); 2681 seq_printf(m, "%pi6", n->primary_key);
2728 } else { 2682 } else {
@@ -2957,10 +2911,6 @@ static int __net_init ip6_route_net_init(struct net *net)
2957 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; 2911 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2958 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; 2912 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2959 2913
2960#ifdef CONFIG_PROC_FS
2961 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2962 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2963#endif
2964 net->ipv6.ip6_rt_gc_expire = 30*HZ; 2914 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2965 2915
2966 ret = 0; 2916 ret = 0;
@@ -2981,10 +2931,6 @@ out_ip6_dst_ops:
2981 2931
2982static void __net_exit ip6_route_net_exit(struct net *net) 2932static void __net_exit ip6_route_net_exit(struct net *net)
2983{ 2933{
2984#ifdef CONFIG_PROC_FS
2985 proc_net_remove(net, "ipv6_route");
2986 proc_net_remove(net, "rt6_stats");
2987#endif
2988 kfree(net->ipv6.ip6_null_entry); 2934 kfree(net->ipv6.ip6_null_entry);
2989#ifdef CONFIG_IPV6_MULTIPLE_TABLES 2935#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2990 kfree(net->ipv6.ip6_prohibit_entry); 2936 kfree(net->ipv6.ip6_prohibit_entry);
@@ -2993,11 +2939,58 @@ static void __net_exit ip6_route_net_exit(struct net *net)
2993 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 2939 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2994} 2940}
2995 2941
2942static int __net_init ip6_route_net_init_late(struct net *net)
2943{
2944#ifdef CONFIG_PROC_FS
2945 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2946 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2947#endif
2948 return 0;
2949}
2950
2951static void __net_exit ip6_route_net_exit_late(struct net *net)
2952{
2953#ifdef CONFIG_PROC_FS
2954 proc_net_remove(net, "ipv6_route");
2955 proc_net_remove(net, "rt6_stats");
2956#endif
2957}
2958
2996static struct pernet_operations ip6_route_net_ops = { 2959static struct pernet_operations ip6_route_net_ops = {
2997 .init = ip6_route_net_init, 2960 .init = ip6_route_net_init,
2998 .exit = ip6_route_net_exit, 2961 .exit = ip6_route_net_exit,
2999}; 2962};
3000 2963
2964static int __net_init ipv6_inetpeer_init(struct net *net)
2965{
2966 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2967
2968 if (!bp)
2969 return -ENOMEM;
2970 inet_peer_base_init(bp);
2971 net->ipv6.peers = bp;
2972 return 0;
2973}
2974
2975static void __net_exit ipv6_inetpeer_exit(struct net *net)
2976{
2977 struct inet_peer_base *bp = net->ipv6.peers;
2978
2979 net->ipv6.peers = NULL;
2980 inetpeer_invalidate_tree(bp);
2981 kfree(bp);
2982}
2983
2984static struct pernet_operations ipv6_inetpeer_ops = {
2985 .init = ipv6_inetpeer_init,
2986 .exit = ipv6_inetpeer_exit,
2987};
2988
2989static struct pernet_operations ip6_route_net_late_ops = {
2990 .init = ip6_route_net_init_late,
2991 .exit = ip6_route_net_exit_late,
2992};
2993
3001static struct notifier_block ip6_route_dev_notifier = { 2994static struct notifier_block ip6_route_dev_notifier = {
3002 .notifier_call = ip6_route_dev_notify, 2995 .notifier_call = ip6_route_dev_notify,
3003 .priority = 0, 2996 .priority = 0,
@@ -3018,10 +3011,14 @@ int __init ip6_route_init(void)
3018 if (ret) 3011 if (ret)
3019 goto out_kmem_cache; 3012 goto out_kmem_cache;
3020 3013
3021 ret = register_pernet_subsys(&ip6_route_net_ops); 3014 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3022 if (ret) 3015 if (ret)
3023 goto out_dst_entries; 3016 goto out_dst_entries;
3024 3017
3018 ret = register_pernet_subsys(&ip6_route_net_ops);
3019 if (ret)
3020 goto out_register_inetpeer;
3021
3025 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; 3022 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3026 3023
3027 /* Registering of the loopback is done before this portion of code, 3024 /* Registering of the loopback is done before this portion of code,
@@ -3047,19 +3044,25 @@ int __init ip6_route_init(void)
3047 if (ret) 3044 if (ret)
3048 goto xfrm6_init; 3045 goto xfrm6_init;
3049 3046
3047 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3048 if (ret)
3049 goto fib6_rules_init;
3050
3050 ret = -ENOBUFS; 3051 ret = -ENOBUFS;
3051 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) || 3052 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3052 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) || 3053 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3053 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL)) 3054 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3054 goto fib6_rules_init; 3055 goto out_register_late_subsys;
3055 3056
3056 ret = register_netdevice_notifier(&ip6_route_dev_notifier); 3057 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3057 if (ret) 3058 if (ret)
3058 goto fib6_rules_init; 3059 goto out_register_late_subsys;
3059 3060
3060out: 3061out:
3061 return ret; 3062 return ret;
3062 3063
3064out_register_late_subsys:
3065 unregister_pernet_subsys(&ip6_route_net_late_ops);
3063fib6_rules_init: 3066fib6_rules_init:
3064 fib6_rules_cleanup(); 3067 fib6_rules_cleanup();
3065xfrm6_init: 3068xfrm6_init:
@@ -3068,6 +3071,8 @@ out_fib6_init:
3068 fib6_gc_cleanup(); 3071 fib6_gc_cleanup();
3069out_register_subsys: 3072out_register_subsys:
3070 unregister_pernet_subsys(&ip6_route_net_ops); 3073 unregister_pernet_subsys(&ip6_route_net_ops);
3074out_register_inetpeer:
3075 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3071out_dst_entries: 3076out_dst_entries:
3072 dst_entries_destroy(&ip6_dst_blackhole_ops); 3077 dst_entries_destroy(&ip6_dst_blackhole_ops);
3073out_kmem_cache: 3078out_kmem_cache:
@@ -3078,9 +3083,11 @@ out_kmem_cache:
3078void ip6_route_cleanup(void) 3083void ip6_route_cleanup(void)
3079{ 3084{
3080 unregister_netdevice_notifier(&ip6_route_dev_notifier); 3085 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3086 unregister_pernet_subsys(&ip6_route_net_late_ops);
3081 fib6_rules_cleanup(); 3087 fib6_rules_cleanup();
3082 xfrm6_fini(); 3088 xfrm6_fini();
3083 fib6_gc_cleanup(); 3089 fib6_gc_cleanup();
3090 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3084 unregister_pernet_subsys(&ip6_route_net_ops); 3091 unregister_pernet_subsys(&ip6_route_net_ops);
3085 dst_entries_destroy(&ip6_dst_blackhole_ops); 3092 dst_entries_destroy(&ip6_dst_blackhole_ops);
3086 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 3093 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
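
The route.c changes above replace the old rt6_pmtu_discovery()/rt6_do_pmtu_disc() pair with a generic ip6_update_pmtu() entry point, while ip6_rt_update_pmtu() keeps the RFC 2460 rule: a reported PMTU below the IPv6 minimum link MTU (1280) is clamped back to 1280 and the route is marked so a fragment header is always added (RTAX_FEATURE_ALLFRAG). A minimal userspace sketch of that clamping rule follows; it is an illustration only, not kernel code.

    /*
     * Sketch of the RFC 2460 clamp applied by ip6_rt_update_pmtu():
     * a PMTU below 1280 is raised back to 1280 and the allfrag flag
     * (modeling RTAX_FEATURE_ALLFRAG) is set. Illustration only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define IPV6_MIN_MTU 1280u

    struct pmtu_result {
        unsigned int mtu;
        bool allfrag;           /* models RTAX_FEATURE_ALLFRAG */
    };

    static struct pmtu_result clamp_pmtu(unsigned int reported)
    {
        struct pmtu_result r = { .mtu = reported, .allfrag = false };

        if (reported < IPV6_MIN_MTU) {
            r.mtu = IPV6_MIN_MTU;
            r.allfrag = true;   /* sender must add a fragment header */
        }
        return r;
    }

    int main(void)
    {
        unsigned int samples[] = { 1500, 1280, 1000 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            struct pmtu_result r = clamp_pmtu(samples[i]);
            printf("reported %4u -> use %4u allfrag=%d\n",
                   samples[i], r.mtu, (int)r.allfrag);
        }
        return 0;
    }

In the kernel the clamped value lands in RTAX_MTU and the cached route expires after ip6_rt_mtu_expires (10 minutes by default), after which a larger PMTU can be rediscovered.
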
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 60415711563f..3bd1bfc01f85 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -527,9 +527,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
527 case ICMP_PORT_UNREACH: 527 case ICMP_PORT_UNREACH:
528 /* Impossible event. */ 528 /* Impossible event. */
529 return 0; 529 return 0;
530 case ICMP_FRAG_NEEDED:
531 /* Soft state for pmtu is maintained by IP core. */
532 return 0;
533 default: 530 default:
534 /* All others are translated to HOST_UNREACH. 531 /* All others are translated to HOST_UNREACH.
535 rfc2003 contains "deep thoughts" about NET_UNREACH, 532 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -542,6 +539,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
542 if (code != ICMP_EXC_TTL) 539 if (code != ICMP_EXC_TTL)
543 return 0; 540 return 0;
544 break; 541 break;
542 case ICMP_REDIRECT:
543 break;
545 } 544 }
546 545
547 err = -ENOENT; 546 err = -ENOENT;
@@ -551,7 +550,23 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
551 skb->dev, 550 skb->dev,
552 iph->daddr, 551 iph->daddr,
553 iph->saddr); 552 iph->saddr);
554 if (t == NULL || t->parms.iph.daddr == 0) 553 if (t == NULL)
554 goto out;
555
556 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
557 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
558 t->dev->ifindex, 0, IPPROTO_IPV6, 0);
559 err = 0;
560 goto out;
561 }
562 if (type == ICMP_REDIRECT) {
563 ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
564 IPPROTO_IPV6, 0);
565 err = 0;
566 goto out;
567 }
568
569 if (t->parms.iph.daddr == 0)
555 goto out; 570 goto out;
556 571
557 err = 0; 572 err = 0;
@@ -792,7 +807,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
792 } 807 }
793 808
794 if (tunnel->parms.iph.daddr && skb_dst(skb)) 809 if (tunnel->parms.iph.daddr && skb_dst(skb))
795 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 810 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
796 811
797 if (skb->len > mtu) { 812 if (skb->len > mtu) {
798 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 813 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
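
With the early return for ICMP_FRAG_NEEDED removed, ipip6_err() now forwards frag-needed and redirect errors seen on the outer IPv4 header to the tunnel's route via ipv4_update_pmtu() and ipv4_redirect(). A sketch of that dispatch shape, using stand-in constants rather than the real kernel headers:

    /*
     * Stand-ins for the kernel's ICMP type/code constants; the
     * printf bodies model the calls into the IPv4 route helpers.
     */
    #include <stdio.h>

    #define ICMP_DEST_UNREACH 3
    #define ICMP_FRAG_NEEDED  4
    #define ICMP_REDIRECT     5

    static void tunnel_err(int type, int code, unsigned int info)
    {
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
            /* kernel: ipv4_update_pmtu(..., info, ifindex, 0,
             *                          IPPROTO_IPV6, 0); */
            printf("update tunnel PMTU to %u\n", info);
            return;
        }
        if (type == ICMP_REDIRECT) {
            /* kernel: ipv4_redirect(..., ifindex, 0, IPPROTO_IPV6, 0); */
            printf("re-route tunnel endpoint\n");
            return;
        }
        printf("type %d code %d: generic handling\n", type, code);
    }

    int main(void)
    {
        tunnel_err(ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 1400);
        tunnel_err(ICMP_REDIRECT, 0, 0);
        return 0;
    }
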
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8e951d8d3b81..bb46061c813a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -21,9 +21,6 @@
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/tcp.h> 22#include <net/tcp.h>
23 23
24extern int sysctl_tcp_syncookies;
25extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
26
27#define COOKIEBITS 24 /* Upper bits store count */ 24#define COOKIEBITS 24 /* Upper bits store count */
28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 25#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
29 26
@@ -180,7 +177,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
180 177
181 /* check for timestamp cookie support */ 178 /* check for timestamp cookie support */
182 memset(&tcp_opt, 0, sizeof(tcp_opt)); 179 memset(&tcp_opt, 0, sizeof(tcp_opt));
183 tcp_parse_options(skb, &tcp_opt, &hash_location, 0); 180 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
184 181
185 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) 182 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
186 goto out; 183 goto out;
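
The extra NULL argument to tcp_parse_options() reflects a new optional out-parameter in this kernel series that existing callers opt out of. A small sketch of that NULL-able out-parameter convention, with illustrative names that are not the kernel API:

    #include <stdio.h>

    struct parsed_cookie {
        int len;
    };

    static void parse_options(const char *buf, struct parsed_cookie *ck)
    {
        (void)buf;              /* a real parser would walk the options */
        if (ck)                 /* optional output: fill only if asked */
            ck->len = 8;
    }

    int main(void)
    {
        struct parsed_cookie ck;

        parse_options("synopts", NULL);  /* caller opts out */
        parse_options("synopts", &ck);   /* caller wants the result */
        printf("cookie len %d\n", ck.len);
        return 0;
    }
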
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3a9aec29581a..c66b90f71c9b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -277,22 +277,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
277 rt = (struct rt6_info *) dst; 277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle && 278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp && 279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) { 280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
281 struct inet_peer *peer = rt6_get_peer(rt); 281 tcp_fetch_timewait_stamp(sk, dst);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT, and initialize rx_opt.ts_recent from it,

286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296 282
297 icsk->icsk_ext_hdr_len = 0; 283 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt) 284 if (np->opt)
@@ -329,6 +315,23 @@ failure:
329 return err; 315 return err;
330} 316}
331 317
318static void tcp_v6_mtu_reduced(struct sock *sk)
319{
320 struct dst_entry *dst;
321
322 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323 return;
324
325 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326 if (!dst)
327 return;
328
329 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330 tcp_sync_mss(sk, dst_mtu(dst));
331 tcp_simple_retransmit(sk);
332 }
333}
334
332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 335static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info) 336 u8 type, u8 code, int offset, __be32 info)
334{ 337{
@@ -356,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
356 } 359 }
357 360
358 bh_lock_sock(sk); 361 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk)) 362 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 363 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361 364
362 if (sk->sk_state == TCP_CLOSE) 365 if (sk->sk_state == TCP_CLOSE)
@@ -377,49 +380,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 380
378 np = inet6_sk(sk); 381 np = inet6_sk(sk);
379 382
380 if (type == ICMPV6_PKT_TOOBIG) { 383 if (type == NDISC_REDIRECT) {
381 struct dst_entry *dst; 384 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
382
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
387
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
393 struct flowi6 fl6;
394 385
395 /* BUGGG_FUTURE: Again, it is not clear how 386 if (dst)
396 to handle rthdr case. Ignore this complexity 387 dst->ops->redirect(dst, sk, skb);
397 for now. 388 }
398 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 fl6.daddr = np->daddr;
402 fl6.saddr = np->saddr;
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
412 goto out;
413 }
414
415 } else
416 dst_hold(dst);
417 389
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { 390 if (type == ICMPV6_PKT_TOOBIG) {
419 tcp_sync_mss(sk, dst_mtu(dst)); 391 tp->mtu_info = ntohl(info);
420 tcp_simple_retransmit(sk); 392 if (!sock_owned_by_user(sk))
421 } /* else let the usual retransmit timer handle it */ 393 tcp_v6_mtu_reduced(sk);
422 dst_release(dst); 394 else
395 set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
423 goto out; 396 goto out;
424 } 397 }
425 398
@@ -475,62 +448,43 @@ out:
475} 448}
476 449
477 450
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 451static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
452 struct flowi6 *fl6,
453 struct request_sock *req,
479 struct request_values *rvp, 454 struct request_values *rvp,
480 u16 queue_mapping) 455 u16 queue_mapping)
481{ 456{
482 struct inet6_request_sock *treq = inet6_rsk(req); 457 struct inet6_request_sock *treq = inet6_rsk(req);
483 struct ipv6_pinfo *np = inet6_sk(sk); 458 struct ipv6_pinfo *np = inet6_sk(sk);
484 struct sk_buff * skb; 459 struct sk_buff * skb;
485 struct ipv6_txoptions *opt = NULL; 460 int err = -ENOMEM;
486 struct in6_addr * final_p, final;
487 struct flowi6 fl6;
488 struct dst_entry *dst;
489 int err;
490 461
491 memset(&fl6, 0, sizeof(fl6)); 462 /* First, grab a route. */
492 fl6.flowi6_proto = IPPROTO_TCP; 463 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
493 fl6.daddr = treq->rmt_addr;
494 fl6.saddr = treq->loc_addr;
495 fl6.flowlabel = 0;
496 fl6.flowi6_oif = treq->iif;
497 fl6.flowi6_mark = sk->sk_mark;
498 fl6.fl6_dport = inet_rsk(req)->rmt_port;
499 fl6.fl6_sport = inet_rsk(req)->loc_port;
500 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501
502 opt = np->opt;
503 final_p = fl6_update_dst(&fl6, opt, &final);
504
505 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
506 if (IS_ERR(dst)) {
507 err = PTR_ERR(dst);
508 dst = NULL;
509 goto done; 464 goto done;
510 } 465
511 skb = tcp_make_synack(sk, dst, req, rvp); 466 skb = tcp_make_synack(sk, dst, req, rvp);
512 err = -ENOMEM; 467
513 if (skb) { 468 if (skb) {
514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 469 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 470
516 fl6.daddr = treq->rmt_addr; 471 fl6->daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping); 472 skb_set_queue_mapping(skb, queue_mapping);
518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 473 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
519 err = net_xmit_eval(err); 474 err = net_xmit_eval(err);
520 } 475 }
521 476
522done: 477done:
523 if (opt && opt != np->opt)
524 sock_kfree_s(sk, opt, opt->tot_len);
525 dst_release(dst);
526 return err; 478 return err;
527} 479}
528 480
529static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, 481static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
530 struct request_values *rvp) 482 struct request_values *rvp)
531{ 483{
484 struct flowi6 fl6;
485
532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 486 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
533 return tcp_v6_send_synack(sk, req, rvp, 0); 487 return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
534} 488}
535 489
536static void tcp_v6_reqsk_destructor(struct request_sock *req) 490static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1057,6 +1011,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1057 struct tcp_sock *tp = tcp_sk(sk); 1011 struct tcp_sock *tp = tcp_sk(sk);
1058 __u32 isn = TCP_SKB_CB(skb)->when; 1012 __u32 isn = TCP_SKB_CB(skb)->when;
1059 struct dst_entry *dst = NULL; 1013 struct dst_entry *dst = NULL;
1014 struct flowi6 fl6;
1060 bool want_cookie = false; 1015 bool want_cookie = false;
1061 1016
1062 if (skb->protocol == htons(ETH_P_IP)) 1017 if (skb->protocol == htons(ETH_P_IP))
@@ -1085,7 +1040,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1085 tcp_clear_options(&tmp_opt); 1040 tcp_clear_options(&tmp_opt);
1086 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1041 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087 tmp_opt.user_mss = tp->rx_opt.user_mss; 1042 tmp_opt.user_mss = tp->rx_opt.user_mss;
1088 tcp_parse_options(skb, &tmp_opt, &hash_location, 0); 1043 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
1089 1044
1090 if (tmp_opt.cookie_plus > 0 && 1045 if (tmp_opt.cookie_plus > 0 &&
1091 tmp_opt.saw_tstamp && 1046 tmp_opt.saw_tstamp &&
@@ -1150,8 +1105,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1150 treq->iif = inet6_iif(skb); 1105 treq->iif = inet6_iif(skb);
1151 1106
1152 if (!isn) { 1107 if (!isn) {
1153 struct inet_peer *peer = NULL;
1154
1155 if (ipv6_opt_accepted(sk, skb) || 1108 if (ipv6_opt_accepted(sk, skb) ||
1156 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1109 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1110 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1176,14 +1129,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1176 */ 1129 */
1177 if (tmp_opt.saw_tstamp && 1130 if (tmp_opt.saw_tstamp &&
1178 tcp_death_row.sysctl_tw_recycle && 1131 tcp_death_row.sysctl_tw_recycle &&
1179 (dst = inet6_csk_route_req(sk, req)) != NULL && 1132 (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1180 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL && 1133 if (!tcp_peer_is_proven(req, dst, true)) {
1181 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182 &treq->rmt_addr)) {
1183 inet_peer_refcheck(peer);
1184 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 (s32)(peer->tcp_ts - req->ts_recent) >
1186 TCP_PAWS_WINDOW) {
1187 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1134 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 goto drop_and_release; 1135 goto drop_and_release;
1189 } 1136 }
@@ -1192,8 +1139,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1192 else if (!sysctl_tcp_syncookies && 1139 else if (!sysctl_tcp_syncookies &&
1193 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1140 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 (sysctl_max_syn_backlog >> 2)) && 1141 (sysctl_max_syn_backlog >> 2)) &&
1195 (!peer || !peer->tcp_ts_stamp) && 1142 !tcp_peer_is_proven(req, dst, false)) {
1196 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197 /* Without syncookies last quarter of 1143 /* Without syncookies last quarter of
1198 * backlog is filled with destinations, 1144 * backlog is filled with destinations,
1199 * proven to be alive. 1145 * proven to be alive.
@@ -1212,9 +1158,10 @@ have_isn:
1212 tcp_rsk(req)->snt_isn = isn; 1158 tcp_rsk(req)->snt_isn = isn;
1213 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1159 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214 1160
1215 security_inet_conn_request(sk, skb, req); 1161 if (security_inet_conn_request(sk, skb, req))
1162 goto drop_and_release;
1216 1163
1217 if (tcp_v6_send_synack(sk, req, 1164 if (tcp_v6_send_synack(sk, dst, &fl6, req,
1218 (struct request_values *)&tmp_ext, 1165 (struct request_values *)&tmp_ext,
1219 skb_get_queue_mapping(skb)) || 1166 skb_get_queue_mapping(skb)) ||
1220 want_cookie) 1167 want_cookie)
@@ -1241,10 +1188,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1241 struct inet_sock *newinet; 1188 struct inet_sock *newinet;
1242 struct tcp_sock *newtp; 1189 struct tcp_sock *newtp;
1243 struct sock *newsk; 1190 struct sock *newsk;
1244 struct ipv6_txoptions *opt;
1245#ifdef CONFIG_TCP_MD5SIG 1191#ifdef CONFIG_TCP_MD5SIG
1246 struct tcp_md5sig_key *key; 1192 struct tcp_md5sig_key *key;
1247#endif 1193#endif
1194 struct flowi6 fl6;
1248 1195
1249 if (skb->protocol == htons(ETH_P_IP)) { 1196 if (skb->protocol == htons(ETH_P_IP)) {
1250 /* 1197 /*
@@ -1301,13 +1248,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1301 } 1248 }
1302 1249
1303 treq = inet6_rsk(req); 1250 treq = inet6_rsk(req);
1304 opt = np->opt;
1305 1251
1306 if (sk_acceptq_is_full(sk)) 1252 if (sk_acceptq_is_full(sk))
1307 goto out_overflow; 1253 goto out_overflow;
1308 1254
1309 if (!dst) { 1255 if (!dst) {
1310 dst = inet6_csk_route_req(sk, req); 1256 dst = inet6_csk_route_req(sk, &fl6, req);
1311 if (!dst) 1257 if (!dst)
1312 goto out; 1258 goto out;
1313 } 1259 }
@@ -1353,7 +1299,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1353 /* Clone pktoptions received with SYN */ 1299 /* Clone pktoptions received with SYN */
1354 newnp->pktoptions = NULL; 1300 newnp->pktoptions = NULL;
1355 if (treq->pktopts != NULL) { 1301 if (treq->pktopts != NULL) {
1356 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); 1302 newnp->pktoptions = skb_clone(treq->pktopts,
1303 sk_gfp_atomic(sk, GFP_ATOMIC));
1357 consume_skb(treq->pktopts); 1304 consume_skb(treq->pktopts);
1358 treq->pktopts = NULL; 1305 treq->pktopts = NULL;
1359 if (newnp->pktoptions) 1306 if (newnp->pktoptions)
@@ -1370,11 +1317,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1370 but we do one more thing there: reattach optmem 1317 but we do one more thing there: reattach optmem
1371 to newsk. 1318 to newsk.
1372 */ 1319 */
1373 if (opt) { 1320 if (np->opt)
1374 newnp->opt = ipv6_dup_options(newsk, opt); 1321 newnp->opt = ipv6_dup_options(newsk, np->opt);
1375 if (opt != np->opt)
1376 sock_kfree_s(sk, opt, opt->tot_len);
1377 }
1378 1322
1379 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1323 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1380 if (newnp->opt) 1324 if (newnp->opt)
@@ -1406,7 +1350,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1406 * across. Shucks. 1350 * across. Shucks.
1407 */ 1351 */
1408 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr, 1352 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1409 AF_INET6, key->key, key->keylen, GFP_ATOMIC); 1353 AF_INET6, key->key, key->keylen,
1354 sk_gfp_atomic(sk, GFP_ATOMIC));
1410 } 1355 }
1411#endif 1356#endif
1412 1357
@@ -1421,8 +1366,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1421out_overflow: 1366out_overflow:
1422 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1367 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1423out_nonewsk: 1368out_nonewsk:
1424 if (opt && opt != np->opt)
1425 sock_kfree_s(sk, opt, opt->tot_len);
1426 dst_release(dst); 1369 dst_release(dst);
1427out: 1370out:
1428 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1371 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
@@ -1501,7 +1444,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1501 --ANK (980728) 1444 --ANK (980728)
1502 */ 1445 */
1503 if (np->rxopt.all) 1446 if (np->rxopt.all)
1504 opt_skb = skb_clone(skb, GFP_ATOMIC); 1447 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1505 1448
1506 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1449 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1507 sock_rps_save_rxhash(sk, skb); 1450 sock_rps_save_rxhash(sk, skb);
@@ -1733,42 +1676,47 @@ do_time_wait:
1733 goto discard_it; 1676 goto discard_it;
1734} 1677}
1735 1678
1736static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it) 1679static void tcp_v6_early_demux(struct sk_buff *skb)
1737{ 1680{
1738 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk); 1681 const struct ipv6hdr *hdr;
1739 struct ipv6_pinfo *np = inet6_sk(sk); 1682 const struct tcphdr *th;
1740 struct inet_peer *peer; 1683 struct sock *sk;
1741 1684
1742 if (!rt || 1685 if (skb->pkt_type != PACKET_HOST)
1743 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) { 1686 return;
1744 peer = inet_getpeer_v6(&np->daddr, 1);
1745 *release_it = true;
1746 } else {
1747 if (!rt->rt6i_peer)
1748 rt6_bind_peer(rt, 1);
1749 peer = rt->rt6i_peer;
1750 *release_it = false;
1751 }
1752 1687
1753 return peer; 1688 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1754} 1689 return;
1755 1690
1756static void *tcp_v6_tw_get_peer(struct sock *sk) 1691 hdr = ipv6_hdr(skb);
1757{ 1692 th = tcp_hdr(skb);
1758 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1759 const struct inet_timewait_sock *tw = inet_twsk(sk);
1760 1693
1761 if (tw->tw_family == AF_INET) 1694 if (th->doff < sizeof(struct tcphdr) / 4)
1762 return tcp_v4_tw_get_peer(sk); 1695 return;
1763 1696
1764 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1); 1697 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1698 &hdr->saddr, th->source,
1699 &hdr->daddr, ntohs(th->dest),
1700 inet6_iif(skb));
1701 if (sk) {
1702 skb->sk = sk;
1703 skb->destructor = sock_edemux;
1704 if (sk->sk_state != TCP_TIME_WAIT) {
1705 struct dst_entry *dst = sk->sk_rx_dst;
1706 struct inet_sock *icsk = inet_sk(sk);
1707 if (dst)
1708 dst = dst_check(dst, 0);
1709 if (dst &&
1710 icsk->rx_dst_ifindex == inet6_iif(skb))
1711 skb_dst_set_noref(skb, dst);
1712 }
1713 }
1765} 1714}
1766 1715
1767static struct timewait_sock_ops tcp6_timewait_sock_ops = { 1716static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1768 .twsk_obj_size = sizeof(struct tcp6_timewait_sock), 1717 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1769 .twsk_unique = tcp_twsk_unique, 1718 .twsk_unique = tcp_twsk_unique,
1770 .twsk_destructor= tcp_twsk_destructor, 1719 .twsk_destructor= tcp_twsk_destructor,
1771 .twsk_getpeer = tcp_v6_tw_get_peer,
1772}; 1720};
1773 1721
1774static const struct inet_connection_sock_af_ops ipv6_specific = { 1722static const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -1777,7 +1725,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1777 .rebuild_header = inet6_sk_rebuild_header, 1725 .rebuild_header = inet6_sk_rebuild_header,
1778 .conn_request = tcp_v6_conn_request, 1726 .conn_request = tcp_v6_conn_request,
1779 .syn_recv_sock = tcp_v6_syn_recv_sock, 1727 .syn_recv_sock = tcp_v6_syn_recv_sock,
1780 .get_peer = tcp_v6_get_peer,
1781 .net_header_len = sizeof(struct ipv6hdr), 1728 .net_header_len = sizeof(struct ipv6hdr),
1782 .net_frag_header_len = sizeof(struct frag_hdr), 1729 .net_frag_header_len = sizeof(struct frag_hdr),
1783 .setsockopt = ipv6_setsockopt, 1730 .setsockopt = ipv6_setsockopt,
@@ -1809,7 +1756,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1809 .rebuild_header = inet_sk_rebuild_header, 1756 .rebuild_header = inet_sk_rebuild_header,
1810 .conn_request = tcp_v6_conn_request, 1757 .conn_request = tcp_v6_conn_request,
1811 .syn_recv_sock = tcp_v6_syn_recv_sock, 1758 .syn_recv_sock = tcp_v6_syn_recv_sock,
1812 .get_peer = tcp_v4_get_peer,
1813 .net_header_len = sizeof(struct iphdr), 1759 .net_header_len = sizeof(struct iphdr),
1814 .setsockopt = ipv6_setsockopt, 1760 .setsockopt = ipv6_setsockopt,
1815 .getsockopt = ipv6_getsockopt, 1761 .getsockopt = ipv6_getsockopt,
@@ -2048,6 +1994,8 @@ struct proto tcpv6_prot = {
2048 .sendmsg = tcp_sendmsg, 1994 .sendmsg = tcp_sendmsg,
2049 .sendpage = tcp_sendpage, 1995 .sendpage = tcp_sendpage,
2050 .backlog_rcv = tcp_v6_do_rcv, 1996 .backlog_rcv = tcp_v6_do_rcv,
1997 .release_cb = tcp_release_cb,
1998 .mtu_reduced = tcp_v6_mtu_reduced,
2051 .hash = tcp_v6_hash, 1999 .hash = tcp_v6_hash,
2052 .unhash = inet_unhash, 2000 .unhash = inet_unhash,
2053 .get_port = inet_csk_get_port, 2001 .get_port = inet_csk_get_port,
@@ -2069,12 +2017,13 @@ struct proto tcpv6_prot = {
2069 .compat_setsockopt = compat_tcp_setsockopt, 2017 .compat_setsockopt = compat_tcp_setsockopt,
2070 .compat_getsockopt = compat_tcp_getsockopt, 2018 .compat_getsockopt = compat_tcp_getsockopt,
2071#endif 2019#endif
2072#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 2020#ifdef CONFIG_MEMCG_KMEM
2073 .proto_cgroup = tcp_proto_cgroup, 2021 .proto_cgroup = tcp_proto_cgroup,
2074#endif 2022#endif
2075}; 2023};
2076 2024
2077static const struct inet6_protocol tcpv6_protocol = { 2025static const struct inet6_protocol tcpv6_protocol = {
2026 .early_demux = tcp_v6_early_demux,
2078 .handler = tcp_v6_rcv, 2027 .handler = tcp_v6_rcv,
2079 .err_handler = tcp_v6_err, 2028 .err_handler = tcp_v6_err,
2080 .gso_send_check = tcp_v6_gso_send_check, 2029 .gso_send_check = tcp_v6_gso_send_check,
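
tcp_v6_err() no longer drops ICMPV6_PKT_TOOBIG when the socket is owned by user context: it records the MTU in tp->mtu_info, sets TCP_MTU_REDUCED_DEFERRED in tp->tsq_flags, and the new release_cb hook (tcp_release_cb) runs tcp_v6_mtu_reduced() once the lock is released. A userspace sketch of this defer-and-replay pattern; all names are illustrative mocks:

    #include <stdbool.h>
    #include <stdio.h>

    #define MTU_REDUCED_DEFERRED (1u << 0)   /* models TCP_MTU_REDUCED_DEFERRED */

    struct mock_sock {
        bool owned_by_user;
        unsigned int mtu_info;   /* models tp->mtu_info */
        unsigned int flags;      /* models tp->tsq_flags */
    };

    static void mtu_reduced(struct mock_sock *sk)
    {
        printf("sync MSS to PMTU %u and retransmit\n", sk->mtu_info);
    }

    static void icmp_too_big(struct mock_sock *sk, unsigned int mtu)
    {
        sk->mtu_info = mtu;
        if (!sk->owned_by_user)
            mtu_reduced(sk);                   /* handle immediately */
        else
            sk->flags |= MTU_REDUCED_DEFERRED; /* replay at unlock time */
    }

    static void release_cb(struct mock_sock *sk)
    {
        if (sk->flags & MTU_REDUCED_DEFERRED) {
            sk->flags &= ~MTU_REDUCED_DEFERRED;
            mtu_reduced(sk);
        }
    }

    int main(void)
    {
        struct mock_sock sk = { .owned_by_user = true };

        icmp_too_big(&sk, 1280);  /* deferred: the lock is held */
        release_cb(&sk);          /* work runs when the lock drops */
        return 0;
    }
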
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f05099fc5901..99d0077b56b8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -48,6 +48,7 @@
48 48
49#include <linux/proc_fs.h> 49#include <linux/proc_fs.h>
50#include <linux/seq_file.h> 50#include <linux/seq_file.h>
51#include <trace/events/skb.h>
51#include "udp_impl.h" 52#include "udp_impl.h"
52 53
53int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) 54int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -385,15 +386,16 @@ try_again:
385 386
386 if (skb_csum_unnecessary(skb)) 387 if (skb_csum_unnecessary(skb))
387 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 388 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
388 msg->msg_iov, copied ); 389 msg->msg_iov, copied);
389 else { 390 else {
390 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 391 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
391 if (err == -EINVAL) 392 if (err == -EINVAL)
392 goto csum_copy_err; 393 goto csum_copy_err;
393 } 394 }
394 if (err) 395 if (unlikely(err)) {
396 trace_kfree_skb(skb, udpv6_recvmsg);
395 goto out_free; 397 goto out_free;
396 398 }
397 if (!peeked) { 399 if (!peeked) {
398 if (is_udp4) 400 if (is_udp4)
399 UDP_INC_STATS_USER(sock_net(sk), 401 UDP_INC_STATS_USER(sock_net(sk),
@@ -479,6 +481,11 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
479 if (sk == NULL) 481 if (sk == NULL)
480 return; 482 return;
481 483
484 if (type == ICMPV6_PKT_TOOBIG)
485 ip6_sk_update_pmtu(skb, sk, info);
486 if (type == NDISC_REDIRECT)
487 ip6_sk_redirect(skb, sk);
488
482 np = inet6_sk(sk); 489 np = inet6_sk(sk);
483 490
484 if (!icmpv6_err_convert(type, code, &err) && !np->recverr) 491 if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8625fba96db9..ef39812107b1 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -99,12 +99,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
99 if (!xdst->u.rt6.rt6i_idev) 99 if (!xdst->u.rt6.rt6i_idev)
100 return -ENODEV; 100 return -ENODEV;
101 101
102 xdst->u.rt6.rt6i_peer = rt->rt6i_peer; 102 rt6_transfer_peer(&xdst->u.rt6, rt);
103 if (rt->rt6i_peer)
104 atomic_inc(&rt->rt6i_peer->refcnt);
105 103
106 /* Sheit... I remember I did this right. Apparently, 104 /* Sheit... I remember I did this right. Apparently,
107 * it was magically lost, so this code needs audit */ 105 * it was magically lost, so this code needs audit */
106 xdst->u.rt6.n = neigh_clone(rt->n);
108 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | 107 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
109 RTF_LOCAL); 108 RTF_LOCAL);
110 xdst->u.rt6.rt6i_metric = rt->rt6i_metric; 109 xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
@@ -208,12 +207,22 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
208 return dst_entries_get_fast(ops) > ops->gc_thresh * 2; 207 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
209} 208}
210 209
211static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) 210static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
211 struct sk_buff *skb, u32 mtu)
212{ 212{
213 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 213 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
214 struct dst_entry *path = xdst->route; 214 struct dst_entry *path = xdst->route;
215 215
216 path->ops->update_pmtu(path, mtu); 216 path->ops->update_pmtu(path, sk, skb, mtu);
217}
218
219static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
220 struct sk_buff *skb)
221{
222 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
223 struct dst_entry *path = xdst->route;
224
225 path->ops->redirect(path, sk, skb);
217} 226}
218 227
219static void xfrm6_dst_destroy(struct dst_entry *dst) 228static void xfrm6_dst_destroy(struct dst_entry *dst)
@@ -223,8 +232,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
223 if (likely(xdst->u.rt6.rt6i_idev)) 232 if (likely(xdst->u.rt6.rt6i_idev))
224 in6_dev_put(xdst->u.rt6.rt6i_idev); 233 in6_dev_put(xdst->u.rt6.rt6i_idev);
225 dst_destroy_metrics_generic(dst); 234 dst_destroy_metrics_generic(dst);
226 if (likely(xdst->u.rt6.rt6i_peer)) 235 if (rt6_has_peer(&xdst->u.rt6)) {
227 inet_putpeer(xdst->u.rt6.rt6i_peer); 236 struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
237 inet_putpeer(peer);
238 }
228 xfrm_dst_destroy(xdst); 239 xfrm_dst_destroy(xdst);
229} 240}
230 241
@@ -260,6 +271,7 @@ static struct dst_ops xfrm6_dst_ops = {
260 .protocol = cpu_to_be16(ETH_P_IPV6), 271 .protocol = cpu_to_be16(ETH_P_IPV6),
261 .gc = xfrm6_garbage_collect, 272 .gc = xfrm6_garbage_collect,
262 .update_pmtu = xfrm6_update_pmtu, 273 .update_pmtu = xfrm6_update_pmtu,
274 .redirect = xfrm6_redirect,
263 .cow_metrics = dst_cow_metrics_generic, 275 .cow_metrics = dst_cow_metrics_generic,
264 .destroy = xfrm6_dst_destroy, 276 .destroy = xfrm6_dst_destroy,
265 .ifdown = xfrm6_dst_ifdown, 277 .ifdown = xfrm6_dst_ifdown,
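
The new xfrm6_redirect() callback mirrors xfrm6_update_pmtu(): a stacked xfrm dst simply delegates both operations to the route underneath it (xdst->route). A simplified sketch of that vtable delegation, with reduced types for illustration:

    #include <stdio.h>

    struct dst;

    struct dst_ops {
        void (*update_pmtu)(struct dst *dst, unsigned int mtu);
        void (*redirect)(struct dst *dst);
    };

    struct dst {
        const struct dst_ops *ops;
        struct dst *path;            /* models xdst->route */
    };

    static void path_update_pmtu(struct dst *dst, unsigned int mtu)
    {
        (void)dst;
        printf("inner route: new PMTU %u\n", mtu);
    }

    static void path_redirect(struct dst *dst)
    {
        (void)dst;
        printf("inner route: switch next hop\n");
    }

    /* The stacked entry forwards both callbacks to the path below it. */
    static void stacked_update_pmtu(struct dst *dst, unsigned int mtu)
    {
        dst->path->ops->update_pmtu(dst->path, mtu);
    }

    static void stacked_redirect(struct dst *dst)
    {
        dst->path->ops->redirect(dst->path);
    }

    static const struct dst_ops inner_ops = {
        .update_pmtu = path_update_pmtu,
        .redirect = path_redirect,
    };

    static const struct dst_ops stacked_ops = {
        .update_pmtu = stacked_update_pmtu,
        .redirect = stacked_redirect,
    };

    int main(void)
    {
        struct dst inner = { .ops = &inner_ops };
        struct dst outer = { .ops = &stacked_ops, .path = &inner };

        outer.ops->update_pmtu(&outer, 1280);
        outer.ops->redirect(&outer);
        return 0;
    }
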
diff --git a/net/ipx/Makefile b/net/ipx/Makefile
index 4b95e3ea0f8b..440fafa9fd07 100644
--- a/net/ipx/Makefile
+++ b/net/ipx/Makefile
@@ -4,5 +4,5 @@
4 4
5obj-$(CONFIG_IPX) += ipx.o 5obj-$(CONFIG_IPX) += ipx.o
6 6
7ipx-y := af_ipx.o ipx_route.o ipx_proc.o 7ipx-y := af_ipx.o ipx_route.o ipx_proc.o pe2.o
8ipx-$(CONFIG_SYSCTL) += sysctl_net_ipx.o 8ipx-$(CONFIG_SYSCTL) += sysctl_net_ipx.o
diff --git a/net/ethernet/pe2.c b/net/ipx/pe2.c
index 85d574addbc1..32dcd601ab32 100644
--- a/net/ethernet/pe2.c
+++ b/net/ipx/pe2.c
@@ -28,10 +28,8 @@ struct datalink_proto *make_EII_client(void)
28 28
29 return proto; 29 return proto;
30} 30}
31EXPORT_SYMBOL(make_EII_client);
32 31
33void destroy_EII_client(struct datalink_proto *dl) 32void destroy_EII_client(struct datalink_proto *dl)
34{ 33{
35 kfree(dl); 34 kfree(dl);
36} 35}
37EXPORT_SYMBOL(destroy_EII_client);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index bb14c3477680..bb738c9f9146 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -955,7 +955,7 @@ out:
955 * The main difference with a "standard" connect is that with IrDA we need 955 * The main difference with a "standard" connect is that with IrDA we need
956 * to resolve the service name into a TSAP selector (in TCP, port number 956 * to resolve the service name into a TSAP selector (in TCP, port number
957 * doesn't have to be resolved). 957 * doesn't have to be resolved).
958 * Because of this service name resoltion, we can offer "auto-connect", 958 * Because of this service name resolution, we can offer "auto-connect",
959 * where we connect to a service without specifying a destination address. 959 * where we connect to a service without specifying a destination address.
960 * 960 *
961 * Note : by consulting "errno", the user space caller may learn the cause 961 * Note : by consulting "errno", the user space caller may learn the cause
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 32dcaac70b0c..4664855222f4 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
296 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 296 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
297 /* Bigger param length comes from CMD_GET_MEDIA_CHAR */ 297 /* Bigger param length comes from CMD_GET_MEDIA_CHAR */
298 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") + 298 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
299 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BORADCAST") + 299 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
300 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") + 300 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
301 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"), 301 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
302 GFP_ATOMIC); 302 GFP_ATOMIC);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index f06947c4fa82..7152624ed5f1 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -523,7 +523,7 @@ void *hashbin_remove_first( hashbin_t *hashbin)
523 * Dequeue the entry... 523 * Dequeue the entry...
524 */ 524 */
525 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 525 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
526 (irda_queue_t*) entry ); 526 entry);
527 hashbin->hb_size--; 527 hashbin->hb_size--;
528 entry->q_next = NULL; 528 entry->q_next = NULL;
529 entry->q_prev = NULL; 529 entry->q_prev = NULL;
@@ -615,7 +615,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
615 */ 615 */
616 if ( found ) { 616 if ( found ) {
617 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 617 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
618 (irda_queue_t*) entry ); 618 entry);
619 hashbin->hb_size--; 619 hashbin->hb_size--;
620 620
621 /* 621 /*
@@ -685,7 +685,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
685 * Dequeue the entry... 685 * Dequeue the entry...
686 */ 686 */
687 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 687 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
688 (irda_queue_t*) entry ); 688 entry);
689 hashbin->hb_size--; 689 hashbin->hb_size--;
690 entry->q_next = NULL; 690 entry->q_next = NULL;
691 entry->q_prev = NULL; 691 entry->q_prev = NULL;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 07d7d55a1b93..cd6f7a991d80 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 		skb_trim(skb, skb->dev->mtu);
 	}
 	skb->protocol = ETH_P_AF_IUCV;
-	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (!nskb)
 		return -ENOMEM;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 32b2155e7ab4..393355d37b47 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1128,6 +1128,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	int headroom;
 	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
 	int udp_len;
+	int ret = NET_XMIT_SUCCESS;
 
 	/* Check that there's enough headroom in the skb to insert IP,
 	 * UDP and L2TP headers. If not enough, expand it to
@@ -1137,8 +1138,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 		   uhlen + hdr_len;
 	old_headroom = skb_headroom(skb);
 	if (skb_cow_head(skb, headroom)) {
-		dev_kfree_skb(skb);
-		goto abort;
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
 	}
 
 	new_headroom = skb_headroom(skb);
@@ -1156,7 +1157,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
-		dev_kfree_skb(skb);
+		kfree_skb(skb);
+		ret = NET_XMIT_DROP;
 		goto out_unlock;
 	}
 
1162 1164
@@ -1215,8 +1217,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 out_unlock:
 	bh_unlock_sock(sk);
 
-abort:
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
 
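
With this change l2tp_xmit_skb() reports NET_XMIT_SUCCESS or NET_XMIT_DROP instead of always returning 0, and dropped packets are freed with kfree_skb() rather than dev_kfree_skb(). The distinction matters for drop accounting: dev_kfree_skb() is an alias for consume_skb(), which records the skb as normally consumed, while kfree_skb() fires the drop-monitor tracepoint. Simplified from include/linux/skbuff.h:

    /* simplified from include/linux/skbuff.h */
    #define dev_kfree_skb(a)    consume_skb(a)

    void kfree_skb(struct sk_buff *skb);    /* traced as a drop */
    void consume_skb(struct sk_buff *skb);  /* traced as consumed */
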
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 185f12f4a5fa..f9ee74deeac2 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -42,6 +42,12 @@ struct l2tp_eth {
 	struct sock *tunnel_sock;
 	struct l2tp_session *session;
 	struct list_head list;
+	atomic_long_t tx_bytes;
+	atomic_long_t tx_packets;
+	atomic_long_t tx_dropped;
+	atomic_long_t rx_bytes;
+	atomic_long_t rx_packets;
+	atomic_long_t rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -87,25 +93,45 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct l2tp_eth *priv = netdev_priv(dev);
 	struct l2tp_session *session = priv->session;
+	unsigned int len = skb->len;
+	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
+
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		atomic_long_add(len, &priv->tx_bytes);
+		atomic_long_inc(&priv->tx_packets);
+	} else {
+		atomic_long_inc(&priv->tx_dropped);
+	}
+	return NETDEV_TX_OK;
+}
 
-	l2tp_xmit_skb(session, skb, session->hdr_len);
-
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
-
-	return 0;
+static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+						      struct rtnl_link_stats64 *stats)
+{
+	struct l2tp_eth *priv = netdev_priv(dev);
+
+	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
+	stats->tx_packets = atomic_long_read(&priv->tx_packets);
+	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
+	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
+	stats->rx_packets = atomic_long_read(&priv->rx_packets);
+	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+	return stats;
 }
 
+
 static struct net_device_ops l2tp_eth_netdev_ops = {
 	.ndo_init		= l2tp_eth_dev_init,
 	.ndo_uninit		= l2tp_eth_dev_uninit,
 	.ndo_start_xmit		= l2tp_eth_dev_xmit,
+	.ndo_get_stats64	= l2tp_eth_get_stats64,
 };
 
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
+	dev->features		|= NETIF_F_LLTX;
 	dev->netdev_ops		= &l2tp_eth_netdev_ops;
 	dev->destructor		= free_netdev;
 }
@@ -114,17 +140,17 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 {
 	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
 	struct net_device *dev = spriv->dev;
+	struct l2tp_eth *priv = netdev_priv(dev);
 
 	if (session->debug & L2TP_MSG_DATA) {
 		unsigned int length;
-		u8 *ptr = skb->data;
 
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
 			goto error;
 
 		pr_debug("%s: eth recv\n", session->name);
-		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
 	}
 
 	if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -139,15 +165,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 	nf_reset(skb);
 
 	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += data_len;
-	} else
-		dev->stats.rx_errors++;
-
+		atomic_long_inc(&priv->rx_packets);
+		atomic_long_add(data_len, &priv->rx_bytes);
+	} else {
+		atomic_long_inc(&priv->rx_errors);
+	}
 	return;
 
 error:
-	atomic_long_inc(&priv->rx_errors);
 	kfree_skb(skb);
 }
 
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ddc553e76671..d71cd9229a47 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -72,7 +72,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 	void *hdr;
 	int ret = -ENOBUFS;
 
-	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		ret = -ENOMEM;
 		goto out;
@@ -353,7 +353,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		ret = -ENOMEM;
 		goto out;
@@ -699,7 +699,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		ret = -ENOMEM;
 		goto out;
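
nlmsg_new() takes a payload size and adds the netlink header itself, so passing NLMSG_GOODSIZE (a total skb size chosen to fit nicely in a page) over-allocates past that page-friendly size; NLMSG_DEFAULT_SIZE already has the header subtracted. Roughly, from include/net/netlink.h:

    /* simplified from include/net/netlink.h */
    #define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)

    static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
    {
            return alloc_skb(nlmsg_total_size(payload), flags);
    }
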
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8ef6b9416cba..286366ef8930 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1522,8 +1522,8 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
  * handler, according to whether the PPPoX socket is a for a regular session
  * or the special tunnel type.
  */
-static int pppol2tp_getsockopt(struct socket *sock, int level,
-			       int optname, char __user *optval, int __user *optlen)
+static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
 	struct l2tp_session *session;
@@ -1535,7 +1535,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
 	if (level != SOL_PPPOL2TP)
 		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
 
-	if (get_user(len, (int __user *) optlen))
+	if (get_user(len, optlen))
 		return -EFAULT;
 
 	len = min_t(unsigned int, len, sizeof(int));
@@ -1568,7 +1568,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
 	err = pppol2tp_session_getsockopt(sk, session, optname, &val);
 
 	err = -EFAULT;
-	if (put_user(len, (int __user *) optlen))
+	if (put_user(len, optlen))
 		goto end_put_sess;
 
 	if (copy_to_user((void __user *) optval, &val, len))
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index fe5453c3e719..f6fe4d400502 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
  *	@sock: Socket to set options on.
  *	@level: Socket level user is requesting operations on.
  *	@optname: Operation name.
- *	@optval User provided operation data.
+ *	@optval: User provided operation data.
  *	@optlen: Length of optval.
  *
  *	Set various connection specific parameters.
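
This hunk and the llc_station.c hunks below are pure kernel-doc fixes: the tooling only recognizes parameters written as "@name:" with a colon, so "@optval User ..." or "@state - ..." silently drops the description, and a doc block must open with "/**" to be parsed at all. The expected shape, sketched on a hypothetical function:

    /**
     * example_setsockopt - set connection specific parameters
     * @sock: Socket to set options on.
     * @optval: User provided operation data.
     *
     * Kernel-doc body text follows the parameter block.
     */
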
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index cf4aea3ba30f..39a8d8924b9c 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -30,12 +30,12 @@
  *
  * SAP and connection resource manager, one per adapter.
  *
- * @state - state of station
- * @xid_r_count - XID response PDU counter
- * @mac_sa - MAC source address
- * @sap_list - list of related SAPs
- * @ev_q - events entering state mach.
- * @mac_pdu_q - PDUs ready to send to MAC
+ * @state: state of station
+ * @xid_r_count: XID response PDU counter
+ * @mac_sa: MAC source address
+ * @sap_list: list of related SAPs
+ * @ev_q: events entering state mach.
+ * @mac_pdu_q: PDUs ready to send to MAC
  */
 struct llc_station {
 	u8 state;
@@ -646,7 +646,7 @@ static void llc_station_service_events(void)
 }
 
 /**
- *	llc_station_state_process: queue event and try to process queue.
+ *	llc_station_state_process - queue event and try to process queue.
  *	@skb: Address of the event
  *
  *	Queues an event (on the station event queue) for handling by the
@@ -672,7 +672,7 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
 	}
 }
 
-/*
+/**
  *	llc_station_rcv - send received pdu to the station state machine
  *	@skb: received frame.
  *
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8d249d705980..63af25458fda 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -107,6 +107,19 @@ config MAC80211_DEBUGFS
 
 	  Say N unless you know you need this.
 
+config MAC80211_MESSAGE_TRACING
+	bool "Trace all mac80211 debug messages"
+	depends on MAC80211
+	---help---
+	  Select this option to have mac80211 register the
+	  mac80211_msg trace subsystem with tracepoints to
+	  collect all debugging messages, independent of
+	  printing them into the kernel log.
+
+	  The overhead in this option is that all the messages
+	  need to be present in the binary and formatted at
+	  runtime for tracing.
+
 menuconfig MAC80211_DEBUG_MENU
 	bool "Select mac80211 debugging features"
 	depends on MAC80211
@@ -140,26 +153,35 @@ config MAC80211_VERBOSE_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_HT_DEBUG
-	bool "Verbose HT debugging"
+config MAC80211_MLME_DEBUG
+	bool "Verbose managed MLME output"
 	depends on MAC80211_DEBUG_MENU
 	---help---
-	  This option enables 802.11n High Throughput features
-	  debug tracing output.
-
-	  It should not be selected on production systems as some
+	  Selecting this option causes mac80211 to print out
+	  debugging messages for the managed-mode MLME. It
+	  should not be selected on production systems as some
 	  of the messages are remotely triggerable.
 
 	  Do not select this option.
 
-config MAC80211_TKIP_DEBUG
-	bool "Verbose TKIP debugging"
+config MAC80211_STA_DEBUG
+	bool "Verbose station debugging"
 	depends on MAC80211_DEBUG_MENU
 	---help---
 	  Selecting this option causes mac80211 to print out
-	  very verbose TKIP debugging messages. It should not
-	  be selected on production systems as those messages
-	  are remotely triggerable.
+	  debugging messages for station addition/removal.
+
+	  Do not select this option.
+
+config MAC80211_HT_DEBUG
+	bool "Verbose HT debugging"
+	depends on MAC80211_DEBUG_MENU
+	---help---
+	  This option enables 802.11n High Throughput features
+	  debug tracing output.
+
+	  It should not be selected on production systems as some
+	  of the messages are remotely triggerable.
 
 	  Do not select this option.
 
@@ -174,7 +196,7 @@ config MAC80211_IBSS_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_PS_DEBUG
+config MAC80211_PS_DEBUG
 	bool "Verbose powersave mode debugging"
 	depends on MAC80211_DEBUG_MENU
 	---help---
@@ -186,7 +208,7 @@ config MAC80211_VERBOSE_PS_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_MPL_DEBUG
+config MAC80211_MPL_DEBUG
 	bool "Verbose mesh peer link debugging"
 	depends on MAC80211_DEBUG_MENU
 	depends on MAC80211_MESH
@@ -199,7 +221,7 @@ config MAC80211_VERBOSE_MPL_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_MPATH_DEBUG
+config MAC80211_MPATH_DEBUG
 	bool "Verbose mesh path debugging"
 	depends on MAC80211_DEBUG_MENU
 	depends on MAC80211_MESH
@@ -212,7 +234,7 @@ config MAC80211_VERBOSE_MPATH_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_MHWMP_DEBUG
+config MAC80211_MHWMP_DEBUG
 	bool "Verbose mesh HWMP routing debugging"
 	depends on MAC80211_DEBUG_MENU
 	depends on MAC80211_MESH
@@ -225,7 +247,7 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_MESH_SYNC_DEBUG
+config MAC80211_MESH_SYNC_DEBUG
 	bool "Verbose mesh mesh synchronization debugging"
 	depends on MAC80211_DEBUG_MENU
 	depends on MAC80211_MESH
@@ -236,7 +258,7 @@ config MAC80211_VERBOSE_MESH_SYNC_DEBUG
 
 	  Do not select this option.
 
-config MAC80211_VERBOSE_TDLS_DEBUG
+config MAC80211_TDLS_DEBUG
 	bool "Verbose TDLS debugging"
 	depends on MAC80211_DEBUG_MENU
 	---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3e9d931bba35..a7dd110faafa 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,6 @@ mac80211-y := \
 	scan.o offchannel.o \
 	ht.o agg-tx.o agg-rx.o \
 	ibss.o \
-	work.o \
 	iface.o \
 	rate.o \
 	michael.o \
@@ -25,7 +24,7 @@ mac80211-y := \
 	wme.o \
 	event.o \
 	chan.o \
-	driver-trace.o mlme.o
+	trace.o mlme.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -43,7 +42,7 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
 
 mac80211-$(CONFIG_PM) += pm.o
 
-CFLAGS_driver-trace.o := -I$(src)
+CFLAGS_trace.o := -I$(src)
 
 # objects for PID algorithm
 rc80211_pid-y := rc80211_pid_algo.o
@@ -59,4 +58,4 @@ mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
 
-ccflags-y += -D__CHECK_ENDIAN__
+ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG
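
Building all of mac80211 with -DDEBUG matters because the new *_dbg helpers in the hunks below ultimately funnel into pr_debug()-style printing: without CONFIG_DYNAMIC_DEBUG, pr_debug() only emits anything when DEBUG is defined at compile time. Simplified from include/linux/printk.h:

    /* simplified from include/linux/printk.h */
    #if defined(CONFIG_DYNAMIC_DEBUG)
    #define pr_debug(fmt, ...) dynamic_pr_debug(fmt, ##__VA_ARGS__)
    #elif defined(DEBUG)
    #define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #else
    #define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #endif
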
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index c649188314cc..186d9919b043 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -74,18 +74,17 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 
 	RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG
-	       "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
-	       sta->sta.addr, tid,
-	       initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
-	       (int)reason);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+	ht_dbg(sta->sdata,
+	       "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
+	       sta->sta.addr, tid,
+	       initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
+	       (int)reason);
 
 	if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
 			     &sta->sta, tid, NULL, 0))
-		printk(KERN_DEBUG "HW problem - can not stop rx "
-		       "aggregation for tid %d\n", tid);
+		sdata_info(sta->sdata,
+			   "HW problem - can not stop rx aggregation for tid %d\n",
+			   tid);
 
 	/* check if this is a self generated aggregation halt */
 	if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -160,9 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
 	}
 	rcu_read_unlock();
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+	ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
+
 	set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
 	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
 }
@@ -249,10 +247,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 	status = WLAN_STATUS_REQUEST_DECLINED;
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Suspend in progress. "
-		       "Denying ADDBA request\n");
-#endif
+		ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
 		goto end_no_lock;
 	}
 
@@ -264,10 +259,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 	    (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
 	    (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
-				    mgmt->sa, tid, ba_policy, buf_size);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ht_dbg_ratelimited(sta->sdata,
+				   "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+				   mgmt->sa, tid, ba_policy, buf_size);
 		goto end_no_lock;
 	}
 	/* determine default buffer size */
@@ -282,10 +276,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
 	if (sta->ampdu_mlme.tid_rx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
-				    mgmt->sa, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ht_dbg_ratelimited(sta->sdata,
+				   "unexpected AddBA Req from %pM on tid %u\n",
+				   mgmt->sa, tid);
 
 		/* delete existing Rx BA session on the same tid */
 		___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
@@ -324,10 +317,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 
 	ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
 			       &sta->sta, tid, &start_seq_num, 0);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
+	ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
 	if (ret) {
 		kfree(tid_agg_rx->reorder_buf);
 		kfree(tid_agg_rx->reorder_time);
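
The pattern throughout these mac80211 hunks replaces open-coded `#ifdef CONFIG_MAC80211_HT_DEBUG ... printk(KERN_DEBUG ...)` blocks with an ht_dbg() helper that carries the interface context and compiles away cleanly when the option is off. A hypothetical sketch of such a wrapper (not the exact mac80211 definition):

    /* hypothetical sketch, not the exact mac80211 macro */
    #ifdef CONFIG_MAC80211_HT_DEBUG
    #define ht_dbg(sdata, fmt, ...) \
            pr_debug("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
    #else
    /* "if (0)" keeps printf-format checking while generating no code */
    #define ht_dbg(sdata, fmt, ...) \
            do { \
                    if (0) \
                            pr_debug("%s: " fmt, (sdata)->name, ##__VA_ARGS__); \
            } while (0)
    #endif
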
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 7cf07158805c..d0deb3edae21 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -135,7 +135,8 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
 	bar->control = cpu_to_le16(bar_control);
 	bar->start_seq_num = cpu_to_le16(ssn);
 
-	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+					IEEE80211_TX_CTL_REQ_TX_STATUS;
 	ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 EXPORT_SYMBOL(ieee80211_send_bar);
@@ -184,10 +185,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
 	spin_unlock_bh(&sta->lock);
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
-	       sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
+	       sta->sta.addr, tid);
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
 	del_timer_sync(&tid_tx->session_timer);
@@ -253,17 +252,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	if (!tid_tx ||
 	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "timer expired on tid %d but we are not "
-				"(or no longer) expecting addBA response there\n",
-			tid);
-#endif
+		ht_dbg(sta->sdata,
+		       "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
+		       tid);
 		return;
 	}
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
-#endif
+	ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	rcu_read_unlock();
@@ -323,8 +318,9 @@ ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
 
 	ieee80211_stop_queue_agg(sdata, tid);
 
-	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
-			  " from the pending queue\n", tid))
+	if (WARN(!tid_tx,
+		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
+		 tid))
 		return;
 
 	if (!skb_queue_empty(&tid_tx->pending)) {
@@ -372,10 +368,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
 			       &sta->sta, tid, &start_seq_num, 0);
 	if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - HW unavailable for"
-					" tid %d\n", tid);
-#endif
+		ht_dbg(sdata,
+		       "BA request denied - HW unavailable for tid %d\n", tid);
 		spin_lock_bh(&sta->lock);
 		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 		ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -388,9 +382,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
 	/* activate the timer for the recipient's addBA response */
 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
+	ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
 
 	spin_lock_bh(&sta->lock);
 	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -437,9 +429,7 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
 
 	rcu_read_unlock();
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+	ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
 }
@@ -463,10 +453,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
 		return -EINVAL;
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
-	       pubsta->addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
+	       pubsta->addr, tid);
 
 	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
 	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -476,10 +464,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 		return -EINVAL;
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA sessions blocked. "
-		       "Denying BA session request\n");
-#endif
+		ht_dbg(sdata,
+		       "BA sessions blocked - Denying BA session request\n");
 		return -EINVAL;
 	}
 
@@ -497,10 +483,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	 */
 	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
 	    !sta->sta.ht_cap.ht_supported) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
-		       "does not advertise HT support\n", pubsta->addr);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ht_dbg(sdata,
+		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
+		       pubsta->addr);
 		return -EINVAL;
 	}
 
@@ -520,12 +505,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
 	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
 			HT_AGG_RETRIES_PERIOD)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - "
-		       "waiting a grace period after %d failed requests "
-		       "on tid %u\n",
-		       sta->ampdu_mlme.addba_req_num[tid], tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ht_dbg(sdata,
+		       "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
+		       sta->ampdu_mlme.addba_req_num[tid], tid);
 		ret = -EBUSY;
 		goto err_unlock_sta;
 	}
@@ -533,10 +515,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 	/* check if the TID is not in aggregation flow already */
 	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - session is not "
-		       "idle on tid %u\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ht_dbg(sdata,
+		       "BA request denied - session is not idle on tid %u\n",
+		       tid);
 		ret = -EAGAIN;
 		goto err_unlock_sta;
 	}
@@ -591,9 +572,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
-#endif
+	ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -627,10 +606,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
 	if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
-		       tid, STA_TID_NUM);
-#endif
+		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+		       tid, STA_TID_NUM);
 		return;
 	}
 
@@ -638,9 +615,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 	sta = sta_info_get_bss(sdata, ra);
 	if (!sta) {
 		mutex_unlock(&local->sta_mtx);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+		ht_dbg(sdata, "Could not find station: %pM\n", ra);
 		return;
 	}
 
@@ -648,9 +623,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (WARN_ON(!tid_tx)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "addBA was not requested!\n");
-#endif
+		ht_dbg(sdata, "addBA was not requested!\n");
 		goto unlock;
 	}
 
@@ -750,25 +723,18 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
 	if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
-		       tid, STA_TID_NUM);
-#endif
+		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+		       tid, STA_TID_NUM);
 		return;
 	}
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
-	       ra, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
 
 	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get_bss(sdata, ra);
 	if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+		ht_dbg(sdata, "Could not find station: %pM\n", ra);
 		goto unlock;
 	}
 
@@ -777,9 +743,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
-#endif
+		ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
 		goto unlock_sta;
 	}
 
@@ -855,17 +819,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 		goto out;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif
+		ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
 		goto out;
 	}
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif
+	ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
 
 	/*
 	 * addba_resp_timer may have fired before we got here, and
@@ -874,11 +834,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	 */
 	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
 	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG
-		       "got addBA resp for tid %d but we already gave up\n",
-		       tid);
-#endif
+		ht_dbg(sta->sdata,
+		       "got addBA resp for tid %d but we already gave up\n",
+		       tid);
 		goto out;
 	}
 
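
Besides the debug-macro conversions, the first agg-tx.c hunk also sets IEEE80211_TX_CTL_REQ_TX_STATUS on outgoing BlockAck requests, asking the driver to report whether the BAR was acknowledged. The same opt-in works for any frame built inside mac80211; a minimal sketch:

    /* minimal sketch: opt a frame in to TX status reporting */
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

    info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
    /* the driver later reports the result via ieee80211_tx_status() */
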
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e9cecca5c44d..d41974aacf51 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -20,31 +20,31 @@
 #include "rate.h"
 #include "mesh.h"
 
-static struct net_device *ieee80211_add_iface(struct wiphy *wiphy, char *name,
-					      enum nl80211_iftype type,
-					      u32 *flags,
-					      struct vif_params *params)
+static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+						enum nl80211_iftype type,
+						u32 *flags,
+						struct vif_params *params)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct net_device *dev;
+	struct wireless_dev *wdev;
 	struct ieee80211_sub_if_data *sdata;
 	int err;
 
-	err = ieee80211_if_add(local, name, &dev, type, params);
+	err = ieee80211_if_add(local, name, &wdev, type, params);
 	if (err)
 		return ERR_PTR(err);
 
 	if (type == NL80211_IFTYPE_MONITOR && flags) {
-		sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 		sdata->u.mntr_flags = *flags;
 	}
 
-	return dev;
+	return wdev;
 }
 
-static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev)
+static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-	ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev));
+	ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev));
 
 	return 0;
 }
@@ -353,6 +353,7 @@ void sta_set_rate_info_tx(struct sta_info *sta,
 static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
 	struct timespec uptime;
 
 	sinfo->generation = sdata->local->sta_generation;
@@ -388,7 +389,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 	if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
 	    (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
 		sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
-		sinfo->signal = (s8)sta->last_signal;
+		if (!local->ops->get_rssi ||
+		    drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
+			sinfo->signal = (s8)sta->last_signal;
 		sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
 	}
 
@@ -517,7 +520,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
 	 * network device.
 	 */
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
@@ -546,7 +549,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
 			data[i] = (u8)sinfo.signal_avg;
 			i++;
 	} else {
-		list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		list_for_each_entry(sta, &local->sta_list, list) {
 			/* Make sure this station belongs to the proper dev */
 			if (sta->sdata->dev != dev)
 				continue;
@@ -603,7 +606,7 @@ do_survey:
 	else
 		data[i++] = -1LL;
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	if (WARN_ON(i != STA_STATS_LEN))
 		return;
@@ -629,10 +632,11 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 				  int idx, u8 *mac, struct station_info *sinfo)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	int ret = -ENOENT;
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get_by_idx(sdata, idx);
 	if (sta) {
@@ -641,7 +645,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 		sta_set_sinfo(sta, sinfo);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	return ret;
 }
@@ -658,10 +662,11 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
 				 u8 *mac, struct station_info *sinfo)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	int ret = -ENOENT;
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get_bss(sdata, mac);
 	if (sta) {
@@ -669,11 +674,54 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
 		sta_set_sinfo(sta, sinfo);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	return ret;
 }
 
+static int ieee80211_set_channel(struct wiphy *wiphy,
+				 struct net_device *netdev,
+				 struct ieee80211_channel *chan,
+				 enum nl80211_channel_type channel_type)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = NULL;
+
+	if (netdev)
+		sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+
+	switch (ieee80211_get_channel_mode(local, NULL)) {
+	case CHAN_MODE_HOPPING:
+		return -EBUSY;
+	case CHAN_MODE_FIXED:
+		if (local->oper_channel != chan ||
+		    (!sdata && local->_oper_channel_type != channel_type))
+			return -EBUSY;
+		if (!sdata && local->_oper_channel_type == channel_type)
+			return 0;
+		break;
+	case CHAN_MODE_UNDEFINED:
+		break;
+	}
+
+	if (!ieee80211_set_channel_type(local, sdata, channel_type))
+		return -EBUSY;
+
+	local->oper_channel = chan;
+
+	/* auto-detects changes */
+	ieee80211_hw_config(local, 0);
+
+	return 0;
+}
+
+static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+					 struct ieee80211_channel *chan,
+					 enum nl80211_channel_type channel_type)
+{
+	return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
+}
+
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
 				    const u8 *resp, size_t resp_len)
 {
@@ -788,6 +836,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	if (old)
 		return -EALREADY;
 
+	err = ieee80211_set_channel(wiphy, dev, params->channel,
+				    params->channel_type);
+	if (err)
+		return err;
+
 	/*
 	 * Apply control port protocol, this allows us to
 	 * not encrypt dynamic WEP control frames.
@@ -864,6 +917,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
 	kfree_rcu(old, rcu_head);
 
+	sta_info_flush(sdata->local, sdata);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
 	return 0;
@@ -1482,7 +1536,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
 	if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask))
 		conf->dot11MeshTTL = nconf->dot11MeshTTL;
 	if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
-		conf->dot11MeshTTL = nconf->element_ttl;
+		conf->element_ttl = nconf->element_ttl;
 	if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
 		conf->auto_open_plinks = nconf->auto_open_plinks;
 	if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
@@ -1517,17 +1571,16 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
 		 * announcements, so require this ifmsh to also be a root node
 		 * */
 		if (nconf->dot11MeshGateAnnouncementProtocol &&
-		    !conf->dot11MeshHWMPRootMode) {
-			conf->dot11MeshHWMPRootMode = 1;
+		    !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) {
+			conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN;
 			ieee80211_mesh_root_setup(ifmsh);
 		}
 		conf->dot11MeshGateAnnouncementProtocol =
 			nconf->dot11MeshGateAnnouncementProtocol;
 	}
-	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask))
 		conf->dot11MeshHWMPRannInterval =
 			nconf->dot11MeshHWMPRannInterval;
-	}
 	if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
 		conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
 	if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
@@ -1543,6 +1596,15 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
 		sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
 	}
+	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask))
+		conf->dot11MeshHWMPactivePathToRootTimeout =
+			nconf->dot11MeshHWMPactivePathToRootTimeout;
+	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask))
+		conf->dot11MeshHWMProotInterval =
+			nconf->dot11MeshHWMProotInterval;
+	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
+		conf->dot11MeshHWMPconfirmationInterval =
+			nconf->dot11MeshHWMPconfirmationInterval;
 	return 0;
 }
 
@@ -1558,6 +1620,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
 	err = copy_mesh_setup(ifmsh, setup);
 	if (err)
 		return err;
+
+	err = ieee80211_set_channel(wiphy, dev, setup->channel,
+				    setup->channel_type);
+	if (err)
+		return err;
+
 	ieee80211_start_mesh(sdata);
 
 	return 0;
@@ -1674,54 +1742,7 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
 		return -EINVAL;
 	}
 
-	return 0;
-}
-
-static int ieee80211_set_channel(struct wiphy *wiphy,
-				 struct net_device *netdev,
-				 struct ieee80211_channel *chan,
-				 enum nl80211_channel_type channel_type)
-{
-	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_sub_if_data *sdata = NULL;
-	struct ieee80211_channel *old_oper;
-	enum nl80211_channel_type old_oper_type;
-	enum nl80211_channel_type old_vif_oper_type= NL80211_CHAN_NO_HT;
-
-	if (netdev)
-		sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
-
-	switch (ieee80211_get_channel_mode(local, NULL)) {
-	case CHAN_MODE_HOPPING:
-		return -EBUSY;
-	case CHAN_MODE_FIXED:
-		if (local->oper_channel != chan)
-			return -EBUSY;
-		if (!sdata && local->_oper_channel_type == channel_type)
-			return 0;
-		break;
-	case CHAN_MODE_UNDEFINED:
-		break;
-	}
-
-	if (sdata)
-		old_vif_oper_type = sdata->vif.bss_conf.channel_type;
-	old_oper_type = local->_oper_channel_type;
-
-	if (!ieee80211_set_channel_type(local, sdata, channel_type))
-		return -EBUSY;
-
-	old_oper = local->oper_channel;
-	local->oper_channel = chan;
-
-	/* Update driver if changes were actually made. */
-	if ((old_oper != local->oper_channel) ||
-	    (old_oper_type != local->_oper_channel_type))
-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-
-	if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
-	    old_vif_oper_type != sdata->vif.bss_conf.channel_type)
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
 
 	return 0;
 }
@@ -1743,10 +1764,11 @@ static int ieee80211_resume(struct wiphy *wiphy)
 #endif
 
 static int ieee80211_scan(struct wiphy *wiphy,
-			  struct net_device *dev,
 			  struct cfg80211_scan_request *req)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata;
+
+	sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
 
 	switch (ieee80211_vif_type_p2p(&sdata->vif)) {
 	case NL80211_IFTYPE_STATION:
@@ -2093,6 +2115,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	int i, ret;
 
+	if (!ieee80211_sdata_running(sdata))
+		return -ENETDOWN;
+
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
 		ret = drv_set_bitrate_mask(local, sdata, mask);
 		if (ret)
@@ -2108,143 +2133,291 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2108 return 0; 2133 return 0;
2109} 2134}
2110 2135
2111static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local, 2136static int ieee80211_start_roc_work(struct ieee80211_local *local,
2112 struct net_device *dev, 2137 struct ieee80211_sub_if_data *sdata,
2113 struct ieee80211_channel *chan, 2138 struct ieee80211_channel *channel,
2114 enum nl80211_channel_type chantype, 2139 enum nl80211_channel_type channel_type,
2115 unsigned int duration, u64 *cookie) 2140 unsigned int duration, u64 *cookie,
2141 struct sk_buff *txskb)
2116{ 2142{
2143 struct ieee80211_roc_work *roc, *tmp;
2144 bool queued = false;
2117 int ret; 2145 int ret;
2118 u32 random_cookie;
2119 2146
2120 lockdep_assert_held(&local->mtx); 2147 lockdep_assert_held(&local->mtx);
2121 2148
2122 if (local->hw_roc_cookie) 2149 roc = kzalloc(sizeof(*roc), GFP_KERNEL);
2123 return -EBUSY; 2150 if (!roc)
2124 /* must be nonzero */ 2151 return -ENOMEM;
2125 random_cookie = random32() | 1; 2152
2126 2153 roc->chan = channel;
2127 *cookie = random_cookie; 2154 roc->chan_type = channel_type;
2128 local->hw_roc_dev = dev; 2155 roc->duration = duration;
2129 local->hw_roc_cookie = random_cookie; 2156 roc->req_duration = duration;
2130 local->hw_roc_channel = chan; 2157 roc->frame = txskb;
2131 local->hw_roc_channel_type = chantype; 2158 roc->mgmt_tx_cookie = (unsigned long)txskb;
2132 local->hw_roc_duration = duration; 2159 roc->sdata = sdata;
2133 ret = drv_remain_on_channel(local, chan, chantype, duration); 2160 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
2161 INIT_LIST_HEAD(&roc->dependents);
2162
2163 /* if there's one pending or we're scanning, queue this one */
2164 if (!list_empty(&local->roc_list) || local->scanning)
2165 goto out_check_combine;
2166
2167 /* if not HW assist, just queue & schedule work */
2168 if (!local->ops->remain_on_channel) {
2169 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
2170 goto out_queue;
2171 }
2172
2173 /* otherwise actually kick it off here (for error handling) */
2174
2175 /*
2176 * If the duration is zero, then the driver
2177 * wouldn't actually do anything. Set it to
2178 * 10 for now.
2179 *
2180 * TODO: cancel the off-channel operation
2181 * when we get the SKB's TX status and
2182 * the wait time was zero before.
2183 */
2184 if (!duration)
2185 duration = 10;
2186
2187 ret = drv_remain_on_channel(local, channel, channel_type, duration);
2134 if (ret) { 2188 if (ret) {
2135 local->hw_roc_channel = NULL; 2189 kfree(roc);
2136 local->hw_roc_cookie = 0; 2190 return ret;
2137 } 2191 }
2138 2192
2139 return ret; 2193 roc->started = true;
2194 goto out_queue;
2195
2196 out_check_combine:
2197 list_for_each_entry(tmp, &local->roc_list, list) {
2198 if (tmp->chan != channel || tmp->chan_type != channel_type)
2199 continue;
2200
2201 /*
2202 * Extend this ROC if possible:
2203 *
2204 * If it hasn't started yet, just increase the duration
2205 * and add the new one to the list of dependents.
2206 */
2207 if (!tmp->started) {
2208 list_add_tail(&roc->list, &tmp->dependents);
2209 tmp->duration = max(tmp->duration, roc->duration);
2210 queued = true;
2211 break;
2212 }
2213
2214 /* If it has already started, it's more difficult ... */
2215 if (local->ops->remain_on_channel) {
2216 unsigned long j = jiffies;
2217
+			/*
+			 * In the offloaded ROC case, if it hasn't begun, add
+			 * this new one to the dependent list to be handled
+			 * when the master one begins. If it has begun,
+			 * check that there's still a minimum time left and
+			 * if so, start this one, transmitting the frame, but
+			 * add it to the list directly after this one with a
+			 * reduced time so we'll ask the driver to execute
+			 * it right after finishing the previous one, in the
+			 * hope that it'll also be executed right afterwards,
+			 * effectively extending the old one.
+			 * If there's no minimum time left, just add it to the
+			 * normal list.
+			 */
+			if (!tmp->hw_begun) {
+				list_add_tail(&roc->list, &tmp->dependents);
+				queued = true;
+				break;
+			}
+
+			if (time_before(j + IEEE80211_ROC_MIN_LEFT,
+					tmp->hw_start_time +
+					msecs_to_jiffies(tmp->duration))) {
+				int new_dur;
+
+				ieee80211_handle_roc_started(roc);
+
+				new_dur = roc->duration -
+					  jiffies_to_msecs(tmp->hw_start_time +
+							   msecs_to_jiffies(
+								tmp->duration) -
+							   j);
+
+				if (new_dur > 0) {
+					/* add right after tmp */
+					list_add(&roc->list, &tmp->list);
+				} else {
+					list_add_tail(&roc->list,
+						      &tmp->dependents);
+				}
+				queued = true;
+			}
+		} else if (del_timer_sync(&tmp->work.timer)) {
+			unsigned long new_end;
+
+			/*
+			 * In the software ROC case, cancel the timer, if
+			 * that fails then the finish work is already
+			 * queued/pending and thus we queue the new ROC
+			 * normally, if that succeeds then we can extend
+			 * the timer duration and TX the frame (if any.)
+			 */
+
+			list_add_tail(&roc->list, &tmp->dependents);
+			queued = true;
+
+			new_end = jiffies + msecs_to_jiffies(roc->duration);
+
+			/* ok, it was started & we canceled timer */
+			if (time_after(new_end, tmp->work.timer.expires))
+				mod_timer(&tmp->work.timer, new_end);
+			else
+				add_timer(&tmp->work.timer);
+
+			ieee80211_handle_roc_started(roc);
+		}
+		break;
+	}
+
+ out_queue:
+	if (!queued)
+		list_add_tail(&roc->list, &local->roc_list);
+
+	/*
+	 * cookie is either the roc (for normal roc)
+	 * or the SKB (for mgmt TX)
+	 */
+	if (txskb)
+		*cookie = (unsigned long)txskb;
+	else
+		*cookie = (unsigned long)roc;
+
+	return 0;
 }
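
[Editor's note] The extension arithmetic above can be hard to follow in jiffies. Below is a minimal stand-alone sketch of the same decision, using plain millisecond counters instead of jiffies and a made-up ROC_MIN_LEFT_MS constant; the names and types are simplifications for illustration, not the kernel's (the real code converts with msecs_to_jiffies()/jiffies_to_msecs()). It compiles with a plain C compiler.

/* gcc -o roc-coalesce roc-coalesce.c && ./roc-coalesce */
#include <stdio.h>

#define ROC_MIN_LEFT_MS 10	/* stand-in for IEEE80211_ROC_MIN_LEFT */

int main(void)
{
	unsigned long now = 1000;		/* "jiffies", in ms */
	unsigned long hw_start_time = 900;	/* when the HW ROC began */
	unsigned int old_duration = 200;	/* ms granted to the old ROC */
	unsigned int new_duration = 150;	/* ms wanted by the new ROC */
	unsigned long old_end = hw_start_time + old_duration;

	if (now + ROC_MIN_LEFT_MS < old_end) {
		/* enough time left: piggy-back on the running ROC and only
		 * ask for the part of the new request it doesn't cover */
		int new_dur = (int)new_duration - (int)(old_end - now);

		if (new_dur > 0)
			printf("queue follow-up ROC for %d ms\n", new_dur);
		else
			printf("old ROC fully covers the new request\n");
	} else {
		printf("too little time left, queue normally\n");
	}
	return 0;
}

With the values above, the old ROC ends at t=1100 and 100 ms of it remain, so the follow-up ROC only needs the missing 50 ms -- the same computation as new_dur in the kernel function.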
 
 static int ieee80211_remain_on_channel(struct wiphy *wiphy,
-				       struct net_device *dev,
+				       struct wireless_dev *wdev,
 				       struct ieee80211_channel *chan,
 				       enum nl80211_channel_type channel_type,
 				       unsigned int duration,
 				       u64 *cookie)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct ieee80211_local *local = sdata->local;
+	int ret;
 
-	if (local->ops->remain_on_channel) {
-		int ret;
-
-		mutex_lock(&local->mtx);
-		ret = ieee80211_remain_on_channel_hw(local, dev,
-						     chan, channel_type,
-						     duration, cookie);
-		local->hw_roc_for_tx = false;
-		mutex_unlock(&local->mtx);
-
-		return ret;
-	}
+	mutex_lock(&local->mtx);
+	ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+				       duration, cookie, NULL);
+	mutex_unlock(&local->mtx);
 
-	return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
-					      duration, cookie);
+	return ret;
 }
 
-static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local,
-						 u64 cookie)
+static int ieee80211_cancel_roc(struct ieee80211_local *local,
+				u64 cookie, bool mgmt_tx)
 {
+	struct ieee80211_roc_work *roc, *tmp, *found = NULL;
 	int ret;
 
-	lockdep_assert_held(&local->mtx);
+	mutex_lock(&local->mtx);
+	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+		struct ieee80211_roc_work *dep, *tmp2;
 
-	if (local->hw_roc_cookie != cookie)
-		return -ENOENT;
+		list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
+			if (!mgmt_tx && (unsigned long)dep != cookie)
+				continue;
+			else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
+				continue;
+			/* found dependent item -- just remove it */
+			list_del(&dep->list);
+			mutex_unlock(&local->mtx);
 
-	ret = drv_cancel_remain_on_channel(local);
-	if (ret)
-		return ret;
+			ieee80211_roc_notify_destroy(dep);
+			return 0;
+		}
+
+		if (!mgmt_tx && (unsigned long)roc != cookie)
+			continue;
+		else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
+			continue;
 
-	local->hw_roc_cookie = 0;
-	local->hw_roc_channel = NULL;
+		found = roc;
+		break;
+	}
 
-	ieee80211_recalc_idle(local);
+	if (!found) {
+		mutex_unlock(&local->mtx);
+		return -ENOENT;
+	}
 
-	return 0;
-}
+	/*
+	 * We found the item to cancel, so do that. Note that it
+	 * may have dependents, which we also cancel (and send
+	 * the expired signal for.) Not doing so would be quite
+	 * tricky here, but we may need to fix it later.
+	 */
 
-static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
-					      struct net_device *dev,
-					      u64 cookie)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
+	if (local->ops->remain_on_channel) {
+		if (found->started) {
+			ret = drv_cancel_remain_on_channel(local);
+			if (WARN_ON_ONCE(ret)) {
+				mutex_unlock(&local->mtx);
+				return ret;
+			}
+		}
 
-	if (local->ops->cancel_remain_on_channel) {
-		int ret;
+		list_del(&found->list);
 
-		mutex_lock(&local->mtx);
-		ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
-		mutex_unlock(&local->mtx);
+		if (found->started)
+			ieee80211_start_next_roc(local);
+		mutex_unlock(&local->mtx);
 
-		return ret;
+		ieee80211_roc_notify_destroy(found);
+	} else {
+		/* work may be pending so use it all the time */
+		found->abort = true;
+		ieee80211_queue_delayed_work(&local->hw, &found->work, 0);
+
+		mutex_unlock(&local->mtx);
+
+		/* work will clean up etc */
+		flush_delayed_work(&found->work);
 	}
 
-	return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
+	return 0;
 }
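
[Editor's note] ieee80211_cancel_roc() above matches two kinds of cookies: a plain ROC is identified by the address of the roc item itself, a mgmt-TX ROC by the address of the frame it carries. A minimal user-space model of just that predicate -- the struct below is a simplified stand-in, not the kernel structure:

/* gcc -o roc-cookie roc-cookie.c && ./roc-cookie */
#include <stdio.h>
#include <stdbool.h>

struct roc {
	unsigned long mgmt_tx_cookie;	/* (unsigned long)frame, or 0 */
};

static bool roc_matches(const struct roc *roc, unsigned long cookie,
			bool mgmt_tx)
{
	if (mgmt_tx)
		return roc->mgmt_tx_cookie == cookie;	/* mgmt-TX: the SKB */
	return (unsigned long)roc == cookie;		/* plain: the roc */
}

int main(void)
{
	char frame;			/* stands in for the TX skb */
	struct roc plain = { 0 };
	struct roc tx = { (unsigned long)&frame };

	printf("%d\n", roc_matches(&plain, (unsigned long)&plain, false)); /* 1 */
	printf("%d\n", roc_matches(&tx, (unsigned long)&frame, true));	   /* 1 */
	printf("%d\n", roc_matches(&tx, (unsigned long)&tx, true));	   /* 0 */
	return 0;
}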
 
-static enum work_done_result
-ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
+static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+					      struct wireless_dev *wdev,
+					      u64 cookie)
 {
-	/*
-	 * Use the data embedded in the work struct for reporting
-	 * here so if the driver mangled the SKB before dropping
-	 * it (which is the only way we really should get here)
-	 * then we don't report mangled data.
-	 *
-	 * If there was no wait time, then by the time we get here
-	 * the driver will likely not have reported the status yet,
-	 * so in that case userspace will have to deal with it.
-	 */
-
-	if (wk->offchan_tx.wait && !wk->offchan_tx.status)
-		cfg80211_mgmt_tx_status(wk->sdata->dev,
-					(unsigned long) wk->offchan_tx.frame,
-					wk->data, wk->data_len, false, GFP_KERNEL);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_local *local = sdata->local;
 
-	return WORK_DONE_DESTROY;
+	return ieee80211_cancel_roc(local, cookie, false);
 }
 
-static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			     struct ieee80211_channel *chan, bool offchan,
 			     enum nl80211_channel_type channel_type,
 			     bool channel_type_valid, unsigned int wait,
 			     const u8 *buf, size_t len, bool no_cck,
 			     bool dont_wait_for_ack, u64 *cookie)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct sta_info *sta;
-	struct ieee80211_work *wk;
 	const struct ieee80211_mgmt *mgmt = (void *)buf;
+	bool need_offchan = false;
 	u32 flags;
-	bool is_offchan = false;
+	int ret;
 
 	if (dont_wait_for_ack)
 		flags = IEEE80211_TX_CTL_NO_ACK;
@@ -2252,33 +2425,28 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 		flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
 			IEEE80211_TX_CTL_REQ_TX_STATUS;
 
-	/* Check that we are on the requested channel for transmission */
-	if (chan != local->tmp_channel &&
-	    chan != local->oper_channel)
-		is_offchan = true;
-	if (channel_type_valid &&
-	    (channel_type != local->tmp_channel_type &&
-	     channel_type != local->_oper_channel_type))
-		is_offchan = true;
-
-	if (chan == local->hw_roc_channel) {
-		/* TODO: check channel type? */
-		is_offchan = false;
-		flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
-	}
-
 	if (no_cck)
 		flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
 
-	if (is_offchan && !offchan)
-		return -EBUSY;
-
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_ADHOC:
+		if (!sdata->vif.bss_conf.ibss_joined)
+			need_offchan = true;
+		/* fall through */
+#ifdef CONFIG_MAC80211_MESH
+	case NL80211_IFTYPE_MESH_POINT:
+		if (ieee80211_vif_is_mesh(&sdata->vif) &&
+		    !sdata->u.mesh.mesh_id_len)
+			need_offchan = true;
+		/* fall through */
+#endif
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
-	case NL80211_IFTYPE_MESH_POINT:
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		    !ieee80211_vif_is_mesh(&sdata->vif) &&
+		    !rcu_access_pointer(sdata->bss->beacon))
+			need_offchan = true;
 		if (!ieee80211_is_action(mgmt->frame_control) ||
 		    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
 			break;
@@ -2290,167 +2458,101 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
+		if (!sdata->u.mgd.associated)
+			need_offchan = true;
 		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
+	mutex_lock(&local->mtx);
+
+	/* Check if the operating channel is the requested channel */
+	if (!need_offchan) {
+		need_offchan = chan != local->oper_channel;
+		if (channel_type_valid &&
+		    channel_type != local->_oper_channel_type)
+			need_offchan = true;
+	}
+
+	if (need_offchan && !offchan) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 	skb_reserve(skb, local->hw.extra_tx_headroom);
 
 	memcpy(skb_put(skb, len), buf, len);
 
 	IEEE80211_SKB_CB(skb)->flags = flags;
 
-	if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-		IEEE80211_SKB_CB(skb)->hw_queue =
-			local->hw.offchannel_tx_hw_queue;
-
 	skb->dev = sdata->dev;
 
-	*cookie = (unsigned long) skb;
-
-	if (is_offchan && local->ops->remain_on_channel) {
-		unsigned int duration;
-		int ret;
-
-		mutex_lock(&local->mtx);
-		/*
-		 * If the duration is zero, then the driver
-		 * wouldn't actually do anything. Set it to
-		 * 100 for now.
-		 *
-		 * TODO: cancel the off-channel operation
-		 *       when we get the SKB's TX status and
-		 *       the wait time was zero before.
-		 */
-		duration = 100;
-		if (wait)
-			duration = wait;
-		ret = ieee80211_remain_on_channel_hw(local, dev, chan,
-						     channel_type,
-						     duration, cookie);
-		if (ret) {
-			kfree_skb(skb);
-			mutex_unlock(&local->mtx);
-			return ret;
-		}
-
-		local->hw_roc_for_tx = true;
-		local->hw_roc_duration = wait;
-
-		/*
-		 * queue up frame for transmission after
-		 * ieee80211_ready_on_channel call
-		 */
+	if (!need_offchan) {
+		*cookie = (unsigned long) skb;
+		ieee80211_tx_skb(sdata, skb);
+		ret = 0;
+		goto out_unlock;
+	}
 
-		/* modify cookie to prevent API mismatches */
-		*cookie ^= 2;
-		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+	if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
 		IEEE80211_SKB_CB(skb)->hw_queue =
 			local->hw.offchannel_tx_hw_queue;
-		local->hw_roc_skb = skb;
-		local->hw_roc_skb_for_status = skb;
-		mutex_unlock(&local->mtx);
-
-		return 0;
-	}
-
-	/*
-	 * Can transmit right away if the channel was the
-	 * right one and there's no wait involved... If a
-	 * wait is involved, we might otherwise not be on
-	 * the right channel for long enough!
-	 */
-	if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) {
-		ieee80211_tx_skb(sdata, skb);
-		return 0;
-	}
 
-	wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL);
-	if (!wk) {
+	/* This will handle all kinds of coalescing and immediate TX */
+	ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+				       wait, cookie, skb);
+	if (ret)
 		kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
-	wk->chan = chan;
-	wk->chan_type = channel_type;
-	wk->sdata = sdata;
-	wk->done = ieee80211_offchan_tx_done;
-	wk->offchan_tx.frame = skb;
-	wk->offchan_tx.wait = wait;
-	wk->data_len = len;
-	memcpy(wk->data, buf, len);
-
-	ieee80211_add_work(wk);
-	return 0;
+ out_unlock:
+	mutex_unlock(&local->mtx);
+	return ret;
 }
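
[Editor's note] The new ieee80211_mgmt_tx() first decides whether off-channel operation is needed at all before it builds the frame. A sketch of that decision, reduced to three booleans standing in for the sdata/local state the real function inspects -- an illustration, not the kernel code:

/* gcc -o need-offchan need-offchan.c && ./need-offchan */
#include <stdbool.h>
#include <stdio.h>

static bool mgmt_tx_needs_offchan(bool iface_operating,
				  bool same_channel, bool same_channel_type)
{
	/* an interface that isn't operating (not associated, not joined
	 * to an IBSS/mesh, not beaconing) has no operating channel to
	 * rely on, so the TX must be off-channel */
	if (!iface_operating)
		return true;
	/* otherwise, off-channel only if the requested channel or
	 * channel type differs from the operating one */
	return !same_channel || !same_channel_type;
}

int main(void)
{
	printf("%d\n", mgmt_tx_needs_offchan(true, true, true));	/* 0 */
	printf("%d\n", mgmt_tx_needs_offchan(false, true, true));	/* 1 */
	printf("%d\n", mgmt_tx_needs_offchan(true, false, true));	/* 1 */
	return 0;
}

When the result is false the frame is transmitted immediately; when it is true the frame rides along on a ROC item, so the same coalescing machinery serves both plain remain-on-channel and off-channel TX.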
 
 static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
-					 struct net_device *dev,
+					 struct wireless_dev *wdev,
 					 u64 cookie)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_work *wk;
-	int ret = -ENOENT;
-
-	mutex_lock(&local->mtx);
-
-	if (local->ops->cancel_remain_on_channel) {
-		cookie ^= 2;
-		ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
-
-		if (ret == 0) {
-			kfree_skb(local->hw_roc_skb);
-			local->hw_roc_skb = NULL;
-			local->hw_roc_skb_for_status = NULL;
-		}
-
-		mutex_unlock(&local->mtx);
-
-		return ret;
-	}
-
-	list_for_each_entry(wk, &local->work_list, list) {
-		if (wk->sdata != sdata)
-			continue;
-
-		if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
-			continue;
-
-		if (cookie != (unsigned long) wk->offchan_tx.frame)
-			continue;
-
-		wk->timeout = jiffies;
-
-		ieee80211_queue_work(&local->hw, &local->work_work);
-		ret = 0;
-		break;
-	}
-	mutex_unlock(&local->mtx);
+	struct ieee80211_local *local = wiphy_priv(wiphy);
 
-	return ret;
+	return ieee80211_cancel_roc(local, cookie, true);
 }
 
 static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
-					  struct net_device *dev,
+					  struct wireless_dev *wdev,
 					  u16 frame_type, bool reg)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
-	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
-		return;
-
-	if (reg)
-		local->probe_req_reg++;
-	else
-		local->probe_req_reg--;
+	switch (frame_type) {
+	case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
+		if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+			struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+			if (reg)
+				ifibss->auth_frame_registrations++;
+			else
+				ifibss->auth_frame_registrations--;
+		}
+		break;
+	case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
+		if (reg)
+			local->probe_req_reg++;
+		else
+			local->probe_req_reg--;
 
-	ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+		ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+		break;
+	default:
+		break;
+	}
 }
 
 static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
@@ -2570,8 +2672,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
 		tf->u.setup_req.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb, false);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false);
+		ieee80211_add_ext_srates_ie(sdata, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -2584,8 +2686,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
 		tf->u.setup_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb, false);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false);
+		ieee80211_add_ext_srates_ie(sdata, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -2645,8 +2747,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
 		mgmt->u.action.u.tdls_discover_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb, false);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false);
+		ieee80211_add_ext_srates_ie(sdata, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	default:
@@ -2676,9 +2778,8 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	    !sdata->u.mgd.associated)
 		return -EINVAL;
 
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
-	printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
-#endif
+	tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
+		 action_code, peer);
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
 			    max(sizeof(struct ieee80211_mgmt),
@@ -2787,9 +2888,7 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EINVAL;
 
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
-	printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
-#endif
+	tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
 
 	switch (oper) {
 	case NL80211_TDLS_ENABLE_LINK:
@@ -2886,8 +2985,8 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static struct ieee80211_channel *
-ieee80211_wiphy_get_channel(struct wiphy *wiphy,
-			    enum nl80211_channel_type *type)
+ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+			  enum nl80211_channel_type *type)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 
@@ -2933,7 +3032,7 @@ struct cfg80211_ops mac80211_config_ops = {
 #endif
 	.change_bss = ieee80211_change_bss,
 	.set_txq_params = ieee80211_set_txq_params,
-	.set_channel = ieee80211_set_channel,
+	.set_monitor_channel = ieee80211_set_monitor_channel,
 	.suspend = ieee80211_suspend,
 	.resume = ieee80211_resume,
 	.scan = ieee80211_scan,
@@ -2968,7 +3067,6 @@ struct cfg80211_ops mac80211_config_ops = {
 	.tdls_oper = ieee80211_tdls_oper,
 	.tdls_mgmt = ieee80211_tdls_mgmt,
 	.probe_client = ieee80211_probe_client,
-	.get_channel = ieee80211_wiphy_get_channel,
 	.set_noack_map = ieee80211_set_noack_map,
 #ifdef CONFIG_PM
 	.set_wakeup = ieee80211_set_wakeup,
@@ -2976,4 +3074,5 @@ struct cfg80211_ops mac80211_config_ops = {
 	.get_et_sset_count = ieee80211_get_et_sset_count,
 	.get_et_stats = ieee80211_get_et_stats,
 	.get_et_strings = ieee80211_get_et_strings,
+	.get_channel = ieee80211_cfg_get_channel,
 };
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c76cf7230c7d..f0f87e5a1d35 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -41,6 +41,10 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
 		if (!sdata->u.ap.beacon)
 			continue;
 		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!sdata->wdev.mesh_id_len)
+			continue;
+		break;
 	default:
 		break;
 	}
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
new file mode 100644
index 000000000000..8f383a576016
--- /dev/null
+++ b/net/mac80211/debug.h
@@ -0,0 +1,170 @@
+#ifndef __MAC80211_DEBUG_H
+#define __MAC80211_DEBUG_H
+#include <net/cfg80211.h>
+
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+#define MAC80211_IBSS_DEBUG 1
+#else
+#define MAC80211_IBSS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_PS_DEBUG
+#define MAC80211_PS_DEBUG 1
+#else
+#define MAC80211_PS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+#define MAC80211_HT_DEBUG 1
+#else
+#define MAC80211_HT_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPL_DEBUG
+#define MAC80211_MPL_DEBUG 1
+#else
+#define MAC80211_MPL_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPATH_DEBUG
+#define MAC80211_MPATH_DEBUG 1
+#else
+#define MAC80211_MPATH_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MHWMP_DEBUG
+#define MAC80211_MHWMP_DEBUG 1
+#else
+#define MAC80211_MHWMP_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESH_SYNC_DEBUG
+#define MAC80211_MESH_SYNC_DEBUG 1
+#else
+#define MAC80211_MESH_SYNC_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_TDLS_DEBUG
+#define MAC80211_TDLS_DEBUG 1
+#else
+#define MAC80211_TDLS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_STA_DEBUG
+#define MAC80211_STA_DEBUG 1
+#else
+#define MAC80211_STA_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MLME_DEBUG
+#define MAC80211_MLME_DEBUG 1
+#else
+#define MAC80211_MLME_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+void __sdata_info(const char *fmt, ...) __printf(1, 2);
+void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3);
+void __sdata_err(const char *fmt, ...) __printf(1, 2);
+void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
+	__printf(3, 4);
+
+#define _sdata_info(sdata, fmt, ...) \
+	__sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_dbg(print, sdata, fmt, ...) \
+	__sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_err(sdata, fmt, ...) \
+	__sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+	__wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__)
+#else
+#define _sdata_info(sdata, fmt, ...) \
+do { \
+	pr_info("%s: " fmt, \
+		(sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_dbg(print, sdata, fmt, ...) \
+do { \
+	if (print) \
+		pr_debug("%s: " fmt, \
+			 (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_err(sdata, fmt, ...) \
+do { \
+	pr_err("%s: " fmt, \
+	       (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+do { \
+	if (print) \
+		wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define sdata_info(sdata, fmt, ...) \
+	_sdata_info(sdata, fmt, ##__VA_ARGS__)
+#define sdata_err(sdata, fmt, ...) \
+	_sdata_err(sdata, fmt, ##__VA_ARGS__)
+#define sdata_dbg(sdata, fmt, ...) \
+	_sdata_dbg(1, sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_HT_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg_ratelimited(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define ibss_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_IBSS_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_PS_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_hw(hw, fmt, ...) \
+	_wiphy_dbg(MAC80211_PS_DEBUG, \
+		   (hw)->wiphy, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_ratelimited(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define mpl_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MPL_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define mpath_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MPATH_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define mhwmp_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MHWMP_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define msync_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define tdls_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_TDLS_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define sta_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_STA_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MLME_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg_ratelimited(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
+		   sdata, fmt, ##__VA_ARGS__)
+
+#endif /* __MAC80211_DEBUG_H */
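
[Editor's note] Usage sketch for the macros above, at a hypothetical call site (the function and its arguments are made up for illustration). Because MAC80211_HT_DEBUG is always a compile-time 0 or 1, the "if (print)" inside _sdata_dbg() lets the compiler drop the pr_debug() call entirely when the Kconfig option is off, while the format arguments are still type-checked in both configurations:

static void example(struct ieee80211_sub_if_data *sdata, int tid)
{
	/* compiled away (dead code) when CONFIG_MAC80211_HT_DEBUG=n */
	ht_dbg(sdata, "starting BA session for tid %d\n", tid);

	/* same, plus net_ratelimit() gating when the option is on */
	ht_dbg_ratelimited(sdata, "dropping frame for tid %d\n", tid);
}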
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 778e5916d7c3..b8dfb440c8ef 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -325,8 +325,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
 			  local->rx_handlers_drop_defrag);
 	DEBUGFS_STATS_ADD(rx_handlers_drop_short,
 			  local->rx_handlers_drop_short);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
-			  local->rx_handlers_drop_passive_scan);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head,
 			  local->tx_expand_skb_head);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7932767bb482..090d08ff22c4 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -283,6 +283,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
 
 	lockdep_assert_held(&sdata->local->key_mtx);
 
+	if (sdata->debugfs.default_unicast_key) {
+		debugfs_remove(sdata->debugfs.default_unicast_key);
+		sdata->debugfs.default_unicast_key = NULL;
+	}
+
 	if (sdata->default_unicast_key) {
 		key = key_mtx_dereference(sdata->local,
 					  sdata->default_unicast_key);
@@ -290,9 +295,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
 		sdata->debugfs.default_unicast_key =
 			debugfs_create_symlink("default_unicast_key",
 					       sdata->debugfs.dir, buf);
-	} else {
-		debugfs_remove(sdata->debugfs.default_unicast_key);
-		sdata->debugfs.default_unicast_key = NULL;
+	}
+
+	if (sdata->debugfs.default_multicast_key) {
+		debugfs_remove(sdata->debugfs.default_multicast_key);
+		sdata->debugfs.default_multicast_key = NULL;
 	}
 
 	if (sdata->default_multicast_key) {
@@ -302,9 +309,6 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
 		sdata->debugfs.default_multicast_key =
 			debugfs_create_symlink("default_multicast_key",
 					       sdata->debugfs.dir, buf);
-	} else {
-		debugfs_remove(sdata->debugfs.default_multicast_key);
-		sdata->debugfs.default_multicast_key = NULL;
 	}
 }
 
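
[Editor's note] The reordering above turns the update into a remove-before-create pattern: any stale symlink is dropped unconditionally before a new one may be created, instead of only in the else branch, so switching the default key from one key to another can no longer leave the old link in place. A generic sketch of that pattern -- update_link() and its parameters are hypothetical, only debugfs_remove() and debugfs_create_symlink() are real debugfs APIs:

static void update_link(struct dentry **slot, struct dentry *parent,
			const char *name, const char *target)
{
	if (*slot) {
		debugfs_remove(*slot);	/* always drop the old link first */
		*slot = NULL;
	}
	if (target)			/* NULL target just clears the link */
		*slot = debugfs_create_symlink(name, parent, target);
}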
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 7ed433c66d68..6d5aec9418ee 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -468,48 +468,54 @@ IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
 IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
 IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
 IEEE80211_IF_FILE(dropped_frames_congestion,
 		  u.mesh.mshstats.dropped_frames_congestion, DEC);
 IEEE80211_IF_FILE(dropped_frames_no_route,
 		  u.mesh.mshstats.dropped_frames_no_route, DEC);
 IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
 
 /* Mesh parameters */
 IEEE80211_IF_FILE(dot11MeshMaxRetries,
 		  u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
 IEEE80211_IF_FILE(dot11MeshRetryTimeout,
 		  u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
 		  u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
 		  u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
 IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
 IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
 IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
 		  u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
 		  u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
 		  u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
 		  u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
 		  u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
 		  u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
 IEEE80211_IF_FILE(path_refresh_time,
 		  u.mesh.mshcfg.path_refresh_time, DEC);
 IEEE80211_IF_FILE(min_discovery_timeout,
 		  u.mesh.mshcfg.min_discovery_timeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
 		  u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
 IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
 		  u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
 		  u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
 IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
 IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout,
+		  u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
+		  u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
+		  u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
 #endif
 
 #define DEBUGFS_ADD_MODE(name, mode) \
@@ -607,9 +613,13 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
 	MESHPARAMS_ADD(min_discovery_timeout);
 	MESHPARAMS_ADD(dot11MeshHWMPRootMode);
 	MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+	MESHPARAMS_ADD(dot11MeshForwarding);
 	MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
 	MESHPARAMS_ADD(rssi_threshold);
 	MESHPARAMS_ADD(ht_opmode);
+	MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
+	MESHPARAMS_ADD(dot11MeshHWMProotInterval);
+	MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
 #undef MESHPARAMS_ADD
 }
 #endif
@@ -685,6 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
 
 	sprintf(buf, "netdev:%s", sdata->name);
 	if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
-		printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
-		       "dir to %s\n", buf);
+		sdata_err(sdata,
+			  "debugfs: failed to rename debugfs dir to %s\n",
+			  buf);
 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 6d33a0c743ab..df9203199102 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -3,7 +3,7 @@
 
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "trace.h"
 
 static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
@@ -27,14 +27,6 @@ static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 	local->ops->tx(&local->hw, skb);
 }
 
-static inline void drv_tx_frags(struct ieee80211_local *local,
-				struct ieee80211_vif *vif,
-				struct ieee80211_sta *sta,
-				struct sk_buff_head *skbs)
-{
-	local->ops->tx_frags(&local->hw, vif, sta, skbs);
-}
-
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
 				      u32 sset, u8 *data)
 {
@@ -845,4 +837,33 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
 						  more_data);
 	trace_drv_return_void(local);
 }
+
+static inline int drv_get_rssi(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata,
+			       struct ieee80211_sta *sta,
+			       s8 *rssi_dbm)
+{
+	int ret;
+
+	might_sleep();
+
+	ret = local->ops->get_rssi(&local->hw, &sdata->vif, sta, rssi_dbm);
+	trace_drv_get_rssi(local, sta, *rssi_dbm, ret);
+
+	return ret;
+}
+
+static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
+				      struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	check_sdata_in_driver(sdata);
+	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+	trace_drv_mgd_prepare_tx(local, sdata);
+	if (local->ops->mgd_prepare_tx)
+		local->ops->mgd_prepare_tx(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
 #endif /* __MAC80211_DRIVER_OPS */
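
[Editor's note] Both new wrappers follow the shape used throughout driver-ops.h: assert the calling context, trace entry, invoke the (possibly optional) driver op, trace the return. A skeleton of that pattern with hypothetical names -- drv_example_op, trace_drv_example_op and ops->example_op do not exist, they only stand in for a future op; check_sdata_in_driver() and trace_drv_return_void() are the real helpers from this file:

static inline void drv_example_op(struct ieee80211_local *local,
				  struct ieee80211_sub_if_data *sdata)
{
	might_sleep();			/* process context only */
	check_sdata_in_driver(sdata);	/* iface must be known to driver */

	trace_drv_example_op(local, sdata);	/* hypothetical tracepoint */
	if (local->ops->example_op)		/* hypothetical driver op */
		local->ops->example_op(&local->hw, &sdata->vif);
	trace_drv_return_void(local);
}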
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c
deleted file mode 100644
index 8ed8711b1a6d..000000000000
--- a/net/mac80211/driver-trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
-/* bug in tracepoint.h, it should include this */
-#include <linux/module.h>
-
-/* sparse isn't too happy with all macros... */
-#ifndef __CHECKER__
-#include "driver-ops.h"
-#define CREATE_TRACE_POINTS
-#include "driver-trace.h"
-#endif
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 6f8615c54b22..4b4538d63925 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -305,12 +305,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
 	tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
 	initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
-			    mgmt->sa, initiator ? "initiator" : "recipient",
-			    tid,
-			    le16_to_cpu(mgmt->u.action.u.delba.reason_code));
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+	ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n",
+			   mgmt->sa, initiator ? "initiator" : "recipient",
+			   tid,
+			   le16_to_cpu(mgmt->u.action.u.delba.reason_code));
 
 	if (initiator == WLAN_BACK_INITIATOR)
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33d9d0c3e3d0..5746d62faba1 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -82,8 +82,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
 	local->oper_channel = chan;
 	channel_type = ifibss->channel_type;
-	if (channel_type > NL80211_CHAN_HT20 &&
-	    !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+	if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
 		channel_type = NL80211_CHAN_HT20;
 	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
 		/* can only fail due to HT40+/- mismatch */
@@ -262,11 +261,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
 
 	memcpy(addr, sta->sta.addr, ETH_ALEN);
 
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(sdata->local->hw.wiphy,
-		    "Adding new IBSS station %pM (dev=%s)\n",
-		    addr, sdata->name);
-#endif
+	ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr);
 
 	sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 	sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -280,12 +275,10 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
 	/* If it fails, maybe we raced another insertion? */
 	if (sta_info_insert_rcu(sta))
 		return sta_info_get(sdata, addr);
-	if (auth) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-		printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
-		       "(auth_transaction=1)\n", sdata->vif.addr,
-		       sdata->u.ibss.bssid, addr);
-#endif
+	if (auth && !sdata->u.ibss.auth_frame_registrations) {
+		ibss_dbg(sdata,
+			 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
+			 sdata->vif.addr, sdata->u.ibss.bssid, addr);
 		ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
 				    addr, sdata->u.ibss.bssid, NULL, 0, 0);
 	}
@@ -308,7 +301,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 	 * allow new one to be added.
 	 */
 	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+		net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
 				    sdata->name, addr);
 		rcu_read_lock();
 		return NULL;
@@ -355,11 +348,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 
 	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
 		return;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
-	       "(auth_transaction=%d)\n",
-	       sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
-#endif
+	ibss_dbg(sdata,
+		 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
+		 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
 	sta_info_destroy_addr(sdata, mgmt->sa);
 	ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
 	rcu_read_unlock();
@@ -422,15 +413,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 			ieee80211_mandatory_rates(local, band);
 
 		if (sta->sta.supp_rates[band] != prev_rates) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-			printk(KERN_DEBUG
-			       "%s: updated supp_rates set "
-			       "for %pM based on beacon"
-			       "/probe_resp (0x%x -> 0x%x)\n",
-			       sdata->name, sta->sta.addr,
-			       prev_rates,
-			       sta->sta.supp_rates[band]);
-#endif
+			ibss_dbg(sdata,
+				 "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
+				 sta->sta.addr, prev_rates,
+				 sta->sta.supp_rates[band]);
 			rates_updated = true;
 		}
 	} else {
@@ -545,22 +531,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 		rx_timestamp = drv_get_tsf(local, sdata);
 	}
 
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
-	       "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
-	       mgmt->sa, mgmt->bssid,
-	       (unsigned long long)rx_timestamp,
-	       (unsigned long long)beacon_timestamp,
-	       (unsigned long long)(rx_timestamp - beacon_timestamp),
-	       jiffies);
-#endif
+	ibss_dbg(sdata,
+		 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+		 mgmt->sa, mgmt->bssid,
+		 (unsigned long long)rx_timestamp,
+		 (unsigned long long)beacon_timestamp,
+		 (unsigned long long)(rx_timestamp - beacon_timestamp),
+		 jiffies);
 
 	if (beacon_timestamp > rx_timestamp) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-		printk(KERN_DEBUG "%s: beacon TSF higher than "
-		       "local TSF - IBSS merge with BSSID %pM\n",
-		       sdata->name, mgmt->bssid);
-#endif
+		ibss_dbg(sdata,
+			 "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
+			 mgmt->bssid);
 		ieee80211_sta_join_ibss(sdata, bss);
 		supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
 		ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
@@ -586,7 +568,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
 	 * allow new one to be added.
 	 */
 	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+		net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
 				    sdata->name, addr);
 		return;
 	}
@@ -662,8 +644,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 	if (ifibss->fixed_channel)
 		return;
 
-	printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
-	       "IBSS networks with same SSID (merge)\n", sdata->name);
+	sdata_info(sdata,
+		   "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
 
 	ieee80211_request_internal_scan(sdata,
 			ifibss->ssid, ifibss->ssid_len, NULL);
@@ -691,8 +673,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 		bssid[0] |= 0x02;
 	}
 
-	printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
-	       sdata->name, bssid);
+	sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid);
 
 	capability = WLAN_CAPABILITY_IBSS;
 
@@ -723,10 +704,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 	lockdep_assert_held(&ifibss->mtx);
 
 	active_ibss = ieee80211_sta_active_ibss(sdata);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
-	       sdata->name, active_ibss);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+	ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
 
 	if (active_ibss)
 		return;
@@ -749,29 +727,24 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 		struct ieee80211_bss *bss;
 
 		bss = (void *)cbss->priv;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-		printk(KERN_DEBUG "   sta_find_ibss: selected %pM current "
-		       "%pM\n", cbss->bssid, ifibss->bssid);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-
-		printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
-		       " based on configured SSID\n",
-		       sdata->name, cbss->bssid);
+		ibss_dbg(sdata,
+			 "sta_find_ibss: selected %pM current %pM\n",
+			 cbss->bssid, ifibss->bssid);
+		sdata_info(sdata,
+			   "Selected IBSS BSSID %pM based on configured SSID\n",
+			   cbss->bssid);
 
 		ieee80211_sta_join_ibss(sdata, bss);
 		ieee80211_rx_bss_put(local, bss);
 		return;
 	}
 
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "   did not try to join ibss\n");
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+	ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
 
 	/* Selected IBSS not found in current scan results - try to scan */
 	if (time_after(jiffies, ifibss->last_scan_completed +
 					IEEE80211_SCAN_INTERVAL)) {
-		printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
-		       "join\n", sdata->name);
+		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
 		ieee80211_request_internal_scan(sdata,
 						ifibss->ssid, ifibss->ssid_len,
@@ -785,9 +758,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 			ieee80211_sta_create_ibss(sdata);
 			return;
 		}
-		printk(KERN_DEBUG "%s: IBSS not allowed on"
-		       " %d MHz\n", sdata->name,
-		       local->hw.conf.channel->center_freq);
+		sdata_info(sdata, "IBSS not allowed on %d MHz\n",
+			   local->hw.conf.channel->center_freq);
 
 		/* No IBSS found - decrease scan interval and continue
 		 * scanning. */
@@ -822,12 +794,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
 	tx_last_beacon = drv_tx_last_beacon(local);
 
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
-	       " (tx_last_beacon=%d)\n",
-	       sdata->name, mgmt->sa, mgmt->da,
-	       mgmt->bssid, tx_last_beacon);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+	ibss_dbg(sdata,
+		 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
+		 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
 
 	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
@@ -840,11 +809,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	pos = mgmt->u.probe_req.variable;
 	if (pos[0] != WLAN_EID_SSID ||
 	    pos + 2 + pos[1] > end) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-		printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
-		       "from %pM\n",
-		       sdata->name, mgmt->sa);
-#endif
+		ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n",
+			 mgmt->sa);
 		return;
 	}
 	if (pos[1] != 0 &&
@@ -861,10 +827,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
 	resp = (struct ieee80211_mgmt *) skb->data;
 	memcpy(resp->da, mgmt->sa, ETH_ALEN);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
-	       sdata->name, resp->da);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+	ibss_dbg(sdata, "Sending ProbeResp to %pM\n", resp->da);
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
 	ieee80211_tx_skb(sdata, skb);
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3f3cd50fff16..bb61f7718c4c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -30,6 +30,7 @@
 #include <net/mac80211.h>
 #include "key.h"
 #include "sta_info.h"
+#include "debug.h"
 
 struct ieee80211_local;
 
@@ -55,11 +56,14 @@ struct ieee80211_local;
 #define TU_TO_JIFFIES(x)	(usecs_to_jiffies((x) * 1024))
 #define TU_TO_EXP_TIME(x)	(jiffies + TU_TO_JIFFIES(x))
 
+/*
+ * Some APs experience problems when working with U-APSD. Decrease the
+ * probability of that happening by using legacy mode for all ACs but VO.
+ * The AP that caused us trouble was a Cisco 4410N. It ignores our
+ * setting, and always treats non-VO ACs as legacy.
+ */
 #define IEEE80211_DEFAULT_UAPSD_QUEUES \
-	(IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |	\
-	 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE |	\
-	 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |	\
-	 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+	IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
 
 #define IEEE80211_DEFAULT_MAX_SP_LEN		\
 	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -81,6 +85,8 @@ struct ieee80211_bss {
 	size_t ssid_len;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 
+	u32 device_ts;
+
 	u8 dtim_period;
 
 	bool wmm_used;
@@ -203,7 +209,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
  * enum ieee80211_packet_rx_flags - packet RX flags
  * @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
  *	(incl. multicast frames)
- * @IEEE80211_RX_IN_SCAN: received while scanning
 * @IEEE80211_RX_FRAGMENTED: fragmented frame
 * @IEEE80211_RX_AMSDU: a-MSDU packet
 * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -213,7 +218,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 * @rx_flags field of &struct ieee80211_rx_status.
 */
 enum ieee80211_packet_rx_flags {
-	IEEE80211_RX_IN_SCAN		= BIT(0),
 	IEEE80211_RX_RA_MATCH		= BIT(1),
 	IEEE80211_RX_FRAGMENTED		= BIT(2),
 	IEEE80211_RX_AMSDU		= BIT(3),
@@ -317,55 +321,30 @@ struct mesh_preq_queue {
 	u8 flags;
 };
 
-enum ieee80211_work_type {
-	IEEE80211_WORK_ABORT,
-	IEEE80211_WORK_REMAIN_ON_CHANNEL,
-	IEEE80211_WORK_OFFCHANNEL_TX,
-};
-
-/**
- * enum work_done_result - indicates what to do after work was done
- *
- * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
- * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
- *	should be requeued.
- */
-enum work_done_result {
-	WORK_DONE_DESTROY,
-	WORK_DONE_REQUEUE,
-};
+#if HZ/100 == 0
+#define IEEE80211_ROC_MIN_LEFT	1
+#else
+#define IEEE80211_ROC_MIN_LEFT	(HZ/100)
+#endif
 
-struct ieee80211_work {
+struct ieee80211_roc_work {
 	struct list_head list;
+	struct list_head dependents;
 
-	struct rcu_head rcu_head;
+	struct delayed_work work;
 
 	struct ieee80211_sub_if_data *sdata;
 
-	enum work_done_result (*done)(struct ieee80211_work *wk,
-				      struct sk_buff *skb);
-
 	struct ieee80211_channel *chan;
 	enum nl80211_channel_type chan_type;
 
-	unsigned long timeout;
-	enum ieee80211_work_type type;
+	bool started, abort, hw_begun, notified;
 
-	bool started;
+	unsigned long hw_start_time;
 
-	union {
-		struct {
-			u32 duration;
-		} remain;
-		struct {
-			struct sk_buff *frame;
-			u32 wait;
-			bool status;
-		} offchan_tx;
-	};
-
-	size_t data_len;
-	u8 data[];
+	u32 duration, req_duration;
+	struct sk_buff *frame;
+	u64 mgmt_tx_cookie;
 };
 
 /* flags used in struct ieee80211_if_managed.flags */
@@ -399,7 +378,6 @@ struct ieee80211_mgd_auth_data {
 struct ieee80211_mgd_assoc_data {
 	struct cfg80211_bss *bss;
 	const u8 *supp_rates;
-	const u8 *ht_operation_ie;
 
 	unsigned long timeout;
 	int tries;
@@ -414,6 +392,8 @@ struct ieee80211_mgd_assoc_data {
 	bool sent_assoc;
 	bool synced;
 
+	u8 ap_ht_param;
+
 	size_t ie_len;
 	u8 ie[];
 };
@@ -532,6 +512,7 @@ struct ieee80211_if_ibss {
 	bool privacy;
 
 	bool control_port;
+	unsigned int auth_frame_registrations;
 
 	u8 bssid[ETH_ALEN] __aligned(2);
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -701,6 +682,9 @@ struct ieee80211_sub_if_data {
 	/* TID bitmap for NoAck policy */
 	u16 noack_map;
 
+	/* bit field of ACM bits (BIT(802.1D tag)) */
+	u8 wmm_acm;
+
 	struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
 	struct ieee80211_key __rcu *default_unicast_key;
 	struct ieee80211_key __rcu *default_multicast_key;
@@ -847,13 +831,6 @@ struct ieee80211_local {
 	const struct ieee80211_ops *ops;
 
 	/*
-	 * work stuff, potentially off-channel (in the future)
-	 */
-	struct list_head work_list;
-	struct timer_list work_timer;
-	struct work_struct work_work;
-
-	/*
 	 * private workqueue to mac80211. mac80211 makes this accessible
 	 * via ieee80211_queue_work()
 	 */
@@ -912,6 +889,9 @@ struct ieee80211_local {
 	/* device is started */
 	bool started;
 
+	/* device is during a HW reconfig */
+	bool in_reconfig;
+
 	/* wowlan is enabled -- don't reconfig on resume */
 	bool wowlan;
 
@@ -985,14 +965,14 @@ struct ieee80211_local {
 	int scan_channel_idx;
 	int scan_ies_len;
 
-	bool sched_scanning;
 	struct ieee80211_sched_scan_ies sched_scan_ies;
 	struct work_struct sched_scan_stopped_work;
+	struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
 
 	unsigned long leave_oper_channel_time;
 	enum mac80211_scan_state next_scan_state;
 	struct delayed_work scan_work;
-	struct ieee80211_sub_if_data *scan_sdata;
+	struct ieee80211_sub_if_data __rcu *scan_sdata;
 	enum nl80211_channel_type _oper_channel_type;
 	struct ieee80211_channel *oper_channel, *csa_channel;
 
@@ -1034,7 +1014,6 @@ struct ieee80211_local {
 	unsigned int rx_handlers_drop_nullfunc;
 	unsigned int rx_handlers_drop_defrag;
 	unsigned int rx_handlers_drop_short;
-	unsigned int rx_handlers_drop_passive_scan;
 	unsigned int tx_expand_skb_head;
 	unsigned int tx_expand_skb_head_cloned;
 	unsigned int rx_expand_skb_head;
@@ -1050,7 +1029,6 @@ struct ieee80211_local {
 	int total_ps_buffered; /* total number of all buffered unicast and
 				* multicast packets for power saving stations
 				*/
-	unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
 
 	bool pspolling;
 	bool offchannel_ps_enabled;
@@ -1087,14 +1065,12 @@ struct ieee80211_local {
 	} debugfs;
 #endif
 
-	struct ieee80211_channel *hw_roc_channel;
-	struct net_device *hw_roc_dev;
-	struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
+	/*
+	 * Remain-on-channel support
+	 */
+	struct list_head roc_list;
 	struct work_struct hw_roc_start, hw_roc_done;
-	enum nl80211_channel_type hw_roc_channel_type;
-	unsigned int hw_roc_duration;
-	u32 hw_roc_cookie;
-	bool hw_roc_for_tx;
+	unsigned long hw_roc_start_time;
 
 	struct idr ack_status_frames;
 	spinlock_t ack_status_lock;
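
Several pointers in this header (scan_sdata, sched_scan_sdata) become __rcu-annotated, so writers publish with rcu_assign_pointer() and readers use the rcu_dereference() family. A userspace sketch of the publish/read ordering this relies on, with C11 atomics standing in for the kernel primitives (illustrative only):

    /* Userspace sketch of the rcu_assign_pointer()/rcu_dereference()
     * publish-and-read discipline; C11 atomics stand in for the kernel
     * primitives. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct sdata { const char *name; };

    static _Atomic(struct sdata *) scan_sdata;

    static void publish(struct sdata *s)
    {
    	/* release store: readers see a fully initialized object */
    	atomic_store_explicit(&scan_sdata, s, memory_order_release);
    }

    static struct sdata *read_side(void)
    {
    	/* acquire load: pairs with the release store above */
    	return atomic_load_explicit(&scan_sdata, memory_order_acquire);
    }

    int main(void)
    {
    	static struct sdata s = { "wlan0" };
    	publish(&s);
    	struct sdata *p = read_side();
    	printf("scanning on %s\n", p ? p->name : "(none)");
    	return 0;
    }
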
@@ -1114,6 +1090,12 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
 	return netdev_priv(dev);
 }
 
+static inline struct ieee80211_sub_if_data *
+IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
+{
+	return container_of(wdev, struct ieee80211_sub_if_data, wdev);
+}
+
 /* this struct represents 802.11n's RA/TID combination */
 struct ieee80211_ra_tid {
 	u8 ra[ETH_ALEN];
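
IEEE80211_WDEV_TO_SUB_IF() above recovers the enclosing sub-interface from an embedded struct wireless_dev via container_of(). A standalone sketch of that idiom (simplified types, not the kernel definitions):

    /* Standalone sketch of the container_of() idiom used by
     * IEEE80211_WDEV_TO_SUB_IF() above. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct wireless_dev { int iftype; };

    struct sub_if_data {
    	char name[16];
    	struct wireless_dev wdev;	/* embedded member */
    };

    static struct sub_if_data *wdev_to_sdata(struct wireless_dev *wdev)
    {
    	return container_of(wdev, struct sub_if_data, wdev);
    }

    int main(void)
    {
    	struct sub_if_data sdata = { .name = "wlan0" };
    	printf("%s\n", wdev_to_sdata(&sdata.wdev)->name);
    	return 0;
    }
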
@@ -1264,8 +1246,7 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
 void ieee80211_run_deferred_scan(struct ieee80211_local *local);
-ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
 
 void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
 struct ieee80211_bss *
@@ -1290,19 +1271,23 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 				    bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
 				 bool offchannel_ps_disable);
-void ieee80211_hw_roc_setup(struct ieee80211_local *local);
+void ieee80211_roc_setup(struct ieee80211_local *local);
+void ieee80211_start_next_roc(struct ieee80211_local *local);
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
+void ieee80211_sw_roc_work(struct work_struct *work);
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
 
 /* interface handling */
 int ieee80211_iface_init(void);
 void ieee80211_iface_exit(void);
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
-		     struct net_device **new_dev, enum nl80211_iftype type,
+		     struct wireless_dev **new_wdev, enum nl80211_iftype type,
 		     struct vif_params *params);
 int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 			     enum nl80211_iftype type);
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
 void ieee80211_remove_interfaces(struct ieee80211_local *local);
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
 				    const int offset);
@@ -1499,18 +1484,12 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 			       struct ieee80211_channel *channel,
 			       enum nl80211_channel_type channel_type,
 			       u16 prot_mode);
-
-/* internal work items */
-void ieee80211_work_init(struct ieee80211_local *local);
-void ieee80211_add_work(struct ieee80211_work *wk);
-void free_work(struct ieee80211_work *wk);
-void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
-int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
-				   struct ieee80211_channel *chan,
-				   enum nl80211_channel_type channel_type,
-				   unsigned int duration, u64 *cookie);
-int ieee80211_wk_cancel_remain_on_channel(
-	struct ieee80211_sub_if_data *sdata, u64 cookie);
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+			       u32 cap);
+int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
+			    struct sk_buff *skb, bool need_basic);
+int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
+				struct sk_buff *skb, bool need_basic);
 
 /* channel management */
 enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8664111d0566..bfb57dcc1538 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -43,6 +43,128 @@
  */
 
 
+static u32 ieee80211_idle_off(struct ieee80211_local *local,
+			      const char *reason)
+{
+	if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
+		return 0;
+
+	local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
+	return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 ieee80211_idle_on(struct ieee80211_local *local)
+{
+	if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
+		return 0;
+
+	drv_flush(local, false);
+
+	local->hw.conf.flags |= IEEE80211_CONF_IDLE;
+	return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata;
+	int count = 0;
+	bool working = false, scanning = false;
+	unsigned int led_trig_start = 0, led_trig_stop = 0;
+	struct ieee80211_roc_work *roc;
+
+#ifdef CONFIG_PROVE_LOCKING
+	WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
+		!lockdep_is_held(&local->iflist_mtx));
+#endif
+	lockdep_assert_held(&local->mtx);
+
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (!ieee80211_sdata_running(sdata)) {
+			sdata->vif.bss_conf.idle = true;
+			continue;
+		}
+
+		sdata->old_idle = sdata->vif.bss_conf.idle;
+
+		/* do not count disabled managed interfaces */
+		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+		    !sdata->u.mgd.associated &&
+		    !sdata->u.mgd.auth_data &&
+		    !sdata->u.mgd.assoc_data) {
+			sdata->vif.bss_conf.idle = true;
+			continue;
+		}
+		/* do not count unused IBSS interfaces */
+		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+		    !sdata->u.ibss.ssid_len) {
+			sdata->vif.bss_conf.idle = true;
+			continue;
+		}
+		/* count everything else */
+		sdata->vif.bss_conf.idle = false;
+		count++;
+	}
+
+	if (!local->ops->remain_on_channel) {
+		list_for_each_entry(roc, &local->roc_list, list) {
+			working = true;
+			roc->sdata->vif.bss_conf.idle = false;
+		}
+	}
+
+	sdata = rcu_dereference_protected(local->scan_sdata,
+					  lockdep_is_held(&local->mtx));
+	if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
+		scanning = true;
+		sdata->vif.bss_conf.idle = false;
+	}
+
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+			continue;
+		if (sdata->old_idle == sdata->vif.bss_conf.idle)
+			continue;
+		if (!ieee80211_sdata_running(sdata))
+			continue;
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+	}
+
+	if (working || scanning)
+		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+	else
+		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+
+	if (count)
+		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+	else
+		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+
+	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
+
+	if (working)
+		return ieee80211_idle_off(local, "working");
+	if (scanning)
+		return ieee80211_idle_off(local, "scanning");
+	if (!count)
+		return ieee80211_idle_on(local);
+	else
+		return ieee80211_idle_off(local, "in use");
+
+	return 0;
+}
+
+void ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+	u32 chg;
+
+	mutex_lock(&local->iflist_mtx);
+	chg = __ieee80211_recalc_idle(local);
+	mutex_unlock(&local->iflist_mtx);
+	if (chg)
+		ieee80211_hw_config(local, chg);
+}
+
 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int meshhdrlen;
@@ -57,9 +179,6 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 	}
 
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 	dev->mtu = new_mtu;
 	return 0;
 }
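
The functions moved here follow a recalculate-under-lock, apply-outside pattern: __ieee80211_recalc_idle() computes a change mask while holding iflist_mtx, and ieee80211_recalc_idle() pushes the change to the hardware after unlocking. A userspace sketch of the shape (pthreads standing in for kernel mutexes):

    /* Sketch of the "recalculate under lock, apply outside" pattern used
     * by ieee80211_recalc_idle() above. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int active_ifaces;
    static int idle = 1;

    static unsigned int recalc_idle_locked(void)
    {
    	int new_idle = (active_ifaces == 0);

    	if (new_idle == idle)
    		return 0;		/* no change to apply */
    	idle = new_idle;
    	return 1;			/* caller must push config change */
    }

    static void recalc_idle(void)
    {
    	unsigned int chg;

    	pthread_mutex_lock(&list_lock);
    	chg = recalc_idle_locked();
    	pthread_mutex_unlock(&list_lock);
    	if (chg)
    		printf("hw config change: idle=%d\n", idle);
    }

    int main(void)
    {
    	active_ifaces = 1;
    	recalc_idle();
    	return 0;
    }
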
@@ -100,15 +219,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *nsdata;
-	struct net_device *dev = sdata->dev;
 
 	ASSERT_RTNL();
 
 	/* we hold the RTNL here so can safely walk the list */
 	list_for_each_entry(nsdata, &local->interfaces, list) {
-		struct net_device *ndev = nsdata->dev;
-
-		if (ndev != dev && ieee80211_sdata_running(nsdata)) {
+		if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
 			/*
 			 * Allow only a single IBSS interface to be up at any
 			 * time. This is restricted because beacon distribution
@@ -127,7 +243,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 			 * The remaining checks are only performed for interfaces
 			 * with the same MAC address.
 			 */
-			if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
+			if (!ether_addr_equal(sdata->vif.addr,
+					      nsdata->vif.addr))
 				continue;
 
 			/*
@@ -217,17 +334,21 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
 static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata;
-	int ret;
+	int ret = 0;
 
 	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
 		return 0;
 
+	mutex_lock(&local->iflist_mtx);
+
 	if (local->monitor_sdata)
-		return 0;
+		goto out_unlock;
 
 	sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
-	if (!sdata)
-		return -ENOMEM;
+	if (!sdata) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* set up data */
 	sdata->local = local;
@@ -241,18 +362,19 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 	if (WARN_ON(ret)) {
 		/* ok .. stupid driver, it asked for this! */
 		kfree(sdata);
-		return ret;
+		goto out_unlock;
 	}
 
 	ret = ieee80211_check_queues(sdata);
 	if (ret) {
 		kfree(sdata);
-		return ret;
+		goto out_unlock;
 	}
 
 	rcu_assign_pointer(local->monitor_sdata, sdata);
-
-	return 0;
+ out_unlock:
+	mutex_unlock(&local->iflist_mtx);
+	return ret;
 }
 
 static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -262,10 +384,12 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
 	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
 		return;
 
-	sdata = rtnl_dereference(local->monitor_sdata);
+	mutex_lock(&local->iflist_mtx);
 
+	sdata = rcu_dereference_protected(local->monitor_sdata,
+					  lockdep_is_held(&local->iflist_mtx));
 	if (!sdata)
-		return;
+		goto out_unlock;
 
 	rcu_assign_pointer(local->monitor_sdata, NULL);
 	synchronize_net();
@@ -273,6 +397,8 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
 	drv_remove_interface(local, sdata);
 
 	kfree(sdata);
+ out_unlock:
+	mutex_unlock(&local->iflist_mtx);
 }
 
 /*
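
Both monitor-interface helpers above switch to a single-exit locking style where every error path jumps to out_unlock. A minimal sketch of that pattern (illustrative error codes, not the kernel functions):

    /* Minimal sketch of the single-exit "goto out_unlock" locking pattern
     * that the monitor-interface helpers adopt above. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *monitor;

    static int add_monitor(void)
    {
    	int ret = 0;

    	pthread_mutex_lock(&lock);
    	if (monitor)		/* already present: nothing to do */
    		goto out_unlock;

    	monitor = malloc(64);
    	if (!monitor) {
    		ret = -1;	/* -ENOMEM stand-in */
    		goto out_unlock;
    	}
     out_unlock:
    	pthread_mutex_unlock(&lock);	/* every path unlocks exactly once */
    	return ret;
    }

    int main(void)
    {
    	printf("add_monitor: %d\n", add_monitor());
    	return 0;
    }
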
@@ -520,7 +646,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-	if (local->scan_sdata == sdata)
+	if (rcu_access_pointer(local->scan_sdata) == sdata)
 		ieee80211_scan_cancel(local);
 
 	/*
@@ -528,10 +654,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	 */
 	netif_tx_stop_all_queues(sdata->dev);
 
-	/*
-	 * Purge work for this interface.
-	 */
-	ieee80211_work_purge(sdata);
+	ieee80211_roc_purge(sdata);
 
 	/*
 	 * Remove all stations associated with this interface.
@@ -637,18 +760,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		ieee80211_configure_filter(local);
 		break;
 	default:
-		mutex_lock(&local->mtx);
-		if (local->hw_roc_dev == sdata->dev &&
-		    local->hw_roc_channel) {
-			/* ignore return value since this is racy */
-			drv_cancel_remain_on_channel(local);
-			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
-		}
-		mutex_unlock(&local->mtx);
-
-		flush_work(&local->hw_roc_start);
-		flush_work(&local->hw_roc_done);
-
 		flush_work(&sdata->work);
 		/*
 		 * When we get here, the interface is marked down.
@@ -823,7 +934,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 
 	hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
 
-	return ieee80211_select_queue_80211(local, skb, hdr);
+	return ieee80211_select_queue_80211(sdata, skb, hdr);
 }
 
 static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -1238,7 +1349,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 
 	if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
 		/* not a contiguous mask ... not handled now! */
-		printk(KERN_DEBUG "not contiguous\n");
+		pr_info("not contiguous\n");
 		break;
 	}
 
@@ -1284,7 +1395,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 }
 
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
-		     struct net_device **new_dev, enum nl80211_iftype type,
+		     struct wireless_dev **new_wdev, enum nl80211_iftype type,
 		     struct vif_params *params)
 {
 	struct net_device *ndev;
@@ -1364,6 +1475,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 		sdata->u.mgd.use_4addr = params->use_4addr;
 	}
 
+	ndev->features |= local->hw.netdev_features;
+
 	ret = register_netdevice(ndev);
 	if (ret)
 		goto fail;
@@ -1372,8 +1485,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	list_add_tail_rcu(&sdata->list, &local->interfaces);
 	mutex_unlock(&local->iflist_mtx);
 
-	if (new_dev)
-		*new_dev = ndev;
+	if (new_wdev)
+		*new_wdev = &sdata->wdev;
 
 	return 0;
 
@@ -1421,138 +1534,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	list_del(&unreg_list);
 }
 
-static u32 ieee80211_idle_off(struct ieee80211_local *local,
-			      const char *reason)
-{
-	if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
-		return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
-#endif
-
-	local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
-	return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-static u32 ieee80211_idle_on(struct ieee80211_local *local)
-{
-	if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
-		return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(local->hw.wiphy, "device now idle\n");
-#endif
-
-	drv_flush(local, false);
-
-	local->hw.conf.flags |= IEEE80211_CONF_IDLE;
-	return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
-{
-	struct ieee80211_sub_if_data *sdata;
-	int count = 0;
-	bool working = false, scanning = false, hw_roc = false;
-	struct ieee80211_work *wk;
-	unsigned int led_trig_start = 0, led_trig_stop = 0;
-
-#ifdef CONFIG_PROVE_LOCKING
-	WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
-		!lockdep_is_held(&local->iflist_mtx));
-#endif
-	lockdep_assert_held(&local->mtx);
-
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata)) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-
-		sdata->old_idle = sdata->vif.bss_conf.idle;
-
-		/* do not count disabled managed interfaces */
-		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-		    !sdata->u.mgd.associated &&
-		    !sdata->u.mgd.auth_data &&
-		    !sdata->u.mgd.assoc_data) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-		/* do not count unused IBSS interfaces */
-		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-		    !sdata->u.ibss.ssid_len) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-		/* count everything else */
-		sdata->vif.bss_conf.idle = false;
-		count++;
-	}
-
-	list_for_each_entry(wk, &local->work_list, list) {
-		working = true;
-		wk->sdata->vif.bss_conf.idle = false;
-	}
-
-	if (local->scan_sdata &&
-	    !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
-		scanning = true;
-		local->scan_sdata->vif.bss_conf.idle = false;
-	}
-
-	if (local->hw_roc_channel)
-		hw_roc = true;
-
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-			continue;
-		if (sdata->old_idle == sdata->vif.bss_conf.idle)
-			continue;
-		if (!ieee80211_sdata_running(sdata))
-			continue;
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
-	}
-
-	if (working || scanning || hw_roc)
-		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
-	else
-		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
-
-	if (count)
-		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
-	else
-		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
-
-	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
-
-	if (hw_roc)
-		return ieee80211_idle_off(local, "hw remain-on-channel");
-	if (working)
-		return ieee80211_idle_off(local, "working");
-	if (scanning)
-		return ieee80211_idle_off(local, "scanning");
-	if (!count)
-		return ieee80211_idle_on(local);
-	else
-		return ieee80211_idle_off(local, "in use");
-
-	return 0;
-}
-
-void ieee80211_recalc_idle(struct ieee80211_local *local)
-{
-	u32 chg;
-
-	mutex_lock(&local->iflist_mtx);
-	chg = __ieee80211_recalc_idle(local);
-	mutex_unlock(&local->iflist_mtx);
-	if (chg)
-		ieee80211_hw_config(local, chg);
-}
-
 static int netdev_notify(struct notifier_block *nb,
 			 unsigned long state,
 			 void *ndev)
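
The deleted block above is the old copy of the idle recalculation, now moved to the top of this file; note it asserted its locking context with lockdep before touching shared state. A toy userspace sketch of that assert-your-caller-holds-the-lock style (assert() standing in for lockdep, which tracks this automatically in the kernel):

    /* Userspace sketch of the lock-assertion style used by
     * __ieee80211_recalc_idle() (lockdep_assert_held). */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static int mtx_held;	/* toy stand-in for lockdep state */

    static void do_recalc(void)
    {
    	assert(mtx_held);	/* catch callers that forgot the lock */
    	printf("recalculating under lock\n");
    }

    int main(void)
    {
    	pthread_mutex_lock(&mtx);
    	mtx_held = 1;
    	do_recalc();
    	mtx_held = 0;
    	pthread_mutex_unlock(&mtx);
    	return 0;
    }
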
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5bb600d93d77..7ae678ba5d67 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 	}
 
 	if (ret != -ENOSPC && ret != -EOPNOTSUPP)
-		wiphy_err(key->local->hw.wiphy,
+		sdata_err(sdata,
 			  "failed to set key (%d, %pM) to hardware (%d)\n",
 			  key->conf.keyidx,
 			  sta ? sta->sta.addr : bcast_addr, ret);
@@ -186,7 +186,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
 			  sta ? &sta->sta : NULL, &key->conf);
 
 	if (ret)
-		wiphy_err(key->local->hw.wiphy,
+		sdata_err(sdata,
 			  "failed to remove key (%d, %pM) from hardware (%d)\n",
 			  key->conf.keyidx,
 			  sta ? sta->sta.addr : bcast_addr, ret);
@@ -194,26 +194,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
 	key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 }
 
-void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
-{
-	struct ieee80211_key *key;
-
-	key = container_of(key_conf, struct ieee80211_key, conf);
-
-	might_sleep();
-	assert_key_lock(key->local);
-
-	key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
-	/*
-	 * Flush TX path to avoid attempts to use this key
-	 * after this function returns. Until then, drivers
-	 * must be prepared to handle the key.
-	 */
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(ieee80211_key_removed);
-
 static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
 					int idx, bool uni, bool multi)
 {
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 1bf7903496f8..bcffa6903129 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -276,7 +276,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
 
 	read_lock(&tpt_trig->trig.leddev_list_lock);
 	list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
-		led_brightness_set(led_cdev, LED_OFF);
+		led_set_brightness(led_cdev, LED_OFF);
 	read_unlock(&tpt_trig->trig.leddev_list_lock);
 }
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f5548e953259..c26e231c733a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -322,7 +322,8 @@ static void ieee80211_restart_work(struct work_struct *work)
 
 	mutex_lock(&local->mtx);
 	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-	     local->sched_scanning,
+	     rcu_dereference_protected(local->sched_scan_sdata,
+				       lockdep_is_held(&local->mtx)),
 	     "%s called with hardware scan in progress\n", __func__);
 	mutex_unlock(&local->mtx);
 
@@ -345,6 +346,13 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
 	ieee80211_stop_queues_by_reason(hw,
 		IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
+	/*
+	 * Stop all Rx during the reconfig. We don't want state changes
+	 * or driver callbacks while this is in progress.
+	 */
+	local->in_reconfig = true;
+	barrier();
+
 	schedule_work(&local->restart_work);
 }
 EXPORT_SYMBOL(ieee80211_restart_hw);
@@ -455,7 +463,9 @@ static const struct ieee80211_txrx_stypes
 ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 	[NL80211_IFTYPE_ADHOC] = {
 		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+			BIT(IEEE80211_STYPE_AUTH >> 4) |
+			BIT(IEEE80211_STYPE_DEAUTH >> 4),
 	},
 	[NL80211_IFTYPE_STATION] = {
 		.tx = 0xffff,
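
The RX bitmap above registers interest per management-frame subtype, mapping each frame-control subtype value to a bit via (stype >> 4). A standalone sketch with the real subtype constants:

    /* Sketch of the per-subtype RX registration bitmap extended above:
     * each 802.11 management subtype maps to one bit via (stype >> 4). */
    #include <stdint.h>
    #include <stdio.h>

    #define STYPE_AUTH	0x00b0	/* frame-control subtype values */
    #define STYPE_DEAUTH	0x00c0
    #define STYPE_ACTION	0x00d0

    #define BIT(n) (1u << (n))

    int main(void)
    {
    	uint16_t rx = BIT(STYPE_ACTION >> 4) |
    		      BIT(STYPE_AUTH >> 4) |
    		      BIT(STYPE_DEAUTH >> 4);

    	printf("rx mask: 0x%04x\n", rx);
    	printf("auth allowed: %d\n", !!(rx & BIT(STYPE_AUTH >> 4)));
    	return 0;
    }
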
@@ -578,7 +588,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-	BUG_ON(!ops->tx && !ops->tx_frags);
+	BUG_ON(!ops->tx);
 	BUG_ON(!ops->start);
 	BUG_ON(!ops->stop);
 	BUG_ON(!ops->config);
@@ -625,8 +635,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
-	ieee80211_work_init(local);
-
 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
 
 	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
@@ -669,7 +677,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	ieee80211_led_names(local);
 
-	ieee80211_hw_roc_setup(local);
+	ieee80211_roc_setup(local);
 
 	return &local->hw;
 }
@@ -681,7 +689,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	int result, i;
 	enum ieee80211_band band;
 	int channels, max_bitrates;
-	bool supp_ht;
+	bool supp_ht, supp_vht;
+	netdev_features_t feature_whitelist;
 	static const u32 cipher_suites[] = {
 		/* keep WEP first, it may be removed below */
 		WLAN_CIPHER_SUITE_WEP40,
@@ -698,16 +707,21 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		    local->hw.offchannel_tx_hw_queue >= local->hw.queues))
 		return -EINVAL;
 
-	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
 #ifdef CONFIG_PM
-	    && (!local->ops->suspend || !local->ops->resume)
-#endif
-	    )
+	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) &&
+	    (!local->ops->suspend || !local->ops->resume))
 		return -EINVAL;
+#endif
 
 	if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
 		return -EINVAL;
 
+	/* Only HW csum features are currently compatible with mac80211 */
+	feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			    NETIF_F_HW_CSUM;
+	if (WARN_ON(hw->netdev_features & ~feature_whitelist))
+		return -EINVAL;
+
 	if (hw->max_report_rates == 0)
 		hw->max_report_rates = hw->max_rates;
 
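
The whitelist check above rejects drivers advertising netdev features mac80211 cannot honor. A standalone sketch of the mask test (flag values are made up for the example):

    /* Standalone sketch of the feature-whitelist check added above:
     * reject any advertised flag outside an allowed mask. */
    #include <stdint.h>
    #include <stdio.h>

    #define F_IP_CSUM	(1u << 0)
    #define F_IPV6_CSUM	(1u << 1)
    #define F_HW_CSUM	(1u << 2)
    #define F_TSO		(1u << 3)	/* not whitelisted */

    static int check_features(uint32_t requested)
    {
    	const uint32_t whitelist = F_IP_CSUM | F_IPV6_CSUM | F_HW_CSUM;

    	if (requested & ~whitelist)
    		return -1;	/* -EINVAL stand-in */
    	return 0;
    }

    int main(void)
    {
    	printf("csum only: %d\n", check_features(F_HW_CSUM));
    	printf("with TSO:  %d\n", check_features(F_HW_CSUM | F_TSO));
    	return 0;
    }
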
@@ -719,6 +733,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	channels = 0;
 	max_bitrates = 0;
 	supp_ht = false;
+	supp_vht = false;
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		struct ieee80211_supported_band *sband;
 
@@ -736,6 +751,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		if (max_bitrates < sband->n_bitrates)
 			max_bitrates = sband->n_bitrates;
 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
+		supp_vht = supp_vht || sband->vht_cap.vht_supported;
 	}
 
 	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -811,6 +827,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (supp_ht)
 		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
 
+	if (supp_vht)
+		local->scan_ies_len +=
+			2 + sizeof(struct ieee80211_vht_capabilities);
+
 	if (!local->ops->hw_scan) {
 		/* For hw_scan, driver needs to set these up. */
 		local->hw.wiphy->max_scan_ssids = 4;
@@ -1009,12 +1029,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 
 	rtnl_unlock();
 
-	/*
-	 * Now all work items will be gone, but the
-	 * timer might still be armed, so delete it
-	 */
-	del_timer_sync(&local->work_timer);
-
 	cancel_work_sync(&local->restart_work);
 	cancel_work_sync(&local->reconfig_filter);
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2913113c5833..6fac18c0423f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -133,7 +133,7 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
 }
 
 /**
- * mesh_accept_plinks_update: update accepting_plink in local mesh beacons
+ * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
 *
 * @sdata: mesh interface in which mesh beacons are going to be updated
 */
@@ -443,7 +443,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
 
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
 {
-	if (ifmsh->mshcfg.dot11MeshHWMPRootMode)
+	if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)
 		set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
 	else {
 		clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -523,11 +523,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
 {
 	bool free_plinks;
 
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: running mesh housekeeping\n",
-	       sdata->name);
-#endif
-
 	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
 	mesh_path_expire(sdata);
 
@@ -542,11 +537,17 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
 static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	u32 interval;
 
 	mesh_path_tx_root_frame(sdata);
+
+	if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN)
+		interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+	else
+		interval = ifmsh->mshcfg.dot11MeshHWMProotInterval;
+
 	mod_timer(&ifmsh->mesh_path_root_timer,
-		  round_jiffies(TU_TO_EXP_TIME(
-				ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
+		  round_jiffies(TU_TO_EXP_TIME(interval)));
 }
 
 #ifdef CONFIG_PM
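
The root-path timer above is programmed in 802.11 time units; TU_TO_EXP_TIME() converts TUs to a jiffies deadline, one TU being 1024 microseconds. A userspace sketch of the arithmetic (example interval only):

    /* Sketch of the TU (time unit) conversion behind TU_TO_EXP_TIME():
     * one 802.11 TU is 1024 microseconds. */
    #include <stdio.h>

    static unsigned long tu_to_usecs(unsigned long tu)
    {
    	return tu * 1024;
    }

    int main(void)
    {
    	unsigned long rann_interval_tu = 5000;	/* example config value */

    	printf("%lu TU = %lu us\n", rann_interval_tu,
    	       tu_to_usecs(rann_interval_tu));
    	return 0;
    }
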
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e3642756f8f4..faaa39bcfd10 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -104,6 +104,7 @@ enum mesh_deferred_task_flags {
 *	an mpath to a hash bucket on a path table.
 * @rann_snd_addr: the RANN sender address
 * @rann_metric: the aggregated path metric towards the root node
+ * @last_preq_to_root: Timestamp of last PREQ sent to root
 * @is_root: the destination station of this path is a root node
 * @is_gate: the destination station of this path is a mesh gate
 *
@@ -131,6 +132,7 @@ struct mesh_path {
 	spinlock_t state_lock;
 	u8 rann_snd_addr[ETH_ALEN];
 	u32 rann_metric;
+	unsigned long last_preq_to_root;
 	bool is_root;
 	bool is_gate;
 };
@@ -245,7 +247,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-		struct sta_info *stainfo, struct sk_buff *skb);
+		struct sta_info *sta, struct sk_buff *skb);
 void ieee80211s_stop(void);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9b59658e8650..494bc39f61a4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -13,13 +13,6 @@
 #include "wme.h"
 #include "mesh.h"
 
-#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) \
-	printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
-#else
-#define mhwmp_dbg(fmt, args...)   do { (void)(0); } while (0)
-#endif
-
 #define TEST_FRAME_LEN	8192
 #define MAX_METRIC	0xffffffff
 #define ARITH_SHIFT	8
@@ -98,6 +91,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
 #define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
 #define disc_timeout_jiff(s) \
 	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
+#define root_path_confirmation_jiffies(s) \
+	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
 
 enum mpath_frame_type {
 	MPATH_PREQ = 0,
@@ -142,19 +137,19 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 
 	switch (action) {
 	case MPATH_PREQ:
-		mhwmp_dbg("sending PREQ to %pM", target);
+		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
 		ie_len = 37;
 		pos = skb_put(skb, 2 + ie_len);
 		*pos++ = WLAN_EID_PREQ;
 		break;
 	case MPATH_PREP:
-		mhwmp_dbg("sending PREP to %pM", target);
+		mhwmp_dbg(sdata, "sending PREP to %pM\n", target);
 		ie_len = 31;
 		pos = skb_put(skb, 2 + ie_len);
 		*pos++ = WLAN_EID_PREP;
 		break;
 	case MPATH_RANN:
-		mhwmp_dbg("sending RANN from %pM", orig_addr);
+		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
 		ie_len = sizeof(struct ieee80211_rann_ie);
 		pos = skb_put(skb, 2 + ie_len);
 		*pos++ = WLAN_EID_RANN;
@@ -303,7 +298,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-		struct sta_info *stainfo, struct sk_buff *skb)
+		struct sta_info *sta, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -315,15 +310,14 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
 	/* moving average, scaled to 100 */
-	stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
-	if (stainfo->fail_avg > 95)
-		mesh_plink_broken(stainfo);
+	sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
+	if (sta->fail_avg > 95)
+		mesh_plink_broken(sta);
 }
 
 static u32 airtime_link_metric_get(struct ieee80211_local *local,
 				   struct sta_info *sta)
 {
-	struct ieee80211_supported_band *sband;
 	struct rate_info rinfo;
 	/* This should be adjusted for each device */
 	int device_constant = 1 << ARITH_SHIFT;
@@ -333,8 +327,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
 	u32 tx_time, estimated_retx;
 	u64 result;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
 	if (sta->fail_avg >= 100)
 		return MAX_METRIC;
 
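
The per-station fail_avg above is an integer exponentially weighted moving average on a 0..100 scale; once it exceeds 95 the peer link is declared broken. A standalone sketch of the update rule:

    /* Sketch of the scaled moving-average failure estimator kept per
     * station above: avg' = 0.8 * avg + 20 * failed, on a 0..100 scale. */
    #include <stdio.h>

    static unsigned int fail_avg;

    static void update_metric(int failed)
    {
    	/* integer EWMA scaled to 100; the +5 rounds the division */
    	fail_avg = (80 * fail_avg + 5) / 100 + 20 * failed;
    }

    int main(void)
    {
    	int i;

    	for (i = 0; i < 10; i++)
    		update_metric(1);	/* ten consecutive TX failures */
    	printf("fail_avg = %u (link broken above 95)\n", fail_avg);
    	return 0;
    }
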
@@ -519,10 +511,11 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 	struct mesh_path *mpath = NULL;
 	u8 *target_addr, *orig_addr;
 	const u8 *da;
-	u8 target_flags, ttl;
-	u32 orig_sn, target_sn, lifetime;
+	u8 target_flags, ttl, flags;
+	u32 orig_sn, target_sn, lifetime, orig_metric;
 	bool reply = false;
 	bool forward = true;
+	bool root_is_gate;
 
 	/* Update target SN, if present */
 	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
@@ -530,11 +523,15 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 	target_flags = PREQ_IE_TARGET_F(preq_elem);
+	orig_metric = metric;
+	/* Proactive PREQ gate announcements */
+	flags = PREQ_IE_FLAGS(preq_elem);
+	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 
-	mhwmp_dbg("received PREQ from %pM", orig_addr);
+	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
 
 	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
-		mhwmp_dbg("PREQ is for us");
+		mhwmp_dbg(sdata, "PREQ is for us\n");
 		forward = false;
 		reply = true;
 		metric = 0;
@@ -544,6 +541,22 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 			target_sn = ++ifmsh->sn;
 			ifmsh->last_sn_update = jiffies;
 		}
+	} else if (is_broadcast_ether_addr(target_addr) &&
+		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
+		rcu_read_lock();
+		mpath = mesh_path_lookup(orig_addr, sdata);
+		if (mpath) {
+			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+				reply = true;
+				target_addr = sdata->vif.addr;
+				target_sn = ++ifmsh->sn;
+				metric = 0;
+				ifmsh->last_sn_update = jiffies;
+			}
+			if (root_is_gate)
+				mesh_path_add_gate(mpath);
+		}
+		rcu_read_unlock();
 	} else {
 		rcu_read_lock();
 		mpath = mesh_path_lookup(target_addr, sdata);
@@ -570,19 +583,20 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 		lifetime = PREQ_IE_LIFETIME(preq_elem);
 		ttl = ifmsh->mshcfg.element_ttl;
 		if (ttl != 0) {
-			mhwmp_dbg("replying to the PREQ");
+			mhwmp_dbg(sdata, "replying to the PREQ\n");
 			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 				cpu_to_le32(orig_sn), 0, target_addr,
 				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
 				cpu_to_le32(lifetime), cpu_to_le32(metric),
 				0, sdata);
-		} else
+		} else {
 			ifmsh->mshstats.dropped_frames_ttl++;
+		}
 	}
 
 	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
 		u32 preq_id;
-		u8 hopcount, flags;
+		u8 hopcount;
 
 		ttl = PREQ_IE_TTL(preq_elem);
 		lifetime = PREQ_IE_LIFETIME(preq_elem);
@@ -590,13 +604,19 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 			ifmsh->mshstats.dropped_frames_ttl++;
 			return;
 		}
-		mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
+		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
 		--ttl;
-		flags = PREQ_IE_FLAGS(preq_elem);
 		preq_id = PREQ_IE_PREQ_ID(preq_elem);
 		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 		da = (mpath && mpath->is_root) ?
 			mpath->rann_snd_addr : broadcast_addr;
+
+		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
+			target_sn = PREQ_IE_TARGET_SN(preq_elem);
+			metric = orig_metric;
+		}
+
 		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 				cpu_to_le32(orig_sn), target_flags, target_addr,
 				cpu_to_le32(target_sn), da,
@@ -631,7 +651,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 	u8 next_hop[ETH_ALEN];
 	u32 target_sn, orig_sn, lifetime;
 
-	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
+	mhwmp_dbg(sdata, "received PREP from %pM\n",
+		  PREP_IE_ORIG_ADDR(prep_elem));
 
 	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
 	if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -744,11 +765,6 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	bool root_is_gate;
 
 	ttl = rann->rann_ttl;
-	if (ttl <= 1) {
-		ifmsh->mshstats.dropped_frames_ttl++;
-		return;
-	}
-	ttl--;
 	flags = rann->rann_flags;
 	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 	orig_addr = rann->rann_addr;
@@ -762,8 +778,9 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 		return;
 
-	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
-			orig_addr, mgmt->sa, root_is_gate);
+	mhwmp_dbg(sdata,
+		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
+		  orig_addr, mgmt->sa, root_is_gate);
 
 	rcu_read_lock();
 	sta = sta_info_get(sdata, mgmt->sa);
@@ -785,34 +802,50 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
785 } 802 }
786 } 803 }
787 804
805 if (!(SN_LT(mpath->sn, orig_sn)) &&
806 !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
807 rcu_read_unlock();
808 return;
809 }
810
788 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) || 811 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
789 time_after(jiffies, mpath->exp_time - 1*HZ)) && 812 (time_after(jiffies, mpath->last_preq_to_root +
790 !(mpath->flags & MESH_PATH_FIXED)) { 813 root_path_confirmation_jiffies(sdata)) ||
791 mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name, 814 time_before(jiffies, mpath->last_preq_to_root))) &&
792 orig_addr); 815 !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
816 mhwmp_dbg(sdata,
817 "time to refresh root mpath %pM\n",
818 orig_addr);
793 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 819 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
820 mpath->last_preq_to_root = jiffies;
794 } 821 }
795 822
796 if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && 823 mpath->sn = orig_sn;
797 metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) { 824 mpath->rann_metric = metric + metric_txsta;
825 mpath->is_root = true;
826 /* Recording RANNs sender address to send individually
827 * addressed PREQs destined for root mesh STA */
828 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
829
830 if (root_is_gate)
831 mesh_path_add_gate(mpath);
832
833 if (ttl <= 1) {
834 ifmsh->mshstats.dropped_frames_ttl++;
835 rcu_read_unlock();
836 return;
837 }
838 ttl--;
839
840 if (ifmsh->mshcfg.dot11MeshForwarding) {
798 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 841 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
799 cpu_to_le32(orig_sn), 842 cpu_to_le32(orig_sn),
800 0, NULL, 0, broadcast_addr, 843 0, NULL, 0, broadcast_addr,
801 hopcount, ttl, cpu_to_le32(interval), 844 hopcount, ttl, cpu_to_le32(interval),
802 cpu_to_le32(metric + metric_txsta), 845 cpu_to_le32(metric + metric_txsta),
803 0, sdata); 846 0, sdata);
804 mpath->sn = orig_sn;
805 mpath->rann_metric = metric + metric_txsta;
806 /* Recording RANNs sender address to send individually
807 * addressed PREQs destined for root mesh STA */
808 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
809 } 847 }
810 848
811 mpath->is_root = true;
812
813 if (root_is_gate)
814 mesh_path_add_gate(mpath);
815
816 rcu_read_unlock(); 849 rcu_read_unlock();
817} 850}
818 851
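
The reordered RANN handler above rejects stale announcements up front: unless the RANN carries a newer sequence number or a better metric at the same sequence number, processing stops before any path state is touched. The TTL check also moves after the path update, so a RANN arriving with ttl <= 1 still refreshes the local root path and only forwarding is suppressed. A minimal userspace sketch of circular sequence-number ordering in the spirit of SN_LT() follows; the exact in-tree macro lives in net/mac80211/mesh.h, so treat this definition as an assumption.

#include <stdint.h>
#include <stdio.h>

/* Circular ("serial number") comparison sketch in the style of
 * SN_LT(): s1 precedes s2 iff the wrapped 32-bit difference is
 * negative, which keeps working when sequence numbers wrap. */
static int sn_lt(uint32_t s1, uint32_t s2)
{
    return (int32_t)(s1 - s2) < 0;
}

int main(void)
{
    printf("%d\n", sn_lt(0xfffffff0u, 0x10u)); /* 1: older across the wrap */
    printf("%d\n", sn_lt(0x10u, 0xfffffff0u)); /* 0 */
    return 0;
}
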
@@ -889,7 +922,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
889 922
890 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); 923 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
891 if (!preq_node) { 924 if (!preq_node) {
892 mhwmp_dbg("could not allocate PREQ node"); 925 mhwmp_dbg(sdata, "could not allocate PREQ node\n");
893 return; 926 return;
894 } 927 }
895 928
@@ -898,7 +931,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
898 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); 931 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
899 kfree(preq_node); 932 kfree(preq_node);
900 if (printk_ratelimit()) 933 if (printk_ratelimit())
901 mhwmp_dbg("PREQ node queue full"); 934 mhwmp_dbg(sdata, "PREQ node queue full\n");
902 return; 935 return;
903 } 936 }
904 937
@@ -1021,12 +1054,15 @@ enddiscovery:
1021 kfree(preq_node); 1054 kfree(preq_node);
1022} 1055}
1023 1056
1024/* mesh_nexthop_resolve - lookup next hop for given skb and start path 1057/**
1025 * discovery if no forwarding information is found. 1058 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1026 * 1059 *
1027 * @skb: 802.11 frame to be sent 1060 * @skb: 802.11 frame to be sent
1028 * @sdata: network subif the frame will be sent through 1061 * @sdata: network subif the frame will be sent through
1029 * 1062 *
1063 * Lookup next hop for given skb and start path discovery if no
1064 * forwarding information is found.
1065 *
1030 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. 1066 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
1031 * skb is freed here if no mpath could be allocated. 1067 * skb is freed here if no mpath could be allocated.
1032 */ 1068 */
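
The rewritten kernel-doc pins down the contract callers depend on: 0 means the next hop was resolved and the frame may be transmitted immediately, while -ENOENT means the frame was queued while path discovery runs, which is not a failure. A hedged caller sketch; fake_resolve is a stand-in for illustration, not the real mesh_nexthop_resolve:

#include <errno.h>
#include <stdio.h>

/* Stand-in that mimics only the documented return contract. */
static int fake_resolve(int have_forwarding_info)
{
    return have_forwarding_info ? 0 : -ENOENT;
}

int main(void)
{
    int err = fake_resolve(0);

    if (err == 0)
        puts("next hop known: transmit now");
    else if (err == -ENOENT)
        puts("frame queued, discovery started; ownership passed on");
    return 0;
}
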
@@ -1146,7 +1182,7 @@ void mesh_path_timer(unsigned long data)
1146 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) { 1182 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1147 ret = mesh_path_send_to_gates(mpath); 1183 ret = mesh_path_send_to_gates(mpath);
1148 if (ret) 1184 if (ret)
1149 mhwmp_dbg("no gate was reachable"); 1185 mhwmp_dbg(sdata, "no gate was reachable\n");
1150 } else 1186 } else
1151 mesh_path_flush_pending(mpath); 1187 mesh_path_flush_pending(mpath);
1152 } 1188 }
@@ -1157,13 +1193,34 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1157{ 1193{
1158 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1194 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1159 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; 1195 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1160 u8 flags; 1196 u8 flags, target_flags = 0;
1161 1197
1162 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol) 1198 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1163 ? RANN_FLAG_IS_GATE : 0; 1199 ? RANN_FLAG_IS_GATE : 0;
1164 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr, 1200
1201 switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1202 case IEEE80211_PROACTIVE_RANN:
1203 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1165 cpu_to_le32(++ifmsh->sn), 1204 cpu_to_le32(++ifmsh->sn),
1166 0, NULL, 0, broadcast_addr, 1205 0, NULL, 0, broadcast_addr,
1167 0, sdata->u.mesh.mshcfg.element_ttl, 1206 0, ifmsh->mshcfg.element_ttl,
1168 cpu_to_le32(interval), 0, 0, sdata); 1207 cpu_to_le32(interval), 0, 0, sdata);
1208 break;
1209 case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1210 flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
1211 case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1212 interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1213 target_flags |= IEEE80211_PREQ_TO_FLAG |
1214 IEEE80211_PREQ_USN_FLAG;
1215 mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1216 cpu_to_le32(++ifmsh->sn), target_flags,
1217 (u8 *) broadcast_addr, 0, broadcast_addr,
1218 0, ifmsh->mshcfg.element_ttl,
1219 cpu_to_le32(interval),
1220 0, cpu_to_le32(ifmsh->preq_id++), sdata);
1221 break;
1222 default:
1223 mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1224 return;
1225 }
1169} 1226}
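
mesh_path_tx_root_frame() now dispatches on dot11MeshHWMPRootMode: proactive RANN keeps the previous behaviour, while the two proactive-PREQ modes share one transmit path through a deliberate fall-through, with the PREP-soliciting mode first setting the proactive-PREP flag. A compact sketch of that dispatch shape; the enum values and flag bit below are illustrative assumptions, not the real IEEE80211_* constants.

#include <stdio.h>

enum root_mode {                    /* illustrative values only */
    PROACTIVE_RANN = 2,
    PROACTIVE_PREQ_NO_PREP = 3,
    PROACTIVE_PREQ_WITH_PREP = 4,
};
#define PROACTIVE_PREP_FLAG 0x04    /* assumed bit, for the sketch */

static void tx_root_frame(enum root_mode mode)
{
    unsigned int flags = 0;

    switch (mode) {
    case PROACTIVE_RANN:
        puts("tx proactive RANN");
        break;
    case PROACTIVE_PREQ_WITH_PREP:
        flags |= PROACTIVE_PREP_FLAG;
        /* fall through: both PREQ modes transmit the same frame */
    case PROACTIVE_PREQ_NO_PREP:
        printf("tx proactive PREQ, flags=%#x\n", flags);
        break;
    default:
        puts("proactive mechanism not supported");
    }
}

int main(void)
{
    tx_root_frame(PROACTIVE_PREQ_WITH_PREP); /* prints flags=0x4 */
    return 0;
}
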
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b39224d8255c..075bc535c601 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,12 +18,6 @@
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "mesh.h" 19#include "mesh.h"
20 20
21#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
22#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
23#else
24#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
25#endif
26
27/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ 21/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
28#define INIT_PATHS_SIZE_ORDER 2 22#define INIT_PATHS_SIZE_ORDER 2
29 23
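
This hunk drops the file-local CONFIG_MAC80211_VERBOSE_MPATH_DEBUG wrapper; call sites now use a shared mpath_dbg(sdata, ...) that takes the interface explicitly, so the "%s: " name prefix no longer needs to be open-coded at every call. A userspace approximation of the pattern, assuming the in-tree helper (in mac80211's debug header in this series) is selected at build time by Kconfig rather than a runtime flag:

#include <stdio.h>

struct sdata { const char *name; };   /* stand-in for the real struct */

#define VERBOSE_MPATH_DEBUG 1         /* comment out to compile calls away */

#ifdef VERBOSE_MPATH_DEBUG
#define mpath_dbg(sdata, fmt, ...) \
    fprintf(stderr, "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
#else
#define mpath_dbg(sdata, fmt, ...) do { } while (0)
#endif

int main(void)
{
    struct sdata s = { .name = "mesh0" };

    /* the interface prefix comes from the macro, not the caller */
    mpath_dbg(&s, "Forwarding to %s\n", "00:11:22:33:44:55");
    return 0;
}
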
@@ -322,9 +316,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
322 316
323 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); 317 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
324 skb_queue_splice(&gateq, &gate_mpath->frame_queue); 318 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
325 mpath_dbg("Mpath queue for gate %pM has %d frames\n", 319 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
326 gate_mpath->dst, 320 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
327 skb_queue_len(&gate_mpath->frame_queue));
328 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); 321 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
329 322
330 if (!copy) 323 if (!copy)
@@ -446,9 +439,9 @@ int mesh_path_add_gate(struct mesh_path *mpath)
446 hlist_add_head_rcu(&new_gate->list, tbl->known_gates); 439 hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
447 spin_unlock_bh(&tbl->gates_lock); 440 spin_unlock_bh(&tbl->gates_lock);
448 rcu_read_unlock(); 441 rcu_read_unlock();
449 mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", 442 mpath_dbg(mpath->sdata,
450 mpath->sdata->name, mpath->dst, 443 "Mesh path: Recorded new gate: %pM. %d known gates\n",
451 mpath->sdata->u.mesh.num_gates); 444 mpath->dst, mpath->sdata->u.mesh.num_gates);
452 return 0; 445 return 0;
453err_rcu: 446err_rcu:
454 rcu_read_unlock(); 447 rcu_read_unlock();
@@ -477,8 +470,8 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
477 spin_unlock_bh(&tbl->gates_lock); 470 spin_unlock_bh(&tbl->gates_lock);
478 mpath->sdata->u.mesh.num_gates--; 471 mpath->sdata->u.mesh.num_gates--;
479 mpath->is_gate = false; 472 mpath->is_gate = false;
480 mpath_dbg("Mesh path (%s): Deleted gate: %pM. " 473 mpath_dbg(mpath->sdata,
481 "%d known gates\n", mpath->sdata->name, 474 "Mesh path: Deleted gate: %pM. %d known gates\n",
482 mpath->dst, mpath->sdata->u.mesh.num_gates); 475 mpath->dst, mpath->sdata->u.mesh.num_gates);
483 break; 476 break;
484 } 477 }
@@ -785,7 +778,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
785/** 778/**
786 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 779 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
787 * 780 *
788 * @sta - mesh peer to match 781 * @sta: mesh peer to match
789 * 782 *
790 * RCU notes: this function is called when a mesh plink transitions from 783 * RCU notes: this function is called when a mesh plink transitions from
791 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that 784 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
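
The "@sta -" to "@sta:" change is not cosmetic: scripts/kernel-doc keys on the colon form, so the dash variant is not picked up as a parameter description. The expected comment shape, shown on a stub for illustration:

struct sta_info;

/**
 * example_flush_by_nexthop - delete mesh paths via a given next hop
 * @sta: mesh peer to match
 *
 * kernel-doc wants "name - summary" on the first line and "@param:"
 * (colon, not dash) for each parameter, which is what these hunks fix.
 */
static void example_flush_by_nexthop(struct sta_info *sta)
{
    (void)sta; /* stub: the real logic lives in mesh_pathtbl.c */
}
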
@@ -840,7 +833,7 @@ static void table_flush_by_iface(struct mesh_table *tbl,
840 * 833 *
841 * This function deletes both mesh paths as well as mesh portal paths. 834 * This function deletes both mesh paths as well as mesh portal paths.
842 * 835 *
843 * @sdata - interface data to match 836 * @sdata: interface data to match
844 * 837 *
845 */ 838 */
846void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) 839void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
@@ -946,19 +939,20 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
946 continue; 939 continue;
947 940
948 if (gate->mpath->flags & MESH_PATH_ACTIVE) { 941 if (gate->mpath->flags & MESH_PATH_ACTIVE) {
949 mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); 942 mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
950 mesh_path_move_to_queue(gate->mpath, from_mpath, copy); 943 mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
951 from_mpath = gate->mpath; 944 from_mpath = gate->mpath;
952 copy = true; 945 copy = true;
953 } else { 946 } else {
954 mpath_dbg("Not forwarding %p\n", gate->mpath); 947 mpath_dbg(sdata,
955 mpath_dbg("flags %x\n", gate->mpath->flags); 948 "Not forwarding %p (flags %#x)\n",
949 gate->mpath, gate->mpath->flags);
956 } 950 }
957 } 951 }
958 952
959 hlist_for_each_entry_rcu(gate, n, known_gates, list) 953 hlist_for_each_entry_rcu(gate, n, known_gates, list)
960 if (gate->mpath->sdata == sdata) { 954 if (gate->mpath->sdata == sdata) {
961 mpath_dbg("Sending to %pM\n", gate->mpath->dst); 955 mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
962 mesh_path_tx_pending(gate->mpath); 956 mesh_path_tx_pending(gate->mpath);
963 } 957 }
964 958
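
For context, the queue handling in mesh_path_send_to_gates() that these debug calls annotate: pending frames are spliced (moved) into the first active gate's queue and copied into each subsequent one, so every active gate ends up with the full frame set. A toy sketch of that move-then-copy handoff:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    const char *gates[] = { "gate-A", "gate-B", "gate-C" };
    bool copy = false;  /* the first active gate gets the frames moved... */
    int i;

    for (i = 0; i < 3; i++) {
        printf("%s frames to %s\n", copy ? "copy" : "move", gates[i]);
        copy = true;    /* ...every later gate gets a copy */
    }
    return 0;
}
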
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60ef235c9d9b..af671b984df3 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,12 +13,6 @@
13#include "rate.h" 13#include "rate.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
17#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
18#else
19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22#define PLINK_GET_LLID(p) (p + 2) 16#define PLINK_GET_LLID(p) (p + 2)
23#define PLINK_GET_PLID(p) (p + 4) 17#define PLINK_GET_PLID(p) (p + 4)
24 18
@@ -105,7 +99,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 return sta; 99 return sta;
106} 100}
107 101
108/* 102/**
109 * mesh_set_ht_prot_mode - set correct HT protection mode 103 * mesh_set_ht_prot_mode - set correct HT protection mode
110 * 104 *
111 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT 105 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
@@ -134,12 +128,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
134 128
135 switch (sta->ch_type) { 129 switch (sta->ch_type) {
136 case NL80211_CHAN_NO_HT: 130 case NL80211_CHAN_NO_HT:
137 mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present", 131 mpl_dbg(sdata,
132 "mesh_plink %pM: nonHT sta (%pM) is present\n",
138 sdata->vif.addr, sta->sta.addr); 133 sdata->vif.addr, sta->sta.addr);
139 non_ht_sta = true; 134 non_ht_sta = true;
140 goto out; 135 goto out;
141 case NL80211_CHAN_HT20: 136 case NL80211_CHAN_HT20:
142 mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present", 137 mpl_dbg(sdata,
138 "mesh_plink %pM: HT20 sta (%pM) is present\n",
143 sdata->vif.addr, sta->sta.addr); 139 sdata->vif.addr, sta->sta.addr);
144 ht20_sta = true; 140 ht20_sta = true;
145 default: 141 default:
@@ -160,7 +156,8 @@ out:
160 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 156 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
161 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; 157 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
162 changed = BSS_CHANGED_HT; 158 changed = BSS_CHANGED_HT;
163 mpl_dbg("mesh_plink %pM: protection mode changed to %d", 159 mpl_dbg(sdata,
160 "mesh_plink %pM: protection mode changed to %d\n",
164 sdata->vif.addr, ht_opmode); 161 sdata->vif.addr, ht_opmode);
165 } 162 }
166 163
@@ -261,8 +258,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
261 pos = skb_put(skb, 2); 258 pos = skb_put(skb, 2);
262 memcpy(pos + 2, &plid, 2); 259 memcpy(pos + 2, &plid, 2);
263 } 260 }
264 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || 261 if (ieee80211_add_srates_ie(sdata, skb, true) ||
265 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || 262 ieee80211_add_ext_srates_ie(sdata, skb, true) ||
266 mesh_add_rsn_ie(skb, sdata) || 263 mesh_add_rsn_ie(skb, sdata) ||
267 mesh_add_meshid_ie(skb, sdata) || 264 mesh_add_meshid_ie(skb, sdata) ||
268 mesh_add_meshconf_ie(skb, sdata)) 265 mesh_add_meshconf_ie(skb, sdata))
@@ -323,7 +320,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
323 return 0; 320 return 0;
324} 321}
325 322
326/* mesh_peer_init - initialize new mesh peer and return resulting sta_info 323/**
324 * mesh_peer_init - initialize new mesh peer and return resulting sta_info
327 * 325 *
328 * @sdata: local meshif 326 * @sdata: local meshif
329 * @addr: peer's address 327 * @addr: peer's address
@@ -437,7 +435,8 @@ static void mesh_plink_timer(unsigned long data)
437 spin_unlock_bh(&sta->lock); 435 spin_unlock_bh(&sta->lock);
438 return; 436 return;
439 } 437 }
440 mpl_dbg("Mesh plink timer for %pM fired on state %d\n", 438 mpl_dbg(sta->sdata,
439 "Mesh plink timer for %pM fired on state %d\n",
441 sta->sta.addr, sta->plink_state); 440 sta->sta.addr, sta->plink_state);
442 reason = 0; 441 reason = 0;
443 llid = sta->llid; 442 llid = sta->llid;
@@ -450,7 +449,8 @@ static void mesh_plink_timer(unsigned long data)
450 /* retry timer */ 449 /* retry timer */
451 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 450 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
452 u32 rand; 451 u32 rand;
453 mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n", 452 mpl_dbg(sta->sdata,
453 "Mesh plink for %pM (retry, timeout): %d %d\n",
454 sta->sta.addr, sta->plink_retries, 454 sta->sta.addr, sta->plink_retries,
455 sta->plink_timeout); 455 sta->plink_timeout);
456 get_random_bytes(&rand, sizeof(u32)); 456 get_random_bytes(&rand, sizeof(u32));
@@ -530,7 +530,8 @@ int mesh_plink_open(struct sta_info *sta)
530 sta->plink_state = NL80211_PLINK_OPN_SNT; 530 sta->plink_state = NL80211_PLINK_OPN_SNT;
531 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 531 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
532 spin_unlock_bh(&sta->lock); 532 spin_unlock_bh(&sta->lock);
533 mpl_dbg("Mesh plink: starting establishment with %pM\n", 533 mpl_dbg(sdata,
534 "Mesh plink: starting establishment with %pM\n",
534 sta->sta.addr); 535 sta->sta.addr);
535 536
536 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN, 537 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
@@ -565,7 +566,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
565 u8 *baseaddr; 566 u8 *baseaddr;
566 u32 changed = 0; 567 u32 changed = 0;
567 __le16 plid, llid, reason; 568 __le16 plid, llid, reason;
568#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
569 static const char *mplstates[] = { 569 static const char *mplstates[] = {
570 [NL80211_PLINK_LISTEN] = "LISTEN", 570 [NL80211_PLINK_LISTEN] = "LISTEN",
571 [NL80211_PLINK_OPN_SNT] = "OPN-SNT", 571 [NL80211_PLINK_OPN_SNT] = "OPN-SNT",
@@ -575,14 +575,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
575 [NL80211_PLINK_HOLDING] = "HOLDING", 575 [NL80211_PLINK_HOLDING] = "HOLDING",
576 [NL80211_PLINK_BLOCKED] = "BLOCKED" 576 [NL80211_PLINK_BLOCKED] = "BLOCKED"
577 }; 577 };
578#endif
579 578
580 /* need action_code, aux */ 579 /* need action_code, aux */
581 if (len < IEEE80211_MIN_ACTION_SIZE + 3) 580 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
582 return; 581 return;
583 582
584 if (is_multicast_ether_addr(mgmt->da)) { 583 if (is_multicast_ether_addr(mgmt->da)) {
585 mpl_dbg("Mesh plink: ignore frame from multicast address"); 584 mpl_dbg(sdata,
585 "Mesh plink: ignore frame from multicast address\n");
586 return; 586 return;
587 } 587 }
588 588
@@ -595,12 +595,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
595 } 595 }
596 ieee802_11_parse_elems(baseaddr, len - baselen, &elems); 596 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
597 if (!elems.peering) { 597 if (!elems.peering) {
598 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 598 mpl_dbg(sdata,
599 "Mesh plink: missing necessary peer link ie\n");
599 return; 600 return;
600 } 601 }
601 if (elems.rsn_len && 602 if (elems.rsn_len &&
602 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) { 603 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
603 mpl_dbg("Mesh plink: can't establish link with secure peer\n"); 604 mpl_dbg(sdata,
605 "Mesh plink: can't establish link with secure peer\n");
604 return; 606 return;
605 } 607 }
606 608
@@ -610,14 +612,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
610 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) || 612 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
611 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6 613 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
612 && ie_len != 8)) { 614 && ie_len != 8)) {
613 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", 615 mpl_dbg(sdata,
614 ftype, ie_len); 616 "Mesh plink: incorrect plink ie length %d %d\n",
617 ftype, ie_len);
615 return; 618 return;
616 } 619 }
617 620
618 if (ftype != WLAN_SP_MESH_PEERING_CLOSE && 621 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
619 (!elems.mesh_id || !elems.mesh_config)) { 622 (!elems.mesh_id || !elems.mesh_config)) {
620 mpl_dbg("Mesh plink: missing necessary ie\n"); 623 mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
621 return; 624 return;
622 } 625 }
623 /* Note the lines below are correct, the llid in the frame is the plid 626 /* Note the lines below are correct, the llid in the frame is the plid
@@ -632,21 +635,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
632 635
633 sta = sta_info_get(sdata, mgmt->sa); 636 sta = sta_info_get(sdata, mgmt->sa);
634 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) { 637 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
635 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 638 mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
636 rcu_read_unlock(); 639 rcu_read_unlock();
637 return; 640 return;
638 } 641 }
639 642
640 if (ftype == WLAN_SP_MESH_PEERING_OPEN && 643 if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
641 !rssi_threshold_check(sta, sdata)) { 644 !rssi_threshold_check(sta, sdata)) {
642 mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n", 645 mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n",
643 mgmt->sa); 646 mgmt->sa);
644 rcu_read_unlock(); 647 rcu_read_unlock();
645 return; 648 return;
646 } 649 }
647 650
648 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) { 651 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
649 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 652 mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
650 rcu_read_unlock(); 653 rcu_read_unlock();
651 return; 654 return;
652 } 655 }
@@ -683,7 +686,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
683 } else if (!sta) { 686 } else if (!sta) {
684 /* ftype == WLAN_SP_MESH_PEERING_OPEN */ 687 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
685 if (!mesh_plink_free_count(sdata)) { 688 if (!mesh_plink_free_count(sdata)) {
686 mpl_dbg("Mesh plink error: no more free plinks\n"); 689 mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
687 rcu_read_unlock(); 690 rcu_read_unlock();
688 return; 691 return;
689 } 692 }
@@ -724,7 +727,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
724 event = CLS_ACPT; 727 event = CLS_ACPT;
725 break; 728 break;
726 default: 729 default:
727 mpl_dbg("Mesh plink: unknown frame subtype\n"); 730 mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n");
728 rcu_read_unlock(); 731 rcu_read_unlock();
729 return; 732 return;
730 } 733 }
@@ -734,13 +737,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
734 /* allocate sta entry if necessary and update info */ 737 /* allocate sta entry if necessary and update info */
735 sta = mesh_peer_init(sdata, mgmt->sa, &elems); 738 sta = mesh_peer_init(sdata, mgmt->sa, &elems);
736 if (!sta) { 739 if (!sta) {
737 mpl_dbg("Mesh plink: failed to init peer!\n"); 740 mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
738 rcu_read_unlock(); 741 rcu_read_unlock();
739 return; 742 return;
740 } 743 }
741 } 744 }
742 745
743 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", 746 mpl_dbg(sdata,
747 "Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
744 mgmt->sa, mplstates[sta->plink_state], 748 mgmt->sa, mplstates[sta->plink_state],
745 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), 749 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
746 event); 750 event);
@@ -851,7 +855,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
851 mesh_plink_inc_estab_count(sdata); 855 mesh_plink_inc_estab_count(sdata);
852 changed |= mesh_set_ht_prot_mode(sdata); 856 changed |= mesh_set_ht_prot_mode(sdata);
853 changed |= BSS_CHANGED_BEACON; 857 changed |= BSS_CHANGED_BEACON;
854 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 858 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
855 sta->sta.addr); 859 sta->sta.addr);
856 break; 860 break;
857 default: 861 default:
@@ -887,7 +891,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
887 mesh_plink_inc_estab_count(sdata); 891 mesh_plink_inc_estab_count(sdata);
888 changed |= mesh_set_ht_prot_mode(sdata); 892 changed |= mesh_set_ht_prot_mode(sdata);
889 changed |= BSS_CHANGED_BEACON; 893 changed |= BSS_CHANGED_BEACON;
890 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 894 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
891 sta->sta.addr); 895 sta->sta.addr);
892 mesh_plink_frame_tx(sdata, 896 mesh_plink_frame_tx(sdata,
893 WLAN_SP_MESH_PEERING_CONFIRM, 897 WLAN_SP_MESH_PEERING_CONFIRM,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 38d30e8ce6dc..accfa00ffcdf 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -12,13 +12,6 @@
12#include "mesh.h" 12#include "mesh.h"
13#include "driver-ops.h" 13#include "driver-ops.h"
14 14
15#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
16#define msync_dbg(fmt, args...) \
17 printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
18#else
19#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22/* This is not in the standard. It represents a tolerable tbtt drift below 15/* This is not in the standard. It represents a tolerable tbtt drift below
23 * which we do no TSF adjustment. 16 * which we do no TSF adjustment.
24 */ 17 */
@@ -65,14 +58,14 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
65 spin_lock_bh(&ifmsh->sync_offset_lock); 58 spin_lock_bh(&ifmsh->sync_offset_lock);
66 59
67 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { 60 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
68 msync_dbg("TBTT : max clockdrift=%lld; adjusting", 61 msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
69 (long long) ifmsh->sync_offset_clockdrift_max); 62 (long long) ifmsh->sync_offset_clockdrift_max);
70 tsfdelta = -ifmsh->sync_offset_clockdrift_max; 63 tsfdelta = -ifmsh->sync_offset_clockdrift_max;
71 ifmsh->sync_offset_clockdrift_max = 0; 64 ifmsh->sync_offset_clockdrift_max = 0;
72 } else { 65 } else {
73 msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu", 66 msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
74 (long long) ifmsh->sync_offset_clockdrift_max, 67 (long long) ifmsh->sync_offset_clockdrift_max,
75 (unsigned long long) beacon_int_fraction); 68 (unsigned long long) beacon_int_fraction);
76 tsfdelta = -beacon_int_fraction; 69 tsfdelta = -beacon_int_fraction;
77 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; 70 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
78 } 71 }
@@ -120,7 +113,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
120 113
121 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { 114 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
122 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 115 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
123 msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr); 116 msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr);
124 goto no_sync; 117 goto no_sync;
125 } 118 }
126 119
@@ -169,7 +162,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
169 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 162 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
170 s64 t_clockdrift = sta->t_offset_setpoint 163 s64 t_clockdrift = sta->t_offset_setpoint
171 - sta->t_offset; 164 - sta->t_offset;
172 msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld", 165 msync_dbg(sdata,
166 "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
173 sta->sta.addr, 167 sta->sta.addr,
174 (long long) sta->t_offset, 168 (long long) sta->t_offset,
175 (long long) 169 (long long)
@@ -178,7 +172,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
178 172
179 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || 173 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
180 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { 174 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
181 msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset", 175 msync_dbg(sdata,
176 "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
182 sta->sta.addr, 177 sta->sta.addr,
183 (long long) t_clockdrift); 178 (long long) t_clockdrift);
184 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 179 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
@@ -197,8 +192,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
197 } else { 192 } else {
198 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN; 193 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
199 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 194 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
200 msync_dbg("STA %pM : offset was invalid, " 195 msync_dbg(sdata,
201 " sta->t_offset=%lld", 196 "STA %pM : offset was invalid, sta->t_offset=%lld\n",
202 sta->sta.addr, 197 sta->sta.addr,
203 (long long) sta->t_offset); 198 (long long) sta->t_offset);
204 rcu_read_unlock(); 199 rcu_read_unlock();
@@ -226,17 +221,15 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
226 * to the driver tsf setter, we punt 221 * to the driver tsf setter, we punt
227 * the tsf adjustment to the mesh tasklet 222 * the tsf adjustment to the mesh tasklet
228 */ 223 */
229 msync_dbg("TBTT : kicking off TBTT " 224 msync_dbg(sdata,
230 "adjustment with " 225 "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
231 "clockdrift_max=%lld", 226 ifmsh->sync_offset_clockdrift_max);
232 ifmsh->sync_offset_clockdrift_max);
233 set_bit(MESH_WORK_DRIFT_ADJUST, 227 set_bit(MESH_WORK_DRIFT_ADJUST,
234 &ifmsh->wrkq_flags); 228 &ifmsh->wrkq_flags);
235 } else { 229 } else {
236 msync_dbg("TBTT : max clockdrift=%lld; " 230 msync_dbg(sdata,
237 "too small to adjust", 231 "TBTT : max clockdrift=%lld; too small to adjust\n",
238 (long long) 232 (long long)ifmsh->sync_offset_clockdrift_max);
239 ifmsh->sync_offset_clockdrift_max);
240 ifmsh->sync_offset_clockdrift_max = 0; 233 ifmsh->sync_offset_clockdrift_max = 0;
241 } 234 }
242 spin_unlock_bh(&ifmsh->sync_offset_lock); 235 spin_unlock_bh(&ifmsh->sync_offset_lock);
@@ -268,7 +261,7 @@ static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
268 const u8 *oui; 261 const u8 *oui;
269 262
270 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); 263 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
271 msync_dbg("called mesh_sync_vendor_rx_bcn_presp"); 264 msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
272 oui = mesh_get_vendor_oui(sdata); 265 oui = mesh_get_vendor_oui(sdata);
273 /* here you would implement the vendor offset tracking for this oui */ 266 /* here you would implement the vendor offset tracking for this oui */
274} 267}
@@ -278,7 +271,7 @@ static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
278 const u8 *oui; 271 const u8 *oui;
279 272
280 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); 273 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
281 msync_dbg("called mesh_sync_vendor_adjust_tbtt"); 274 msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
282 oui = mesh_get_vendor_oui(sdata); 275 oui = mesh_get_vendor_oui(sdata);
283 /* here you would implement the vendor tsf adjustment for this oui */ 276 /* here you would implement the vendor tsf adjustment for this oui */
284} 277}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 91d84cc77bbf..cef0c9e79aba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -258,12 +258,11 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
258} 258}
259 259
260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, 260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
261 struct sk_buff *skb, const u8 *ht_oper_ie, 261 struct sk_buff *skb, u8 ap_ht_param,
262 struct ieee80211_supported_band *sband, 262 struct ieee80211_supported_band *sband,
263 struct ieee80211_channel *channel, 263 struct ieee80211_channel *channel,
264 enum ieee80211_smps_mode smps) 264 enum ieee80211_smps_mode smps)
265{ 265{
266 struct ieee80211_ht_operation *ht_oper;
267 u8 *pos; 266 u8 *pos;
268 u32 flags = channel->flags; 267 u32 flags = channel->flags;
269 u16 cap; 268 u16 cap;
@@ -271,21 +270,13 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
271 270
272 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); 271 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
273 272
274 if (!ht_oper_ie)
275 return;
276
277 if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
278 return;
279
280 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); 273 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
281 ieee80211_apply_htcap_overrides(sdata, &ht_cap); 274 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
282 275
283 ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
284
285 /* determine capability flags */ 276 /* determine capability flags */
286 cap = ht_cap.cap; 277 cap = ht_cap.cap;
287 278
288 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 279 switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
289 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 280 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
290 if (flags & IEEE80211_CHAN_NO_HT40PLUS) { 281 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
291 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 282 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -509,7 +500,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
509 } 500 }
510 501
511 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 502 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
512 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie, 503 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
513 sband, local->oper_channel, ifmgd->ap_smps); 504 sband, local->oper_channel, ifmgd->ap_smps);
514 505
515 /* if present, add any custom non-vendor IEs that go after HT */ 506 /* if present, add any custom non-vendor IEs that go after HT */
@@ -550,6 +541,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
550 memcpy(pos, assoc_data->ie + offset, noffset - offset); 541 memcpy(pos, assoc_data->ie + offset, noffset - offset);
551 } 542 }
552 543
544 drv_mgd_prepare_tx(local, sdata);
545
553 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 546 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
554 ieee80211_tx_skb(sdata, skb); 547 ieee80211_tx_skb(sdata, skb);
555} 548}
@@ -589,6 +582,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
589 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) 582 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
590 IEEE80211_SKB_CB(skb)->flags |= 583 IEEE80211_SKB_CB(skb)->flags |=
591 IEEE80211_TX_INTFL_DONT_ENCRYPT; 584 IEEE80211_TX_INTFL_DONT_ENCRYPT;
585
586 drv_mgd_prepare_tx(local, sdata);
587
592 ieee80211_tx_skb(sdata, skb); 588 ieee80211_tx_skb(sdata, skb);
593 } 589 }
594} 590}
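
Both the association request and the deauth/disassoc path now call drv_mgd_prepare_tx() before queueing the frame, giving the driver a chance to wake the hardware or settle on-channel ahead of a management frame it must not miss. A simplified sketch of mac80211's drv_* wrapper pattern; the structures here are illustrative, not the real struct ieee80211_ops:

#include <stdio.h>

struct local;                                     /* forward declaration */
struct ops {
    void (*mgd_prepare_tx)(struct local *local);  /* optional driver hook */
};
struct local { const struct ops *ops; };

/* Wrapper in the drv_* style: call into the driver only if it opted in. */
static void drv_mgd_prepare_tx(struct local *local)
{
    if (local->ops->mgd_prepare_tx)
        local->ops->mgd_prepare_tx(local);
}

static void my_prepare_tx(struct local *local)
{
    (void)local;
    puts("driver: waking hardware before mgmt TX");
}

int main(void)
{
    const struct ops ops = { .mgd_prepare_tx = my_prepare_tx };
    struct local local = { .ops = &ops };

    drv_mgd_prepare_tx(&local);   /* runs before assoc/deauth TX */
    return 0;
}
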
@@ -911,9 +907,6 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
911 if (!mgd->associated) 907 if (!mgd->associated)
912 return false; 908 return false;
913 909
914 if (!mgd->associated->beacon_ies)
915 return false;
916
917 if (mgd->flags & (IEEE80211_STA_BEACON_POLL | 910 if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
918 IEEE80211_STA_CONNECTION_POLL)) 911 IEEE80211_STA_CONNECTION_POLL))
919 return false; 912 return false;
@@ -939,11 +932,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
939 return; 932 return;
940 } 933 }
941 934
942 if (!list_empty(&local->work_list)) {
943 local->ps_sdata = NULL;
944 goto change;
945 }
946
947 list_for_each_entry(sdata, &local->interfaces, list) { 935 list_for_each_entry(sdata, &local->interfaces, list) {
948 if (!ieee80211_sdata_running(sdata)) 936 if (!ieee80211_sdata_running(sdata))
949 continue; 937 continue;
@@ -1016,7 +1004,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1016 local->ps_sdata = NULL; 1004 local->ps_sdata = NULL;
1017 } 1005 }
1018 1006
1019 change:
1020 ieee80211_change_ps(local); 1007 ieee80211_change_ps(local);
1021} 1008}
1022 1009
@@ -1121,7 +1108,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
1121} 1108}
1122 1109
1123/* MLME */ 1110/* MLME */
1124static void ieee80211_sta_wmm_params(struct ieee80211_local *local, 1111static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
1125 struct ieee80211_sub_if_data *sdata, 1112 struct ieee80211_sub_if_data *sdata,
1126 u8 *wmm_param, size_t wmm_param_len) 1113 u8 *wmm_param, size_t wmm_param_len)
1127{ 1114{
@@ -1132,23 +1119,23 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1132 u8 *pos, uapsd_queues = 0; 1119 u8 *pos, uapsd_queues = 0;
1133 1120
1134 if (!local->ops->conf_tx) 1121 if (!local->ops->conf_tx)
1135 return; 1122 return false;
1136 1123
1137 if (local->hw.queues < IEEE80211_NUM_ACS) 1124 if (local->hw.queues < IEEE80211_NUM_ACS)
1138 return; 1125 return false;
1139 1126
1140 if (!wmm_param) 1127 if (!wmm_param)
1141 return; 1128 return false;
1142 1129
1143 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 1130 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
1144 return; 1131 return false;
1145 1132
1146 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 1133 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
1147 uapsd_queues = ifmgd->uapsd_queues; 1134 uapsd_queues = ifmgd->uapsd_queues;
1148 1135
1149 count = wmm_param[6] & 0x0f; 1136 count = wmm_param[6] & 0x0f;
1150 if (count == ifmgd->wmm_last_param_set) 1137 if (count == ifmgd->wmm_last_param_set)
1151 return; 1138 return false;
1152 ifmgd->wmm_last_param_set = count; 1139 ifmgd->wmm_last_param_set = count;
1153 1140
1154 pos = wmm_param + 8; 1141 pos = wmm_param + 8;
@@ -1156,7 +1143,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1156 1143
1157 memset(&params, 0, sizeof(params)); 1144 memset(&params, 0, sizeof(params));
1158 1145
1159 local->wmm_acm = 0; 1146 sdata->wmm_acm = 0;
1160 for (; left >= 4; left -= 4, pos += 4) { 1147 for (; left >= 4; left -= 4, pos += 4) {
1161 int aci = (pos[0] >> 5) & 0x03; 1148 int aci = (pos[0] >> 5) & 0x03;
1162 int acm = (pos[0] >> 4) & 0x01; 1149 int acm = (pos[0] >> 4) & 0x01;
@@ -1167,21 +1154,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1167 case 1: /* AC_BK */ 1154 case 1: /* AC_BK */
1168 queue = 3; 1155 queue = 3;
1169 if (acm) 1156 if (acm)
1170 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 1157 sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
1171 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 1158 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
1172 uapsd = true; 1159 uapsd = true;
1173 break; 1160 break;
1174 case 2: /* AC_VI */ 1161 case 2: /* AC_VI */
1175 queue = 1; 1162 queue = 1;
1176 if (acm) 1163 if (acm)
1177 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 1164 sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
1178 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 1165 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
1179 uapsd = true; 1166 uapsd = true;
1180 break; 1167 break;
1181 case 3: /* AC_VO */ 1168 case 3: /* AC_VO */
1182 queue = 0; 1169 queue = 0;
1183 if (acm) 1170 if (acm)
1184 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 1171 sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
1185 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 1172 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
1186 uapsd = true; 1173 uapsd = true;
1187 break; 1174 break;
@@ -1189,7 +1176,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1189 default: 1176 default:
1190 queue = 2; 1177 queue = 2;
1191 if (acm) 1178 if (acm)
1192 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 1179 sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
1193 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 1180 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
1194 uapsd = true; 1181 uapsd = true;
1195 break; 1182 break;
@@ -1201,23 +1188,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1201 params.txop = get_unaligned_le16(pos + 2); 1188 params.txop = get_unaligned_le16(pos + 2);
1202 params.uapsd = uapsd; 1189 params.uapsd = uapsd;
1203 1190
1204#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1191 mlme_dbg(sdata,
1205 wiphy_debug(local->hw.wiphy, 1192 "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
1206 "WMM queue=%d aci=%d acm=%d aifs=%d " 1193 queue, aci, acm,
1207 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n", 1194 params.aifs, params.cw_min, params.cw_max,
1208 queue, aci, acm, 1195 params.txop, params.uapsd);
1209 params.aifs, params.cw_min, params.cw_max,
1210 params.txop, params.uapsd);
1211#endif
1212 sdata->tx_conf[queue] = params; 1196 sdata->tx_conf[queue] = params;
1213 if (drv_conf_tx(local, sdata, queue, &params)) 1197 if (drv_conf_tx(local, sdata, queue, &params))
1214 wiphy_debug(local->hw.wiphy, 1198 sdata_err(sdata,
1215 "failed to set TX queue parameters for queue %d\n", 1199 "failed to set TX queue parameters for queue %d\n",
1216 queue); 1200 queue);
1217 } 1201 }
1218 1202
1219 /* enable WMM or activate new settings */ 1203 /* enable WMM or activate new settings */
1220 sdata->vif.bss_conf.qos = true; 1204 sdata->vif.bss_conf.qos = true;
1205 return true;
1221} 1206}
1222 1207
1223static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) 1208static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
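
Two things change in ieee80211_sta_wmm_params(): the admission-control mask moves from the device-wide local->wmm_acm to per-interface sdata->wmm_acm, and the function returns bool so the beacon path can raise BSS_CHANGED_QOS only when a new parameter set was actually applied. For reference, a sketch of the ACI-to-queue/ACM-bit mapping used in the cases above; the mask is indexed by 802.11 user priority, two bits per access category:

#include <stdint.h>
#include <stdio.h>

static void apply_aci(int aci, int acm, uint8_t *wmm_acm, int *queue)
{
    switch (aci) {
    case 1:                                       /* AC_BK */
        *queue = 3;
        if (acm)
            *wmm_acm |= (1 << 1) | (1 << 2);      /* BK/- */
        break;
    case 2:                                       /* AC_VI */
        *queue = 1;
        if (acm)
            *wmm_acm |= (1 << 4) | (1 << 5);      /* CL/VI */
        break;
    case 3:                                       /* AC_VO */
        *queue = 0;
        if (acm)
            *wmm_acm |= (1 << 6) | (1 << 7);      /* VO/NC */
        break;
    default:                                      /* AC_BE */
        *queue = 2;
        if (acm)
            *wmm_acm |= (1 << 0) | (1 << 3);      /* BE/EE */
        break;
    }
}

int main(void)
{
    uint8_t acm_mask = 0;
    int queue;

    apply_aci(3, 1, &acm_mask, &queue);  /* AC_VO with ACM required */
    printf("queue=%d acm_mask=%#x\n", queue, acm_mask); /* queue=0 0xc0 */
    return 0;
}
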
@@ -1284,13 +1269,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1284 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1269 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1285 1270
1286 bss_info_changed |= BSS_CHANGED_ASSOC; 1271 bss_info_changed |= BSS_CHANGED_ASSOC;
1287 /* set timing information */
1288 bss_conf->beacon_int = cbss->beacon_interval;
1289 bss_conf->last_tsf = cbss->tsf;
1290
1291 bss_info_changed |= BSS_CHANGED_BEACON_INT;
1292 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 1272 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
1293 cbss->capability, bss->has_erp_value, bss->erp_value); 1273 bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);
1294 1274
1295 sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec( 1275 sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
1296 IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int)); 1276 IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
@@ -1342,7 +1322,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1342 struct ieee80211_local *local = sdata->local; 1322 struct ieee80211_local *local = sdata->local;
1343 struct sta_info *sta; 1323 struct sta_info *sta;
1344 u32 changed = 0; 1324 u32 changed = 0;
1345 u8 bssid[ETH_ALEN];
1346 1325
1347 ASSERT_MGD_MTX(ifmgd); 1326 ASSERT_MGD_MTX(ifmgd);
1348 1327
@@ -1352,10 +1331,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1352 if (WARN_ON(!ifmgd->associated)) 1331 if (WARN_ON(!ifmgd->associated))
1353 return; 1332 return;
1354 1333
1355 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); 1334 ieee80211_stop_poll(sdata);
1356 1335
1357 ifmgd->associated = NULL; 1336 ifmgd->associated = NULL;
1358 memset(ifmgd->bssid, 0, ETH_ALEN);
1359 1337
1360 /* 1338 /*
1361 * we need to commit the associated = NULL change because the 1339 * we need to commit the associated = NULL change because the
@@ -1375,22 +1353,40 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1375 netif_carrier_off(sdata->dev); 1353 netif_carrier_off(sdata->dev);
1376 1354
1377 mutex_lock(&local->sta_mtx); 1355 mutex_lock(&local->sta_mtx);
1378 sta = sta_info_get(sdata, bssid); 1356 sta = sta_info_get(sdata, ifmgd->bssid);
1379 if (sta) { 1357 if (sta) {
1380 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1358 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1381 ieee80211_sta_tear_down_BA_sessions(sta, tx); 1359 ieee80211_sta_tear_down_BA_sessions(sta, tx);
1382 } 1360 }
1383 mutex_unlock(&local->sta_mtx); 1361 mutex_unlock(&local->sta_mtx);
1384 1362
1363 /*
1364 * if we want to get out of ps before disassoc (why?) we have
1365 * to do it before sending disassoc, as otherwise the null-packet
1366 * won't be valid.
1367 */
1368 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1369 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1370 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1371 }
1372 local->ps_sdata = NULL;
1373
1374 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
1375 if (tx)
1376 drv_flush(local, false);
1377
1385 /* deauthenticate/disassociate now */ 1378 /* deauthenticate/disassociate now */
1386 if (tx || frame_buf) 1379 if (tx || frame_buf)
1387 ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason, 1380 ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
1388 tx, frame_buf); 1381 reason, tx, frame_buf);
1389 1382
1390 /* flush out frame */ 1383 /* flush out frame */
1391 if (tx) 1384 if (tx)
1392 drv_flush(local, false); 1385 drv_flush(local, false);
1393 1386
1387 /* clear bssid only after building the needed mgmt frames */
1388 memset(ifmgd->bssid, 0, ETH_ALEN);
1389
1394 /* remove AP and TDLS peers */ 1390 /* remove AP and TDLS peers */
1395 sta_info_flush(local, sdata); 1391 sta_info_flush(local, sdata);
1396 1392
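
The comment added in this hunk states the ordering constraint that drives the whole rework: power save must be off before these frames go out (otherwise the null-func is invalid), pending frames such as DELBA must be flushed before the deauth, and ifmgd->bssid may only be zeroed after the management frames were built from it. A stub sketch of the required sequence:

#include <stdio.h>

static void ps_disable(void)  { puts("1: leave power save"); }
static void flush_tx(void)    { puts("2/4: flush pending TX"); }
static void send_deauth(void) { puts("3: build+send deauth (reads bssid)"); }
static void clear_bssid(void) { puts("5: clear bssid"); }

int main(void)
{
    ps_disable();    /* otherwise the PS null-func would be invalid */
    flush_tx();      /* push out e.g. DELBA before the deauth */
    send_deauth();   /* still needs ifmgd->bssid intact */
    flush_tx();      /* make sure the deauth itself left the queue */
    clear_bssid();   /* safe only after the frames were built */
    return 0;
}
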
@@ -1410,12 +1406,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1410 del_timer_sync(&local->dynamic_ps_timer); 1406 del_timer_sync(&local->dynamic_ps_timer);
1411 cancel_work_sync(&local->dynamic_ps_enable_work); 1407 cancel_work_sync(&local->dynamic_ps_enable_work);
1412 1408
1413 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1414 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1415 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1416 }
1417 local->ps_sdata = NULL;
1418
1419 /* Disable ARP filtering */ 1409 /* Disable ARP filtering */
1420 if (sdata->vif.bss_conf.arp_filter_enabled) { 1410 if (sdata->vif.bss_conf.arp_filter_enabled) {
1421 sdata->vif.bss_conf.arp_filter_enabled = false; 1411 sdata->vif.bss_conf.arp_filter_enabled = false;
@@ -1580,11 +1570,12 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1580 goto out; 1570 goto out;
1581 } 1571 }
1582 1572
1583#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1584 if (beacon) 1573 if (beacon)
1585 net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n", 1574 mlme_dbg_ratelimited(sdata,
1586 sdata->name); 1575 "detected beacon loss from AP - sending probe request\n");
1587#endif 1576
1577 ieee80211_cqm_rssi_notify(&sdata->vif,
1578 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
1588 1579
1589 /* 1580 /*
1590 * The driver/our work has already reported this event or the 1581 * The driver/our work has already reported this event or the
@@ -1626,6 +1617,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1626{ 1617{
1627 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1618 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1628 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1619 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1620 struct cfg80211_bss *cbss;
1629 struct sk_buff *skb; 1621 struct sk_buff *skb;
1630 const u8 *ssid; 1622 const u8 *ssid;
1631 int ssid_len; 1623 int ssid_len;
@@ -1635,16 +1627,22 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1635 1627
1636 ASSERT_MGD_MTX(ifmgd); 1628 ASSERT_MGD_MTX(ifmgd);
1637 1629
1638 if (!ifmgd->associated) 1630 if (ifmgd->associated)
1631 cbss = ifmgd->associated;
1632 else if (ifmgd->auth_data)
1633 cbss = ifmgd->auth_data->bss;
1634 else if (ifmgd->assoc_data)
1635 cbss = ifmgd->assoc_data->bss;
1636 else
1639 return NULL; 1637 return NULL;
1640 1638
1641 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1639 ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
1642 if (WARN_ON_ONCE(ssid == NULL)) 1640 if (WARN_ON_ONCE(ssid == NULL))
1643 ssid_len = 0; 1641 ssid_len = 0;
1644 else 1642 else
1645 ssid_len = ssid[1]; 1643 ssid_len = ssid[1];
1646 1644
1647 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, 1645 skb = ieee80211_build_probe_req(sdata, cbss->bssid,
1648 (u32) -1, ssid + 2, ssid_len, 1646 (u32) -1, ssid + 2, ssid_len,
1649 NULL, 0, true); 1647 NULL, 0, true);
1650 1648
@@ -1667,8 +1665,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1667 1665
1668 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); 1666 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1669 1667
1670 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n", 1668 sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
1671 sdata->name, bssid);
1672 1669
1673 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 1670 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1674 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1671 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1764,6 +1761,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1764 if (!elems.challenge) 1761 if (!elems.challenge)
1765 return; 1762 return;
1766 auth_data->expected_transaction = 4; 1763 auth_data->expected_transaction = 4;
1764 drv_mgd_prepare_tx(sdata->local, sdata);
1767 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 1765 ieee80211_send_auth(sdata, 3, auth_data->algorithm,
1768 elems.challenge - 2, elems.challenge_len + 2, 1766 elems.challenge - 2, elems.challenge_len + 2,
1769 auth_data->bss->bssid, auth_data->bss->bssid, 1767 auth_data->bss->bssid, auth_data->bss->bssid,
@@ -1802,9 +1800,10 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1802 return RX_MGMT_NONE; 1800 return RX_MGMT_NONE;
1803 1801
1804 if (status_code != WLAN_STATUS_SUCCESS) { 1802 if (status_code != WLAN_STATUS_SUCCESS) {
1805 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n", 1803 sdata_info(sdata, "%pM denied authentication (status %d)\n",
1806 sdata->name, mgmt->sa, status_code); 1804 mgmt->sa, status_code);
1807 goto out; 1805 ieee80211_destroy_auth_data(sdata, false);
1806 return RX_MGMT_CFG80211_RX_AUTH;
1808 } 1807 }
1809 1808
1810 switch (ifmgd->auth_data->algorithm) { 1809 switch (ifmgd->auth_data->algorithm) {
@@ -1825,8 +1824,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1825 return RX_MGMT_NONE; 1824 return RX_MGMT_NONE;
1826 } 1825 }
1827 1826
1828 printk(KERN_DEBUG "%s: authenticated\n", sdata->name); 1827 sdata_info(sdata, "authenticated\n");
1829 out:
1830 ifmgd->auth_data->done = true; 1828 ifmgd->auth_data->done = true;
1831 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; 1829 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
1832 run_again(ifmgd, ifmgd->auth_data->timeout); 1830 run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -1839,8 +1837,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1839 goto out_err; 1837 goto out_err;
1840 } 1838 }
1841 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { 1839 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
1842 printk(KERN_DEBUG "%s: failed moving %pM to auth\n", 1840 sdata_info(sdata, "failed moving %pM to auth\n", bssid);
1843 sdata->name, bssid);
1844 goto out_err; 1841 goto out_err;
1845 } 1842 }
1846 mutex_unlock(&sdata->local->sta_mtx); 1843 mutex_unlock(&sdata->local->sta_mtx);
@@ -1874,8 +1871,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1874 1871
1875 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1872 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1876 1873
1877 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 1874 sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
1878 sdata->name, bssid, reason_code); 1875 bssid, reason_code);
1879 1876
1880 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 1877 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1881 1878
@@ -1905,8 +1902,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1905 1902
1906 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1903 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1907 1904
1908 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1905 sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
1909 sdata->name, mgmt->sa, reason_code); 1906 mgmt->sa, reason_code);
1910 1907
1911 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 1908 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1912 1909
@@ -1998,17 +1995,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
1998 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1995 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1999 1996
2000 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1997 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
2001 printk(KERN_DEBUG 1998 sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
2002 "%s: invalid AID value 0x%x; bits 15:14 not set\n", 1999 aid);
2003 sdata->name, aid);
2004 aid &= ~(BIT(15) | BIT(14)); 2000 aid &= ~(BIT(15) | BIT(14));
2005 2001
2006 ifmgd->broken_ap = false; 2002 ifmgd->broken_ap = false;
2007 2003
2008 if (aid == 0 || aid > IEEE80211_MAX_AID) { 2004 if (aid == 0 || aid > IEEE80211_MAX_AID) {
2009 printk(KERN_DEBUG 2005 sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n",
2010 "%s: invalid AID value %d (out of range), turn off PS\n", 2006 aid);
2011 sdata->name, aid);
2012 aid = 0; 2007 aid = 0;
2013 ifmgd->broken_ap = true; 2008 ifmgd->broken_ap = true;
2014 } 2009 }
@@ -2017,8 +2012,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2017 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 2012 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
2018 2013
2019 if (!elems.supp_rates) { 2014 if (!elems.supp_rates) {
2020 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 2015 sdata_info(sdata, "no SuppRates element in AssocResp\n");
2021 sdata->name);
2022 return false; 2016 return false;
2023 } 2017 }
2024 2018
@@ -2058,9 +2052,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2058 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 2052 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2059 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 2053 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
2060 if (err) { 2054 if (err) {
2061 printk(KERN_DEBUG 2055 sdata_info(sdata,
2062 "%s: failed to move station %pM to desired state\n", 2056 "failed to move station %pM to desired state\n",
2063 sdata->name, sta->sta.addr); 2057 sta->sta.addr);
2064 WARN_ON(__sta_info_destroy(sta)); 2058 WARN_ON(__sta_info_destroy(sta));
2065 mutex_unlock(&sdata->local->sta_mtx); 2059 mutex_unlock(&sdata->local->sta_mtx);
2066 return false; 2060 return false;
@@ -2143,10 +2137,10 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2143 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 2137 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2144 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 2138 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
2145 2139
2146 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x " 2140 sdata_info(sdata,
2147 "status=%d aid=%d)\n", 2141 "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
2148 sdata->name, reassoc ? "Rea" : "A", mgmt->sa, 2142 reassoc ? "Rea" : "A", mgmt->sa,
2149 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 2143 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2150 2144
2151 pos = mgmt->u.assoc_resp.variable; 2145 pos = mgmt->u.assoc_resp.variable;
2152 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 2146 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -2157,9 +2151,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2157 u32 tu, ms; 2151 u32 tu, ms;
2158 tu = get_unaligned_le32(elems.timeout_int + 1); 2152 tu = get_unaligned_le32(elems.timeout_int + 1);
2159 ms = tu * 1024 / 1000; 2153 ms = tu * 1024 / 1000;
2160 printk(KERN_DEBUG "%s: %pM rejected association temporarily; " 2154 sdata_info(sdata,
2161 "comeback duration %u TU (%u ms)\n", 2155 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
2162 sdata->name, mgmt->sa, tu, ms); 2156 mgmt->sa, tu, ms);
2163 assoc_data->timeout = jiffies + msecs_to_jiffies(ms); 2157 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2164 if (ms > IEEE80211_ASSOC_TIMEOUT) 2158 if (ms > IEEE80211_ASSOC_TIMEOUT)
2165 run_again(ifmgd, assoc_data->timeout); 2159 run_again(ifmgd, assoc_data->timeout);
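
The comeback-time conversion in this hunk is easy to misread: one TU is 1024 microseconds, not a millisecond, so the TU count scales by 1024/1000. A one-line check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t tu = 3000;              /* example comeback duration */
    uint32_t ms = tu * 1024 / 1000;  /* 1 TU = 1024 us -> 3072 ms */

    printf("%u TU = %u ms\n", (unsigned)tu, (unsigned)ms);
    return 0;
}
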
@@ -2169,19 +2163,17 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2169 *bss = assoc_data->bss; 2163 *bss = assoc_data->bss;
2170 2164
2171 if (status_code != WLAN_STATUS_SUCCESS) { 2165 if (status_code != WLAN_STATUS_SUCCESS) {
2172 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n", 2166 sdata_info(sdata, "%pM denied association (code=%d)\n",
2173 sdata->name, mgmt->sa, status_code); 2167 mgmt->sa, status_code);
2174 ieee80211_destroy_assoc_data(sdata, false); 2168 ieee80211_destroy_assoc_data(sdata, false);
2175 } else { 2169 } else {
2176 printk(KERN_DEBUG "%s: associated\n", sdata->name);
2177
2178 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { 2170 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2179 /* oops -- internal error -- send timeout for now */ 2171 /* oops -- internal error -- send timeout for now */
2180 ieee80211_destroy_assoc_data(sdata, true); 2172 ieee80211_destroy_assoc_data(sdata, false);
2181 sta_info_destroy_addr(sdata, mgmt->bssid);
2182 cfg80211_put_bss(*bss); 2173 cfg80211_put_bss(*bss);
2183 return RX_MGMT_CFG80211_ASSOC_TIMEOUT; 2174 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2184 } 2175 }
2176 sdata_info(sdata, "associated\n");
2185 2177
2186 /* 2178 /*
2187 * destroy assoc_data afterwards, as otherwise an idle 2179 * destroy assoc_data afterwards, as otherwise an idle
@@ -2281,7 +2273,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
 	if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
 	    ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
 		/* got probe response, continue with auth */
-		printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+		sdata_info(sdata, "direct probe responded\n");
 		ifmgd->auth_data->tries = 0;
 		ifmgd->auth_data->timeout = jiffies;
 		run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -2417,10 +2409,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
-				    sdata->name);
-#endif
+		mlme_dbg_ratelimited(sdata,
+				     "cancelling probereq poll due to a received beacon\n");
 		mutex_lock(&local->mtx);
 		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
 		ieee80211_run_deferred_scan(local);
@@ -2446,14 +2436,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 	directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
 					   ifmgd->aid);
 
-	if (ncrc != ifmgd->beacon_crc || !ifmgd->beacon_crc_valid) {
-		ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
-				      true);
-
-		ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
-					 elems.wmm_param_len);
-	}
-
 	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
 		if (directed_tim) {
 			if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -2484,6 +2466,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 	ifmgd->beacon_crc = ncrc;
 	ifmgd->beacon_crc_valid = true;
 
+	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
+			      true);
+
+	if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+				     elems.wmm_param_len))
+		changed |= BSS_CHANGED_QOS;
+
 	if (elems.erp_info && elems.erp_info_len >= 1) {
 		erp_valid = true;
 		erp_value = elems.erp_info[0];
@@ -2612,8 +2601,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-	ieee80211_stop_poll(sdata);
-
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
 			       false, frame_buf);
 	mutex_unlock(&ifmgd->mtx);
@@ -2645,8 +2632,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 	auth_data->tries++;
 
 	if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
-		printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
-		       sdata->name, auth_data->bss->bssid);
+		sdata_info(sdata, "authentication with %pM timed out\n",
+			   auth_data->bss->bssid);
 
 		/*
 		 * Most likely AP is not in the range so remove the
@@ -2657,10 +2644,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 		return -ETIMEDOUT;
 	}
 
+	drv_mgd_prepare_tx(local, sdata);
+
 	if (auth_data->bss->proberesp_ies) {
-		printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
-		       sdata->name, auth_data->bss->bssid, auth_data->tries,
-		       IEEE80211_AUTH_MAX_TRIES);
+		sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
+			   auth_data->bss->bssid, auth_data->tries,
+			   IEEE80211_AUTH_MAX_TRIES);
 
 		auth_data->expected_transaction = 2;
 		ieee80211_send_auth(sdata, 1, auth_data->algorithm,
@@ -2670,9 +2659,9 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 	} else {
 		const u8 *ssidie;
 
-		printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
-		       sdata->name, auth_data->bss->bssid, auth_data->tries,
-		       IEEE80211_AUTH_MAX_TRIES);
+		sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
+			   auth_data->bss->bssid, auth_data->tries,
+			   IEEE80211_AUTH_MAX_TRIES);
 
 		ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
 		if (!ssidie)
@@ -2700,8 +2689,8 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 
 	assoc_data->tries++;
 	if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
-		printk(KERN_DEBUG "%s: association with %pM timed out\n",
-		       sdata->name, assoc_data->bss->bssid);
+		sdata_info(sdata, "association with %pM timed out\n",
+			   assoc_data->bss->bssid);
 
 		/*
 		 * Most likely AP is not in the range so remove the
@@ -2712,9 +2701,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 		return -ETIMEDOUT;
 	}
 
-	printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
-	       sdata->name, assoc_data->bss->bssid, assoc_data->tries,
-	       IEEE80211_ASSOC_MAX_TRIES);
+	sdata_info(sdata, "associate with %pM (try %d/%d)\n",
+		   assoc_data->bss->bssid, assoc_data->tries,
+		   IEEE80211_ASSOC_MAX_TRIES);
 	ieee80211_send_assoc(sdata);
 
 	assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
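
Both ieee80211_probe_auth() and ieee80211_do_assoc() above share one retry pattern: increment the counter, give up with -ETIMEDOUT once the budget is spent, otherwise transmit and arm a fresh timeout. A compact user-space sketch of that step function (hypothetical names, assuming a three-try budget):

#include <errno.h>
#include <stdio.h>

#define MAX_TRIES 3

struct attempt { int tries; };

/* One step of the try/timeout state machine: returns 0 while a frame
 * was (re)sent, -ETIMEDOUT when the retry budget is exhausted. */
static int attempt_step(struct attempt *a)
{
	a->tries++;
	if (a->tries > MAX_TRIES)
		return -ETIMEDOUT;

	printf("send frame (try %d/%d), arm timeout\n", a->tries, MAX_TRIES);
	return 0;
}

int main(void)
{
	struct attempt a = { 0 };

	while (attempt_step(&a) == 0)
		;	/* in mlme.c the next step runs from the timeout worker */
	return 0;
}
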
@@ -2787,45 +2776,31 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 			ieee80211_reset_ap_probe(sdata);
 		else if (ifmgd->nullfunc_failed) {
 			if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-				wiphy_debug(local->hw.wiphy,
-					    "%s: No ack for nullfunc frame to"
-					    " AP %pM, try %d/%i\n",
-					    sdata->name, bssid,
-					    ifmgd->probe_send_count, max_tries);
-#endif
+				mlme_dbg(sdata,
+					 "No ack for nullfunc frame to AP %pM, try %d/%i\n",
+					 bssid, ifmgd->probe_send_count,
+					 max_tries);
 				ieee80211_mgd_probe_ap_send(sdata);
 			} else {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-				wiphy_debug(local->hw.wiphy,
-					    "%s: No ack for nullfunc frame to"
-					    " AP %pM, disconnecting.\n",
-					    sdata->name, bssid);
-#endif
+				mlme_dbg(sdata,
+					 "No ack for nullfunc frame to AP %pM, disconnecting.\n",
+					 bssid);
 				ieee80211_sta_connection_lost(sdata, bssid,
 					WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 			}
 		} else if (time_is_after_jiffies(ifmgd->probe_timeout))
 			run_again(ifmgd, ifmgd->probe_timeout);
 		else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-			wiphy_debug(local->hw.wiphy,
-				    "%s: Failed to send nullfunc to AP %pM"
-				    " after %dms, disconnecting.\n",
-				    sdata->name,
-				    bssid, probe_wait_ms);
-#endif
+			mlme_dbg(sdata,
+				 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
+				 bssid, probe_wait_ms);
 			ieee80211_sta_connection_lost(sdata, bssid,
 				WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 		} else if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-			wiphy_debug(local->hw.wiphy,
-				    "%s: No probe response from AP %pM"
-				    " after %dms, try %d/%i\n",
-				    sdata->name,
-				    bssid, probe_wait_ms,
-				    ifmgd->probe_send_count, max_tries);
-#endif
+			mlme_dbg(sdata,
+				 "No probe response from AP %pM after %dms, try %d/%i\n",
+				 bssid, probe_wait_ms,
+				 ifmgd->probe_send_count, max_tries);
 			ieee80211_mgd_probe_ap_send(sdata);
 		} else {
 			/*
@@ -2940,11 +2915,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 	sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
 	mutex_lock(&ifmgd->mtx);
 	if (ifmgd->associated) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		wiphy_debug(sdata->local->hw.wiphy,
-			    "%s: driver requested disconnect after resume.\n",
-			    sdata->name);
-#endif
+		mlme_dbg(sdata,
+			 "driver requested disconnect after resume\n");
 		ieee80211_sta_connection_lost(sdata,
 					      ifmgd->associated->bssid,
 					      WLAN_REASON_UNSPECIFIED);
@@ -3002,7 +2974,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
 /* scan finished notification */
 void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
 {
-	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+	struct ieee80211_sub_if_data *sdata;
 
 	/* Restart STA timers */
 	rcu_read_lock();
@@ -3032,7 +3004,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_bss *bss = (void *)cbss->priv;
-	struct sta_info *sta;
+	struct sta_info *sta = NULL;
 	bool have_sta = false;
 	int err;
 	int ht_cfreq;
@@ -3085,13 +3057,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 			 * since we look at probe response/beacon data here
 			 * it should be OK.
 			 */
-			printk(KERN_DEBUG
-			       "%s: Wrong control channel: center-freq: %d"
-			       " ht-cfreq: %d ht->primary_chan: %d"
-			       " band: %d. Disabling HT.\n",
-			       sdata->name, cbss->channel->center_freq,
-			       ht_cfreq, ht_oper->primary_chan,
-			       cbss->channel->band);
+			sdata_info(sdata,
+				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+				   cbss->channel->center_freq,
+				   ht_cfreq, ht_oper->primary_chan,
+				   cbss->channel->band);
 			ht_oper = NULL;
 		}
 	}
@@ -3115,9 +3085,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
 		/* can only fail due to HT40+/- mismatch */
 		channel_type = NL80211_CHAN_HT20;
-		printk(KERN_DEBUG
-		       "%s: disabling 40 MHz due to multi-vif mismatch\n",
-		       sdata->name);
+		sdata_info(sdata,
+			   "disabling 40 MHz due to multi-vif mismatch\n");
 		ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
 		WARN_ON(!ieee80211_set_channel_type(local, sdata,
 						    channel_type));
@@ -3126,7 +3095,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	local->oper_channel = cbss->channel;
 	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-	if (!have_sta) {
+	if (sta) {
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
@@ -3146,9 +3115,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 		 * we can connect -- with a warning.
 		 */
 		if (!basic_rates && min_rate_index >= 0) {
-			printk(KERN_DEBUG
-			       "%s: No basic rates, using min rate instead.\n",
-			       sdata->name);
+			sdata_info(sdata,
+				   "No basic rates, using min rate instead\n");
 			basic_rates = BIT(min_rate_index);
 		}
 
@@ -3164,9 +3132,15 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 
 	memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
 
-	/* tell driver about BSSID and basic rates */
+	/* set timing information */
+	sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
+	sdata->vif.bss_conf.sync_tsf = cbss->tsf;
+	sdata->vif.bss_conf.sync_device_ts = bss->device_ts;
+
+	/* tell driver about BSSID, basic rates and timing */
 	ieee80211_bss_info_change_notify(sdata,
-		BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES);
+		BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
+		BSS_CHANGED_BEACON_INT);
 
 	if (assoc)
 		sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
@@ -3174,9 +3148,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 		err = sta_info_insert(sta);
 		sta = NULL;
 		if (err) {
-			printk(KERN_DEBUG
-			       "%s: failed to insert STA entry for the AP (error %d)\n",
-			       sdata->name, err);
+			sdata_info(sdata,
+				   "failed to insert STA entry for the AP (error %d)\n",
+				   err);
 			return err;
 		}
 	} else
@@ -3254,8 +3228,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 	if (ifmgd->associated)
 		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-	printk(KERN_DEBUG "%s: authenticate with %pM\n",
-	       sdata->name, req->bss->bssid);
+	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
 
 	err = ieee80211_prep_connection(sdata, req->bss, false);
 	if (err)
@@ -3290,7 +3263,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_bss *bss = (void *)req->bss->priv;
 	struct ieee80211_mgd_assoc_data *assoc_data;
 	struct ieee80211_supported_band *sband;
-	const u8 *ssidie;
+	const u8 *ssidie, *ht_ie;
 	int i, err;
 
 	ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
@@ -3338,11 +3311,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 	 * We can set this to true for non-11n hardware, that'll be checked
 	 * separately along with the peer capabilities.
 	 */
-	for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
+	for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
 		if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
-		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
+		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
 			ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+			netdev_info(sdata->dev,
+				    "disabling HT due to WEP/TKIP use\n");
+		}
+	}
 
 	if (req->flags & ASSOC_REQ_DISABLE_HT)
 		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@ -3350,8 +3327,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
-	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
+	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
 		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+		netdev_info(sdata->dev,
+			    "disabling HT as WMM/QoS is not supported\n");
+	}
 
 	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
 	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
@@ -3377,8 +3357,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 			(local->hw.queues >= IEEE80211_NUM_ACS);
 	assoc_data->supp_rates = bss->supp_rates;
 	assoc_data->supp_rates_len = bss->supp_rates_len;
-	assoc_data->ht_operation_ie =
-		ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+
+	ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+	if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
+		assoc_data->ap_ht_param =
+			((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
+	else
+		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 
 	if (bss->wmm_used && bss->uapsd_supported &&
 	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
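
The ht_ie handling added above leans on the standard information-element layout: byte 0 is the element ID, byte 1 the payload length, and the payload starts at byte 2, which is why the length byte is checked against sizeof(struct ieee80211_ht_operation) before the cast. A simplified stand-in for ieee80211_bss_get_ie() (sketch only, not the kernel implementation):

#include <stddef.h>
#include <stdint.h>

/* Walk a buffer of [id][len][payload...] records and return the first
 * element with the given ID, or NULL if it is absent or truncated. */
static const uint8_t *find_ie(const uint8_t *ies, size_t len, uint8_t eid)
{
	while (len >= 2 && len >= 2 + (size_t)ies[1]) {
		if (ies[0] == eid)
			return ies;	/* caller reads ies[1] and ies + 2 */
		len -= 2 + ies[1];
		ies += 2 + ies[1];
	}
	return NULL;
}
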
@@ -3425,8 +3410,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 		 * Wait up to one beacon interval ...
 		 * should this be more if we miss one?
 		 */
-		printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
-		       sdata->name, ifmgd->bssid);
+		sdata_info(sdata, "waiting for beacon from %pM\n",
+			   ifmgd->bssid);
 		assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
 	} else {
 		assoc_data->have_beacon = true;
@@ -3445,8 +3430,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 			corrupt_type = "beacon";
 		} else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
 			corrupt_type = "probe response";
-		printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
-		       sdata->name, corrupt_type);
+		sdata_info(sdata, "associating with AP with corrupt %s\n",
+			   corrupt_type);
 	}
 
 	err = 0;
@@ -3475,9 +3460,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 		return 0;
 	}
 
-	printk(KERN_DEBUG
-	       "%s: deauthenticating from %pM by local choice (reason=%d)\n",
-	       sdata->name, req->bssid, req->reason_code);
+	sdata_info(sdata,
+		   "deauthenticating from %pM by local choice (reason=%d)\n",
+		   req->bssid, req->reason_code);
 
 	if (ifmgd->associated &&
 	    ether_addr_equal(ifmgd->associated->bssid, req->bssid))
@@ -3519,8 +3504,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 		return -ENOLINK;
 	}
 
-	printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
-	       sdata->name, req->bss->bssid, req->reason_code);
+	sdata_info(sdata,
+		   "disassociating from %pM by local choice (reason=%d)\n",
+		   req->bss->bssid, req->reason_code);
 
 	memcpy(bssid, req->bss->bssid, ETH_ALEN);
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
@@ -3561,10 +3547,3 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
 	cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
 }
 EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
-
-unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
-{
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	return sdata->dev->operstate;
-}
-EXPORT_SYMBOL(ieee80211_get_operstate);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 935aa4b6deee..635c3250c668 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "driver-ops.h"
 
 /*
  * Tell our hardware to disable PS.
@@ -24,8 +24,7 @@
  * because we *may* be doing work on-operating channel, and want our
  * hardware unconditionally awake, but still let the AP send us normal frames.
  */
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
-					   bool tell_ap)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -46,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 	}
 
-	if (tell_ap && (!local->offchannel_ps_enabled ||
-			!(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
+	if (!local->offchannel_ps_enabled ||
+	    !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
 		/*
 		 * If power save was enabled, no need to send a nullfunc
 		 * frame because AP knows that we are sleeping. But if the
@@ -132,7 +131,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 			if (offchannel_ps_enable &&
 			    (sdata->vif.type == NL80211_IFTYPE_STATION) &&
 			    sdata->u.mgd.associated)
-				ieee80211_offchannel_ps_enable(sdata, true);
+				ieee80211_offchannel_ps_enable(sdata);
 		}
 	}
 	mutex_unlock(&local->iflist_mtx);
@@ -181,34 +180,58 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
 	mutex_unlock(&local->iflist_mtx);
 }
 
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
+{
+	if (roc->notified)
+		return;
+
+	if (roc->mgmt_tx_cookie) {
+		if (!WARN_ON(!roc->frame)) {
+			ieee80211_tx_skb(roc->sdata, roc->frame);
+			roc->frame = NULL;
+		}
+	} else {
+		cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc,
+					  roc->chan, roc->chan_type,
+					  roc->req_duration, GFP_KERNEL);
+	}
+
+	roc->notified = true;
+}
+
 static void ieee80211_hw_roc_start(struct work_struct *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, hw_roc_start);
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_roc_work *roc, *dep, *tmp;
 
 	mutex_lock(&local->mtx);
 
-	if (!local->hw_roc_channel) {
-		mutex_unlock(&local->mtx);
-		return;
-	}
+	if (list_empty(&local->roc_list))
+		goto out_unlock;
 
-	if (local->hw_roc_skb) {
-		sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
-		ieee80211_tx_skb(sdata, local->hw_roc_skb);
-		local->hw_roc_skb = NULL;
-	} else {
-		cfg80211_ready_on_channel(local->hw_roc_dev,
-					  local->hw_roc_cookie,
-					  local->hw_roc_channel,
-					  local->hw_roc_channel_type,
-					  local->hw_roc_duration,
-					  GFP_KERNEL);
-	}
+	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+			       list);
+
+	if (!roc->started)
+		goto out_unlock;
 
-	ieee80211_recalc_idle(local);
+	roc->hw_begun = true;
+	roc->hw_start_time = local->hw_roc_start_time;
 
+	ieee80211_handle_roc_started(roc);
+	list_for_each_entry_safe(dep, tmp, &roc->dependents, list) {
+		ieee80211_handle_roc_started(dep);
+
+		if (dep->duration > roc->duration) {
+			u32 dur = dep->duration;
+			dep->duration = dur - roc->duration;
+			roc->duration = dur;
+			list_del(&dep->list);
+			list_add(&dep->list, &roc->list);
+		}
+	}
+ out_unlock:
 	mutex_unlock(&local->mtx);
 }
 
@@ -216,52 +239,181 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
+	local->hw_roc_start_time = jiffies;
+
 	trace_api_ready_on_channel(local);
 
 	ieee80211_queue_work(hw, &local->hw_roc_start);
 }
 EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
 
-static void ieee80211_hw_roc_done(struct work_struct *work)
+void ieee80211_start_next_roc(struct ieee80211_local *local)
 {
-	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local, hw_roc_done);
+	struct ieee80211_roc_work *roc;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_held(&local->mtx);
 
-	if (!local->hw_roc_channel) {
-		mutex_unlock(&local->mtx);
+	if (list_empty(&local->roc_list)) {
+		ieee80211_run_deferred_scan(local);
 		return;
 	}
 
-	/* was never transmitted */
-	if (local->hw_roc_skb) {
-		u64 cookie;
+	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+			       list);
 
-		cookie = local->hw_roc_cookie ^ 2;
+	if (WARN_ON_ONCE(roc->started))
+		return;
+
+	if (local->ops->remain_on_channel) {
+		int ret, duration = roc->duration;
+
+		/* XXX: duplicated, see ieee80211_start_roc_work() */
+		if (!duration)
+			duration = 10;
+
+		ret = drv_remain_on_channel(local, roc->chan,
+					    roc->chan_type,
+					    duration);
+
+		roc->started = true;
+
+		if (ret) {
+			wiphy_warn(local->hw.wiphy,
+				   "failed to start next HW ROC (%d)\n", ret);
+			/*
+			 * queue the work struct again to avoid recursion
+			 * when multiple failures occur
+			 */
+			ieee80211_remain_on_channel_expired(&local->hw);
+		}
+	} else {
+		/* delay it a bit */
+		ieee80211_queue_delayed_work(&local->hw, &roc->work,
+					     round_jiffies_relative(HZ/2));
+	}
+}
 
-		cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
-					local->hw_roc_skb->data,
-					local->hw_roc_skb->len, false,
-					GFP_KERNEL);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
+{
+	struct ieee80211_roc_work *dep, *tmp;
 
-		kfree_skb(local->hw_roc_skb);
-		local->hw_roc_skb = NULL;
-		local->hw_roc_skb_for_status = NULL;
+	/* was never transmitted */
+	if (roc->frame) {
+		cfg80211_mgmt_tx_status(&roc->sdata->wdev,
+					(unsigned long)roc->frame,
+					roc->frame->data, roc->frame->len,
+					false, GFP_KERNEL);
+		kfree_skb(roc->frame);
 	}
 
-	if (!local->hw_roc_for_tx)
-		cfg80211_remain_on_channel_expired(local->hw_roc_dev,
-						   local->hw_roc_cookie,
-						   local->hw_roc_channel,
-						   local->hw_roc_channel_type,
+	if (!roc->mgmt_tx_cookie)
+		cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
+						   (unsigned long)roc,
+						   roc->chan, roc->chan_type,
 						   GFP_KERNEL);
 
-	local->hw_roc_channel = NULL;
-	local->hw_roc_cookie = 0;
+	list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
+		ieee80211_roc_notify_destroy(dep);
+
+	kfree(roc);
+}
+
+void ieee80211_sw_roc_work(struct work_struct *work)
+{
+	struct ieee80211_roc_work *roc =
+		container_of(work, struct ieee80211_roc_work, work.work);
+	struct ieee80211_sub_if_data *sdata = roc->sdata;
+	struct ieee80211_local *local = sdata->local;
+	bool started;
+
+	mutex_lock(&local->mtx);
+
+	if (roc->abort)
+		goto finish;
+
+	if (WARN_ON(list_empty(&local->roc_list)))
+		goto out_unlock;
+
+	if (WARN_ON(roc != list_first_entry(&local->roc_list,
+					    struct ieee80211_roc_work,
+					    list)))
+		goto out_unlock;
 
-	ieee80211_recalc_idle(local);
+	if (!roc->started) {
+		struct ieee80211_roc_work *dep;
+
+		/* start this ROC */
+
+		/* switch channel etc */
+		ieee80211_recalc_idle(local);
+
+		local->tmp_channel = roc->chan;
+		local->tmp_channel_type = roc->chan_type;
+		ieee80211_hw_config(local, 0);
+
+		/* tell userspace or send frame */
+		ieee80211_handle_roc_started(roc);
+		list_for_each_entry(dep, &roc->dependents, list)
+			ieee80211_handle_roc_started(dep);
+
+		/* if it was pure TX, just finish right away */
+		if (!roc->duration)
+			goto finish;
+
+		roc->started = true;
+		ieee80211_queue_delayed_work(&local->hw, &roc->work,
+					     msecs_to_jiffies(roc->duration));
+	} else {
+		/* finish this ROC */
+ finish:
+		list_del(&roc->list);
+		started = roc->started;
+		ieee80211_roc_notify_destroy(roc);
+
+		if (started) {
+			drv_flush(local, false);
+
+			local->tmp_channel = NULL;
+			ieee80211_hw_config(local, 0);
+
+			ieee80211_offchannel_return(local, true);
+		}
+
+		ieee80211_recalc_idle(local);
+
+		if (started)
+			ieee80211_start_next_roc(local);
+	}
 
+ out_unlock:
+	mutex_unlock(&local->mtx);
+}
+
+static void ieee80211_hw_roc_done(struct work_struct *work)
+{
+	struct ieee80211_local *local =
+		container_of(work, struct ieee80211_local, hw_roc_done);
+	struct ieee80211_roc_work *roc;
+
+	mutex_lock(&local->mtx);
+
+	if (list_empty(&local->roc_list))
+		goto out_unlock;
+
+	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+			       list);
+
+	if (!roc->started)
+		goto out_unlock;
+
+	list_del(&roc->list);
+
+	ieee80211_roc_notify_destroy(roc);
+
+	/* if there's another roc, start it now */
+	ieee80211_start_next_roc(local);
+
+ out_unlock:
 	mutex_unlock(&local->mtx);
 }
 
@@ -275,8 +427,47 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
 
-void ieee80211_hw_roc_setup(struct ieee80211_local *local)
+void ieee80211_roc_setup(struct ieee80211_local *local)
 {
 	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
 	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+	INIT_LIST_HEAD(&local->roc_list);
+}
+
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_roc_work *roc, *tmp;
+	LIST_HEAD(tmp_list);
+
+	mutex_lock(&local->mtx);
+	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+		if (roc->sdata != sdata)
+			continue;
+
+		if (roc->started && local->ops->remain_on_channel) {
+			/* can race, so ignore return value */
+			drv_cancel_remain_on_channel(local);
+		}
+
+		list_move_tail(&roc->list, &tmp_list);
+		roc->abort = true;
+	}
+
+	ieee80211_start_next_roc(local);
+	mutex_unlock(&local->mtx);
+
+	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
+		if (local->ops->remain_on_channel) {
+			list_del(&roc->list);
+			ieee80211_roc_notify_destroy(roc);
+		} else {
+			ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+
+			/* work will clean up etc */
+			flush_delayed_work(&roc->work);
+		}
+	}
+
+	WARN_ON_ONCE(!list_empty(&tmp_list));
 }
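
ieee80211_roc_purge() above is an instance of a common kernel idiom: while holding the lock, matching entries are only moved onto a private list with list_move_tail(); the destructive work happens after the lock is dropped, so notification callbacks never run under local->mtx. A generic sketch of that two-phase teardown (hypothetical item type, real kernel list API):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int owner;
};

/* Detach all items belonging to @owner while holding @lock, then free
 * them with the lock dropped. */
static void purge_owner(struct list_head *all, struct mutex *lock, int owner)
{
	struct item *it, *tmp;
	LIST_HEAD(doomed);

	mutex_lock(lock);
	list_for_each_entry_safe(it, tmp, all, list)
		if (it->owner == owner)
			list_move_tail(&it->list, &doomed);
	mutex_unlock(lock);

	list_for_each_entry_safe(it, tmp, &doomed, list) {
		list_del(&it->list);
		kfree(it);
	}
}
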
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index af1c4e26e965..5c572e7a1a71 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -77,6 +77,17 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	int err = drv_suspend(local, wowlan);
 	if (err < 0) {
 		local->quiescing = false;
+		local->wowlan = false;
+		if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+			mutex_lock(&local->sta_mtx);
+			list_for_each_entry(sta,
+					    &local->sta_list, list) {
+				clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+			}
+			mutex_unlock(&local->sta_mtx);
+		}
+		ieee80211_wake_queues_by_reason(hw,
+			IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 		return err;
 	} else if (err > 0) {
 		WARN_ON(err != 1);
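
The error path added above undoes the suspend preparation when drv_suspend() fails: the wowlan and quiescing flags are cleared, WLAN_STA_BLOCK_BA is lifted from every station so aggregation can resume, and the queues stopped for suspend are woken again. A toy illustration of that unwind-in-reverse shape (hypothetical context struct, not the mac80211 structures):

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool queues_stopped;
	bool agg_blocked;
};

static int hw_suspend(struct ctx *c) { (void)c; return -1; /* simulate failure */ }

static int do_suspend(struct ctx *c)
{
	c->queues_stopped = true;	/* stop TX queues */
	c->agg_blocked = true;		/* block new BA sessions */

	if (hw_suspend(c) < 0) {
		/* failure: undo the preparation in reverse order */
		c->agg_blocked = false;
		c->queues_stopped = false;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ctx c = { false, false };

	printf("suspend %s\n",
	       do_suspend(&c) ? "failed, state restored" : "ok");
	return 0;
}
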
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..fb1d4aa65e8c 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -626,8 +626,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* use fixed index if set */
-	if (mp->fixed_rate_idx != -1)
-		sample_idx = mp->fixed_rate_idx;
+	if (mp->fixed_rate_idx != -1) {
+		mi->max_tp_rate = mp->fixed_rate_idx;
+		mi->max_tp_rate2 = mp->fixed_rate_idx;
+		mi->max_prob_rate = mp->fixed_rate_idx;
+		sample_idx = -1;
+	}
 #endif
 
 	if (sample_idx >= 0) {
@@ -809,7 +813,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 		max_rates = sband->n_bitrates;
 	}
 
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7bcecf73aafb..0cb4edee6af5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -94,7 +94,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
 	return len;
 }
 
-/*
+/**
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
  * add a radiotap header containing all the fields which the hardware provided.
@@ -413,29 +413,6 @@ static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
 
 /* rx handlers */
 
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
-{
-	struct ieee80211_local *local = rx->local;
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-	struct sk_buff *skb = rx->skb;
-
-	if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-		   !local->sched_scanning))
-		return RX_CONTINUE;
-
-	if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-	    test_bit(SCAN_SW_SCANNING, &local->scanning) ||
-	    test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
-	    local->sched_scanning)
-		return ieee80211_scan_rx(rx->sdata, skb);
-
-	/* scanning finished during invoking of handlers */
-	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
-	return RX_DROP_UNUSABLE;
-}
-
-
 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -554,11 +531,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 }
 
 
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 					    struct tid_ampdu_rx *tid_agg_rx,
 					    int index)
 {
-	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
 	struct ieee80211_rx_status *status;
 
@@ -578,7 +555,7 @@ no_frame:
 	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
 }
 
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
 					     u16 head_seq_num)
 {
@@ -589,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 			tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
 	}
 }
 
595 572
@@ -604,7 +581,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
  */
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
-static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 					  struct tid_ampdu_rx *tid_agg_rx)
 {
 	int index, j;
@@ -632,12 +609,9 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 					HT_RX_REORDER_BUF_TIMEOUT))
 				goto set_release_timer;
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				wiphy_debug(hw->wiphy,
-					    "release an RX reorder frame due to timeout on earlier frames\n");
-#endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
+			ht_dbg_ratelimited(sdata,
+					   "release an RX reorder frame due to timeout on earlier frames\n");
+			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
 
 			/*
 			 * Increment the head seq# also for the skipped slots.
@@ -647,7 +621,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 			tid_agg_rx->buf_size;
 	}
@@ -677,7 +651,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
  * rcu_read_lock protection. It returns false if the frame
  * can be processed immediately, true if it was consumed.
  */
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
 					     struct sk_buff *skb)
 {
@@ -706,7 +680,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
+		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
+						 head_seq_num);
 	}
 
 	/* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +711,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(hw, tid_agg_rx);
+	ieee80211_sta_reorder_release(sdata, tid_agg_rx);
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
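
The reorder logic in the hunks above maps a 12-bit 802.11 sequence number to a buffer slot via seq_sub(head_seq_num, ssn) % buf_size, where seq_sub() wraps modulo 4096 so the distance stays small across the 4095 to 0 rollover. A self-contained sketch with the same wrap semantics:

#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 0xfff	/* 802.11 sequence numbers are 12 bits wide */

/* difference of two sequence numbers, wrapping at 4096 */
static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
	uint16_t ssn = 4090, head = 5, buf_size = 64;

	/* head has wrapped past 4095, yet the offset is still the short
	 * forward distance: (5 - 4090) & 0xfff == 11 */
	printf("slot = %u\n", seq_sub(head, ssn) % buf_size);
	return 0;
}
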
@@ -751,7 +726,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
-	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct sta_info *sta = rx->sta;
@@ -813,7 +787,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 	 * sure that we cannot get to it any more before doing
 	 * anything with it.
 	 */
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
+	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
 		return;
 
  dont_reorder:
@@ -1136,24 +1110,18 @@ static void ap_sta_ps_start(struct sta_info *sta)
 	set_sta_flag(sta, WLAN_STA_PS_STA);
 	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
 		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
-	       sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
+	       sta->sta.addr, sta->sta.aid);
 }
 
 static void ap_sta_ps_end(struct sta_info *sta)
 {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-	printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
-	       sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
+	       sta->sta.addr, sta->sta.aid);
 
 	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-		printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
-		       sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
+		       sta->sta.addr, sta->sta.aid);
 		return;
 	}
 
@@ -1383,19 +1351,8 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
 	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
 		sdata->fragment_next = 0;
 
-	if (!skb_queue_empty(&entry->skb_list)) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		struct ieee80211_hdr *hdr =
-			(struct ieee80211_hdr *) entry->skb_list.next->data;
-		printk(KERN_DEBUG "%s: RX reassembly removed oldest "
-		       "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
-		       "addr1=%pM addr2=%pM\n",
-		       sdata->name, idx,
-		       jiffies - entry->first_frag_time, entry->seq,
-		       entry->last_frag, hdr->addr1, hdr->addr2);
-#endif
+	if (!skb_queue_empty(&entry->skb_list))
 		__skb_queue_purge(&entry->skb_list);
-	}
 
 	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
 	*skb = NULL;
@@ -1753,7 +1710,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 		 */
 		xmit_skb = skb_copy(skb, GFP_ATOMIC);
 		if (!xmit_skb)
-			net_dbg_ratelimited("%s: failed to clone multicast frame\n",
-					    dev->name);
+			net_info_ratelimited("%s: failed to clone multicast frame\n",
+					     dev->name);
 	} else {
 		dsta = sta_info_get(sdata, skb->data);
@@ -1937,7 +1894,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
 		return RX_CONTINUE;
 
-	q = ieee80211_select_queue_80211(local, skb, hdr);
+	q = ieee80211_select_queue_80211(sdata, skb, hdr);
 	if (ieee80211_queue_stopped(&local->hw, q)) {
 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
 		return RX_DROP_MONITOR;
@@ -1957,7 +1914,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 
 	fwd_skb = skb_copy(skb, GFP_ATOMIC);
 	if (!fwd_skb) {
-		net_dbg_ratelimited("%s: failed to clone mesh frame\n",
-				    sdata->name);
+		net_info_ratelimited("%s: failed to clone mesh frame\n",
+				     sdata->name);
 		goto out;
 	}
@@ -2060,8 +2017,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 {
-	struct ieee80211_local *local = rx->local;
-	struct ieee80211_hw *hw = &local->hw;
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
 	struct tid_ampdu_rx *tid_agg_rx;
@@ -2098,7 +2053,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 
 		spin_lock(&tid_agg_rx->reorder_lock);
 		/* release stored frames up to start of BAR */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
+						 start_seq_num);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
 		kfree_skb(skb);
@@ -2425,7 +2381,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
 	if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
 		sig = status->signal;
 
-	if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
+	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
 			     rx->skb->data, rx->skb->len,
 			     GFP_ATOMIC)) {
 		if (rx->sta)
@@ -2455,7 +2411,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
 	 * frames that we didn't handle, including returning unknown
 	 * ones. For all other modes we will return them to the sender,
 	 * setting the 0x80 bit in the action category, as required by
-	 * 802.11-2007 7.3.1.11.
+	 * 802.11-2012 9.24.4.
 	 * Newer versions of hostapd shall also use the management frame
 	 * registration mechanisms, but older ones still use cooked
 	 * monitor interfaces so push all frames there.
@@ -2465,6 +2421,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
 		return RX_DROP_MONITOR;
 
+	if (is_multicast_ether_addr(mgmt->da))
+		return RX_DROP_MONITOR;
+
 	/* do not return rejected action frames */
 	if (mgmt->u.action.category & 0x80)
 		return RX_DROP_UNUSABLE;
@@ -2713,7 +2672,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 		goto rxh_next;  \
 	} while (0);
 
-	CALL_RXH(ieee80211_rx_h_passive_scan)
 	CALL_RXH(ieee80211_rx_h_check)
 
 	ieee80211_rx_reorder_ampdu(rx);
@@ -2749,7 +2707,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		return;
 
 	spin_lock(&tid_agg_rx->reorder_lock);
-	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
+	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
 	ieee80211_rx_handlers(&rx);
@@ -2783,11 +2741,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
 			return 0;
 		if (ieee80211_is_beacon(hdr->frame_control)) {
 			return 1;
-		}
-		else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
-			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
-				return 0;
-			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+			return 0;
 		} else if (!multicast &&
 			   !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
 			if (!(sdata->dev->flags & IFF_PROMISC))
@@ -2825,11 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
 			 * and location updates. Note that mac80211
 			 * itself never looks at these frames.
 			 */
-			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-			    ieee80211_is_public_action(hdr, skb->len))
+			if (ieee80211_is_public_action(hdr, skb->len))
 				return 1;
-			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-			    !ieee80211_is_beacon(hdr->frame_control))
+			if (!ieee80211_is_beacon(hdr->frame_control))
 				return 0;
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
 		}
@@ -2895,7 +2848,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 					 struct sk_buff *skb)
 {
-	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_hdr *hdr;
@@ -2913,11 +2865,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
 		local->dot11ReceivedFragmentCount++;
 
-	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-		     test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
-		     test_bit(SCAN_SW_SCANNING, &local->scanning)))
-		status->rx_flags |= IEEE80211_RX_IN_SCAN;
-
 	if (ieee80211_is_mgmt(fc))
 		err = skb_linearize(skb);
 	else
@@ -2932,6 +2879,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	ieee80211_parse_qos(&rx);
 	ieee80211_verify_alignment(&rx);
 
+	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
+		     ieee80211_is_beacon(hdr->frame_control)))
+		ieee80211_scan_rx(local, skb);
+
 	if (ieee80211_is_data(fc)) {
 		prev_sta = NULL;
 
@@ -3029,6 +2980,10 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3029 if (unlikely(local->quiescing || local->suspended)) 2980 if (unlikely(local->quiescing || local->suspended))
3030 goto drop; 2981 goto drop;
3031 2982
 2983 /* We might be in the middle of a HW reconfig; prevent Rx for the same reason */
2984 if (unlikely(local->in_reconfig))
2985 goto drop;
2986
3032 /* 2987 /*
3033 * The same happens when we're not even started, 2988 * The same happens when we're not even started,
3034 * but that's worth a warning. 2989 * but that's worth a warning.
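
Note on the rx.c hunks above: the per-frame IEEE80211_RX_IN_SCAN flag is gone. Instead of tagging every frame while a scan is active and special-casing it in prepare_for_handlers(), the RX path now hands beacons and probe responses directly to the scanner, and drops all RX while a HW restart is in flight. A minimal sketch of the resulting ordering in the entry path (names as in the diff; the placement is illustrative, not a drop-in):

	/* sketch, not part of the patch */
	if (unlikely(local->in_reconfig))
		goto drop;			/* HW restart in progress */

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_beacon(hdr->frame_control)))
		ieee80211_scan_rx(local, skb);	/* scanner peeks; skb not consumed */
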
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 169da0742c81..bcaee5d12839 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -83,13 +83,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
83 83
84 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel, 84 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
85 mgmt, len, signal, GFP_ATOMIC); 85 mgmt, len, signal, GFP_ATOMIC);
86
87 if (!cbss) 86 if (!cbss)
88 return NULL; 87 return NULL;
89 88
90 cbss->free_priv = ieee80211_rx_bss_free; 89 cbss->free_priv = ieee80211_rx_bss_free;
91 bss = (void *)cbss->priv; 90 bss = (void *)cbss->priv;
92 91
92 bss->device_ts = rx_status->device_timestamp;
93
93 if (elems->parse_error) { 94 if (elems->parse_error) {
94 if (beacon) 95 if (beacon)
95 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON; 96 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
@@ -114,8 +115,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
114 115
115 if (elems->tim && (!elems->parse_error || 116 if (elems->tim && (!elems->parse_error ||
116 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) { 117 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
117 struct ieee80211_tim_ie *tim_ie = 118 struct ieee80211_tim_ie *tim_ie = elems->tim;
118 (struct ieee80211_tim_ie *)elems->tim;
119 bss->dtim_period = tim_ie->dtim_period; 119 bss->dtim_period = tim_ie->dtim_period;
120 if (!elems->parse_error) 120 if (!elems->parse_error)
121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM; 121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
@@ -165,52 +165,47 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
165 return bss; 165 return bss;
166} 166}
167 167
168ieee80211_rx_result 168void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
169ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
170{ 169{
171 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 170 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
172 struct ieee80211_mgmt *mgmt; 171 struct ieee80211_sub_if_data *sdata1, *sdata2;
172 struct ieee80211_mgmt *mgmt = (void *)skb->data;
173 struct ieee80211_bss *bss; 173 struct ieee80211_bss *bss;
174 u8 *elements; 174 u8 *elements;
175 struct ieee80211_channel *channel; 175 struct ieee80211_channel *channel;
176 size_t baselen; 176 size_t baselen;
177 int freq; 177 int freq;
178 __le16 fc; 178 bool beacon;
179 bool presp, beacon = false;
180 struct ieee802_11_elems elems; 179 struct ieee802_11_elems elems;
181 180
182 if (skb->len < 2) 181 if (skb->len < 24 ||
183 return RX_DROP_UNUSABLE; 182 (!ieee80211_is_probe_resp(mgmt->frame_control) &&
184 183 !ieee80211_is_beacon(mgmt->frame_control)))
185 mgmt = (struct ieee80211_mgmt *) skb->data; 184 return;
186 fc = mgmt->frame_control;
187 185
188 if (ieee80211_is_ctl(fc)) 186 sdata1 = rcu_dereference(local->scan_sdata);
189 return RX_CONTINUE; 187 sdata2 = rcu_dereference(local->sched_scan_sdata);
190 188
191 if (skb->len < 24) 189 if (likely(!sdata1 && !sdata2))
192 return RX_CONTINUE; 190 return;
193 191
194 presp = ieee80211_is_probe_resp(fc); 192 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
195 if (presp) {
196 /* ignore ProbeResp to foreign address */ 193 /* ignore ProbeResp to foreign address */
197 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) 194 if ((!sdata1 || !ether_addr_equal(mgmt->da, sdata1->vif.addr)) &&
198 return RX_DROP_MONITOR; 195 (!sdata2 || !ether_addr_equal(mgmt->da, sdata2->vif.addr)))
196 return;
199 197
200 presp = true;
201 elements = mgmt->u.probe_resp.variable; 198 elements = mgmt->u.probe_resp.variable;
202 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); 199 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
200 beacon = false;
203 } else { 201 } else {
204 beacon = ieee80211_is_beacon(fc);
205 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); 202 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
206 elements = mgmt->u.beacon.variable; 203 elements = mgmt->u.beacon.variable;
204 beacon = true;
207 } 205 }
208 206
209 if (!presp && !beacon)
210 return RX_CONTINUE;
211
212 if (baselen > skb->len) 207 if (baselen > skb->len)
213 return RX_DROP_MONITOR; 208 return;
214 209
215 ieee802_11_parse_elems(elements, skb->len - baselen, &elems); 210 ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
216 211
@@ -220,22 +215,16 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
220 else 215 else
221 freq = rx_status->freq; 216 freq = rx_status->freq;
222 217
223 channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq); 218 channel = ieee80211_get_channel(local->hw.wiphy, freq);
224 219
225 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) 220 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
226 return RX_DROP_MONITOR; 221 return;
227 222
228 bss = ieee80211_bss_info_update(sdata->local, rx_status, 223 bss = ieee80211_bss_info_update(local, rx_status,
229 mgmt, skb->len, &elems, 224 mgmt, skb->len, &elems,
230 channel, beacon); 225 channel, beacon);
231 if (bss) 226 if (bss)
232 ieee80211_rx_bss_put(sdata->local, bss); 227 ieee80211_rx_bss_put(local, bss);
233
234 if (channel == sdata->local->oper_channel)
235 return RX_CONTINUE;
236
237 dev_kfree_skb(skb);
238 return RX_QUEUED;
239} 228}
240 229
241/* return false if no more work */ 230/* return false if no more work */
@@ -293,7 +282,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
293 return; 282 return;
294 283
295 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { 284 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
296 int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req); 285 int rc;
286
287 rc = drv_hw_scan(local,
288 rcu_dereference_protected(local->scan_sdata,
289 lockdep_is_held(&local->mtx)),
290 local->hw_scan_req);
291
297 if (rc == 0) 292 if (rc == 0)
298 return; 293 return;
299 } 294 }
@@ -323,7 +318,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
323 ieee80211_mlme_notify_scan_completed(local); 318 ieee80211_mlme_notify_scan_completed(local);
324 ieee80211_ibss_notify_scan_completed(local); 319 ieee80211_ibss_notify_scan_completed(local);
325 ieee80211_mesh_notify_scan_completed(local); 320 ieee80211_mesh_notify_scan_completed(local);
326 ieee80211_queue_work(&local->hw, &local->work_work); 321 ieee80211_start_next_roc(local);
327} 322}
328 323
329void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 324void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
@@ -376,7 +371,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
376static bool ieee80211_can_scan(struct ieee80211_local *local, 371static bool ieee80211_can_scan(struct ieee80211_local *local,
377 struct ieee80211_sub_if_data *sdata) 372 struct ieee80211_sub_if_data *sdata)
378{ 373{
379 if (!list_empty(&local->work_list)) 374 if (!list_empty(&local->roc_list))
380 return false; 375 return false;
381 376
382 if (sdata->vif.type == NL80211_IFTYPE_STATION && 377 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -394,7 +389,10 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
394 if (!local->scan_req || local->scanning) 389 if (!local->scan_req || local->scanning)
395 return; 390 return;
396 391
397 if (!ieee80211_can_scan(local, local->scan_sdata)) 392 if (!ieee80211_can_scan(local,
393 rcu_dereference_protected(
394 local->scan_sdata,
395 lockdep_is_held(&local->mtx))))
398 return; 396 return;
399 397
400 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 398 ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
@@ -405,9 +403,12 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
405 unsigned long *next_delay) 403 unsigned long *next_delay)
406{ 404{
407 int i; 405 int i;
408 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 406 struct ieee80211_sub_if_data *sdata;
409 enum ieee80211_band band = local->hw.conf.channel->band; 407 enum ieee80211_band band = local->hw.conf.channel->band;
410 408
409 sdata = rcu_dereference_protected(local->scan_sdata,
 410 lockdep_is_held(&local->mtx));
411
411 for (i = 0; i < local->scan_req->n_ssids; i++) 412 for (i = 0; i < local->scan_req->n_ssids; i++)
412 ieee80211_send_probe_req( 413 ieee80211_send_probe_req(
413 sdata, NULL, 414 sdata, NULL,
@@ -439,7 +440,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
439 if (!ieee80211_can_scan(local, sdata)) { 440 if (!ieee80211_can_scan(local, sdata)) {
440 /* wait for the work to finish/time out */ 441 /* wait for the work to finish/time out */
441 local->scan_req = req; 442 local->scan_req = req;
442 local->scan_sdata = sdata; 443 rcu_assign_pointer(local->scan_sdata, sdata);
443 return 0; 444 return 0;
444 } 445 }
445 446
@@ -473,7 +474,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
473 } 474 }
474 475
475 local->scan_req = req; 476 local->scan_req = req;
476 local->scan_sdata = sdata; 477 rcu_assign_pointer(local->scan_sdata, sdata);
477 478
478 if (local->ops->hw_scan) { 479 if (local->ops->hw_scan) {
479 __set_bit(SCAN_HW_SCANNING, &local->scanning); 480 __set_bit(SCAN_HW_SCANNING, &local->scanning);
@@ -533,7 +534,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
533 ieee80211_recalc_idle(local); 534 ieee80211_recalc_idle(local);
534 535
535 local->scan_req = NULL; 536 local->scan_req = NULL;
536 local->scan_sdata = NULL; 537 rcu_assign_pointer(local->scan_sdata, NULL);
537 } 538 }
538 539
539 return rc; 540 return rc;
@@ -720,7 +721,8 @@ void ieee80211_scan_work(struct work_struct *work)
720 721
721 mutex_lock(&local->mtx); 722 mutex_lock(&local->mtx);
722 723
723 sdata = local->scan_sdata; 724 sdata = rcu_dereference_protected(local->scan_sdata,
725 lockdep_is_held(&local->mtx));
724 726
725 /* When scanning on-channel, the first-callback means completed. */ 727 /* When scanning on-channel, the first-callback means completed. */
726 if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { 728 if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
@@ -741,7 +743,7 @@ void ieee80211_scan_work(struct work_struct *work)
741 int rc; 743 int rc;
742 744
743 local->scan_req = NULL; 745 local->scan_req = NULL;
744 local->scan_sdata = NULL; 746 rcu_assign_pointer(local->scan_sdata, NULL);
745 747
746 rc = __ieee80211_start_scan(sdata, req); 748 rc = __ieee80211_start_scan(sdata, req);
747 if (rc) { 749 if (rc) {
@@ -893,7 +895,9 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
893 895
894 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { 896 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
895 if (local->ops->cancel_hw_scan) 897 if (local->ops->cancel_hw_scan)
896 drv_cancel_hw_scan(local, local->scan_sdata); 898 drv_cancel_hw_scan(local,
899 rcu_dereference_protected(local->scan_sdata,
900 lockdep_is_held(&local->mtx)));
897 goto out; 901 goto out;
898 } 902 }
899 903
@@ -915,9 +919,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
915 struct ieee80211_local *local = sdata->local; 919 struct ieee80211_local *local = sdata->local;
916 int ret, i; 920 int ret, i;
917 921
918 mutex_lock(&sdata->local->mtx); 922 mutex_lock(&local->mtx);
919 923
920 if (local->sched_scanning) { 924 if (rcu_access_pointer(local->sched_scan_sdata)) {
921 ret = -EBUSY; 925 ret = -EBUSY;
922 goto out; 926 goto out;
923 } 927 }
@@ -928,6 +932,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
928 } 932 }
929 933
930 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 934 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
935 if (!local->hw.wiphy->bands[i])
936 continue;
937
931 local->sched_scan_ies.ie[i] = kzalloc(2 + 938 local->sched_scan_ies.ie[i] = kzalloc(2 +
932 IEEE80211_MAX_SSID_LEN + 939 IEEE80211_MAX_SSID_LEN +
933 local->scan_ies_len + 940 local->scan_ies_len +
@@ -948,7 +955,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
948 ret = drv_sched_scan_start(local, sdata, req, 955 ret = drv_sched_scan_start(local, sdata, req,
949 &local->sched_scan_ies); 956 &local->sched_scan_ies);
950 if (ret == 0) { 957 if (ret == 0) {
951 local->sched_scanning = true; 958 rcu_assign_pointer(local->sched_scan_sdata, sdata);
952 goto out; 959 goto out;
953 } 960 }
954 961
@@ -956,7 +963,7 @@ out_free:
956 while (i > 0) 963 while (i > 0)
957 kfree(local->sched_scan_ies.ie[--i]); 964 kfree(local->sched_scan_ies.ie[--i]);
958out: 965out:
959 mutex_unlock(&sdata->local->mtx); 966 mutex_unlock(&local->mtx);
960 return ret; 967 return ret;
961} 968}
962 969
@@ -965,22 +972,22 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
965 struct ieee80211_local *local = sdata->local; 972 struct ieee80211_local *local = sdata->local;
966 int ret = 0, i; 973 int ret = 0, i;
967 974
968 mutex_lock(&sdata->local->mtx); 975 mutex_lock(&local->mtx);
969 976
970 if (!local->ops->sched_scan_stop) { 977 if (!local->ops->sched_scan_stop) {
971 ret = -ENOTSUPP; 978 ret = -ENOTSUPP;
972 goto out; 979 goto out;
973 } 980 }
974 981
975 if (local->sched_scanning) { 982 if (rcu_access_pointer(local->sched_scan_sdata)) {
976 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 983 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
977 kfree(local->sched_scan_ies.ie[i]); 984 kfree(local->sched_scan_ies.ie[i]);
978 985
979 drv_sched_scan_stop(local, sdata); 986 drv_sched_scan_stop(local, sdata);
980 local->sched_scanning = false; 987 rcu_assign_pointer(local->sched_scan_sdata, NULL);
981 } 988 }
982out: 989out:
983 mutex_unlock(&sdata->local->mtx); 990 mutex_unlock(&local->mtx);
984 991
985 return ret; 992 return ret;
986} 993}
@@ -1004,7 +1011,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1004 1011
1005 mutex_lock(&local->mtx); 1012 mutex_lock(&local->mtx);
1006 1013
1007 if (!local->sched_scanning) { 1014 if (!rcu_access_pointer(local->sched_scan_sdata)) {
1008 mutex_unlock(&local->mtx); 1015 mutex_unlock(&local->mtx);
1009 return; 1016 return;
1010 } 1017 }
@@ -1012,7 +1019,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1012 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 1019 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1013 kfree(local->sched_scan_ies.ie[i]); 1020 kfree(local->sched_scan_ies.ie[i]);
1014 1021
1015 local->sched_scanning = false; 1022 rcu_assign_pointer(local->sched_scan_sdata, NULL);
1016 1023
1017 mutex_unlock(&local->mtx); 1024 mutex_unlock(&local->mtx);
1018 1025
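
Note on the scan.c hunks above: the bare local->scan_sdata pointer and the local->sched_scanning bool become RCU-managed pointers. Writers publish under local->mtx with rcu_assign_pointer(), the lock-free RX path uses rcu_dereference(), and pure "is it set?" checks use rcu_access_pointer() since they never touch the pointee. A sketch of the locked-reader pattern the diff open-codes at each call site (this wrapper function is hypothetical):

	/* hypothetical helper; scan.c repeats this expression inline */
	static struct ieee80211_sub_if_data *
	scan_sdata_locked(struct ieee80211_local *local)
	{
		lockdep_assert_held(&local->mtx);
		return rcu_dereference_protected(local->scan_sdata,
						 lockdep_is_held(&local->mtx));
	}
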
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index de455f8bbb91..06fa75ceb025 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -169,9 +169,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
169 if (sta->rate_ctrl) 169 if (sta->rate_ctrl)
170 rate_control_free_sta(sta); 170 rate_control_free_sta(sta);
171 171
172#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 172 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
173 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
174#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
175 173
176 kfree(sta); 174 kfree(sta);
177} 175}
@@ -278,9 +276,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
278 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 276 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
279 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 277 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
280 278
281#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 279 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
282 wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
283#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
284 280
285#ifdef CONFIG_MAC80211_MESH 281#ifdef CONFIG_MAC80211_MESH
286 sta->plink_state = NL80211_PLINK_LISTEN; 282 sta->plink_state = NL80211_PLINK_LISTEN;
@@ -333,9 +329,9 @@ static int sta_info_insert_drv_state(struct ieee80211_local *local,
333 } 329 }
334 330
335 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 331 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
336 printk(KERN_DEBUG 332 sdata_info(sdata,
337 "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n", 333 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
338 sdata->name, sta->sta.addr, state + 1, err); 334 sta->sta.addr, state + 1, err);
339 err = 0; 335 err = 0;
340 } 336 }
341 337
@@ -390,9 +386,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
390 sinfo.generation = local->sta_generation; 386 sinfo.generation = local->sta_generation;
391 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 387 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
392 388
393#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 389 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
394 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
395#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
396 390
397 /* move reference to rcu-protected */ 391 /* move reference to rcu-protected */
398 rcu_read_lock(); 392 rcu_read_lock();
@@ -618,10 +612,8 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
618 break; 612 break;
619 613
620 local->total_ps_buffered--; 614 local->total_ps_buffered--;
621#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 615 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
622 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
623 sta->sta.addr); 616 sta->sta.addr);
624#endif
625 dev_kfree_skb(skb); 617 dev_kfree_skb(skb);
626 } 618 }
627 619
@@ -747,9 +739,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
747 mesh_accept_plinks_update(sdata); 739 mesh_accept_plinks_update(sdata);
748#endif 740#endif
749 741
750#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 742 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
751 wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr); 743
752#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
753 cancel_work_sync(&sta->drv_unblock_wk); 744 cancel_work_sync(&sta->drv_unblock_wk);
754 745
755 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL); 746 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
@@ -889,10 +880,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
889 continue; 880 continue;
890 881
891 if (time_after(jiffies, sta->last_rx + exp_time)) { 882 if (time_after(jiffies, sta->last_rx + exp_time)) {
892#ifdef CONFIG_MAC80211_IBSS_DEBUG 883 ibss_dbg(sdata, "expiring inactive STA %pM\n",
893 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 884 sta->sta.addr);
894 sdata->name, sta->sta.addr);
895#endif
896 WARN_ON(__sta_info_destroy(sta)); 885 WARN_ON(__sta_info_destroy(sta));
897 } 886 }
898 } 887 }
@@ -990,11 +979,9 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
990 979
991 sta_info_recalc_tim(sta); 980 sta_info_recalc_tim(sta);
992 981
993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 982 ps_dbg(sdata,
994 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 983 "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
995 "since STA not sleeping anymore\n", sdata->name,
996 sta->sta.addr, sta->sta.aid, filtered, buffered); 984 sta->sta.addr, sta->sta.aid, filtered, buffered);
997#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
998} 985}
999 986
1000static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, 987static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1384,10 +1371,8 @@ int sta_info_move_state(struct sta_info *sta,
1384 return -EINVAL; 1371 return -EINVAL;
1385 } 1372 }
1386 1373
1387#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1374 sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
1388 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n", 1375 sta->sta.addr, new_state);
1389 sta->sdata->name, sta->sta.addr, new_state);
1390#endif
1391 1376
1392 /* 1377 /*
1393 * notify the driver before the actual changes so it can 1378 * notify the driver before the actual changes so it can
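
Note on the sta_info.c hunks above: open-coded #ifdef CONFIG_MAC80211_*_DEBUG printk blocks give way to sta_dbg()/ps_dbg()/ibss_dbg(). Judging from the __sdata_dbg() helper added in net/mac80211/trace.c further down in this diff, these macros plausibly expand along the lines below, so every message reaches the tracepoint and is only conditionally printed (the real definitions live in net/mac80211/debug.h, which this section does not show):

	/* plausible shape only -- see debug.h for the real macro */
	#define sta_dbg(sdata, fmt, ...)				\
		__sdata_dbg(IS_ENABLED(CONFIG_MAC80211_STA_DEBUG),	\
			    "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
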
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 3bb24a121c95..a470e1123a55 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
271 * @plink_timer: peer link watch timer 271 * @plink_timer: peer link watch timer
272 * @plink_timer_was_running: used by suspend/resume to restore timers 272 * @plink_timer_was_running: used by suspend/resume to restore timers
273 * @t_offset: timing offset relative to this host 273 * @t_offset: timing offset relative to this host
274 * @t_offset_setpoint: reference timing offset of this sta to be used when
 275 * calculating clock drift
276 * @ch_type: peer's channel type
274 * @debugfs: debug filesystem info 277 * @debugfs: debug filesystem info
275 * @dead: set to true when sta is unlinked 278 * @dead: set to true when sta is unlinked
276 * @uploaded: set to true when sta is uploaded to the driver 279 * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
278 * @sta: station information we share with the driver 281 * @sta: station information we share with the driver
279 * @sta_state: duplicates information about station state (for debug) 282 * @sta_state: duplicates information about station state (for debug)
280 * @beacon_loss_count: number of times beacon loss has triggered 283 * @beacon_loss_count: number of times beacon loss has triggered
284 * @supports_40mhz: tracks whether the station advertised 40 MHz support
285 * as we overwrite its HT parameters with the currently used value
281 */ 286 */
282struct sta_info { 287struct sta_info {
283 /* General information, mostly static */ 288 /* General information, mostly static */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 28cfa981cfb1..8cd72914cdaf 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -155,13 +155,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
155 return; 155 return;
156 } 156 }
157 157
158#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 158 ps_dbg_ratelimited(sta->sdata,
159 if (net_ratelimit()) 159 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
160 wiphy_debug(local->hw.wiphy, 160 skb_queue_len(&sta->tx_filtered[ac]),
161 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", 161 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
162 skb_queue_len(&sta->tx_filtered[ac]),
163 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
164#endif
165 dev_kfree_skb(skb); 162 dev_kfree_skb(skb);
166} 163}
167 164
@@ -520,36 +517,21 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
520 517
521 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 518 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
522 u64 cookie = (unsigned long)skb; 519 u64 cookie = (unsigned long)skb;
520 acked = info->flags & IEEE80211_TX_STAT_ACK;
523 521
524 if (ieee80211_is_nullfunc(hdr->frame_control) || 522 /*
525 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 523 * TODO: When we have non-netdev frame TX,
526 acked = info->flags & IEEE80211_TX_STAT_ACK; 524 * we cannot use skb->dev->ieee80211_ptr
525 */
527 526
527 if (ieee80211_is_nullfunc(hdr->frame_control) ||
528 ieee80211_is_qos_nullfunc(hdr->frame_control))
528 cfg80211_probe_status(skb->dev, hdr->addr1, 529 cfg80211_probe_status(skb->dev, hdr->addr1,
529 cookie, acked, GFP_ATOMIC); 530 cookie, acked, GFP_ATOMIC);
530 } else { 531 else
531 struct ieee80211_work *wk;
532
533 rcu_read_lock();
534 list_for_each_entry_rcu(wk, &local->work_list, list) {
535 if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
536 continue;
537 if (wk->offchan_tx.frame != skb)
538 continue;
539 wk->offchan_tx.status = true;
540 break;
541 }
542 rcu_read_unlock();
543 if (local->hw_roc_skb_for_status == skb) {
544 cookie = local->hw_roc_cookie ^ 2;
545 local->hw_roc_skb_for_status = NULL;
546 }
547
548 cfg80211_mgmt_tx_status( 532 cfg80211_mgmt_tx_status(
549 skb->dev, cookie, skb->data, skb->len, 533 skb->dev->ieee80211_ptr, cookie, skb->data,
550 !!(info->flags & IEEE80211_TX_STAT_ACK), 534 skb->len, acked, GFP_ATOMIC);
551 GFP_ATOMIC);
552 }
553 } 535 }
554 536
555 if (unlikely(info->ack_frame_id)) { 537 if (unlikely(info->ack_frame_id)) {
@@ -589,7 +571,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
589 /* send frame to monitor interfaces now */ 571 /* send frame to monitor interfaces now */
590 rtap_len = ieee80211_tx_radiotap_len(info); 572 rtap_len = ieee80211_tx_radiotap_len(info);
591 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { 573 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
592 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n"); 574 pr_err("ieee80211_tx_status: headroom too small\n");
593 dev_kfree_skb(skb); 575 dev_kfree_skb(skb);
594 return; 576 return;
595 } 577 }
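
Note on the status.c hunk above: the off-channel work-list bookkeeping is deleted, `acked` is computed once up front, and cfg80211_mgmt_tx_status() now takes the wireless_dev (skb->dev->ieee80211_ptr) rather than the net_device. The diff's own TODO flags the remaining limitation: non-netdev frame TX cannot use skb->dev. The reporting boils down to this sketch (names as in the diff):

	/* sketch of the simplified status report */
	bool acked = info->flags & IEEE80211_TX_STAT_ACK;

	cfg80211_mgmt_tx_status(skb->dev->ieee80211_ptr, (unsigned long)skb,
				skb->data, skb->len, acked, GFP_ATOMIC);
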
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 51077a956a83..57e14d59e12f 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -260,17 +260,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
260 keyid = pos[3]; 260 keyid = pos[3];
261 iv32 = get_unaligned_le32(pos + 4); 261 iv32 = get_unaligned_le32(pos + 4);
262 pos += 8; 262 pos += 8;
263#ifdef CONFIG_MAC80211_TKIP_DEBUG
264 {
265 int i;
266 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
267 for (i = 0; i < payload_len; i++)
268 printk(" %02x", payload[i]);
269 printk("\n");
270 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
271 iv16, iv32);
272 }
273#endif
274 263
275 if (!(keyid & (1 << 5))) 264 if (!(keyid & (1 << 5)))
276 return TKIP_DECRYPT_NO_EXT_IV; 265 return TKIP_DECRYPT_NO_EXT_IV;
@@ -281,16 +270,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
281 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && 270 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
282 (iv32 < key->u.tkip.rx[queue].iv32 || 271 (iv32 < key->u.tkip.rx[queue].iv32 ||
283 (iv32 == key->u.tkip.rx[queue].iv32 && 272 (iv32 == key->u.tkip.rx[queue].iv32 &&
284 iv16 <= key->u.tkip.rx[queue].iv16))) { 273 iv16 <= key->u.tkip.rx[queue].iv16)))
285#ifdef CONFIG_MAC80211_TKIP_DEBUG
286 printk(KERN_DEBUG "TKIP replay detected for RX frame from "
287 "%pM (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
288 ta,
289 iv32, iv16, key->u.tkip.rx[queue].iv32,
290 key->u.tkip.rx[queue].iv16);
291#endif
292 return TKIP_DECRYPT_REPLAY; 274 return TKIP_DECRYPT_REPLAY;
293 }
294 275
295 if (only_iv) { 276 if (only_iv) {
296 res = TKIP_DECRYPT_OK; 277 res = TKIP_DECRYPT_OK;
@@ -302,22 +283,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
302 key->u.tkip.rx[queue].iv32 != iv32) { 283 key->u.tkip.rx[queue].iv32 != iv32) {
303 /* IV16 wrapped around - perform TKIP phase 1 */ 284 /* IV16 wrapped around - perform TKIP phase 1 */
304 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
305#ifdef CONFIG_MAC80211_TKIP_DEBUG
306 {
307 int i;
308 u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
309 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%pM"
310 " TK=", ta);
311 for (i = 0; i < 16; i++)
312 printk("%02x ",
313 key->conf.key[key_offset + i]);
314 printk("\n");
315 printk(KERN_DEBUG "TKIP decrypt: P1K=");
316 for (i = 0; i < 5; i++)
317 printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
318 printk("\n");
319 }
320#endif
321 } 286 }
322 if (key->local->ops->update_tkip_key && 287 if (key->local->ops->update_tkip_key &&
323 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 288 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
@@ -333,15 +298,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
333 } 298 }
334 299
335 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 300 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
336#ifdef CONFIG_MAC80211_TKIP_DEBUG
337 {
338 int i;
339 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
340 for (i = 0; i < 16; i++)
341 printk("%02x ", rc4key[i]);
342 printk("\n");
343 }
344#endif
345 301
346 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); 302 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
347 done: 303 done:
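
Note on the tkip.c hunks above: the CONFIG_MAC80211_TKIP_DEBUG hexdump blocks are removed outright and nothing in this file replaces them. For ad-hoc debugging of the same data, the generic hexdump helper covers what the deleted hand-rolled printk loops did, e.g. (not part of the patch; payload/payload_len as in the removed code):

	print_hex_dump_bytes("TKIP decrypt: ", DUMP_PREFIX_OFFSET,
			     payload, payload_len);
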
diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c
new file mode 100644
index 000000000000..386e45d8a958
--- /dev/null
+++ b/net/mac80211/trace.c
@@ -0,0 +1,75 @@
1/* bug in tracepoint.h, it should include this */
2#include <linux/module.h>
3
4/* sparse isn't too happy with all macros... */
5#ifndef __CHECKER__
6#include <net/cfg80211.h>
7#include "driver-ops.h"
8#include "debug.h"
9#define CREATE_TRACE_POINTS
10#include "trace.h"
11
12#ifdef CONFIG_MAC80211_MESSAGE_TRACING
13void __sdata_info(const char *fmt, ...)
14{
15 struct va_format vaf = {
16 .fmt = fmt,
17 };
18 va_list args;
19
20 va_start(args, fmt);
21 vaf.va = &args;
22
23 pr_info("%pV", &vaf);
24 trace_mac80211_info(&vaf);
25 va_end(args);
26}
27
28void __sdata_dbg(bool print, const char *fmt, ...)
29{
30 struct va_format vaf = {
31 .fmt = fmt,
32 };
33 va_list args;
34
35 va_start(args, fmt);
36 vaf.va = &args;
37
38 if (print)
39 pr_debug("%pV", &vaf);
40 trace_mac80211_dbg(&vaf);
41 va_end(args);
42}
43
44void __sdata_err(const char *fmt, ...)
45{
46 struct va_format vaf = {
47 .fmt = fmt,
48 };
49 va_list args;
50
51 va_start(args, fmt);
52 vaf.va = &args;
53
54 pr_err("%pV", &vaf);
55 trace_mac80211_err(&vaf);
56 va_end(args);
57}
58
59void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
60{
61 struct va_format vaf = {
62 .fmt = fmt,
63 };
64 va_list args;
65
66 va_start(args, fmt);
67 vaf.va = &args;
68
69 if (print)
70 wiphy_dbg(wiphy, "%pV", &vaf);
71 trace_mac80211_dbg(&vaf);
72 va_end(args);
73}
74#endif
75#endif
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/trace.h
index 6de00b2c268c..c6d33b55b2df 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/trace.h
@@ -306,7 +306,8 @@ TRACE_EVENT(drv_bss_info_changed,
306 __field(u8, dtimper) 306 __field(u8, dtimper)
307 __field(u16, bcnint) 307 __field(u16, bcnint)
308 __field(u16, assoc_cap) 308 __field(u16, assoc_cap)
309 __field(u64, timestamp) 309 __field(u64, sync_tsf)
310 __field(u32, sync_device_ts)
310 __field(u32, basic_rates) 311 __field(u32, basic_rates)
311 __field(u32, changed) 312 __field(u32, changed)
312 __field(bool, enable_beacon) 313 __field(bool, enable_beacon)
@@ -325,7 +326,8 @@ TRACE_EVENT(drv_bss_info_changed,
325 __entry->dtimper = info->dtim_period; 326 __entry->dtimper = info->dtim_period;
326 __entry->bcnint = info->beacon_int; 327 __entry->bcnint = info->beacon_int;
327 __entry->assoc_cap = info->assoc_capability; 328 __entry->assoc_cap = info->assoc_capability;
328 __entry->timestamp = info->last_tsf; 329 __entry->sync_tsf = info->sync_tsf;
330 __entry->sync_device_ts = info->sync_device_ts;
329 __entry->basic_rates = info->basic_rates; 331 __entry->basic_rates = info->basic_rates;
330 __entry->enable_beacon = info->enable_beacon; 332 __entry->enable_beacon = info->enable_beacon;
331 __entry->ht_operation_mode = info->ht_operation_mode; 333 __entry->ht_operation_mode = info->ht_operation_mode;
@@ -1218,6 +1220,39 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
1218 TP_ARGS(local, sta, tids, num_frames, reason, more_data) 1220 TP_ARGS(local, sta, tids, num_frames, reason, more_data)
1219); 1221);
1220 1222
1223TRACE_EVENT(drv_get_rssi,
1224 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta,
1225 s8 rssi, int ret),
1226
1227 TP_ARGS(local, sta, rssi, ret),
1228
1229 TP_STRUCT__entry(
1230 LOCAL_ENTRY
1231 STA_ENTRY
1232 __field(s8, rssi)
1233 __field(int, ret)
1234 ),
1235
1236 TP_fast_assign(
1237 LOCAL_ASSIGN;
1238 STA_ASSIGN;
1239 __entry->rssi = rssi;
1240 __entry->ret = ret;
1241 ),
1242
1243 TP_printk(
1244 LOCAL_PR_FMT STA_PR_FMT " rssi:%d ret:%d",
1245 LOCAL_PR_ARG, STA_PR_ARG, __entry->rssi, __entry->ret
1246 )
1247);
1248
1249DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
1250 TP_PROTO(struct ieee80211_local *local,
1251 struct ieee80211_sub_if_data *sdata),
1252
1253 TP_ARGS(local, sdata)
1254);
1255
1221/* 1256/*
1222 * Tracing for API calls that drivers call. 1257 * Tracing for API calls that drivers call.
1223 */ 1258 */
@@ -1606,10 +1641,49 @@ TRACE_EVENT(stop_queue,
1606 LOCAL_PR_ARG, __entry->queue, __entry->reason 1641 LOCAL_PR_ARG, __entry->queue, __entry->reason
1607 ) 1642 )
1608); 1643);
1644
1645#ifdef CONFIG_MAC80211_MESSAGE_TRACING
1646#undef TRACE_SYSTEM
1647#define TRACE_SYSTEM mac80211_msg
1648
1649#define MAX_MSG_LEN 100
1650
1651DECLARE_EVENT_CLASS(mac80211_msg_event,
1652 TP_PROTO(struct va_format *vaf),
1653
1654 TP_ARGS(vaf),
1655
1656 TP_STRUCT__entry(
1657 __dynamic_array(char, msg, MAX_MSG_LEN)
1658 ),
1659
1660 TP_fast_assign(
1661 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
1662 MAX_MSG_LEN, vaf->fmt,
1663 *vaf->va) >= MAX_MSG_LEN);
1664 ),
1665
1666 TP_printk("%s", __get_str(msg))
1667);
1668
1669DEFINE_EVENT(mac80211_msg_event, mac80211_info,
1670 TP_PROTO(struct va_format *vaf),
1671 TP_ARGS(vaf)
1672);
1673DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
1674 TP_PROTO(struct va_format *vaf),
1675 TP_ARGS(vaf)
1676);
1677DEFINE_EVENT(mac80211_msg_event, mac80211_err,
1678 TP_PROTO(struct va_format *vaf),
1679 TP_ARGS(vaf)
1680);
1681#endif
1682
1609#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 1683#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
1610 1684
1611#undef TRACE_INCLUDE_PATH 1685#undef TRACE_INCLUDE_PATH
1612#define TRACE_INCLUDE_PATH . 1686#define TRACE_INCLUDE_PATH .
1613#undef TRACE_INCLUDE_FILE 1687#undef TRACE_INCLUDE_FILE
1614#define TRACE_INCLUDE_FILE driver-trace 1688#define TRACE_INCLUDE_FILE trace
1615#include <trace/define_trace.h> 1689#include <trace/define_trace.h>
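
Note on the trace.c/trace.h changes above: driver-trace.h is renamed to trace.h, and a new mac80211_msg trace system carries the messages emitted by the __sdata_* helpers in trace.c. Each helper always fires its tracepoint and only optionally prints; TP_fast_assign() truncates at MAX_MSG_LEN (100 bytes) and WARN_ON_ONCEs on overflow, so messages should stay short. Usage sketch, with the signature as defined in trace.c above:

	/* always traced; printed via pr_debug() only when 'print' is true */
	__sdata_dbg(true, "%s: scan completed, aborted=%d\n",
		    sdata->name, aborted);
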
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e453212fa17f..acf712ffb5e6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -140,6 +140,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
140 if (r->flags & IEEE80211_RATE_MANDATORY_A) 140 if (r->flags & IEEE80211_RATE_MANDATORY_A)
141 mrate = r->bitrate; 141 mrate = r->bitrate;
142 break; 142 break;
143 case IEEE80211_BAND_60GHZ:
144 /* TODO, for now fall through */
143 case IEEE80211_NUM_BANDS: 145 case IEEE80211_NUM_BANDS:
144 WARN_ON(1); 146 WARN_ON(1);
145 break; 147 break;
@@ -175,12 +177,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
175 return cpu_to_le16(dur); 177 return cpu_to_le16(dur);
176} 178}
177 179
178static inline int is_ieee80211_device(struct ieee80211_local *local,
179 struct net_device *dev)
180{
181 return local == wdev_priv(dev->ieee80211_ptr);
182}
183
184/* tx handlers */ 180/* tx handlers */
185static ieee80211_tx_result debug_noinline 181static ieee80211_tx_result debug_noinline
186ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) 182ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
@@ -297,10 +293,10 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
297 if (unlikely(!assoc && 293 if (unlikely(!assoc &&
298 ieee80211_is_data(hdr->frame_control))) { 294 ieee80211_is_data(hdr->frame_control))) {
299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 295#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
300 printk(KERN_DEBUG "%s: dropped data frame to not " 296 sdata_info(tx->sdata,
301 "associated station %pM\n", 297 "dropped data frame to not associated station %pM\n",
302 tx->sdata->name, hdr->addr1); 298 hdr->addr1);
303#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 299#endif
304 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 300 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
305 return TX_DROP; 301 return TX_DROP;
306 } 302 }
@@ -367,10 +363,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
367 rcu_read_unlock(); 363 rcu_read_unlock();
368 364
369 local->total_ps_buffered = total; 365 local->total_ps_buffered = total;
370#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 366 ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
371 wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
372 purged);
373#endif
374} 367}
375 368
376static ieee80211_tx_result 369static ieee80211_tx_result
@@ -412,10 +405,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
412 purge_old_ps_buffers(tx->local); 405 purge_old_ps_buffers(tx->local);
413 406
414 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { 407 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
415#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 408 ps_dbg(tx->sdata,
416 net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n", 409 "BC TX buffer full - dropping the oldest frame\n");
417 tx->sdata->name);
418#endif
419 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 410 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
420 } else 411 } else
421 tx->local->total_ps_buffered++; 412 tx->local->total_ps_buffered++;
@@ -466,18 +457,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
466 return TX_CONTINUE; 457 return TX_CONTINUE;
467 } 458 }
468 459
469#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 460 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
470 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
471 sta->sta.addr, sta->sta.aid, ac); 461 sta->sta.addr, sta->sta.aid, ac);
472#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
473 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 462 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
474 purge_old_ps_buffers(tx->local); 463 purge_old_ps_buffers(tx->local);
475 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { 464 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
476 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); 465 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
477#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 466 ps_dbg(tx->sdata,
478 net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n", 467 "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
479 tx->sdata->name, sta->sta.addr, ac); 468 sta->sta.addr, ac);
480#endif
481 dev_kfree_skb(old); 469 dev_kfree_skb(old);
482 } else 470 } else
483 tx->local->total_ps_buffered++; 471 tx->local->total_ps_buffered++;
@@ -499,14 +487,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
499 sta_info_recalc_tim(sta); 487 sta_info_recalc_tim(sta);
500 488
501 return TX_QUEUED; 489 return TX_QUEUED;
490 } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
491 ps_dbg(tx->sdata,
492 "STA %pM in PS mode, but polling/in SP -> send frame\n",
493 sta->sta.addr);
502 } 494 }
503#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
504 else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
505 printk(KERN_DEBUG
506 "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
507 tx->sdata->name, sta->sta.addr);
508 }
509#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
510 495
511 return TX_CONTINUE; 496 return TX_CONTINUE;
512} 497}
@@ -538,7 +523,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
538static ieee80211_tx_result debug_noinline 523static ieee80211_tx_result debug_noinline
539ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 524ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
540{ 525{
541 struct ieee80211_key *key = NULL; 526 struct ieee80211_key *key;
542 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 527 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
543 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 528 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
544 529
@@ -557,16 +542,23 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
557 else if (!is_multicast_ether_addr(hdr->addr1) && 542 else if (!is_multicast_ether_addr(hdr->addr1) &&
558 (key = rcu_dereference(tx->sdata->default_unicast_key))) 543 (key = rcu_dereference(tx->sdata->default_unicast_key)))
559 tx->key = key; 544 tx->key = key;
560 else if (tx->sdata->drop_unencrypted && 545 else if (info->flags & IEEE80211_TX_CTL_INJECTED)
561 (tx->skb->protocol != tx->sdata->control_port_protocol) && 546 tx->key = NULL;
562 !(info->flags & IEEE80211_TX_CTL_INJECTED) && 547 else if (!tx->sdata->drop_unencrypted)
563 (!ieee80211_is_robust_mgmt_frame(hdr) || 548 tx->key = NULL;
564 (ieee80211_is_action(hdr->frame_control) && 549 else if (tx->skb->protocol == tx->sdata->control_port_protocol)
565 tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) { 550 tx->key = NULL;
551 else if (ieee80211_is_robust_mgmt_frame(hdr) &&
552 !(ieee80211_is_action(hdr->frame_control) &&
553 tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
554 tx->key = NULL;
555 else if (ieee80211_is_mgmt(hdr->frame_control) &&
556 !ieee80211_is_robust_mgmt_frame(hdr))
557 tx->key = NULL;
558 else {
566 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 559 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
567 return TX_DROP; 560 return TX_DROP;
568 } else 561 }
569 tx->key = NULL;
570 562
571 if (tx->key) { 563 if (tx->key) {
572 bool skip_hw = false; 564 bool skip_hw = false;
@@ -974,8 +966,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
974 info->control.rates[1].idx = -1; 966 info->control.rates[1].idx = -1;
975 info->control.rates[2].idx = -1; 967 info->control.rates[2].idx = -1;
976 info->control.rates[3].idx = -1; 968 info->control.rates[3].idx = -1;
977 info->control.rates[4].idx = -1; 969 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
978 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
979 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 970 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
980 } else { 971 } else {
981 hdr->frame_control &= ~morefrags; 972 hdr->frame_control &= ~morefrags;
@@ -1310,11 +1301,8 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
1310 break; 1301 break;
1311 } 1302 }
1312 1303
1313 if (local->ops->tx_frags) 1304 result = ieee80211_tx_frags(local, vif, pubsta, skbs,
1314 drv_tx_frags(local, vif, pubsta, skbs); 1305 txpending);
1315 else
1316 result = ieee80211_tx_frags(local, vif, pubsta, skbs,
1317 txpending);
1318 1306
1319 ieee80211_tpt_led_trig_tx(local, fc, led_len); 1307 ieee80211_tpt_led_trig_tx(local, fc, led_len);
1320 ieee80211_led_tx(local, 1); 1308 ieee80211_led_tx(local, 1);
@@ -1836,6 +1824,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1836 /* RA TA mDA mSA AE:DA SA */ 1824 /* RA TA mDA mSA AE:DA SA */
1837 mesh_da = mppath->mpp; 1825 mesh_da = mppath->mpp;
1838 is_mesh_mcast = 0; 1826 is_mesh_mcast = 0;
1827 } else if (mpath) {
1828 mesh_da = mpath->dst;
1829 is_mesh_mcast = 0;
1839 } else { 1830 } else {
1840 /* DA TA mSA AE:SA */ 1831 /* DA TA mSA AE:SA */
1841 mesh_da = bcast; 1832 mesh_da = bcast;
@@ -1965,7 +1956,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1965 (cpu_to_be16(ethertype) != sdata->control_port_protocol || 1956 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1966 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { 1957 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1967#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1958#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1968 net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", 1959 net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
1969 dev->name, hdr.addr1); 1960 dev->name, hdr.addr1);
1970#endif 1961#endif
1971 1962
@@ -2437,9 +2428,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2437 *pos++ = WLAN_EID_SSID; 2428 *pos++ = WLAN_EID_SSID;
2438 *pos++ = 0x0; 2429 *pos++ = 0x0;
2439 2430
2440 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || 2431 if (ieee80211_add_srates_ie(sdata, skb, true) ||
2441 mesh_add_ds_params_ie(skb, sdata) || 2432 mesh_add_ds_params_ie(skb, sdata) ||
2442 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || 2433 ieee80211_add_ext_srates_ie(sdata, skb, true) ||
2443 mesh_add_rsn_ie(skb, sdata) || 2434 mesh_add_rsn_ie(skb, sdata) ||
2444 mesh_add_ht_cap_ie(skb, sdata) || 2435 mesh_add_ht_cap_ie(skb, sdata) ||
2445 mesh_add_ht_oper_ie(skb, sdata) || 2436 mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2733,7 +2724,7 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2733void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 2724void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
2734 struct sk_buff *skb, int tid) 2725 struct sk_buff *skb, int tid)
2735{ 2726{
2736 int ac = ieee802_1d_to_ac[tid]; 2727 int ac = ieee802_1d_to_ac[tid & 7];
2737 2728
2738 skb_set_mac_header(skb, 0); 2729 skb_set_mac_header(skb, 0);
2739 skb_set_network_header(skb, 0); 2730 skb_set_network_header(skb, 0);
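
Note on the tx.c hunks above: ieee80211_tx_h_select_key() is flattened from one dense compound test into an if/else-if chain where every branch that may legitimately go out unprotected sets tx->key = NULL and only the residual case is dropped; the fragmentation path now asserts IEEE80211_TX_MAX_RATES == 4, and ieee80211_tx_skb_tid() masks the TID (tid & 7) before indexing the 8-entry ieee802_1d_to_ac[] table. The chain restated with shorthand predicates (the identifiers below are placeholders for the diff's expressions, not real variables):

	/* shorthand restatement of the new key-selection chain */
	if (injected || !drop_unencrypted || is_control_port ||
	    (robust_mgmt && !mfp_action) || (is_mgmt && !robust_mgmt))
		tx->key = NULL;		/* transmit unprotected */
	else
		return TX_DROP;		/* refuse unencrypted frame */
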
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8dd4712620ff..39b82fee4904 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -268,6 +268,10 @@ EXPORT_SYMBOL(ieee80211_ctstoself_duration);
268void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) 268void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
269{ 269{
270 struct ieee80211_sub_if_data *sdata; 270 struct ieee80211_sub_if_data *sdata;
271 int n_acs = IEEE80211_NUM_ACS;
272
273 if (local->hw.queues < IEEE80211_NUM_ACS)
274 n_acs = 1;
271 275
272 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 276 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
273 int ac; 277 int ac;
@@ -279,7 +283,7 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
279 local->queue_stop_reasons[sdata->vif.cab_queue] != 0) 283 local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
280 continue; 284 continue;
281 285
282 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 286 for (ac = 0; ac < n_acs; ac++) {
283 int ac_queue = sdata->vif.hw_queue[ac]; 287 int ac_queue = sdata->vif.hw_queue[ac];
284 288
285 if (ac_queue == queue || 289 if (ac_queue == queue ||
@@ -341,6 +345,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
341{ 345{
342 struct ieee80211_local *local = hw_to_local(hw); 346 struct ieee80211_local *local = hw_to_local(hw);
343 struct ieee80211_sub_if_data *sdata; 347 struct ieee80211_sub_if_data *sdata;
348 int n_acs = IEEE80211_NUM_ACS;
344 349
345 trace_stop_queue(local, queue, reason); 350 trace_stop_queue(local, queue, reason);
346 351
@@ -352,11 +357,14 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
352 357
353 __set_bit(reason, &local->queue_stop_reasons[queue]); 358 __set_bit(reason, &local->queue_stop_reasons[queue]);
354 359
360 if (local->hw.queues < IEEE80211_NUM_ACS)
361 n_acs = 1;
362
355 rcu_read_lock(); 363 rcu_read_lock();
356 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 364 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
357 int ac; 365 int ac;
358 366
359 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 367 for (ac = 0; ac < n_acs; ac++) {
360 if (sdata->vif.hw_queue[ac] == queue || 368 if (sdata->vif.hw_queue[ac] == queue ||
361 sdata->vif.cab_queue == queue) 369 sdata->vif.cab_queue == queue)
362 netif_stop_subqueue(sdata->dev, ac); 370 netif_stop_subqueue(sdata->dev, ac);
@@ -521,6 +529,11 @@ void ieee80211_iterate_active_interfaces(
521 &sdata->vif); 529 &sdata->vif);
522 } 530 }
523 531
532 sdata = rcu_dereference_protected(local->monitor_sdata,
533 lockdep_is_held(&local->iflist_mtx));
534 if (sdata)
535 iterator(data, sdata->vif.addr, &sdata->vif);
536
524 mutex_unlock(&local->iflist_mtx); 537 mutex_unlock(&local->iflist_mtx);
525} 538}
526EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); 539EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
@@ -549,6 +562,10 @@ void ieee80211_iterate_active_interfaces_atomic(
549 &sdata->vif); 562 &sdata->vif);
550 } 563 }
551 564
565 sdata = rcu_dereference(local->monitor_sdata);
566 if (sdata)
567 iterator(data, sdata->vif.addr, &sdata->vif);
568
552 rcu_read_unlock(); 569 rcu_read_unlock();
553} 570}
554EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); 571EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
@@ -804,7 +821,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
804 struct ieee80211_local *local = sdata->local; 821 struct ieee80211_local *local = sdata->local;
805 struct ieee80211_tx_queue_params qparam; 822 struct ieee80211_tx_queue_params qparam;
806 int ac; 823 int ac;
807 bool use_11b; 824 bool use_11b, enable_qos;
808 int aCWmin, aCWmax; 825 int aCWmin, aCWmax;
809 826
810 if (!local->ops->conf_tx) 827 if (!local->ops->conf_tx)
@@ -818,6 +835,13 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
818 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 835 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
819 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 836 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
820 837
838 /*
839 * By default disable QoS in STA mode for old access points, which do
 840 * not support 802.11e. New APs will provide proper queue parameters
841 * that we will configure later.
842 */
843 enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
844
821 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 845 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
822 /* Set defaults according to 802.11-2007 Table 7-37 */ 846 /* Set defaults according to 802.11-2007 Table 7-37 */
823 aCWmax = 1023; 847 aCWmax = 1023;
@@ -826,38 +850,47 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
826 else 850 else
827 aCWmin = 15; 851 aCWmin = 15;
828 852
829 switch (ac) { 853 if (enable_qos) {
830 case IEEE80211_AC_BK: 854 switch (ac) {
831 qparam.cw_max = aCWmax; 855 case IEEE80211_AC_BK:
832 qparam.cw_min = aCWmin; 856 qparam.cw_max = aCWmax;
833 qparam.txop = 0; 857 qparam.cw_min = aCWmin;
834 qparam.aifs = 7; 858 qparam.txop = 0;
835 break; 859 qparam.aifs = 7;
836 default: /* never happens but let's not leave undefined */ 860 break;
837 case IEEE80211_AC_BE: 861 /* never happens but let's not leave undefined */
862 default:
863 case IEEE80211_AC_BE:
864 qparam.cw_max = aCWmax;
865 qparam.cw_min = aCWmin;
866 qparam.txop = 0;
867 qparam.aifs = 3;
868 break;
869 case IEEE80211_AC_VI:
870 qparam.cw_max = aCWmin;
871 qparam.cw_min = (aCWmin + 1) / 2 - 1;
872 if (use_11b)
873 qparam.txop = 6016/32;
874 else
875 qparam.txop = 3008/32;
876 qparam.aifs = 2;
877 break;
878 case IEEE80211_AC_VO:
879 qparam.cw_max = (aCWmin + 1) / 2 - 1;
880 qparam.cw_min = (aCWmin + 1) / 4 - 1;
881 if (use_11b)
882 qparam.txop = 3264/32;
883 else
884 qparam.txop = 1504/32;
885 qparam.aifs = 2;
886 break;
887 }
888 } else {
 889 /* Configure old 802.11b/g medium access rules. */
838 qparam.cw_max = aCWmax; 890 qparam.cw_max = aCWmax;
839 qparam.cw_min = aCWmin; 891 qparam.cw_min = aCWmin;
840 qparam.txop = 0; 892 qparam.txop = 0;
841 qparam.aifs = 3;
842 break;
843 case IEEE80211_AC_VI:
844 qparam.cw_max = aCWmin;
845 qparam.cw_min = (aCWmin + 1) / 2 - 1;
846 if (use_11b)
847 qparam.txop = 6016/32;
848 else
849 qparam.txop = 3008/32;
850 qparam.aifs = 2;
851 break;
852 case IEEE80211_AC_VO:
853 qparam.cw_max = (aCWmin + 1) / 2 - 1;
854 qparam.cw_min = (aCWmin + 1) / 4 - 1;
855 if (use_11b)
856 qparam.txop = 3264/32;
857 else
858 qparam.txop = 1504/32;
859 qparam.aifs = 2; 893 qparam.aifs = 2;
860 break;
861 } 894 }
862 895
863 qparam.uapsd = false; 896 qparam.uapsd = false;
@@ -866,12 +899,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
866 drv_conf_tx(local, sdata, ac, &qparam); 899 drv_conf_tx(local, sdata, ac, &qparam);
867 } 900 }
868 901
869 /* after reinitialize QoS TX queues setting to default,
870 * disable QoS at all */
871
872 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 902 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
873 sdata->vif.bss_conf.qos = 903 sdata->vif.bss_conf.qos = enable_qos;
874 sdata->vif.type != NL80211_IFTYPE_STATION;
875 if (bss_notify) 904 if (bss_notify)
876 ieee80211_bss_info_change_notify(sdata, 905 ieee80211_bss_info_change_notify(sdata,
877 BSS_CHANGED_QOS); 906 BSS_CHANGED_QOS);
@@ -979,6 +1008,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
979 int ext_rates_len; 1008 int ext_rates_len;
980 1009
981 sband = local->hw.wiphy->bands[band]; 1010 sband = local->hw.wiphy->bands[band];
1011 if (WARN_ON_ONCE(!sband))
1012 return 0;
982 1013
983 pos = buffer; 1014 pos = buffer;
984 1015
@@ -1060,6 +1091,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1060 pos += noffset - offset; 1091 pos += noffset - offset;
1061 } 1092 }
1062 1093
1094 if (sband->vht_cap.vht_supported)
1095 pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
1096 sband->vht_cap.cap);
1097
1063 return pos - buffer; 1098 return pos - buffer;
1064} 1099}
1065 1100
@@ -1267,14 +1302,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1267 /* add STAs back */ 1302 /* add STAs back */
1268 mutex_lock(&local->sta_mtx); 1303 mutex_lock(&local->sta_mtx);
1269 list_for_each_entry(sta, &local->sta_list, list) { 1304 list_for_each_entry(sta, &local->sta_list, list) {
1270 if (sta->uploaded) { 1305 enum ieee80211_sta_state state;
1271 enum ieee80211_sta_state state;
1272 1306
1273 for (state = IEEE80211_STA_NOTEXIST; 1307 if (!sta->uploaded)
1274 state < sta->sta_state; state++) 1308 continue;
1275 WARN_ON(drv_sta_state(local, sta->sdata, sta, 1309
1276 state, state + 1)); 1310 /* AP-mode stations will be added later */
1277 } 1311 if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
1312 continue;
1313
1314 for (state = IEEE80211_STA_NOTEXIST;
1315 state < sta->sta_state; state++)
1316 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1317 state + 1));
1278 } 1318 }
1279 mutex_unlock(&local->sta_mtx); 1319 mutex_unlock(&local->sta_mtx);
1280 1320
@@ -1371,12 +1411,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1371 } 1411 }
1372 } 1412 }
1373 1413
1414 /* APs are now beaconing, add back stations */
1415 mutex_lock(&local->sta_mtx);
1416 list_for_each_entry(sta, &local->sta_list, list) {
1417 enum ieee80211_sta_state state;
1418
1419 if (!sta->uploaded)
1420 continue;
1421
1422 if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
1423 continue;
1424
1425 for (state = IEEE80211_STA_NOTEXIST;
1426 state < sta->sta_state; state++)
1427 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1428 state + 1));
1429 }
1430 mutex_unlock(&local->sta_mtx);
1431
1374 /* add back keys */ 1432 /* add back keys */
1375 list_for_each_entry(sdata, &local->interfaces, list) 1433 list_for_each_entry(sdata, &local->interfaces, list)
1376 if (ieee80211_sdata_running(sdata)) 1434 if (ieee80211_sdata_running(sdata))
1377 ieee80211_enable_keys(sdata); 1435 ieee80211_enable_keys(sdata);
1378 1436
1379 wake_up: 1437 wake_up:
1438 local->in_reconfig = false;
1439 barrier();
1440
1380 /* 1441 /*
1381 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1442 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1382 * sessions can be established after a resume. 1443 * sessions can be established after a resume.
@@ -1661,6 +1722,27 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1661 return pos; 1722 return pos;
1662} 1723}
1663 1724
1725u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1726 u32 cap)
1727{
1728 __le32 tmp;
1729
1730 *pos++ = WLAN_EID_VHT_CAPABILITY;
1731 *pos++ = sizeof(struct ieee80211_vht_capabilities);
1732 memset(pos, 0, sizeof(struct ieee80211_vht_capabilities));
1733
1734 /* capability flags */
1735 tmp = cpu_to_le32(cap);
1736 memcpy(pos, &tmp, sizeof(u32));
1737 pos += sizeof(u32);
1738
1739 /* VHT MCS set */
1740 memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
1741 pos += sizeof(vht_cap->vht_mcs);
1742
1743 return pos;
1744}
1745
1664u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1746u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1665 struct ieee80211_channel *channel, 1747 struct ieee80211_channel *channel,
1666 enum nl80211_channel_type channel_type, 1748 enum nl80211_channel_type channel_type,
@@ -1726,15 +1808,14 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1726 return channel_type; 1808 return channel_type;
1727} 1809}
1728 1810
1729int ieee80211_add_srates_ie(struct ieee80211_vif *vif, 1811int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1730 struct sk_buff *skb, bool need_basic) 1812 struct sk_buff *skb, bool need_basic)
1731{ 1813{
1732 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1733 struct ieee80211_local *local = sdata->local; 1814 struct ieee80211_local *local = sdata->local;
1734 struct ieee80211_supported_band *sband; 1815 struct ieee80211_supported_band *sband;
1735 int rate; 1816 int rate;
1736 u8 i, rates, *pos; 1817 u8 i, rates, *pos;
1737 u32 basic_rates = vif->bss_conf.basic_rates; 1818 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1738 1819
1739 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1820 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1740 rates = sband->n_bitrates; 1821 rates = sband->n_bitrates;
@@ -1758,15 +1839,14 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
1758 return 0; 1839 return 0;
1759} 1840}
1760 1841
1761int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, 1842int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1762 struct sk_buff *skb, bool need_basic) 1843 struct sk_buff *skb, bool need_basic)
1763{ 1844{
1764 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1765 struct ieee80211_local *local = sdata->local; 1845 struct ieee80211_local *local = sdata->local;
1766 struct ieee80211_supported_band *sband; 1846 struct ieee80211_supported_band *sband;
1767 int rate; 1847 int rate;
1768 u8 i, exrates, *pos; 1848 u8 i, exrates, *pos;
1769 u32 basic_rates = vif->bss_conf.basic_rates; 1849 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1770 1850
1771 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1851 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1772 exrates = sband->n_bitrates; 1852 exrates = sband->n_bitrates;
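
The ieee80211_ie_build_vht_cap() helper added in this file serializes the VHT Capabilities element as element ID, a 12-byte length, a little-endian 32-bit capability word, and the 8-byte supported-MCS set. A minimal user-space sketch of the same wire layout follows; the EID value 191 is the standard 802.11ac assignment, and the capability word and MCS bytes in main() are purely illustrative, not taken from the kernel:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Assumed values, mirroring the kernel definitions this diff relies on. */
#define WLAN_EID_VHT_CAPABILITY 191   /* per IEEE 802.11ac */
#define VHT_CAP_IE_LEN          12    /* 4-byte cap word + 8-byte MCS set */

static uint8_t *build_vht_cap_ie(uint8_t *pos, uint32_t cap,
                                 const uint8_t mcs_set[8])
{
        *pos++ = WLAN_EID_VHT_CAPABILITY;
        *pos++ = VHT_CAP_IE_LEN;

        /* capability flags, little-endian on the wire */
        for (int i = 0; i < 4; i++)
                *pos++ = (cap >> (8 * i)) & 0xff;

        /* supported VHT MCS set follows verbatim */
        memcpy(pos, mcs_set, 8);
        return pos + 8;
}

int main(void)
{
        uint8_t buf[16], mcs[8] = { 0xfa, 0xff, 0, 0, 0xfa, 0xff, 0, 0 };
        uint8_t *end = build_vht_cap_ie(buf, 0x339071b0, mcs);

        printf("IE length: %td bytes\n", end - buf); /* 14 */
        return 0;
}
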
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index c3d643a6536c..cea06e9f26f4 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,11 +52,11 @@ static int wme_downgrade_ac(struct sk_buff *skb)
52 } 52 }
53} 53}
54 54
55static u16 ieee80211_downgrade_queue(struct ieee80211_local *local, 55static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
56 struct sk_buff *skb) 56 struct sk_buff *skb)
57{ 57{
58 /* in case we are a client verify acm is not set for this ac */ 58 /* in case we are a client verify acm is not set for this ac */
59 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 59 while (unlikely(sdata->wmm_acm & BIT(skb->priority))) {
60 if (wme_downgrade_ac(skb)) { 60 if (wme_downgrade_ac(skb)) {
61 /* 61 /*
62 * This should not really happen. The AP has marked all 62 * This should not really happen. The AP has marked all
@@ -73,10 +73,11 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
73} 73}
74 74
75/* Indicate which queue to use for this fully formed 802.11 frame */ 75/* Indicate which queue to use for this fully formed 802.11 frame */
76u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 76u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
77 struct sk_buff *skb, 77 struct sk_buff *skb,
78 struct ieee80211_hdr *hdr) 78 struct ieee80211_hdr *hdr)
79{ 79{
80 struct ieee80211_local *local = sdata->local;
80 u8 *p; 81 u8 *p;
81 82
82 if (local->hw.queues < IEEE80211_NUM_ACS) 83 if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -94,7 +95,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
94 p = ieee80211_get_qos_ctl(hdr); 95 p = ieee80211_get_qos_ctl(hdr);
95 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; 96 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
96 97
97 return ieee80211_downgrade_queue(local, skb); 98 return ieee80211_downgrade_queue(sdata, skb);
98} 99}
99 100
100/* Indicate which queue to use. */ 101/* Indicate which queue to use. */
@@ -156,7 +157,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
156 * data frame has */ 157 * data frame has */
157 skb->priority = cfg80211_classify8021d(skb); 158 skb->priority = cfg80211_classify8021d(skb);
158 159
159 return ieee80211_downgrade_queue(local, skb); 160 return ieee80211_downgrade_queue(sdata, skb);
160} 161}
161 162
162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 163void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
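
The sdata-scoped wmm_acm bitmap consulted in ieee80211_downgrade_queue() implements the usual WMM admission-control fallback: if the AP mandates admission control for the access category a frame's 802.1d priority maps to, the frame is pushed down to the next lower AC until an unrestricted one is found. A self-contained sketch of that downgrade walk; the priority-to-AC table is the standard 802.1d mapping and the fallback priorities are assumptions matching its structure, not code copied from the kernel:

#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

/* 802.1d user priority (0-7) -> access category */
static const enum ac up_to_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

/* Walk down VO -> VI -> BE -> BK while the AP requires admission
 * control (acm_mask has a bit set per 802.1d priority). Returns the
 * AC to queue on, or -1 if even AC_BK is ACM-protected. */
static int downgrade_ac(unsigned acm_mask, int *prio)
{
        /* representative priority to fall back to for each AC */
        static const int fallback_prio[NUM_ACS] = { 5, 3, 2, -1 };

        while (acm_mask & (1u << *prio)) {
                enum ac cur = up_to_ac[*prio];

                if (fallback_prio[cur] < 0)
                        return -1;      /* nowhere left to go */
                *prio = fallback_prio[cur];
        }
        return up_to_ac[*prio];
}

int main(void)
{
        int prio = 6;                   /* voice */
        int ac = downgrade_ac(1u << 6 | 1u << 7, &prio); /* VO needs ACM */

        printf("queued on AC %d with priority %d\n", ac, prio); /* 1, 5 */
        return 0;
}
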
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index ca80818b7b66..7fea4bb8acbc 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,7 +15,7 @@
15 15
16extern const int ieee802_1d_to_ac[8]; 16extern const int ieee802_1d_to_ac[8];
17 17
18u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 18u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
19 struct sk_buff *skb, 19 struct sk_buff *skb,
20 struct ieee80211_hdr *hdr); 20 struct ieee80211_hdr *hdr);
21u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, 21u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
deleted file mode 100644
index b2650a9d45ff..000000000000
--- a/net/mac80211/work.c
+++ /dev/null
@@ -1,370 +0,0 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <linux/slab.h>
23#include <net/mac80211.h>
24#include <asm/unaligned.h>
25
26#include "ieee80211_i.h"
27#include "rate.h"
28#include "driver-ops.h"
29
30enum work_action {
31 WORK_ACT_NONE,
32 WORK_ACT_TIMEOUT,
33};
34
35
36/* utils */
37static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
38{
39 lockdep_assert_held(&local->mtx);
40}
41
42/*
43 * We can have multiple work items (and connection probing)
44 * scheduling this timer, but we need to take care to only
45 * reschedule it when it should fire _earlier_ than it was
46 * asked for before, or if it's not pending right now. This
47 * function ensures that. Note that it then is required to
48 * run this function for all timeouts after the first one
49 * has happened -- the work that runs from this timer will
50 * do that.
51 */
52static void run_again(struct ieee80211_local *local,
53 unsigned long timeout)
54{
55 ASSERT_WORK_MTX(local);
56
57 if (!timer_pending(&local->work_timer) ||
58 time_before(timeout, local->work_timer.expires))
59 mod_timer(&local->work_timer, timeout);
60}
61
62void free_work(struct ieee80211_work *wk)
63{
64 kfree_rcu(wk, rcu_head);
65}
66
67static enum work_action __must_check
68ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
69{
70 /*
71 * First time we run, do nothing -- the generic code will
72 * have switched to the right channel etc.
73 */
74 if (!wk->started) {
75 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
76
77 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
78 wk->chan, wk->chan_type,
79 wk->remain.duration, GFP_KERNEL);
80
81 return WORK_ACT_NONE;
82 }
83
84 return WORK_ACT_TIMEOUT;
85}
86
87static enum work_action __must_check
88ieee80211_offchannel_tx(struct ieee80211_work *wk)
89{
90 if (!wk->started) {
91 wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait);
92
93 /*
94 * After this, offchan_tx.frame remains but now is no
95 * longer a valid pointer -- we still need it as the
96 * cookie for canceling this work/status matching.
97 */
98 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
99
100 return WORK_ACT_NONE;
101 }
102
103 return WORK_ACT_TIMEOUT;
104}
105
106static void ieee80211_work_timer(unsigned long data)
107{
108 struct ieee80211_local *local = (void *) data;
109
110 if (local->quiescing)
111 return;
112
113 ieee80211_queue_work(&local->hw, &local->work_work);
114}
115
116static void ieee80211_work_work(struct work_struct *work)
117{
118 struct ieee80211_local *local =
119 container_of(work, struct ieee80211_local, work_work);
120 struct ieee80211_work *wk, *tmp;
121 LIST_HEAD(free_work);
122 enum work_action rma;
123 bool remain_off_channel = false;
124
125 /*
126 * ieee80211_queue_work() should have picked up most cases,
127 * here we'll pick the rest.
128 */
129 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
130 return;
131
132 mutex_lock(&local->mtx);
133
134 if (local->scanning) {
135 mutex_unlock(&local->mtx);
136 return;
137 }
138
139 ieee80211_recalc_idle(local);
140
141 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
142 bool started = wk->started;
143
144 /* mark work as started if it's on the current off-channel */
145 if (!started && local->tmp_channel &&
146 wk->chan == local->tmp_channel &&
147 wk->chan_type == local->tmp_channel_type) {
148 started = true;
149 wk->timeout = jiffies;
150 }
151
152 if (!started && !local->tmp_channel) {
153 ieee80211_offchannel_stop_vifs(local, true);
154
155 local->tmp_channel = wk->chan;
156 local->tmp_channel_type = wk->chan_type;
157
158 ieee80211_hw_config(local, 0);
159
160 started = true;
161 wk->timeout = jiffies;
162 }
163
164 /* don't try to work with items that aren't started */
165 if (!started)
166 continue;
167
168 if (time_is_after_jiffies(wk->timeout)) {
169 /*
170 * This work item isn't supposed to be worked on
171 * right now, but take care to adjust the timer
172 * properly.
173 */
174 run_again(local, wk->timeout);
175 continue;
176 }
177
178 switch (wk->type) {
179 default:
180 WARN_ON(1);
181 /* nothing */
182 rma = WORK_ACT_NONE;
183 break;
184 case IEEE80211_WORK_ABORT:
185 rma = WORK_ACT_TIMEOUT;
186 break;
187 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
188 rma = ieee80211_remain_on_channel_timeout(wk);
189 break;
190 case IEEE80211_WORK_OFFCHANNEL_TX:
191 rma = ieee80211_offchannel_tx(wk);
192 break;
193 }
194
195 wk->started = started;
196
197 switch (rma) {
198 case WORK_ACT_NONE:
199 /* might have changed the timeout */
200 run_again(local, wk->timeout);
201 break;
202 case WORK_ACT_TIMEOUT:
203 list_del_rcu(&wk->list);
204 synchronize_rcu();
205 list_add(&wk->list, &free_work);
206 break;
207 default:
208 WARN(1, "unexpected: %d", rma);
209 }
210 }
211
212 list_for_each_entry(wk, &local->work_list, list) {
213 if (!wk->started)
214 continue;
215 if (wk->chan != local->tmp_channel ||
216 wk->chan_type != local->tmp_channel_type)
217 continue;
218 remain_off_channel = true;
219 }
220
221 if (!remain_off_channel && local->tmp_channel) {
222 local->tmp_channel = NULL;
223 ieee80211_hw_config(local, 0);
224
225 ieee80211_offchannel_return(local, true);
226
227 /* give connection some time to breathe */
228 run_again(local, jiffies + HZ/2);
229 }
230
231 ieee80211_recalc_idle(local);
232 ieee80211_run_deferred_scan(local);
233
234 mutex_unlock(&local->mtx);
235
236 list_for_each_entry_safe(wk, tmp, &free_work, list) {
237 wk->done(wk, NULL);
238 list_del(&wk->list);
239 kfree(wk);
240 }
241}
242
243void ieee80211_add_work(struct ieee80211_work *wk)
244{
245 struct ieee80211_local *local;
246
247 if (WARN_ON(!wk->chan))
248 return;
249
250 if (WARN_ON(!wk->sdata))
251 return;
252
253 if (WARN_ON(!wk->done))
254 return;
255
256 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
257 return;
258
259 wk->started = false;
260
261 local = wk->sdata->local;
262 mutex_lock(&local->mtx);
263 list_add_tail(&wk->list, &local->work_list);
264 mutex_unlock(&local->mtx);
265
266 ieee80211_queue_work(&local->hw, &local->work_work);
267}
268
269void ieee80211_work_init(struct ieee80211_local *local)
270{
271 INIT_LIST_HEAD(&local->work_list);
272 setup_timer(&local->work_timer, ieee80211_work_timer,
273 (unsigned long)local);
274 INIT_WORK(&local->work_work, ieee80211_work_work);
275}
276
277void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
278{
279 struct ieee80211_local *local = sdata->local;
280 struct ieee80211_work *wk;
281 bool cleanup = false;
282
283 mutex_lock(&local->mtx);
284 list_for_each_entry(wk, &local->work_list, list) {
285 if (wk->sdata != sdata)
286 continue;
287 cleanup = true;
288 wk->type = IEEE80211_WORK_ABORT;
289 wk->started = true;
290 wk->timeout = jiffies;
291 }
292 mutex_unlock(&local->mtx);
293
294 /* run cleanups etc. */
295 if (cleanup)
296 ieee80211_work_work(&local->work_work);
297
298 mutex_lock(&local->mtx);
299 list_for_each_entry(wk, &local->work_list, list) {
300 if (wk->sdata != sdata)
301 continue;
302 WARN_ON(1);
303 break;
304 }
305 mutex_unlock(&local->mtx);
306}
307
308static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
309 struct sk_buff *skb)
310{
311 /*
312 * We are done serving the remain-on-channel command.
313 */
314 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
315 wk->chan, wk->chan_type,
316 GFP_KERNEL);
317
318 return WORK_DONE_DESTROY;
319}
320
321int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
322 struct ieee80211_channel *chan,
323 enum nl80211_channel_type channel_type,
324 unsigned int duration, u64 *cookie)
325{
326 struct ieee80211_work *wk;
327
328 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
329 if (!wk)
330 return -ENOMEM;
331
332 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
333 wk->chan = chan;
334 wk->chan_type = channel_type;
335 wk->sdata = sdata;
336 wk->done = ieee80211_remain_done;
337
338 wk->remain.duration = duration;
339
340 *cookie = (unsigned long) wk;
341
342 ieee80211_add_work(wk);
343
344 return 0;
345}
346
347int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
348 u64 cookie)
349{
350 struct ieee80211_local *local = sdata->local;
351 struct ieee80211_work *wk, *tmp;
352 bool found = false;
353
354 mutex_lock(&local->mtx);
355 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
356 if ((unsigned long) wk == cookie) {
357 wk->timeout = jiffies;
358 found = true;
359 break;
360 }
361 }
362 mutex_unlock(&local->mtx);
363
364 if (!found)
365 return -ENOENT;
366
367 ieee80211_queue_work(&local->hw, &local->work_work);
368
369 return 0;
370}
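
The run_again() helper in the file deleted above guarded its mod_timer() call with a wraparound-safe earlier-than test, so concurrent work items could only ever pull the shared timer forward, never push it back. The comparison trick it relied on is the standard jiffies idiom; a standalone illustration of why the signed-difference form is needed:

#include <stdio.h>

/* wraparound-safe "a is before b", the time_before() idiom */
static int time_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

int main(void)
{
        unsigned long now = (unsigned long)-10;  /* counter about to wrap */
        unsigned long later = now + 20;          /* wraps past zero */

        /* a naive compare gets the ordering wrong across the wrap */
        printf("naive:       %d\n", now < later);             /* 0 */
        printf("time_before: %d\n", time_before(now, later)); /* 1 */
        return 0;
}
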
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index ec1bd3fc1273..57cf5d1a2e4a 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_MAC802154) += mac802154.o 1obj-$(CONFIG_MAC802154) += mac802154.o
2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o 2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e3edfb0661b0..e748aed290aa 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -140,6 +140,10 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
140 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), 140 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
141 name, mac802154_monitor_setup); 141 name, mac802154_monitor_setup);
142 break; 142 break;
143 case IEEE802154_DEV_WPAN:
144 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
145 name, mac802154_wpan_setup);
146 break;
143 default: 147 default:
144 dev = NULL; 148 dev = NULL;
145 err = -EINVAL; 149 err = -EINVAL;
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 789d9c948aec..a4dcaf1dd4b6 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -93,6 +93,7 @@ struct mac802154_sub_if_data {
93#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */ 93#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
94 94
95extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; 95extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
96extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
96 97
97int mac802154_slave_open(struct net_device *dev); 98int mac802154_slave_open(struct net_device *dev);
98int mac802154_slave_close(struct net_device *dev); 99int mac802154_slave_close(struct net_device *dev);
@@ -100,10 +101,18 @@ int mac802154_slave_close(struct net_device *dev);
100void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb); 101void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
101void mac802154_monitor_setup(struct net_device *dev); 102void mac802154_monitor_setup(struct net_device *dev);
102 103
104void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb);
105void mac802154_wpan_setup(struct net_device *dev);
106
103netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, 107netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
104 u8 page, u8 chan); 108 u8 page, u8 chan);
105 109
106/* MIB callbacks */ 110/* MIB callbacks */
111void mac802154_dev_set_short_addr(struct net_device *dev, u16 val);
112u16 mac802154_dev_get_short_addr(const struct net_device *dev);
107void mac802154_dev_set_ieee_addr(struct net_device *dev); 113void mac802154_dev_set_ieee_addr(struct net_device *dev);
114u16 mac802154_dev_get_pan_id(const struct net_device *dev);
115void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
116void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
108 117
109#endif /* MAC802154_H */ 118#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index 7a5d0e052cd7..d8d277006089 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -25,13 +25,37 @@
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27 27
28#include <net/ieee802154.h>
28#include <net/ieee802154_netdev.h> 29#include <net/ieee802154_netdev.h>
29#include <net/wpan-phy.h> 30#include <net/wpan-phy.h>
30#include <net/mac802154.h> 31#include <net/mac802154.h>
32#include <net/nl802154.h>
31 33
32#include "mac802154.h" 34#include "mac802154.h"
33 35
34struct wpan_phy *mac802154_get_phy(const struct net_device *dev) 36static int mac802154_mlme_start_req(struct net_device *dev,
37 struct ieee802154_addr *addr,
38 u8 channel, u8 page,
39 u8 bcn_ord, u8 sf_ord,
40 u8 pan_coord, u8 blx,
41 u8 coord_realign)
42{
43 BUG_ON(addr->addr_type != IEEE802154_ADDR_SHORT);
44
45 mac802154_dev_set_pan_id(dev, addr->pan_id);
46 mac802154_dev_set_short_addr(dev, addr->short_addr);
47 mac802154_dev_set_ieee_addr(dev);
48 mac802154_dev_set_page_channel(dev, page, channel);
49
50 /* FIXME: add validation for unused parameters to be sane
51 * for SoftMAC
52 */
53 ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
54
55 return 0;
56}
57
58static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
35{ 59{
36 struct mac802154_sub_if_data *priv = netdev_priv(dev); 60 struct mac802154_sub_if_data *priv = netdev_priv(dev);
37 61
@@ -43,3 +67,10 @@ struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
43struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { 67struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
44 .get_phy = mac802154_get_phy, 68 .get_phy = mac802154_get_phy,
45}; 69};
70
71struct ieee802154_mlme_ops mac802154_mlme_wpan = {
72 .get_phy = mac802154_get_phy,
73 .start_req = mac802154_mlme_start_req,
74 .get_pan_id = mac802154_dev_get_pan_id,
75 .get_short_addr = mac802154_dev_get_short_addr,
76};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index ab59821ec729..f47781ab0ccc 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -28,13 +28,18 @@
28 28
29#include "mac802154.h" 29#include "mac802154.h"
30 30
31struct phy_chan_notify_work {
32 struct work_struct work;
33 struct net_device *dev;
34};
35
31struct hw_addr_filt_notify_work { 36struct hw_addr_filt_notify_work {
32 struct work_struct work; 37 struct work_struct work;
33 struct net_device *dev; 38 struct net_device *dev;
34 unsigned long changed; 39 unsigned long changed;
35}; 40};
36 41
37struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev) 42static struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
38{ 43{
39 struct mac802154_sub_if_data *priv = netdev_priv(dev); 44 struct mac802154_sub_if_data *priv = netdev_priv(dev);
40 45
@@ -78,6 +83,37 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
78 return; 83 return;
79} 84}
80 85
86void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
87{
88 struct mac802154_sub_if_data *priv = netdev_priv(dev);
89
90 BUG_ON(dev->type != ARPHRD_IEEE802154);
91
92 spin_lock_bh(&priv->mib_lock);
93 priv->short_addr = val;
94 spin_unlock_bh(&priv->mib_lock);
95
96 if ((priv->hw->ops->set_hw_addr_filt) &&
97 (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) {
98 priv->hw->hw.hw_filt.short_addr = priv->short_addr;
99 set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED);
100 }
101}
102
103u16 mac802154_dev_get_short_addr(const struct net_device *dev)
104{
105 struct mac802154_sub_if_data *priv = netdev_priv(dev);
106 u16 ret;
107
108 BUG_ON(dev->type != ARPHRD_IEEE802154);
109
110 spin_lock_bh(&priv->mib_lock);
111 ret = priv->short_addr;
112 spin_unlock_bh(&priv->mib_lock);
113
114 return ret;
115}
116
81void mac802154_dev_set_ieee_addr(struct net_device *dev) 117void mac802154_dev_set_ieee_addr(struct net_device *dev)
82{ 118{
83 struct mac802154_sub_if_data *priv = netdev_priv(dev); 119 struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -91,3 +127,73 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev)
91 set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED); 127 set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
92 } 128 }
93} 129}
130
131u16 mac802154_dev_get_pan_id(const struct net_device *dev)
132{
133 struct mac802154_sub_if_data *priv = netdev_priv(dev);
134 u16 ret;
135
136 BUG_ON(dev->type != ARPHRD_IEEE802154);
137
138 spin_lock_bh(&priv->mib_lock);
139 ret = priv->pan_id;
140 spin_unlock_bh(&priv->mib_lock);
141
142 return ret;
143}
144
145void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
146{
147 struct mac802154_sub_if_data *priv = netdev_priv(dev);
148
149 BUG_ON(dev->type != ARPHRD_IEEE802154);
150
151 spin_lock_bh(&priv->mib_lock);
152 priv->pan_id = val;
153 spin_unlock_bh(&priv->mib_lock);
154
155 if ((priv->hw->ops->set_hw_addr_filt) &&
156 (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) {
157 priv->hw->hw.hw_filt.pan_id = priv->pan_id;
158 set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED);
159 }
160}
161
162static void phy_chan_notify(struct work_struct *work)
163{
164 struct phy_chan_notify_work *nw = container_of(work,
165 struct phy_chan_notify_work, work);
166 struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
167 struct mac802154_sub_if_data *priv = netdev_priv(nw->dev);
168 int res;
169
170 res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
171 if (res)
172 pr_debug("set_channel failed\n");
173
174 kfree(nw);
175}
176
177void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
178{
179 struct mac802154_sub_if_data *priv = netdev_priv(dev);
180 struct phy_chan_notify_work *work;
181
182 BUG_ON(dev->type != ARPHRD_IEEE802154);
183
184 spin_lock_bh(&priv->mib_lock);
185 priv->page = page;
186 priv->chan = chan;
187 spin_unlock_bh(&priv->mib_lock);
188
189 if (priv->hw->phy->current_channel != priv->chan ||
190 priv->hw->phy->current_page != priv->page) {
191 work = kzalloc(sizeof(*work), GFP_ATOMIC);
192 if (!work)
193 return;
194
195 INIT_WORK(&work->work, phy_chan_notify);
196 work->dev = dev;
197 queue_work(priv->hw->dev_workqueue, &work->work);
198 }
199}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 4a7d76d4f8bc..38548ec2098f 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -77,6 +77,7 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
77 } 77 }
78 78
79 mac802154_monitors_rx(priv, skb); 79 mac802154_monitors_rx(priv, skb);
80 mac802154_wpans_rx(priv, skb);
80out: 81out:
81 dev_kfree_skb(skb); 82 dev_kfree_skb(skb);
82 return; 83 return;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 8781d8f904d9..1a4df39c722e 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -83,9 +83,12 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
83{ 83{
84 struct xmit_work *work; 84 struct xmit_work *work;
85 85
86 if (!(priv->phy->channels_supported[page] & (1 << chan))) 86 if (!(priv->phy->channels_supported[page] & (1 << chan))) {
87 WARN_ON(1); 87 WARN_ON(1);
88 return NETDEV_TX_OK; 88 return NETDEV_TX_OK;
89 }
90
91 mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
89 92
90 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { 93 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
91 u16 crc = crc_ccitt(0, skb->data, skb->len); 94 u16 crc = crc_ccitt(0, skb->data, skb->len);
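
The checksum path shown above appends the IEEE 802.15.4 frame check sequence via the kernel's crc_ccitt() whenever the hardware does not compute it itself (IEEE802154_HW_OMIT_CKSUM unset). That CRC is the reflected CCITT variant, polynomial 0x8408 processed LSB-first, seeded here with 0; a bit-by-bit user-space equivalent for reference:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-CCITT (poly 0x8408), matching crc_ccitt(0, buf, len)
 * as used above for the 802.15.4 frame check sequence. */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
}

int main(void)
{
        const uint8_t frame[] = "123456789";
        uint16_t fcs = crc_ccitt(0, frame, strlen((const char *)frame));

        /* 0x2189 is the published check value for this CRC variant */
        printf("FCS: 0x%04x\n", fcs);
        return 0;
}
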
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
new file mode 100644
index 000000000000..f30f6d4beea1
--- /dev/null
+++ b/net/mac802154/wpan.c
@@ -0,0 +1,559 @@
1/*
2 * Copyright 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
19 * Sergey Lapin <slapin@ossfans.org>
20 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/netdevice.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27
28#include <net/rtnetlink.h>
29#include <linux/nl802154.h>
30#include <net/af_ieee802154.h>
31#include <net/mac802154.h>
32#include <net/ieee802154_netdev.h>
33#include <net/ieee802154.h>
34#include <net/wpan-phy.h>
35
36#include "mac802154.h"
37
38static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
39{
40 if (unlikely(!pskb_may_pull(skb, 1)))
41 return -EINVAL;
42
43 *val = skb->data[0];
44 skb_pull(skb, 1);
45
46 return 0;
47}
48
49static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
50{
51 if (unlikely(!pskb_may_pull(skb, 2)))
52 return -EINVAL;
53
54 *val = skb->data[0] | (skb->data[1] << 8);
55 skb_pull(skb, 2);
56
57 return 0;
58}
59
60static inline void mac802154_haddr_copy_swap(u8 *dest, const u8 *src)
61{
62 int i;
63 for (i = 0; i < IEEE802154_ADDR_LEN; i++)
64 dest[IEEE802154_ADDR_LEN - i - 1] = src[i];
65}
66
67static int
68mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
69{
70 struct mac802154_sub_if_data *priv = netdev_priv(dev);
71 struct sockaddr_ieee802154 *sa =
72 (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
73 int err = -ENOIOCTLCMD;
74
75 spin_lock_bh(&priv->mib_lock);
76
77 switch (cmd) {
78 case SIOCGIFADDR:
79 if (priv->pan_id == IEEE802154_PANID_BROADCAST ||
80 priv->short_addr == IEEE802154_ADDR_BROADCAST) {
81 err = -EADDRNOTAVAIL;
82 break;
83 }
84
85 sa->family = AF_IEEE802154;
86 sa->addr.addr_type = IEEE802154_ADDR_SHORT;
87 sa->addr.pan_id = priv->pan_id;
88 sa->addr.short_addr = priv->short_addr;
89
90 err = 0;
91 break;
92 case SIOCSIFADDR:
93 dev_warn(&dev->dev,
94 "Using DEBUGing ioctl SIOCSIFADDR isn't recommened!\n");
95 if (sa->family != AF_IEEE802154 ||
96 sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
97 sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
98 sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
99 sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
100 err = -EINVAL;
101 break;
102 }
103
104 priv->pan_id = sa->addr.pan_id;
105 priv->short_addr = sa->addr.short_addr;
106
107 err = 0;
108 break;
109 }
110
111 spin_unlock_bh(&priv->mib_lock);
112 return err;
113}
114
115static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
116{
117 struct sockaddr *addr = p;
118
119 if (netif_running(dev))
120 return -EBUSY;
121
122 /* FIXME: validate addr */
123 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
124 mac802154_dev_set_ieee_addr(dev);
125 return 0;
126}
127
128static int mac802154_header_create(struct sk_buff *skb,
129 struct net_device *dev,
130 unsigned short type,
131 const void *_daddr,
132 const void *_saddr,
133 unsigned len)
134{
135 const struct ieee802154_addr *saddr = _saddr;
136 const struct ieee802154_addr *daddr = _daddr;
137 struct ieee802154_addr dev_addr;
138 struct mac802154_sub_if_data *priv = netdev_priv(dev);
139 int pos = 2;
140 u8 *head;
141 u16 fc;
142
143 if (!daddr)
144 return -EINVAL;
145
146 head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
147 if (head == NULL)
148 return -ENOMEM;
149
150 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
151 fc = mac_cb_type(skb);
152
153 if (!saddr) {
154 spin_lock_bh(&priv->mib_lock);
155
156 if (priv->short_addr == IEEE802154_ADDR_BROADCAST ||
157 priv->short_addr == IEEE802154_ADDR_UNDEF ||
158 priv->pan_id == IEEE802154_PANID_BROADCAST) {
159 dev_addr.addr_type = IEEE802154_ADDR_LONG;
160 memcpy(dev_addr.hwaddr, dev->dev_addr,
161 IEEE802154_ADDR_LEN);
162 } else {
163 dev_addr.addr_type = IEEE802154_ADDR_SHORT;
164 dev_addr.short_addr = priv->short_addr;
165 }
166
167 dev_addr.pan_id = priv->pan_id;
168 saddr = &dev_addr;
169
170 spin_unlock_bh(&priv->mib_lock);
171 }
172
173 if (daddr->addr_type != IEEE802154_ADDR_NONE) {
174 fc |= (daddr->addr_type << IEEE802154_FC_DAMODE_SHIFT);
175
176 head[pos++] = daddr->pan_id & 0xff;
177 head[pos++] = daddr->pan_id >> 8;
178
179 if (daddr->addr_type == IEEE802154_ADDR_SHORT) {
180 head[pos++] = daddr->short_addr & 0xff;
181 head[pos++] = daddr->short_addr >> 8;
182 } else {
183 mac802154_haddr_copy_swap(head + pos, daddr->hwaddr);
184 pos += IEEE802154_ADDR_LEN;
185 }
186 }
187
188 if (saddr->addr_type != IEEE802154_ADDR_NONE) {
189 fc |= (saddr->addr_type << IEEE802154_FC_SAMODE_SHIFT);
190
191 if ((saddr->pan_id == daddr->pan_id) &&
192 (saddr->pan_id != IEEE802154_PANID_BROADCAST)) {
193 /* PANID compression/intra PAN */
194 fc |= IEEE802154_FC_INTRA_PAN;
195 } else {
196 head[pos++] = saddr->pan_id & 0xff;
197 head[pos++] = saddr->pan_id >> 8;
198 }
199
200 if (saddr->addr_type == IEEE802154_ADDR_SHORT) {
201 head[pos++] = saddr->short_addr & 0xff;
202 head[pos++] = saddr->short_addr >> 8;
203 } else {
204 mac802154_haddr_copy_swap(head + pos, saddr->hwaddr);
205 pos += IEEE802154_ADDR_LEN;
206 }
207 }
208
209 head[0] = fc;
210 head[1] = fc >> 8;
211
212 memcpy(skb_push(skb, pos), head, pos);
213 kfree(head);
214
215 return pos;
216}
217
218static int
219mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
220{
221 const u8 *hdr = skb_mac_header(skb);
222 const u8 *tail = skb_tail_pointer(skb);
223 struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
224 u16 fc;
225 int da_type;
226
227 if (hdr + 3 > tail)
228 goto malformed;
229
230 fc = hdr[0] | (hdr[1] << 8);
231
232 hdr += 3;
233
234 da_type = IEEE802154_FC_DAMODE(fc);
235 addr->addr_type = IEEE802154_FC_SAMODE(fc);
236
237 switch (da_type) {
238 case IEEE802154_ADDR_NONE:
239 if (fc & IEEE802154_FC_INTRA_PAN)
240 goto malformed;
241 break;
242 case IEEE802154_ADDR_LONG:
243 if (fc & IEEE802154_FC_INTRA_PAN) {
244 if (hdr + 2 > tail)
245 goto malformed;
246 addr->pan_id = hdr[0] | (hdr[1] << 8);
247 hdr += 2;
248 }
249
250 if (hdr + IEEE802154_ADDR_LEN > tail)
251 goto malformed;
252
253 hdr += IEEE802154_ADDR_LEN;
254 break;
255 case IEEE802154_ADDR_SHORT:
256 if (fc & IEEE802154_FC_INTRA_PAN) {
257 if (hdr + 2 > tail)
258 goto malformed;
259 addr->pan_id = hdr[0] | (hdr[1] << 8);
260 hdr += 2;
261 }
262
263 if (hdr + 2 > tail)
264 goto malformed;
265
266 hdr += 2;
267 break;
268 default:
269 goto malformed;
270
271 }
272
273 switch (addr->addr_type) {
274 case IEEE802154_ADDR_NONE:
275 break;
276 case IEEE802154_ADDR_LONG:
277 if (!(fc & IEEE802154_FC_INTRA_PAN)) {
278 if (hdr + 2 > tail)
279 goto malformed;
280 addr->pan_id = hdr[0] | (hdr[1] << 8);
281 hdr += 2;
282 }
283
284 if (hdr + IEEE802154_ADDR_LEN > tail)
285 goto malformed;
286
287 mac802154_haddr_copy_swap(addr->hwaddr, hdr);
288 hdr += IEEE802154_ADDR_LEN;
289 break;
290 case IEEE802154_ADDR_SHORT:
291 if (!(fc & IEEE802154_FC_INTRA_PAN)) {
292 if (hdr + 2 > tail)
293 goto malformed;
294 addr->pan_id = hdr[0] | (hdr[1] << 8);
295 hdr += 2;
296 }
297
298 if (hdr + 2 > tail)
299 goto malformed;
300
301 addr->short_addr = hdr[0] | (hdr[1] << 8);
302 hdr += 2;
303 break;
304 default:
305 goto malformed;
306 }
307
308 return sizeof(struct ieee802154_addr);
309
310malformed:
311 pr_debug("malformed packet\n");
312 return 0;
313}
314
315static netdev_tx_t
316mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
317{
318 struct mac802154_sub_if_data *priv;
319 u8 chan, page;
320
321 priv = netdev_priv(dev);
322
323 spin_lock_bh(&priv->mib_lock);
324 chan = priv->chan;
325 page = priv->page;
326 spin_unlock_bh(&priv->mib_lock);
327
328 if (chan == MAC802154_CHAN_NONE ||
329 page >= WPAN_NUM_PAGES ||
330 chan >= WPAN_NUM_CHANNELS)
331 return NETDEV_TX_OK;
332
333 skb->skb_iif = dev->ifindex;
334 dev->stats.tx_packets++;
335 dev->stats.tx_bytes += skb->len;
336
337 return mac802154_tx(priv->hw, skb, page, chan);
338}
339
340static struct header_ops mac802154_header_ops = {
341 .create = mac802154_header_create,
342 .parse = mac802154_header_parse,
343};
344
345static const struct net_device_ops mac802154_wpan_ops = {
346 .ndo_open = mac802154_slave_open,
347 .ndo_stop = mac802154_slave_close,
348 .ndo_start_xmit = mac802154_wpan_xmit,
349 .ndo_do_ioctl = mac802154_wpan_ioctl,
350 .ndo_set_mac_address = mac802154_wpan_mac_addr,
351};
352
353void mac802154_wpan_setup(struct net_device *dev)
354{
355 struct mac802154_sub_if_data *priv;
356
357 dev->addr_len = IEEE802154_ADDR_LEN;
358 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
359
360 dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
361 dev->header_ops = &mac802154_header_ops;
362 dev->needed_tailroom = 2; /* FCS */
363 dev->mtu = IEEE802154_MTU;
364 dev->tx_queue_len = 10;
365 dev->type = ARPHRD_IEEE802154;
366 dev->flags = IFF_NOARP | IFF_BROADCAST;
367 dev->watchdog_timeo = 0;
368
369 dev->destructor = free_netdev;
370 dev->netdev_ops = &mac802154_wpan_ops;
371 dev->ml_priv = &mac802154_mlme_wpan;
372
373 priv = netdev_priv(dev);
374 priv->type = IEEE802154_DEV_WPAN;
375
376 priv->chan = MAC802154_CHAN_NONE;
377 priv->page = 0;
378
379 spin_lock_init(&priv->mib_lock);
380
381 get_random_bytes(&priv->bsn, 1);
382 get_random_bytes(&priv->dsn, 1);
383
384 priv->pan_id = IEEE802154_PANID_BROADCAST;
385 priv->short_addr = IEEE802154_ADDR_BROADCAST;
386}
387
388static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
389{
390 return netif_rx(skb);
391}
392
393static int
394mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
395{
396 pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
397
398 spin_lock_bh(&sdata->mib_lock);
399
400 switch (mac_cb(skb)->da.addr_type) {
401 case IEEE802154_ADDR_NONE:
402 if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE)
403 /* FIXME: check if we are PAN coordinator */
404 skb->pkt_type = PACKET_OTHERHOST;
405 else
406 /* ACK comes with both addresses empty */
407 skb->pkt_type = PACKET_HOST;
408 break;
409 case IEEE802154_ADDR_LONG:
410 if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
411 mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
412 skb->pkt_type = PACKET_OTHERHOST;
413 else if (!memcmp(mac_cb(skb)->da.hwaddr, sdata->dev->dev_addr,
414 IEEE802154_ADDR_LEN))
415 skb->pkt_type = PACKET_HOST;
416 else
417 skb->pkt_type = PACKET_OTHERHOST;
418 break;
419 case IEEE802154_ADDR_SHORT:
420 if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
421 mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
422 skb->pkt_type = PACKET_OTHERHOST;
423 else if (mac_cb(skb)->da.short_addr == sdata->short_addr)
424 skb->pkt_type = PACKET_HOST;
425 else if (mac_cb(skb)->da.short_addr ==
426 IEEE802154_ADDR_BROADCAST)
427 skb->pkt_type = PACKET_BROADCAST;
428 else
429 skb->pkt_type = PACKET_OTHERHOST;
430 break;
431 default:
432 break;
433 }
434
435 spin_unlock_bh(&sdata->mib_lock);
436
437 skb->dev = sdata->dev;
438
439 sdata->dev->stats.rx_packets++;
440 sdata->dev->stats.rx_bytes += skb->len;
441
442 switch (mac_cb_type(skb)) {
443 case IEEE802154_FC_TYPE_DATA:
444 return mac802154_process_data(sdata->dev, skb);
445 default:
446 pr_warning("ieee802154: bad frame received (type = %d)\n",
447 mac_cb_type(skb));
448 kfree_skb(skb);
449 return NET_RX_DROP;
450 }
451}
452
453static int mac802154_parse_frame_start(struct sk_buff *skb)
454{
455 u8 *head = skb->data;
456 u16 fc;
457
458 if (mac802154_fetch_skb_u16(skb, &fc) ||
459 mac802154_fetch_skb_u8(skb, &(mac_cb(skb)->seq)))
460 goto err;
461
462 pr_debug("fc: %04x dsn: %02x\n", fc, head[2]);
463
464 mac_cb(skb)->flags = IEEE802154_FC_TYPE(fc);
465 mac_cb(skb)->sa.addr_type = IEEE802154_FC_SAMODE(fc);
466 mac_cb(skb)->da.addr_type = IEEE802154_FC_DAMODE(fc);
467
468 if (fc & IEEE802154_FC_INTRA_PAN)
469 mac_cb(skb)->flags |= MAC_CB_FLAG_INTRAPAN;
470
471 if (mac_cb(skb)->da.addr_type != IEEE802154_ADDR_NONE) {
472 if (mac802154_fetch_skb_u16(skb, &(mac_cb(skb)->da.pan_id)))
473 goto err;
474
475 /* source PAN id compression */
476 if (mac_cb_is_intrapan(skb))
477 mac_cb(skb)->sa.pan_id = mac_cb(skb)->da.pan_id;
478
479 pr_debug("dest PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
480
481 if (mac_cb(skb)->da.addr_type == IEEE802154_ADDR_SHORT) {
482 u16 *da = &(mac_cb(skb)->da.short_addr);
483
484 if (mac802154_fetch_skb_u16(skb, da))
485 goto err;
486
487 pr_debug("destination address is short: %04x\n",
488 mac_cb(skb)->da.short_addr);
489 } else {
490 if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
491 goto err;
492
493 mac802154_haddr_copy_swap(mac_cb(skb)->da.hwaddr,
494 skb->data);
495 skb_pull(skb, IEEE802154_ADDR_LEN);
496
497 pr_debug("destination address is hardware\n");
498 }
499 }
500
501 if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE) {
 502		/* no PAN id compression, fetch the source PAN id */
503 if (!(mac_cb_is_intrapan(skb))) {
504 u16 *sa_pan = &(mac_cb(skb)->sa.pan_id);
505
506 if (mac802154_fetch_skb_u16(skb, sa_pan))
507 goto err;
508 }
509
510 pr_debug("source PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
511
512 if (mac_cb(skb)->sa.addr_type == IEEE802154_ADDR_SHORT) {
513 u16 *sa = &(mac_cb(skb)->sa.short_addr);
514
515 if (mac802154_fetch_skb_u16(skb, sa))
516 goto err;
517
518 pr_debug("source address is short: %04x\n",
519 mac_cb(skb)->sa.short_addr);
520 } else {
521 if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
522 goto err;
523
524 mac802154_haddr_copy_swap(mac_cb(skb)->sa.hwaddr,
525 skb->data);
526 skb_pull(skb, IEEE802154_ADDR_LEN);
527
528 pr_debug("source address is hardware\n");
529 }
530 }
531
532 return 0;
533err:
534 return -EINVAL;
535}
536
537void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
538{
539 int ret;
540 struct sk_buff *sskb;
541 struct mac802154_sub_if_data *sdata;
542
543 ret = mac802154_parse_frame_start(skb);
544 if (ret) {
545 pr_debug("got invalid frame\n");
546 return;
547 }
548
549 rcu_read_lock();
550 list_for_each_entry_rcu(sdata, &priv->slaves, list) {
551 if (sdata->type != IEEE802154_DEV_WPAN)
552 continue;
553
554 sskb = skb_clone(skb, GFP_ATOMIC);
555 if (sskb)
556 mac802154_subif_frame(sdata, sskb);
557 }
558 rcu_read_unlock();
559}
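
Both mac802154_header_parse() and mac802154_parse_frame_start() in the new file decode the same 16-bit little-endian frame control field: the frame type in the low bits, an intra-PAN (PAN id compression) flag, and two 2-bit addressing modes selecting none/short/extended for destination and source. A hedged sketch of that decode, with the bit positions taken from the 802.15.4-2006 layout rather than from the kernel headers, computing the MAC header length a given frame control implies:

#include <stdint.h>
#include <stdio.h>

/* Assumed 802.15.4 frame-control bit layout (2006 spec). */
#define FC_TYPE(fc)      ((fc) & 0x7)
#define FC_INTRA_PAN(fc) (((fc) >> 6) & 0x1)
#define FC_DAMODE(fc)    (((fc) >> 10) & 0x3)
#define FC_SAMODE(fc)    (((fc) >> 14) & 0x3)

/* addressing modes: 0 = none, 2 = 16-bit short, 3 = 64-bit extended;
 * mode 1 is reserved */
static const int addr_bytes[4] = { 0, -1, 2, 8 };

/* Returns the MHR length implied by fc, or -1 on a malformed field. */
static int mhr_len(uint16_t fc)
{
        int da = FC_DAMODE(fc), sa = FC_SAMODE(fc);
        int len = 2 /* fc */ + 1 /* seq */;

        if (addr_bytes[da] < 0 || addr_bytes[sa] < 0)
                return -1;
        if (da)
                len += 2 + addr_bytes[da];          /* dest PAN + addr */
        if (sa)
                len += (FC_INTRA_PAN(fc) ? 0 : 2)   /* PAN id compression */
                       + addr_bytes[sa];
        return len;
}

int main(void)
{
        /* data frame, intra-PAN, short dest + short src, as
         * mac802154_header_create() builds for a typical unicast frame */
        uint16_t fc = 0x1 | (1 << 6) | (2u << 10) | (2u << 14);

        printf("MHR length: %d bytes\n", mhr_len(fc));  /* 9 */
        return 0;
}
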
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 209c1ed43368..c19b214ffd57 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -335,6 +335,27 @@ config NF_CT_NETLINK_TIMEOUT
335 335
336 If unsure, say `N'. 336 If unsure, say `N'.
337 337
338config NF_CT_NETLINK_HELPER
339 tristate 'Connection tracking helpers in user-space via Netlink'
340 select NETFILTER_NETLINK
341 depends on NF_CT_NETLINK
342 depends on NETFILTER_NETLINK_QUEUE
343 depends on NETFILTER_NETLINK_QUEUE_CT
344 depends on NETFILTER_ADVANCED
345 help
346 This option enables the user-space connection tracking helpers
347 infrastructure.
348
349 If unsure, say `N'.
350
351config NETFILTER_NETLINK_QUEUE_CT
352 bool "NFQUEUE integration with Connection Tracking"
353 default n
354 depends on NETFILTER_NETLINK_QUEUE
355 help
356 If this option is enabled, NFQUEUE can include Connection Tracking
 357	  information together with the packet when it is enqueued via NFNETLINK.
358
338endif # NF_CONNTRACK 359endif # NF_CONNTRACK
339 360
340# transparent proxy support 361# transparent proxy support
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4e7960cc7b97..1c5160f2278e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
9 9
10obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o 10obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
11obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o 11obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
12nfnetlink_queue-y := nfnetlink_queue_core.o
13nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
12obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o 14obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
13obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o 15obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
14 16
@@ -24,6 +26,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
24# netlink interface for nf_conntrack 26# netlink interface for nf_conntrack
25obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o 27obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
26obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o 28obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
29obj-$(CONFIG_NF_CT_NETLINK_HELPER) += nfnetlink_cthelper.o
27 30
28# connection tracking helpers 31# connection tracking helpers
29nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o 32nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e19f3653db23..0bc6b60db4df 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -264,6 +264,13 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
264 rcu_read_unlock(); 264 rcu_read_unlock();
265} 265}
266EXPORT_SYMBOL(nf_conntrack_destroy); 266EXPORT_SYMBOL(nf_conntrack_destroy);
267
268struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
269EXPORT_SYMBOL_GPL(nfq_ct_hook);
270
271struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
272EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
273
267#endif /* CONFIG_NF_CONNTRACK */ 274#endif /* CONFIG_NF_CONNTRACK */
268 275
269#ifdef CONFIG_PROC_FS 276#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 819c342f5b30..9730882697aa 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -640,6 +640,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
640} 640}
641 641
642static int 642static int
643ip_set_none(struct sock *ctnl, struct sk_buff *skb,
644 const struct nlmsghdr *nlh,
645 const struct nlattr * const attr[])
646{
647 return -EOPNOTSUPP;
648}
649
650static int
643ip_set_create(struct sock *ctnl, struct sk_buff *skb, 651ip_set_create(struct sock *ctnl, struct sk_buff *skb,
644 const struct nlmsghdr *nlh, 652 const struct nlmsghdr *nlh,
645 const struct nlattr * const attr[]) 653 const struct nlattr * const attr[])
@@ -1539,6 +1547,10 @@ nlmsg_failure:
1539} 1547}
1540 1548
1541static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = { 1549static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
1550 [IPSET_CMD_NONE] = {
1551 .call = ip_set_none,
1552 .attr_count = IPSET_ATTR_CMD_MAX,
1553 },
1542 [IPSET_CMD_CREATE] = { 1554 [IPSET_CMD_CREATE] = {
1543 .call = ip_set_create, 1555 .call = ip_set_create,
1544 .attr_count = IPSET_ATTR_CMD_MAX, 1556 .attr_count = IPSET_ATTR_CMD_MAX,
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index ee863943c826..d5d3607ae7bc 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -38,30 +38,6 @@ struct iface_node {
38 38
39#define iface_data(n) (rb_entry(n, struct iface_node, node)->iface) 39#define iface_data(n) (rb_entry(n, struct iface_node, node)->iface)
40 40
41static inline long
42ifname_compare(const char *_a, const char *_b)
43{
44 const long *a = (const long *)_a;
45 const long *b = (const long *)_b;
46
47 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
48 if (a[0] != b[0])
49 return a[0] - b[0];
50 if (IFNAMSIZ > sizeof(long)) {
51 if (a[1] != b[1])
52 return a[1] - b[1];
53 }
54 if (IFNAMSIZ > 2 * sizeof(long)) {
55 if (a[2] != b[2])
56 return a[2] - b[2];
57 }
58 if (IFNAMSIZ > 3 * sizeof(long)) {
59 if (a[3] != b[3])
60 return a[3] - b[3];
61 }
62 return 0;
63}
64
65static void 41static void
66rbtree_destroy(struct rb_root *root) 42rbtree_destroy(struct rb_root *root)
67{ 43{
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
99 75
100 while (n) { 76 while (n) {
101 const char *d = iface_data(n); 77 const char *d = iface_data(n);
102 long res = ifname_compare(*iface, d); 78 int res = strcmp(*iface, d);
103 79
104 if (res < 0) 80 if (res < 0)
105 n = n->rb_left; 81 n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
121 97
122 while (*n) { 98 while (*n) {
123 char *ifname = iface_data(*n); 99 char *ifname = iface_data(*n);
124 long res = ifname_compare(*iface, ifname); 100 int res = strcmp(*iface, ifname);
125 101
126 p = *n; 102 p = *n;
127 if (res < 0) 103 if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
366 struct hash_netiface4_elem data = { .cidr = HOST_MASK }; 342 struct hash_netiface4_elem data = { .cidr = HOST_MASK };
367 u32 ip = 0, ip_to, last; 343 u32 ip = 0, ip_to, last;
368 u32 timeout = h->timeout; 344 u32 timeout = h->timeout;
369 char iface[IFNAMSIZ] = {}; 345 char iface[IFNAMSIZ];
370 int ret; 346 int ret;
371 347
372 if (unlikely(!tb[IPSET_ATTR_IP] || 348 if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
663 ipset_adtfn adtfn = set->variant->adt[adt]; 639 ipset_adtfn adtfn = set->variant->adt[adt];
664 struct hash_netiface6_elem data = { .cidr = HOST_MASK }; 640 struct hash_netiface6_elem data = { .cidr = HOST_MASK };
665 u32 timeout = h->timeout; 641 u32 timeout = h->timeout;
666 char iface[IFNAMSIZ] = {}; 642 char iface[IFNAMSIZ];
667 int ret; 643 int ret;
668 644
669 if (unlikely(!tb[IPSET_ATTR_IP] || 645 if (unlikely(!tb[IPSET_ATTR_IP] ||
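
The ifname_compare() helper removed above compared interface names as raw unsigned-long words, which is only safe when both buffers are fully zero-padded to IFNAMSIZ; the switch to strcmp() is what lets the `char iface[IFNAMSIZ] = {}` initializers shrink to plain declarations. A small demonstration of why the word-wise version depended on that padding, using memcmp() over the full buffer in the spirit of the removed code (hypothetical buffers, IFNAMSIZ assumed to be 16):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* fixed-width compare in the spirit of the removed ifname_compare() */
static int word_compare(const char *a, const char *b)
{
        return memcmp(a, b, IFNAMSIZ);  /* compares past the NUL */
}

int main(void)
{
        char a[IFNAMSIZ], b[IFNAMSIZ];

        memset(a, 0x00, sizeof(a));     /* zero-padded */
        memset(b, 0xAA, sizeof(b));     /* stale bytes after the NUL */
        strcpy(a, "eth0");
        strcpy(b, "eth0");

        printf("strcmp equal:       %d\n", strcmp(a, b) == 0);       /* 1 */
        printf("word compare equal: %d\n", word_compare(a, b) == 0); /* 0 */
        return 0;
}
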
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a54b018c6eea..b54eccef40b5 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1742,7 +1742,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1742 { 1742 {
1743 .hook = ip_vs_reply4, 1743 .hook = ip_vs_reply4,
1744 .owner = THIS_MODULE, 1744 .owner = THIS_MODULE,
1745 .pf = PF_INET, 1745 .pf = NFPROTO_IPV4,
1746 .hooknum = NF_INET_LOCAL_IN, 1746 .hooknum = NF_INET_LOCAL_IN,
1747 .priority = NF_IP_PRI_NAT_SRC - 2, 1747 .priority = NF_IP_PRI_NAT_SRC - 2,
1748 }, 1748 },
@@ -1752,7 +1752,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1752 { 1752 {
1753 .hook = ip_vs_remote_request4, 1753 .hook = ip_vs_remote_request4,
1754 .owner = THIS_MODULE, 1754 .owner = THIS_MODULE,
1755 .pf = PF_INET, 1755 .pf = NFPROTO_IPV4,
1756 .hooknum = NF_INET_LOCAL_IN, 1756 .hooknum = NF_INET_LOCAL_IN,
1757 .priority = NF_IP_PRI_NAT_SRC - 1, 1757 .priority = NF_IP_PRI_NAT_SRC - 1,
1758 }, 1758 },
@@ -1760,7 +1760,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1760 { 1760 {
1761 .hook = ip_vs_local_reply4, 1761 .hook = ip_vs_local_reply4,
1762 .owner = THIS_MODULE, 1762 .owner = THIS_MODULE,
1763 .pf = PF_INET, 1763 .pf = NFPROTO_IPV4,
1764 .hooknum = NF_INET_LOCAL_OUT, 1764 .hooknum = NF_INET_LOCAL_OUT,
1765 .priority = NF_IP_PRI_NAT_DST + 1, 1765 .priority = NF_IP_PRI_NAT_DST + 1,
1766 }, 1766 },
@@ -1768,7 +1768,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1768 { 1768 {
1769 .hook = ip_vs_local_request4, 1769 .hook = ip_vs_local_request4,
1770 .owner = THIS_MODULE, 1770 .owner = THIS_MODULE,
1771 .pf = PF_INET, 1771 .pf = NFPROTO_IPV4,
1772 .hooknum = NF_INET_LOCAL_OUT, 1772 .hooknum = NF_INET_LOCAL_OUT,
1773 .priority = NF_IP_PRI_NAT_DST + 2, 1773 .priority = NF_IP_PRI_NAT_DST + 2,
1774 }, 1774 },
@@ -1777,7 +1777,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1777 { 1777 {
1778 .hook = ip_vs_forward_icmp, 1778 .hook = ip_vs_forward_icmp,
1779 .owner = THIS_MODULE, 1779 .owner = THIS_MODULE,
1780 .pf = PF_INET, 1780 .pf = NFPROTO_IPV4,
1781 .hooknum = NF_INET_FORWARD, 1781 .hooknum = NF_INET_FORWARD,
1782 .priority = 99, 1782 .priority = 99,
1783 }, 1783 },
@@ -1785,7 +1785,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1785 { 1785 {
1786 .hook = ip_vs_reply4, 1786 .hook = ip_vs_reply4,
1787 .owner = THIS_MODULE, 1787 .owner = THIS_MODULE,
1788 .pf = PF_INET, 1788 .pf = NFPROTO_IPV4,
1789 .hooknum = NF_INET_FORWARD, 1789 .hooknum = NF_INET_FORWARD,
1790 .priority = 100, 1790 .priority = 100,
1791 }, 1791 },
@@ -1794,7 +1794,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1794 { 1794 {
1795 .hook = ip_vs_reply6, 1795 .hook = ip_vs_reply6,
1796 .owner = THIS_MODULE, 1796 .owner = THIS_MODULE,
1797 .pf = PF_INET6, 1797 .pf = NFPROTO_IPV6,
1798 .hooknum = NF_INET_LOCAL_IN, 1798 .hooknum = NF_INET_LOCAL_IN,
1799 .priority = NF_IP6_PRI_NAT_SRC - 2, 1799 .priority = NF_IP6_PRI_NAT_SRC - 2,
1800 }, 1800 },
@@ -1804,7 +1804,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1804 { 1804 {
1805 .hook = ip_vs_remote_request6, 1805 .hook = ip_vs_remote_request6,
1806 .owner = THIS_MODULE, 1806 .owner = THIS_MODULE,
1807 .pf = PF_INET6, 1807 .pf = NFPROTO_IPV6,
1808 .hooknum = NF_INET_LOCAL_IN, 1808 .hooknum = NF_INET_LOCAL_IN,
1809 .priority = NF_IP6_PRI_NAT_SRC - 1, 1809 .priority = NF_IP6_PRI_NAT_SRC - 1,
1810 }, 1810 },
@@ -1812,7 +1812,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1812 { 1812 {
1813 .hook = ip_vs_local_reply6, 1813 .hook = ip_vs_local_reply6,
1814 .owner = THIS_MODULE, 1814 .owner = THIS_MODULE,
1815 .pf = PF_INET, 1815 .pf = NFPROTO_IPV4,
1816 .hooknum = NF_INET_LOCAL_OUT, 1816 .hooknum = NF_INET_LOCAL_OUT,
1817 .priority = NF_IP6_PRI_NAT_DST + 1, 1817 .priority = NF_IP6_PRI_NAT_DST + 1,
1818 }, 1818 },
@@ -1820,7 +1820,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1820 { 1820 {
1821 .hook = ip_vs_local_request6, 1821 .hook = ip_vs_local_request6,
1822 .owner = THIS_MODULE, 1822 .owner = THIS_MODULE,
1823 .pf = PF_INET6, 1823 .pf = NFPROTO_IPV6,
1824 .hooknum = NF_INET_LOCAL_OUT, 1824 .hooknum = NF_INET_LOCAL_OUT,
1825 .priority = NF_IP6_PRI_NAT_DST + 2, 1825 .priority = NF_IP6_PRI_NAT_DST + 2,
1826 }, 1826 },
@@ -1829,7 +1829,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1829 { 1829 {
1830 .hook = ip_vs_forward_icmp_v6, 1830 .hook = ip_vs_forward_icmp_v6,
1831 .owner = THIS_MODULE, 1831 .owner = THIS_MODULE,
1832 .pf = PF_INET6, 1832 .pf = NFPROTO_IPV6,
1833 .hooknum = NF_INET_FORWARD, 1833 .hooknum = NF_INET_FORWARD,
1834 .priority = 99, 1834 .priority = 99,
1835 }, 1835 },
@@ -1837,7 +1837,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1837 { 1837 {
1838 .hook = ip_vs_reply6, 1838 .hook = ip_vs_reply6,
1839 .owner = THIS_MODULE, 1839 .owner = THIS_MODULE,
1840 .pf = PF_INET6, 1840 .pf = NFPROTO_IPV6,
1841 .hooknum = NF_INET_FORWARD, 1841 .hooknum = NF_INET_FORWARD,
1842 .priority = 100, 1842 .priority = 100,
1843 }, 1843 },
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index dd811b8dd97c..84444dda194b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
76 76
77#ifdef CONFIG_IP_VS_IPV6 77#ifdef CONFIG_IP_VS_IPV6
78/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ 78/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
79static int __ip_vs_addr_is_local_v6(struct net *net, 79static bool __ip_vs_addr_is_local_v6(struct net *net,
80 const struct in6_addr *addr) 80 const struct in6_addr *addr)
81{ 81{
82 struct rt6_info *rt;
83 struct flowi6 fl6 = { 82 struct flowi6 fl6 = {
84 .daddr = *addr, 83 .daddr = *addr,
85 }; 84 };
85 struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
86 bool is_local;
86 87
87 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); 88 is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
88 if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
89 return 1;
90 89
91 return 0; 90 dst_release(dst);
91 return is_local;
92} 92}
93#endif 93#endif
94 94
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1521{ 1521{
1522 struct net_device *dev = ptr; 1522 struct net_device *dev = ptr;
1523 struct net *net = dev_net(dev); 1523 struct net *net = dev_net(dev);
1524 struct netns_ipvs *ipvs = net_ipvs(net);
1524 struct ip_vs_service *svc; 1525 struct ip_vs_service *svc;
1525 struct ip_vs_dest *dest; 1526 struct ip_vs_dest *dest;
1526 unsigned int idx; 1527 unsigned int idx;
1527 1528
1528 if (event != NETDEV_UNREGISTER) 1529 if (event != NETDEV_UNREGISTER || !ipvs)
1529 return NOTIFY_DONE; 1530 return NOTIFY_DONE;
1530 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); 1531 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1531 EnterFunction(2); 1532 EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1551 } 1552 }
1552 } 1553 }
1553 1554
1554 list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { 1555 list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
1555 __ip_vs_dev_reset(dest, dev); 1556 __ip_vs_dev_reset(dest, dev);
1556 } 1557 }
1557 mutex_unlock(&__ip_vs_mutex); 1558 mutex_unlock(&__ip_vs_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 7fd66dec859d..65b616ae1716 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -797,7 +797,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
797 goto tx_error_put; 797 goto tx_error_put;
798 } 798 }
799 if (skb_dst(skb)) 799 if (skb_dst(skb))
800 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 800 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
801 801
802 df |= (old_iph->frag_off & htons(IP_DF)); 802 df |= (old_iph->frag_off & htons(IP_DF));
803 803
@@ -823,7 +823,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
823 IP_VS_ERR_RL("%s(): no memory\n", __func__); 823 IP_VS_ERR_RL("%s(): no memory\n", __func__);
824 return NF_STOLEN; 824 return NF_STOLEN;
825 } 825 }
826 kfree_skb(skb); 826 consume_skb(skb);
827 skb = new_skb; 827 skb = new_skb;
828 old_iph = ip_hdr(skb); 828 old_iph = ip_hdr(skb);
829 } 829 }
@@ -913,7 +913,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
913 goto tx_error_put; 913 goto tx_error_put;
914 } 914 }
915 if (skb_dst(skb)) 915 if (skb_dst(skb))
916 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 916 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
917 917
918 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) && 918 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
919 !skb_is_gso(skb)) { 919 !skb_is_gso(skb)) {
@@ -942,7 +942,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
942 IP_VS_ERR_RL("%s(): no memory\n", __func__); 942 IP_VS_ERR_RL("%s(): no memory\n", __func__);
943 return NF_STOLEN; 943 return NF_STOLEN;
944 } 944 }
945 kfree_skb(skb); 945 consume_skb(skb);
946 skb = new_skb; 946 skb = new_skb;
947 old_iph = ipv6_hdr(skb); 947 old_iph = ipv6_hdr(skb);
948 } 948 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ac3af97cc468..cf4875565d67 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -531,7 +531,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp) {
 		if (skb->tstamp.tv64 == 0)
-			__net_timestamp((struct sk_buff *)skb);
+			__net_timestamp(skb);
 
 		tstamp->start = ktime_to_ns(skb->tstamp);
 	}
@@ -819,7 +819,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 			__set_bit(IPS_EXPECTED_BIT, &ct->status);
 			ct->master = exp->master;
 			if (exp->helper) {
-				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+				help = nf_ct_helper_ext_add(ct, exp->helper,
+							    GFP_ATOMIC);
 				if (help)
 					rcu_assign_pointer(help->helper, exp->helper);
 			}
@@ -1333,7 +1334,6 @@ static void nf_conntrack_cleanup_init_net(void)
 	while (untrack_refs() > 0)
 		schedule();
 
-	nf_conntrack_proto_fini();
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	nf_ct_extend_unregister(&nf_ct_zone_extend);
 #endif
@@ -1372,7 +1372,7 @@ void nf_conntrack_cleanup(struct net *net)
 	   netfilter framework.  Roll on, two-stage module
 	   delete... */
 	synchronize_net();
-
+	nf_conntrack_proto_fini(net);
 	nf_conntrack_cleanup_net(net);
 
 	if (net_eq(net, &init_net)) {
@@ -1496,11 +1496,6 @@ static int nf_conntrack_init_init_net(void)
 	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
 	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
 	       nf_conntrack_max);
-
-	ret = nf_conntrack_proto_init();
-	if (ret < 0)
-		goto err_proto;
-
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	ret = nf_ct_extend_register(&nf_ct_zone_extend);
 	if (ret < 0)
@@ -1518,9 +1513,7 @@ static int nf_conntrack_init_init_net(void)
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 err_extend:
-	nf_conntrack_proto_fini();
 #endif
-err_proto:
 	return ret;
 }
 
@@ -1583,9 +1576,7 @@ static int nf_conntrack_init_net(struct net *net)
 	ret = nf_conntrack_helper_init(net);
 	if (ret < 0)
 		goto err_helper;
-
 	return 0;
-
 err_helper:
 	nf_conntrack_timeout_fini(net);
 err_timeout:
@@ -1622,6 +1613,9 @@ int nf_conntrack_init(struct net *net)
 		if (ret < 0)
 			goto out_init_net;
 	}
+	ret = nf_conntrack_proto_init(net);
+	if (ret < 0)
+		goto out_proto;
 	ret = nf_conntrack_init_net(net);
 	if (ret < 0)
 		goto out_net;
@@ -1637,6 +1631,8 @@ int nf_conntrack_init(struct net *net)
 	return 0;
 
 out_net:
+	nf_conntrack_proto_fini(net);
+out_proto:
 	if (net_eq(net, &init_net))
 		nf_conntrack_cleanup_init_net();
 out_init_net:
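The nf_conntrack_init() hunks above thread the new per-net step, nf_conntrack_proto_init(net), into the existing goto ladder, with a matching nf_conntrack_proto_fini(net) call under the new out_proto label so failures unwind in reverse order of initialization. A compilable sketch of the idiom; the step names are illustrative, not the kernel functions:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }   /* the newly inserted step */
static int init_c(void) { return -1; }  /* pretend this one fails */
static void fini_b(void) { puts("fini_b"); }
static void fini_a(void) { puts("fini_a"); }

static int subsystem_init(void)
{
	int ret;

	ret = init_a();
	if (ret < 0)
		goto out;
	ret = init_b();
	if (ret < 0)
		goto out_a;
	ret = init_c();
	if (ret < 0)
		goto out_b;
	return 0;

out_b:
	fini_b(); /* undo init_b() */
out_a:
	fini_a(); /* undo init_a() */
out:
	return ret;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;
}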
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 641ff5f96718..1a9545965c0d 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -44,7 +44,8 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
 EXPORT_SYMBOL(__nf_ct_ext_destroy);
 
 static void *
-nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
+nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+		 size_t var_alloc_len, gfp_t gfp)
 {
 	unsigned int off, len;
 	struct nf_ct_ext_type *t;
@@ -54,8 +55,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 	t = rcu_dereference(nf_ct_ext_types[id]);
 	BUG_ON(t == NULL);
 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
-	len = off + t->len;
-	alloc_size = t->alloc_size;
+	len = off + t->len + var_alloc_len;
+	alloc_size = t->alloc_size + var_alloc_len;
 	rcu_read_unlock();
 
 	*ext = kzalloc(alloc_size, gfp);
@@ -68,7 +69,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 	return (void *)(*ext) + off;
 }
 
-void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
+void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+			     size_t var_alloc_len, gfp_t gfp)
 {
 	struct nf_ct_ext *old, *new;
 	int i, newlen, newoff;
@@ -79,7 +81,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 
 	old = ct->ext;
 	if (!old)
-		return nf_ct_ext_create(&ct->ext, id, gfp);
+		return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);
 
 	if (__nf_ct_ext_exist(old, id))
 		return NULL;
@@ -89,7 +91,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	BUG_ON(t == NULL);
 
 	newoff = ALIGN(old->len, t->align);
-	newlen = newoff + t->len;
+	newlen = newoff + t->len + var_alloc_len;
 	rcu_read_unlock();
 
 	new = __krealloc(old, newlen, gfp);
@@ -117,7 +119,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	memset((void *)new + newoff, 0, newlen - newoff);
 	return (void *)new + newoff;
 }
-EXPORT_SYMBOL(__nf_ct_ext_add);
+EXPORT_SYMBOL(__nf_ct_ext_add_length);
 
 static void update_alloc_size(struct nf_ct_ext_type *type)
 {
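The nf_conntrack_extend.c change above turns extension allocation into __nf_ct_ext_add_length(): the aligned offset and fixed type length now get a caller-supplied var_alloc_len added on top, which is how a helper's private data area is carved out of the same allocation. A small sketch of the size computation; ALIGN mirrors the kernel macro, and the numbers are illustrative:

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct ext_hdr { unsigned char len; };

int main(void)
{
	size_t fixed_len = 24;     /* t->len: fixed part of the extension */
	size_t align = 8;          /* t->align */
	size_t var_alloc_len = 16; /* e.g. a helper's data_len */

	/* round the start of the extension up to the type's alignment,
	 * then append the variable-length tail after the fixed part */
	size_t off = ALIGN(sizeof(struct ext_hdr), align);
	size_t len = off + fixed_len + var_alloc_len;

	printf("off=%zu len=%zu\n", off, len);
	return 0;
}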
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c5c95c6d34f..4bb771d1f57a 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -358,7 +358,7 @@ static int help(struct sk_buff *skb,
 	u32 seq;
 	int dir = CTINFO2DIR(ctinfo);
 	unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
-	struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
+	struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
 	struct nf_conntrack_expect *exp;
 	union nf_inet_addr *daddr;
 	struct nf_conntrack_man cmd = {};
@@ -512,7 +512,6 @@ out_update_nl:
 }
 
 static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
-static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
 
 static const struct nf_conntrack_expect_policy ftp_exp_policy = {
 	.max_expected	= 1,
@@ -541,7 +540,6 @@ static void nf_conntrack_ftp_fini(void)
 static int __init nf_conntrack_ftp_init(void)
 {
 	int i, j = -1, ret = 0;
-	char *tmpname;
 
 	ftp_buffer = kmalloc(65536, GFP_KERNEL);
 	if (!ftp_buffer)
@@ -556,17 +554,16 @@ static int __init nf_conntrack_ftp_init(void)
 		ftp[i][0].tuple.src.l3num = PF_INET;
 		ftp[i][1].tuple.src.l3num = PF_INET6;
 		for (j = 0; j < 2; j++) {
+			ftp[i][j].data_len = sizeof(struct nf_ct_ftp_master);
 			ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
 			ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
 			ftp[i][j].expect_policy = &ftp_exp_policy;
 			ftp[i][j].me = THIS_MODULE;
 			ftp[i][j].help = help;
-			tmpname = &ftp_names[i][j][0];
 			if (ports[i] == FTP_PORT)
-				sprintf(tmpname, "ftp");
+				sprintf(ftp[i][j].name, "ftp");
 			else
-				sprintf(tmpname, "ftp-%d", ports[i]);
-			ftp[i][j].name = tmpname;
+				sprintf(ftp[i][j].name, "ftp-%d", ports[i]);
 
 			pr_debug("nf_ct_ftp: registering helper for pf: %d "
 				 "port: %d\n",
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 31f50bc3a312..4283b207e63b 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -114,7 +114,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
 			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 			 unsigned char **data, int *datalen, int *dataoff)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int dir = CTINFO2DIR(ctinfo);
 	const struct tcphdr *th;
 	struct tcphdr _tcph;
@@ -617,6 +617,7 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = {
 static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
 	.name			= "H.245",
 	.me			= THIS_MODULE,
+	.data_len		= sizeof(struct nf_ct_h323_master),
 	.tuple.src.l3num	= AF_UNSPEC,
 	.tuple.dst.protonum	= IPPROTO_UDP,
 	.help			= h245_help,
@@ -1169,6 +1170,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
 	{
 		.name			= "Q.931",
 		.me			= THIS_MODULE,
+		.data_len		= sizeof(struct nf_ct_h323_master),
 		.tuple.src.l3num	= AF_INET,
 		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
 		.tuple.dst.protonum	= IPPROTO_TCP,
@@ -1244,7 +1246,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 		       unsigned char **data,
 		       TransportAddress *taddr, int count)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int dir = CTINFO2DIR(ctinfo);
 	int ret = 0;
 	int i;
@@ -1359,7 +1361,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, RegistrationRequest *rrq)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int ret;
 	typeof(set_ras_addr_hook) set_ras_addr;
 
@@ -1394,7 +1396,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, RegistrationConfirm *rcf)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int dir = CTINFO2DIR(ctinfo);
 	int ret;
 	struct nf_conntrack_expect *exp;
@@ -1443,7 +1445,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, UnregistrationRequest *urq)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int dir = CTINFO2DIR(ctinfo);
 	int ret;
 	typeof(set_sig_addr_hook) set_sig_addr;
@@ -1475,7 +1477,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, AdmissionRequest *arq)
 {
-	const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	const struct nf_ct_h323_master *info = nfct_help_data(ct);
 	int dir = CTINFO2DIR(ctinfo);
 	__be16 port;
 	union nf_inet_addr addr;
@@ -1742,6 +1744,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
 	{
 		.name			= "RAS",
 		.me			= THIS_MODULE,
+		.data_len		= sizeof(struct nf_ct_h323_master),
 		.tuple.src.l3num	= AF_INET,
 		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
 		.tuple.dst.protonum	= IPPROTO_UDP,
@@ -1751,6 +1754,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
 	{
 		.name			= "RAS",
 		.me			= THIS_MODULE,
+		.data_len		= sizeof(struct nf_ct_h323_master),
 		.tuple.src.l3num	= AF_INET6,
 		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
 		.tuple.dst.protonum	= IPPROTO_UDP,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4fa2ff961f5a..c4bc637feb76 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -30,8 +30,10 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 
 static DEFINE_MUTEX(nf_ct_helper_mutex);
-static struct hlist_head *nf_ct_helper_hash __read_mostly;
-static unsigned int nf_ct_helper_hsize __read_mostly;
+struct hlist_head *nf_ct_helper_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
+unsigned int nf_ct_helper_hsize __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
 static bool nf_ct_auto_assign_helper __read_mostly = true;
@@ -161,11 +163,14 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
 
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
+struct nf_conn_help *
+nf_ct_helper_ext_add(struct nf_conn *ct,
+		     struct nf_conntrack_helper *helper, gfp_t gfp)
 {
 	struct nf_conn_help *help;
 
-	help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
+	help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER,
+				    helper->data_len, gfp);
 	if (help)
 		INIT_HLIST_HEAD(&help->expectations);
 	else
@@ -218,13 +223,19 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 	}
 
 	if (help == NULL) {
-		help = nf_ct_helper_ext_add(ct, flags);
+		help = nf_ct_helper_ext_add(ct, helper, flags);
 		if (help == NULL) {
 			ret = -ENOMEM;
 			goto out;
 		}
 	} else {
-		memset(&help->help, 0, sizeof(help->help));
+		/* We only allow helper re-assignment of the same sort since
+		 * we cannot reallocate the helper extension area.
+		 */
+		if (help->helper != helper) {
+			RCU_INIT_POINTER(help->helper, NULL);
+			goto out;
+		}
 	}
 
 	rcu_assign_pointer(help->helper, helper);
@@ -319,6 +330,9 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
+	int ret = 0;
+	struct nf_conntrack_helper *cur;
+	struct hlist_node *n;
 	unsigned int h = helper_hash(&me->tuple);
 
 	BUG_ON(me->expect_policy == NULL);
@@ -326,11 +340,19 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
 
 	mutex_lock(&nf_ct_helper_mutex);
+	hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+		if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
+		    cur->tuple.src.l3num == me->tuple.src.l3num &&
+		    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
 	hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
 	nf_ct_helper_count++;
+out:
 	mutex_unlock(&nf_ct_helper_mutex);
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
 
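nf_conntrack_helper_register() above now refuses duplicates: it walks the hash bucket under the mutex and returns -EEXIST when a helper with the same name, l3num and protonum is already present. A userspace model of the check using a plain singly linked bucket; all names here are stand-ins for the hlist machinery:

#include <string.h>
#include <stdio.h>

#define NAME_LEN 16
#define EEXIST 17

struct helper {
	char name[NAME_LEN];
	int l3num, protonum;
	struct helper *next; /* stands in for the hlist bucket chain */
};

static struct helper *bucket;

static int helper_register(struct helper *me)
{
	struct helper *cur;

	for (cur = bucket; cur; cur = cur->next) {
		if (!strncmp(cur->name, me->name, NAME_LEN) &&
		    cur->l3num == me->l3num &&
		    cur->protonum == me->protonum)
			return -EEXIST; /* same tuple already registered */
	}
	me->next = bucket;
	bucket = me;
	return 0;
}

int main(void)
{
	struct helper a = { "ftp", 2, 6 }, b = { "ftp", 2, 6 };

	/* second registration of the same (name, l3num, protonum) fails */
	printf("%d %d\n", helper_register(&a), helper_register(&b));
	return 0;
}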
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 81366c118271..009c52cfd1ec 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -221,7 +221,6 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 }
 
 static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
-static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
 static struct nf_conntrack_expect_policy irc_exp_policy;
 
 static void nf_conntrack_irc_fini(void);
@@ -229,7 +228,6 @@ static void nf_conntrack_irc_fini(void);
 static int __init nf_conntrack_irc_init(void)
 {
 	int i, ret;
-	char *tmpname;
 
 	if (max_dcc_channels < 1) {
 		printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n");
@@ -255,12 +253,10 @@ static int __init nf_conntrack_irc_init(void)
 		irc[i].me = THIS_MODULE;
 		irc[i].help = help;
 
-		tmpname = &irc_names[i][0];
 		if (ports[i] == IRC_PORT)
-			sprintf(tmpname, "irc");
+			sprintf(irc[i].name, "irc");
 		else
-			sprintf(tmpname, "irc-%u", i);
-		irc[i].name = tmpname;
+			sprintf(irc[i].name, "irc-%u", i);
 
 		ret = nf_conntrack_helper_register(&irc[i]);
 		if (ret) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6f4b00a8fc73..14f67a2cbcb5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
  * (C) 2001 by Jay Schulist <jschlst@samba.org>
  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -46,6 +46,7 @@
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_helper.h>
 #endif
 
 #include <linux/netfilter/nfnetlink.h>
@@ -477,7 +478,6 @@ nla_put_failure:
 	return -1;
 }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
 static inline size_t
 ctnetlink_proto_size(const struct nf_conn *ct)
 {
@@ -564,6 +564,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 	;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
@@ -901,7 +902,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
 };
 
 static inline int
-ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
+ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
+		     struct nlattr **helpinfo)
 {
 	struct nlattr *tb[CTA_HELP_MAX+1];
 
@@ -912,6 +914,9 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
 
 	*helper_name = nla_data(tb[CTA_HELP_NAME]);
 
+	if (tb[CTA_HELP_INFO])
+		*helpinfo = tb[CTA_HELP_INFO];
+
 	return 0;
 }
 
@@ -1172,13 +1177,14 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
 	struct nf_conntrack_helper *helper;
 	struct nf_conn_help *help = nfct_help(ct);
 	char *helpname = NULL;
+	struct nlattr *helpinfo = NULL;
 	int err;
 
 	/* don't change helper of sibling connections */
 	if (ct->master)
 		return -EBUSY;
 
-	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
 	if (err < 0)
 		return err;
 
@@ -1213,20 +1219,17 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
 	}
 
 	if (help) {
-		if (help->helper == helper)
+		if (help->helper == helper) {
+			/* update private helper data if allowed. */
+			if (helper->from_nlattr && helpinfo)
+				helper->from_nlattr(helpinfo, ct);
 			return 0;
-		if (help->helper)
+		} else
 			return -EBUSY;
-		/* need to zero data of old helper */
-		memset(&help->help, 0, sizeof(help->help));
-	} else {
-		/* we cannot set a helper for an existing conntrack */
-		return -EOPNOTSUPP;
 	}
 
-	rcu_assign_pointer(help->helper, helper);
-
-	return 0;
+	/* we cannot set a helper for an existing conntrack */
+	return -EOPNOTSUPP;
 }
 
 static inline int
@@ -1410,8 +1413,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 	rcu_read_lock();
 	if (cda[CTA_HELP]) {
 		char *helpname = NULL;
-
-		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+		struct nlattr *helpinfo = NULL;
+
+		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
 		if (err < 0)
 			goto err2;
 
@@ -1440,11 +1444,14 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 		} else {
 			struct nf_conn_help *help;
 
-			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+			help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
 			if (help == NULL) {
 				err = -ENOMEM;
 				goto err2;
 			}
+			/* set private helper data if allowed. */
+			if (helper->from_nlattr && helpinfo)
+				helper->from_nlattr(helpinfo, ct);
 
 			/* not in hash table yet so not strictly necessary */
 			RCU_INIT_POINTER(help->helper, helper);
@@ -1620,6 +1627,288 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	return err;
 }
 
+static int
+ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+				__u16 cpu, const struct ip_conntrack_stat *st)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = htons(cpu);
+
+	if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
+	    nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
+	    nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
+	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
+	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
+	    nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
+	    nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
+	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
+	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
+			 htonl(st->insert_failed)) ||
+	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
+	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
+	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
+	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
+			 htonl(st->search_restart)))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int cpu;
+	struct net *net = sock_net(skb->sk);
+
+	if (cb->args[0] == nr_cpu_ids)
+		return 0;
+
+	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+		const struct ip_conntrack_stat *st;
+
+		if (!cpu_possible(cpu))
+			continue;
+
+		st = per_cpu_ptr(net->ct.stat, cpu);
+		if (ctnetlink_ct_stat_cpu_fill_info(skb,
+						    NETLINK_CB(cb->skb).pid,
+						    cb->nlh->nlmsg_seq,
+						    cpu, st) < 0)
+			break;
+	}
+	cb->args[0] = cpu;
+
+	return skb->len;
+}
+
+static int
+ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
+		      const struct nlmsghdr *nlh,
+		      const struct nlattr * const cda[])
+{
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = ctnetlink_ct_stat_cpu_dump,
+		};
+		return netlink_dump_start(ctnl, skb, nlh, &c);
+	}
+
+	return 0;
+}
+
+static int
+ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+			    struct net *net)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+	unsigned int nr_conntracks = atomic_read(&net->ct.count);
+
+	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
+		  const struct nlmsghdr *nlh,
+		  const struct nlattr * const cda[])
+{
+	struct sk_buff *skb2;
+	int err;
+
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
+					  nlh->nlmsg_seq,
+					  NFNL_MSG_TYPE(nlh->nlmsg_type),
+					  sock_net(skb->sk));
+	if (err <= 0)
+		goto free;
+
+	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	if (err < 0)
+		goto out;
+
+	return 0;
+
+free:
+	kfree_skb(skb2);
+out:
+	/* this avoids a loop in nfnetlink. */
+	return err == -EAGAIN ? -ENOBUFS : err;
+}
+
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+static size_t
+ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+{
+	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+	       + nla_total_size(0) /* CTA_PROTOINFO */
+	       + nla_total_size(0) /* CTA_HELP */
+	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+	       + ctnetlink_secctx_size(ct)
+#ifdef CONFIG_NF_NAT_NEEDED
+	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+	       + ctnetlink_proto_size(ct)
+	       ;
+}
+
+static int
+ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
+{
+	struct nlattr *nest_parms;
+
+	rcu_read_lock();
+	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
+	if (!nest_parms)
+		goto nla_put_failure;
+	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
+		goto nla_put_failure;
+	nla_nest_end(skb, nest_parms);
+
+	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
+	if (!nest_parms)
+		goto nla_put_failure;
+	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
+		goto nla_put_failure;
+	nla_nest_end(skb, nest_parms);
+
+	if (nf_ct_zone(ct)) {
+		if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+			goto nla_put_failure;
+	}
+
+	if (ctnetlink_dump_id(skb, ct) < 0)
+		goto nla_put_failure;
+
+	if (ctnetlink_dump_status(skb, ct) < 0)
+		goto nla_put_failure;
+
+	if (ctnetlink_dump_timeout(skb, ct) < 0)
+		goto nla_put_failure;
+
+	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
+		goto nla_put_failure;
+
+	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
+		goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
+		goto nla_put_failure;
+#endif
+	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
+		goto nla_put_failure;
+
+	if ((ct->status & IPS_SEQ_ADJUST) &&
+	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+		goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
+		goto nla_put_failure;
+#endif
+	rcu_read_unlock();
+	return 0;
+
+nla_put_failure:
+	rcu_read_unlock();
+	return -ENOSPC;
+}
+
+static int
+ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
+{
+	int err;
+
+	if (cda[CTA_TIMEOUT]) {
+		err = ctnetlink_change_timeout(ct, cda);
+		if (err < 0)
+			return err;
+	}
+	if (cda[CTA_STATUS]) {
+		err = ctnetlink_change_status(ct, cda);
+		if (err < 0)
+			return err;
+	}
+	if (cda[CTA_HELP]) {
+		err = ctnetlink_change_helper(ct, cda);
+		if (err < 0)
+			return err;
+	}
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+	if (cda[CTA_MARK])
+		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+#endif
+	return 0;
+}
+
+static int
+ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
+{
+	struct nlattr *cda[CTA_MAX+1];
+
+	nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
+
+	return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+}
+
+static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
+	.build_size	= ctnetlink_nfqueue_build_size,
+	.build		= ctnetlink_nfqueue_build,
+	.parse		= ctnetlink_nfqueue_parse,
+};
+#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+
 /***********************************************************************
  * EXPECT
  ***********************************************************************/
@@ -2300,6 +2589,79 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
 	return err;
 }
 
+static int
+ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
+			     const struct ip_conntrack_stat *st)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = htons(cpu);
+
+	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
+	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
+	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int cpu;
+	struct net *net = sock_net(skb->sk);
+
+	if (cb->args[0] == nr_cpu_ids)
+		return 0;
+
+	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+		const struct ip_conntrack_stat *st;
+
+		if (!cpu_possible(cpu))
+			continue;
+
+		st = per_cpu_ptr(net->ct.stat, cpu);
+		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
+						 cb->nlh->nlmsg_seq,
+						 cpu, st) < 0)
+			break;
+	}
+	cb->args[0] = cpu;
+
+	return skb->len;
+}
+
+static int
+ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
+		       const struct nlmsghdr *nlh,
+		       const struct nlattr * const cda[])
+{
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = ctnetlink_exp_stat_cpu_dump,
+		};
+		return netlink_dump_start(ctnl, skb, nlh, &c);
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 static struct nf_ct_event_notifier ctnl_notifier = {
 	.fcn = ctnetlink_conntrack_event,
@@ -2323,6 +2685,8 @@ static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
 	[IPCTNL_MSG_CT_GET_CTRZERO]	= { .call = ctnetlink_get_conntrack,
 					    .attr_count = CTA_MAX,
 					    .policy = ct_nla_policy },
+	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
+	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
 };
 
 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
@@ -2335,6 +2699,7 @@ static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
 	[IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
 				    .attr_count = CTA_EXPECT_MAX,
 				    .policy = exp_nla_policy },
+	[IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
 };
 
 static const struct nfnetlink_subsystem ctnl_subsys = {
@@ -2424,7 +2789,10 @@ static int __init ctnetlink_init(void)
 		pr_err("ctnetlink_init: cannot register pernet operations\n");
 		goto err_unreg_exp_subsys;
 	}
-
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+	/* setup interaction between nf_queue and nf_conntrack_netlink. */
+	RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
+#endif
 	return 0;
 
 err_unreg_exp_subsys:
@@ -2442,6 +2810,9 @@ static void __exit ctnetlink_exit(void)
 	unregister_pernet_subsys(&ctnetlink_net_ops);
 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
 	nfnetlink_subsys_unregister(&ctnl_subsys);
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+	RCU_INIT_POINTER(nfq_ct_hook, NULL);
+#endif
 }
 
 module_init(ctnetlink_init);
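The new per-CPU stats dumpers above (ctnetlink_ct_stat_cpu_dump() and ctnetlink_exp_stat_cpu_dump()) rely on the standard netlink dump-resume contract: the loop starts at the cursor stashed in cb->args[0], breaks when the message buffer fills, and stores the position back so the next callback invocation resumes where it stopped. A self-contained model of that control flow; the buffer capacity and CPU count are illustrative:

#include <stdio.h>

#define NR_CPUS 8

struct callback { long args[1]; };

static int fill_one(int cpu, int room_left)
{
	(void)cpu;
	return room_left > 0 ? 0 : -1; /* -1: message buffer is full */
}

static int stat_cpu_dump(struct callback *cb, int room)
{
	int cpu;

	if (cb->args[0] == NR_CPUS)
		return 0; /* previous pass already covered every CPU */

	for (cpu = cb->args[0]; cpu < NR_CPUS; cpu++) {
		if (fill_one(cpu, room--) < 0)
			break; /* resume here on the next invocation */
	}
	cb->args[0] = cpu;
	return 1;
}

int main(void)
{
	struct callback cb = { {0} };

	while (stat_cpu_dump(&cb, 3))
		printf("resumed at cpu %ld\n", cb.args[0]);
	return 0;
}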
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 31d56b23b9e9..6fed9ec35248 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -174,7 +174,7 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
 static void pptp_destroy_siblings(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
-	const struct nf_conn_help *help = nfct_help(ct);
+	const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
 	struct nf_conntrack_tuple t;
 
 	nf_ct_gre_keymap_destroy(ct);
@@ -182,16 +182,16 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
 	/* try original (pns->pac) tuple */
 	memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
 	t.dst.protonum = IPPROTO_GRE;
-	t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
-	t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
+	t.src.u.gre.key = ct_pptp_info->pns_call_id;
+	t.dst.u.gre.key = ct_pptp_info->pac_call_id;
 	if (!destroy_sibling_or_exp(net, ct, &t))
 		pr_debug("failed to timeout original pns->pac ct/exp\n");
 
 	/* try reply (pac->pns) tuple */
 	memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
 	t.dst.protonum = IPPROTO_GRE;
-	t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
-	t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
+	t.src.u.gre.key = ct_pptp_info->pac_call_id;
+	t.dst.u.gre.key = ct_pptp_info->pns_call_id;
 	if (!destroy_sibling_or_exp(net, ct, &t))
 		pr_debug("failed to timeout reply pac->pns ct/exp\n");
 }
@@ -269,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 		 struct nf_conn *ct,
 		 enum ip_conntrack_info ctinfo)
 {
-	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	struct nf_ct_pptp_master *info = nfct_help_data(ct);
 	u_int16_t msg;
 	__be16 cid = 0, pcid = 0;
 	typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
@@ -396,7 +396,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
 		  struct nf_conn *ct,
 		  enum ip_conntrack_info ctinfo)
 {
-	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	struct nf_ct_pptp_master *info = nfct_help_data(ct);
 	u_int16_t msg;
 	__be16 cid = 0, pcid = 0;
 	typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
@@ -506,7 +506,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
 
 {
 	int dir = CTINFO2DIR(ctinfo);
-	const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	const struct nf_ct_pptp_master *info = nfct_help_data(ct);
 	const struct tcphdr *tcph;
 	struct tcphdr _tcph;
 	const struct pptp_pkt_hdr *pptph;
@@ -592,6 +592,7 @@ static const struct nf_conntrack_expect_policy pptp_exp_policy = {
 static struct nf_conntrack_helper pptp __read_mostly = {
 	.name			= "pptp",
 	.me			= THIS_MODULE,
+	.data_len		= sizeof(struct nf_ct_pptp_master),
 	.tuple.src.l3num	= AF_INET,
 	.tuple.src.u.tcp.port	= cpu_to_be16(PPTP_CONTROL_PORT),
 	.tuple.dst.protonum	= IPPROTO_TCP,
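The pptp conversion above is the consumer side of the variable-length helper extension: private state such as nf_ct_pptp_master no longer lives in a union inside nf_conn_help but in the data_len-sized area appended to the extension, reached through nfct_help_data(). A userspace model of that layout; help_ext and help_data() are stand-ins for the kernel structures:

#include <stdio.h>
#include <stdlib.h>

struct help_ext { void *helper; /* private data follows the struct */ };

struct pptp_master { unsigned short pns_call_id, pac_call_id; };

static void *help_data(struct help_ext *h)
{
	return (char *)h + sizeof(*h); /* data area appended at alloc time */
}

int main(void)
{
	/* one allocation covers the extension plus the helper's data_len */
	struct help_ext *h = calloc(1, sizeof(*h) + sizeof(struct pptp_master));
	struct pptp_master *info = help_data(h);

	info->pns_call_id = 1;
	info->pac_call_id = 2;
	printf("%u %u\n", info->pns_call_id, info->pac_call_id);
	free(h);
	return 0;
}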
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 8b631b07a645..0dc63854390f 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,28 +36,32 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
36 36
37#ifdef CONFIG_SYSCTL 37#ifdef CONFIG_SYSCTL
38static int 38static int
39nf_ct_register_sysctl(struct ctl_table_header **header, const char *path, 39nf_ct_register_sysctl(struct net *net,
40 struct ctl_table *table, unsigned int *users) 40 struct ctl_table_header **header,
41 const char *path,
42 struct ctl_table *table)
41{ 43{
42 if (*header == NULL) { 44 if (*header == NULL) {
43 *header = register_net_sysctl(&init_net, path, table); 45 *header = register_net_sysctl(net, path, table);
44 if (*header == NULL) 46 if (*header == NULL)
45 return -ENOMEM; 47 return -ENOMEM;
46 } 48 }
47 if (users != NULL) 49
48 (*users)++;
49 return 0; 50 return 0;
50} 51}
51 52
52static void 53static void
53nf_ct_unregister_sysctl(struct ctl_table_header **header, 54nf_ct_unregister_sysctl(struct ctl_table_header **header,
54 struct ctl_table *table, unsigned int *users) 55 struct ctl_table **table,
56 unsigned int users)
55{ 57{
56 if (users != NULL && --*users > 0) 58 if (users > 0)
57 return; 59 return;
58 60
59 unregister_net_sysctl_table(*header); 61 unregister_net_sysctl_table(*header);
62 kfree(*table);
60 *header = NULL; 63 *header = NULL;
64 *table = NULL;
61} 65}
62#endif 66#endif
63 67
@@ -161,30 +165,56 @@ static int kill_l4proto(struct nf_conn *i, void *data)
161 nf_ct_l3num(i) == l4proto->l3proto; 165 nf_ct_l3num(i) == l4proto->l3proto;
162} 166}
163 167
164static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto) 168static struct nf_ip_net *nf_ct_l3proto_net(struct net *net,
169 struct nf_conntrack_l3proto *l3proto)
165{ 170{
166 int err = 0; 171 if (l3proto->l3proto == PF_INET)
172 return &net->ct.nf_ct_proto;
173 else
174 return NULL;
175}
167 176
168#ifdef CONFIG_SYSCTL 177static int nf_ct_l3proto_register_sysctl(struct net *net,
169 if (l3proto->ctl_table != NULL) { 178 struct nf_conntrack_l3proto *l3proto)
170 err = nf_ct_register_sysctl(&l3proto->ctl_table_header, 179{
180 int err = 0;
181 struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
182 /* nf_conntrack_l3proto_ipv6 doesn't support sysctl */
183 if (in == NULL)
184 return 0;
185
186#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
187 if (in->ctl_table != NULL) {
188 err = nf_ct_register_sysctl(net,
189 &in->ctl_table_header,
171 l3proto->ctl_table_path, 190 l3proto->ctl_table_path,
172 l3proto->ctl_table, NULL); 191 in->ctl_table);
192 if (err < 0) {
193 kfree(in->ctl_table);
194 in->ctl_table = NULL;
195 }
173 } 196 }
174#endif 197#endif
175 return err; 198 return err;
176} 199}
177 200
178static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto) 201static void nf_ct_l3proto_unregister_sysctl(struct net *net,
202 struct nf_conntrack_l3proto *l3proto)
179{ 203{
180#ifdef CONFIG_SYSCTL 204 struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
181 if (l3proto->ctl_table_header != NULL) 205
182 nf_ct_unregister_sysctl(&l3proto->ctl_table_header, 206 if (in == NULL)
183 l3proto->ctl_table, NULL); 207 return;
208#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
209 if (in->ctl_table_header != NULL)
210 nf_ct_unregister_sysctl(&in->ctl_table_header,
211 &in->ctl_table,
212 0);
184#endif 213#endif
185} 214}
186 215
187int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) 216static int
217nf_conntrack_l3proto_register_net(struct nf_conntrack_l3proto *proto)
188{ 218{
189 int ret = 0; 219 int ret = 0;
190 struct nf_conntrack_l3proto *old; 220 struct nf_conntrack_l3proto *old;
@@ -203,10 +233,6 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
203 goto out_unlock; 233 goto out_unlock;
204 } 234 }
205 235
206 ret = nf_ct_l3proto_register_sysctl(proto);
207 if (ret < 0)
208 goto out_unlock;
209
210 if (proto->nlattr_tuple_size) 236 if (proto->nlattr_tuple_size)
211 proto->nla_size = 3 * proto->nlattr_tuple_size(); 237 proto->nla_size = 3 * proto->nlattr_tuple_size();
212 238
@@ -215,13 +241,37 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
215out_unlock: 241out_unlock:
216 mutex_unlock(&nf_ct_proto_mutex); 242 mutex_unlock(&nf_ct_proto_mutex);
217 return ret; 243 return ret;
244
218} 245}
219EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
220 246
221void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) 247int nf_conntrack_l3proto_register(struct net *net,
248 struct nf_conntrack_l3proto *proto)
222{ 249{
223 struct net *net; 250 int ret = 0;
251
252 if (proto->init_net) {
253 ret = proto->init_net(net);
254 if (ret < 0)
255 return ret;
256 }
257
258 ret = nf_ct_l3proto_register_sysctl(net, proto);
259 if (ret < 0)
260 return ret;
224 261
262 if (net == &init_net) {
263 ret = nf_conntrack_l3proto_register_net(proto);
264 if (ret < 0)
265 nf_ct_l3proto_unregister_sysctl(net, proto);
266 }
267
268 return ret;
269}
270EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
271
272static void
273nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
274{
225 BUG_ON(proto->l3proto >= AF_MAX); 275 BUG_ON(proto->l3proto >= AF_MAX);
226 276
227 mutex_lock(&nf_ct_proto_mutex); 277 mutex_lock(&nf_ct_proto_mutex);
@@ -230,68 +280,107 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
230 ) != proto); 280 ) != proto);
231 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], 281 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
232 &nf_conntrack_l3proto_generic); 282 &nf_conntrack_l3proto_generic);
233 nf_ct_l3proto_unregister_sysctl(proto);
234 mutex_unlock(&nf_ct_proto_mutex); 283 mutex_unlock(&nf_ct_proto_mutex);
235 284
236 synchronize_rcu(); 285 synchronize_rcu();
286}
287
288void nf_conntrack_l3proto_unregister(struct net *net,
289 struct nf_conntrack_l3proto *proto)
290{
291 if (net == &init_net)
292 nf_conntrack_l3proto_unregister_net(proto);
293
294 nf_ct_l3proto_unregister_sysctl(net, proto);
237 295
238 /* Remove all contrack entries for this protocol */ 296 /* Remove all contrack entries for this protocol */
239 rtnl_lock(); 297 rtnl_lock();
240 for_each_net(net) 298 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
241 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
242 rtnl_unlock(); 299 rtnl_unlock();
243} 300}
244EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 301EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
245 302
246static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) 303static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
304 struct nf_conntrack_l4proto *l4proto)
305{
306 if (l4proto->get_net_proto) {
307 /* statically built-in protocols use static per-net */
308 return l4proto->get_net_proto(net);
309 } else if (l4proto->net_id) {
310 /* ... and loadable protocols use dynamic per-net */
311 return net_generic(net, *l4proto->net_id);
312 }
313 return NULL;
314}
315
316static
317int nf_ct_l4proto_register_sysctl(struct net *net,
318 struct nf_proto_net *pn,
319 struct nf_conntrack_l4proto *l4proto)
247{ 320{
248 int err = 0; 321 int err = 0;
249 322
250#ifdef CONFIG_SYSCTL 323#ifdef CONFIG_SYSCTL
251 if (l4proto->ctl_table != NULL) { 324 if (pn->ctl_table != NULL) {
252 err = nf_ct_register_sysctl(l4proto->ctl_table_header, 325 err = nf_ct_register_sysctl(net,
326 &pn->ctl_table_header,
253 "net/netfilter", 327 "net/netfilter",
254 l4proto->ctl_table, 328 pn->ctl_table);
255 l4proto->ctl_table_users); 329 if (err < 0) {
256 if (err < 0) 330 if (!pn->users) {
257 goto out; 331 kfree(pn->ctl_table);
332 pn->ctl_table = NULL;
333 }
334 }
258 } 335 }
259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 336#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
260 if (l4proto->ctl_compat_table != NULL) { 337 if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_table != NULL) {
261 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, 338 if (err < 0) {
339 nf_ct_kfree_compat_sysctl_table(pn);
340 goto out;
341 }
342 err = nf_ct_register_sysctl(net,
343 &pn->ctl_compat_header,
262 "net/ipv4/netfilter", 344 "net/ipv4/netfilter",
263 l4proto->ctl_compat_table, NULL); 345 pn->ctl_compat_table);
264 if (err == 0) 346 if (err == 0)
265 goto out; 347 goto out;
266 nf_ct_unregister_sysctl(l4proto->ctl_table_header, 348
267 l4proto->ctl_table, 349 nf_ct_kfree_compat_sysctl_table(pn);
268 l4proto->ctl_table_users); 350 nf_ct_unregister_sysctl(&pn->ctl_table_header,
351 &pn->ctl_table,
352 pn->users);
269 } 353 }
270#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
271out: 354out:
355#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
272#endif /* CONFIG_SYSCTL */ 356#endif /* CONFIG_SYSCTL */
273 return err; 357 return err;
274} 358}
275 359
276static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto) 360static
361void nf_ct_l4proto_unregister_sysctl(struct net *net,
362 struct nf_proto_net *pn,
363 struct nf_conntrack_l4proto *l4proto)
277{ 364{
278#ifdef CONFIG_SYSCTL 365#ifdef CONFIG_SYSCTL
279 if (l4proto->ctl_table_header != NULL && 366 if (pn->ctl_table_header != NULL)
280 *l4proto->ctl_table_header != NULL) 367 nf_ct_unregister_sysctl(&pn->ctl_table_header,
281 nf_ct_unregister_sysctl(l4proto->ctl_table_header, 368 &pn->ctl_table,
282 l4proto->ctl_table, 369 pn->users);
283 l4proto->ctl_table_users); 370
284#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 371#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
285 if (l4proto->ctl_compat_table_header != NULL) 372 if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_header != NULL)
286 nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header, 373 nf_ct_unregister_sysctl(&pn->ctl_compat_header,
287 l4proto->ctl_compat_table, NULL); 374 &pn->ctl_compat_table,
375 0);
288#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 376#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
289#endif /* CONFIG_SYSCTL */ 377#endif /* CONFIG_SYSCTL */
290} 378}
291 379
292/* FIXME: Allow NULL functions and sub in pointers to generic for 380/* FIXME: Allow NULL functions and sub in pointers to generic for
293 them. --RR */ 381 them. --RR */
294int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) 382static int
383nf_conntrack_l4proto_register_net(struct nf_conntrack_l4proto *l4proto)
295{ 384{
296 int ret = 0; 385 int ret = 0;
297 386
@@ -333,10 +422,6 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
333 goto out_unlock; 422 goto out_unlock;
334 } 423 }
335 424
336 ret = nf_ct_l4proto_register_sysctl(l4proto);
337 if (ret < 0)
338 goto out_unlock;
339
340 l4proto->nla_size = 0; 425 l4proto->nla_size = 0;
341 if (l4proto->nlattr_size) 426 if (l4proto->nlattr_size)
342 l4proto->nla_size += l4proto->nlattr_size(); 427 l4proto->nla_size += l4proto->nlattr_size();
@@ -345,17 +430,48 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
345 430
346 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], 431 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
347 l4proto); 432 l4proto);
348
349out_unlock: 433out_unlock:
350 mutex_unlock(&nf_ct_proto_mutex); 434 mutex_unlock(&nf_ct_proto_mutex);
351 return ret; 435 return ret;
352} 436}
353EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
354 437
355void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) 438int nf_conntrack_l4proto_register(struct net *net,
439 struct nf_conntrack_l4proto *l4proto)
356{ 440{
357 struct net *net; 441 int ret = 0;
442 struct nf_proto_net *pn = NULL;
358 443
444 if (l4proto->init_net) {
445 ret = l4proto->init_net(net, l4proto->l3proto);
446 if (ret < 0)
447 goto out;
448 }
449
450 pn = nf_ct_l4proto_net(net, l4proto);
451 if (pn == NULL)
452 goto out;
453
454 ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
455 if (ret < 0)
456 goto out;
457
458 if (net == &init_net) {
459 ret = nf_conntrack_l4proto_register_net(l4proto);
460 if (ret < 0) {
461 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
462 goto out;
463 }
464 }
465
466 pn->users++;
467out:
468 return ret;
469}
470EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
471
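The new registration path hinges on pn->users: the per-net state (timeouts and the kmemdup'd sysctl tables) is shared by the IPv4 and IPv6 trackers of one protocol, set up by the first registration in a namespace and torn down by the last, while the global nf_ct_protos[][] slot is touched only for init_net. A simplified, compilable model of that refcount, using made-up names (proto_net, have_table, l4proto_register), not the kernel API:

#include <stdio.h>

struct proto_net {
	unsigned int users;	/* v4 and v6 trackers share this per-net state */
	int have_table;		/* stand-in for the kmemdup'd ctl_table */
};

static int l4proto_register(struct proto_net *pn, int is_init_net)
{
	if (!pn->users)
		pn->have_table = 1;	/* first user sets up defaults and tables */
	if (is_init_net) {
		/* the global nf_ct_protos[][] slot would be filled here, once */
	}
	pn->users++;
	return 0;
}

static void l4proto_unregister(struct proto_net *pn)
{
	if (--pn->users == 0)
		pn->have_table = 0;	/* last user tears the table down */
}

int main(void)
{
	struct proto_net pn = { 0, 0 };

	l4proto_register(&pn, 1);	/* e.g. the v4 tracker */
	l4proto_register(&pn, 1);	/* e.g. the v6 tracker, same namespace */
	printf("users=%u\n", pn.users);		/* 2 */
	l4proto_unregister(&pn);
	l4proto_unregister(&pn);
	printf("table=%d\n", pn.have_table);	/* 0 */
	return 0;
}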
472static void
473nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
474{
359 BUG_ON(l4proto->l3proto >= PF_MAX); 475 BUG_ON(l4proto->l3proto >= PF_MAX);
360 476
361 mutex_lock(&nf_ct_proto_mutex); 477 mutex_lock(&nf_ct_proto_mutex);
@@ -365,41 +481,73 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
365 ) != l4proto); 481 ) != l4proto);
366 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], 482 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
367 &nf_conntrack_l4proto_generic); 483 &nf_conntrack_l4proto_generic);
368 nf_ct_l4proto_unregister_sysctl(l4proto);
369 mutex_unlock(&nf_ct_proto_mutex); 484 mutex_unlock(&nf_ct_proto_mutex);
370 485
371 synchronize_rcu(); 486 synchronize_rcu();
487}
488
489void nf_conntrack_l4proto_unregister(struct net *net,
490 struct nf_conntrack_l4proto *l4proto)
491{
492 struct nf_proto_net *pn = NULL;
493
494 if (net == &init_net)
495 nf_conntrack_l4proto_unregister_net(l4proto);
496
497 pn = nf_ct_l4proto_net(net, l4proto);
498 if (pn == NULL)
499 return;
500
501 pn->users--;
502 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
372 503
 373 /* Remove all conntrack entries for this protocol */ 504
374 rtnl_lock(); 505 rtnl_lock();
375 for_each_net(net) 506 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
376 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
377 rtnl_unlock(); 507 rtnl_unlock();
378} 508}
379EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 509EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
380 510
381int nf_conntrack_proto_init(void) 511int nf_conntrack_proto_init(struct net *net)
382{ 512{
383 unsigned int i; 513 unsigned int i;
384 int err; 514 int err;
515 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
516 &nf_conntrack_l4proto_generic);
385 517
386 err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic); 518 err = nf_conntrack_l4proto_generic.init_net(net,
519 nf_conntrack_l4proto_generic.l3proto);
520 if (err < 0)
521 return err;
522 err = nf_ct_l4proto_register_sysctl(net,
523 pn,
524 &nf_conntrack_l4proto_generic);
387 if (err < 0) 525 if (err < 0)
388 return err; 526 return err;
389 527
390 for (i = 0; i < AF_MAX; i++) 528 if (net == &init_net) {
391 rcu_assign_pointer(nf_ct_l3protos[i], 529 for (i = 0; i < AF_MAX; i++)
392 &nf_conntrack_l3proto_generic); 530 rcu_assign_pointer(nf_ct_l3protos[i],
531 &nf_conntrack_l3proto_generic);
532 }
533
534 pn->users++;
393 return 0; 535 return 0;
394} 536}
395 537
396void nf_conntrack_proto_fini(void) 538void nf_conntrack_proto_fini(struct net *net)
397{ 539{
398 unsigned int i; 540 unsigned int i;
399 541 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
400 nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic); 542 &nf_conntrack_l4proto_generic);
401 543
402 /* free l3proto protocol tables */ 544 pn->users--;
403 for (i = 0; i < PF_MAX; i++) 545 nf_ct_l4proto_unregister_sysctl(net,
404 kfree(nf_ct_protos[i]); 546 pn,
547 &nf_conntrack_l4proto_generic);
548 if (net == &init_net) {
549 /* free l3proto protocol tables */
550 for (i = 0; i < PF_MAX; i++)
551 kfree(nf_ct_protos[i]);
552 }
405} 553}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index ef706a485be1..6535326cf07c 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -387,12 +387,9 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
387/* this module per-net specifics */ 387/* this module per-net specifics */
388static int dccp_net_id __read_mostly; 388static int dccp_net_id __read_mostly;
389struct dccp_net { 389struct dccp_net {
390 struct nf_proto_net pn;
390 int dccp_loose; 391 int dccp_loose;
391 unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 392 unsigned int dccp_timeout[CT_DCCP_MAX + 1];
392#ifdef CONFIG_SYSCTL
393 struct ctl_table_header *sysctl_header;
394 struct ctl_table *sysctl_table;
395#endif
396}; 393};
397 394
398static inline struct dccp_net *dccp_pernet(struct net *net) 395static inline struct dccp_net *dccp_pernet(struct net *net)
@@ -715,9 +712,10 @@ static int dccp_nlattr_size(void)
715#include <linux/netfilter/nfnetlink.h> 712#include <linux/netfilter/nfnetlink.h>
716#include <linux/netfilter/nfnetlink_cttimeout.h> 713#include <linux/netfilter/nfnetlink_cttimeout.h>
717 714
718static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 715static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
716 struct net *net, void *data)
719{ 717{
720 struct dccp_net *dn = dccp_pernet(&init_net); 718 struct dccp_net *dn = dccp_pernet(net);
721 unsigned int *timeouts = data; 719 unsigned int *timeouts = data;
722 int i; 720 int i;
723 721
@@ -817,6 +815,51 @@ static struct ctl_table dccp_sysctl_table[] = {
817}; 815};
818#endif /* CONFIG_SYSCTL */ 816#endif /* CONFIG_SYSCTL */
819 817
818static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
819 struct dccp_net *dn)
820{
821#ifdef CONFIG_SYSCTL
822 if (pn->ctl_table)
823 return 0;
824
825 pn->ctl_table = kmemdup(dccp_sysctl_table,
826 sizeof(dccp_sysctl_table),
827 GFP_KERNEL);
828 if (!pn->ctl_table)
829 return -ENOMEM;
830
831 pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
832 pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
833 pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
834 pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
835 pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
836 pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
837 pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
838 pn->ctl_table[7].data = &dn->dccp_loose;
839#endif
840 return 0;
841}
842
843static int dccp_init_net(struct net *net, u_int16_t proto)
844{
845 struct dccp_net *dn = dccp_pernet(net);
846 struct nf_proto_net *pn = &dn->pn;
847
848 if (!pn->users) {
849 /* default values */
850 dn->dccp_loose = 1;
851 dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
852 dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
853 dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
854 dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
855 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
856 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
857 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
858 }
859
860 return dccp_kmemdup_sysctl_table(pn, dn);
861}
862
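dccp_kmemdup_sysctl_table() above is the template for how every tracker in this series goes per-net: a single read-only ctl_table acts as a template, each namespace duplicates it, and each duplicated row's .data pointer is re-aimed at that namespace's own timeout storage. The following standalone C sketch mirrors the idea with mock types (ctl_row and instance are not kernel names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_row {
	const char *procname;
	unsigned int *data;	/* re-aimed at per-instance storage after the copy */
};

static const struct ctl_row template[] = {
	{ "timeout_request", NULL },
	{ "timeout_respond", NULL },
};

struct instance {
	unsigned int timeouts[2];
	struct ctl_row *table;	/* this instance's private copy */
};

static int instance_init(struct instance *in)
{
	in->table = malloc(sizeof(template));	/* the kmemdup() step */
	if (!in->table)
		return -1;
	memcpy(in->table, template, sizeof(template));
	in->table[0].data = &in->timeouts[0];
	in->table[1].data = &in->timeouts[1];
	return 0;
}

int main(void)
{
	struct instance a = { { 0 }, NULL }, b = { { 0 }, NULL };

	if (instance_init(&a) || instance_init(&b))
		return 1;
	*a.table[0].data = 10;	/* tune one namespace ... */
	*b.table[0].data = 99;	/* ... without disturbing the other */
	printf("a=%u b=%u\n", a.timeouts[0], b.timeouts[0]);
	free(a.table);
	free(b.table);
	return 0;
}

Writing through one instance's table leaves the other untouched, which is exactly the isolation the per-net conversion is after.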
820static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { 863static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
821 .l3proto = AF_INET, 864 .l3proto = AF_INET,
822 .l4proto = IPPROTO_DCCP, 865 .l4proto = IPPROTO_DCCP,
@@ -847,6 +890,8 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
847 .nla_policy = dccp_timeout_nla_policy, 890 .nla_policy = dccp_timeout_nla_policy,
848 }, 891 },
849#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 892#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
893 .net_id = &dccp_net_id,
894 .init_net = dccp_init_net,
850}; 895};
851 896
852static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { 897static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -879,55 +924,39 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
879 .nla_policy = dccp_timeout_nla_policy, 924 .nla_policy = dccp_timeout_nla_policy,
880 }, 925 },
881#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 926#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
927 .net_id = &dccp_net_id,
928 .init_net = dccp_init_net,
882}; 929};
883 930
884static __net_init int dccp_net_init(struct net *net) 931static __net_init int dccp_net_init(struct net *net)
885{ 932{
886 struct dccp_net *dn = dccp_pernet(net); 933 int ret = 0;
887 934 ret = nf_conntrack_l4proto_register(net,
888 /* default values */ 935 &dccp_proto4);
889 dn->dccp_loose = 1; 936 if (ret < 0) {
 890 dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; 937 pr_err("nf_conntrack_l4proto_dccp4: protocol register failed.\n");
891 dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; 938 goto out;
892 dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; 939 }
893 dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; 940 ret = nf_conntrack_l4proto_register(net,
894 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 941 &dccp_proto6);
895 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 942 if (ret < 0) {
 896 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 943 pr_err("nf_conntrack_l4proto_dccp6: protocol register failed.\n");
897 944 goto cleanup_dccp4;
898#ifdef CONFIG_SYSCTL
899 dn->sysctl_table = kmemdup(dccp_sysctl_table,
900 sizeof(dccp_sysctl_table), GFP_KERNEL);
901 if (!dn->sysctl_table)
902 return -ENOMEM;
903
904 dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
905 dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
906 dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
907 dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
908 dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
909 dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
910 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
911 dn->sysctl_table[7].data = &dn->dccp_loose;
912
913 dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
914 dn->sysctl_table);
915 if (!dn->sysctl_header) {
916 kfree(dn->sysctl_table);
917 return -ENOMEM;
918 } 945 }
919#endif
920
921 return 0; 946 return 0;
947cleanup_dccp4:
948 nf_conntrack_l4proto_unregister(net,
949 &dccp_proto4);
950out:
951 return ret;
922} 952}
923 953
924static __net_exit void dccp_net_exit(struct net *net) 954static __net_exit void dccp_net_exit(struct net *net)
925{ 955{
926 struct dccp_net *dn = dccp_pernet(net); 956 nf_conntrack_l4proto_unregister(net,
927#ifdef CONFIG_SYSCTL 957 &dccp_proto6);
928 unregister_net_sysctl_table(dn->sysctl_header); 958 nf_conntrack_l4proto_unregister(net,
929 kfree(dn->sysctl_table); 959 &dccp_proto4);
930#endif
931} 960}
932 961
933static struct pernet_operations dccp_net_ops = { 962static struct pernet_operations dccp_net_ops = {
@@ -939,34 +968,12 @@ static struct pernet_operations dccp_net_ops = {
939 968
940static int __init nf_conntrack_proto_dccp_init(void) 969static int __init nf_conntrack_proto_dccp_init(void)
941{ 970{
942 int err; 971 return register_pernet_subsys(&dccp_net_ops);
943
944 err = register_pernet_subsys(&dccp_net_ops);
945 if (err < 0)
946 goto err1;
947
948 err = nf_conntrack_l4proto_register(&dccp_proto4);
949 if (err < 0)
950 goto err2;
951
952 err = nf_conntrack_l4proto_register(&dccp_proto6);
953 if (err < 0)
954 goto err3;
955 return 0;
956
957err3:
958 nf_conntrack_l4proto_unregister(&dccp_proto4);
959err2:
960 unregister_pernet_subsys(&dccp_net_ops);
961err1:
962 return err;
963} 972}
964 973
965static void __exit nf_conntrack_proto_dccp_fini(void) 974static void __exit nf_conntrack_proto_dccp_fini(void)
966{ 975{
967 unregister_pernet_subsys(&dccp_net_ops); 976 unregister_pernet_subsys(&dccp_net_ops);
968 nf_conntrack_l4proto_unregister(&dccp_proto6);
969 nf_conntrack_l4proto_unregister(&dccp_proto4);
970} 977}
971 978
972module_init(nf_conntrack_proto_dccp_init); 979module_init(nf_conntrack_proto_dccp_init);
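With registration moved into dccp_net_init(), module init collapses to register_pernet_subsys() and the error unwinding lives in the per-net hook, where each successfully registered protocol gets its own cleanup label. A compilable miniature of that goto-unwind shape (register_v4 and register_v6 are stand-ins, and v6 is forced to fail so the unwind runs):

#include <stdio.h>

static int register_v4(void) { return 0; }
static int register_v6(void) { return -1; }	/* forced failure */
static void unregister_v4(void) { puts("unwound v4"); }

static int net_init(void)
{
	int ret;

	ret = register_v4();
	if (ret < 0)
		goto out;	/* nothing to undo yet */
	ret = register_v6();
	if (ret < 0)
		goto cleanup_v4;	/* undo exactly what succeeded */
	return 0;

cleanup_v4:
	unregister_v4();
out:
	return ret;
}

int main(void)
{
	printf("net_init() = %d\n", net_init());
	return 0;
}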
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d8923d54b358..d25f29377648 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,11 @@
14 14
15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; 15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
16 16
17static inline struct nf_generic_net *generic_pernet(struct net *net)
18{
19 return &net->ct.nf_ct_proto.generic;
20}
21
17static bool generic_pkt_to_tuple(const struct sk_buff *skb, 22static bool generic_pkt_to_tuple(const struct sk_buff *skb,
18 unsigned int dataoff, 23 unsigned int dataoff,
19 struct nf_conntrack_tuple *tuple) 24 struct nf_conntrack_tuple *tuple)
@@ -42,7 +47,7 @@ static int generic_print_tuple(struct seq_file *s,
42 47
43static unsigned int *generic_get_timeouts(struct net *net) 48static unsigned int *generic_get_timeouts(struct net *net)
44{ 49{
45 return &nf_ct_generic_timeout; 50 return &(generic_pernet(net)->timeout);
46} 51}
47 52
48/* Returns verdict for packet, or -1 for invalid. */ 53/* Returns verdict for packet, or -1 for invalid. */
@@ -70,16 +75,18 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
70#include <linux/netfilter/nfnetlink.h> 75#include <linux/netfilter/nfnetlink.h>
71#include <linux/netfilter/nfnetlink_cttimeout.h> 76#include <linux/netfilter/nfnetlink_cttimeout.h>
72 77
73static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 78static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
79 struct net *net, void *data)
74{ 80{
75 unsigned int *timeout = data; 81 unsigned int *timeout = data;
82 struct nf_generic_net *gn = generic_pernet(net);
76 83
77 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) 84 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
78 *timeout = 85 *timeout =
79 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ; 86 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
80 else { 87 else {
81 /* Set default generic timeout. */ 88 /* Set default generic timeout. */
82 *timeout = nf_ct_generic_timeout; 89 *timeout = gn->timeout;
83 } 90 }
84 91
85 return 0; 92 return 0;
@@ -106,11 +113,9 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
106#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 113#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
107 114
108#ifdef CONFIG_SYSCTL 115#ifdef CONFIG_SYSCTL
109static struct ctl_table_header *generic_sysctl_header;
110static struct ctl_table generic_sysctl_table[] = { 116static struct ctl_table generic_sysctl_table[] = {
111 { 117 {
112 .procname = "nf_conntrack_generic_timeout", 118 .procname = "nf_conntrack_generic_timeout",
113 .data = &nf_ct_generic_timeout,
114 .maxlen = sizeof(unsigned int), 119 .maxlen = sizeof(unsigned int),
115 .mode = 0644, 120 .mode = 0644,
116 .proc_handler = proc_dointvec_jiffies, 121 .proc_handler = proc_dointvec_jiffies,
@@ -121,7 +126,6 @@ static struct ctl_table generic_sysctl_table[] = {
121static struct ctl_table generic_compat_sysctl_table[] = { 126static struct ctl_table generic_compat_sysctl_table[] = {
122 { 127 {
123 .procname = "ip_conntrack_generic_timeout", 128 .procname = "ip_conntrack_generic_timeout",
124 .data = &nf_ct_generic_timeout,
125 .maxlen = sizeof(unsigned int), 129 .maxlen = sizeof(unsigned int),
126 .mode = 0644, 130 .mode = 0644,
127 .proc_handler = proc_dointvec_jiffies, 131 .proc_handler = proc_dointvec_jiffies,
@@ -131,6 +135,62 @@ static struct ctl_table generic_compat_sysctl_table[] = {
131#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 135#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
132#endif /* CONFIG_SYSCTL */ 136#endif /* CONFIG_SYSCTL */
133 137
138static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
139 struct nf_generic_net *gn)
140{
141#ifdef CONFIG_SYSCTL
142 pn->ctl_table = kmemdup(generic_sysctl_table,
143 sizeof(generic_sysctl_table),
144 GFP_KERNEL);
145 if (!pn->ctl_table)
146 return -ENOMEM;
147
148 pn->ctl_table[0].data = &gn->timeout;
149#endif
150 return 0;
151}
152
153static int generic_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
154 struct nf_generic_net *gn)
155{
156#ifdef CONFIG_SYSCTL
157#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
158 pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table,
159 sizeof(generic_compat_sysctl_table),
160 GFP_KERNEL);
161 if (!pn->ctl_compat_table)
162 return -ENOMEM;
163
164 pn->ctl_compat_table[0].data = &gn->timeout;
165#endif
166#endif
167 return 0;
168}
169
170static int generic_init_net(struct net *net, u_int16_t proto)
171{
172 int ret;
173 struct nf_generic_net *gn = generic_pernet(net);
174 struct nf_proto_net *pn = &gn->pn;
175
176 gn->timeout = nf_ct_generic_timeout;
177
178 ret = generic_kmemdup_compat_sysctl_table(pn, gn);
179 if (ret < 0)
180 return ret;
181
182 ret = generic_kmemdup_sysctl_table(pn, gn);
183 if (ret < 0)
184 nf_ct_kfree_compat_sysctl_table(pn);
185
186 return ret;
187}
188
189static struct nf_proto_net *generic_get_net_proto(struct net *net)
190{
191 return &net->ct.nf_ct_proto.generic.pn;
192}
193
134struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = 194struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
135{ 195{
136 .l3proto = PF_UNSPEC, 196 .l3proto = PF_UNSPEC,
@@ -151,11 +211,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
151 .nla_policy = generic_timeout_nla_policy, 211 .nla_policy = generic_timeout_nla_policy,
152 }, 212 },
153#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 213#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
154#ifdef CONFIG_SYSCTL 214 .init_net = generic_init_net,
155 .ctl_table_header = &generic_sysctl_header, 215 .get_net_proto = generic_get_net_proto,
156 .ctl_table = generic_sysctl_table,
157#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
158 .ctl_compat_table = generic_compat_sysctl_table,
159#endif
160#endif
161}; 216};
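generic_pernet() and generic_get_net_proto() illustrate the accessor idiom used for built-in trackers: the per-namespace state is embedded directly in the namespace object and reached through one typed inline helper, so no call site hard-codes the layout. A sketch with mock types (netns, generic_net), not the kernel's:

#include <stdio.h>

struct generic_net { unsigned int timeout; };
struct netns { struct generic_net generic; };	/* state embedded in the ns */

static inline struct generic_net *generic_pernet(struct netns *net)
{
	return &net->generic;	/* the one place that knows the layout */
}

int main(void)
{
	struct netns ns = { .generic = { .timeout = 600 } };

	printf("timeout=%u\n", generic_pernet(&ns)->timeout);
	return 0;
}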
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 4bf6b4e4b776..b09b7af7f6f8 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -54,13 +54,20 @@ static unsigned int gre_timeouts[GRE_CT_MAX] = {
54 54
55static int proto_gre_net_id __read_mostly; 55static int proto_gre_net_id __read_mostly;
56struct netns_proto_gre { 56struct netns_proto_gre {
57 struct nf_proto_net nf;
57 rwlock_t keymap_lock; 58 rwlock_t keymap_lock;
58 struct list_head keymap_list; 59 struct list_head keymap_list;
60 unsigned int gre_timeouts[GRE_CT_MAX];
59}; 61};
60 62
63static inline struct netns_proto_gre *gre_pernet(struct net *net)
64{
65 return net_generic(net, proto_gre_net_id);
66}
67
61void nf_ct_gre_keymap_flush(struct net *net) 68void nf_ct_gre_keymap_flush(struct net *net)
62{ 69{
63 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 70 struct netns_proto_gre *net_gre = gre_pernet(net);
64 struct nf_ct_gre_keymap *km, *tmp; 71 struct nf_ct_gre_keymap *km, *tmp;
65 72
66 write_lock_bh(&net_gre->keymap_lock); 73 write_lock_bh(&net_gre->keymap_lock);
@@ -85,7 +92,7 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
85/* look up the source key for a given tuple */ 92/* look up the source key for a given tuple */
86static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t) 93static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
87{ 94{
88 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 95 struct netns_proto_gre *net_gre = gre_pernet(net);
89 struct nf_ct_gre_keymap *km; 96 struct nf_ct_gre_keymap *km;
90 __be16 key = 0; 97 __be16 key = 0;
91 98
@@ -109,11 +116,11 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
109 struct nf_conntrack_tuple *t) 116 struct nf_conntrack_tuple *t)
110{ 117{
111 struct net *net = nf_ct_net(ct); 118 struct net *net = nf_ct_net(ct);
112 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 119 struct netns_proto_gre *net_gre = gre_pernet(net);
113 struct nf_conn_help *help = nfct_help(ct); 120 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
114 struct nf_ct_gre_keymap **kmp, *km; 121 struct nf_ct_gre_keymap **kmp, *km;
115 122
116 kmp = &help->help.ct_pptp_info.keymap[dir]; 123 kmp = &ct_pptp_info->keymap[dir];
117 if (*kmp) { 124 if (*kmp) {
118 /* check whether it's a retransmission */ 125 /* check whether it's a retransmission */
119 read_lock_bh(&net_gre->keymap_lock); 126 read_lock_bh(&net_gre->keymap_lock);
@@ -150,20 +157,20 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
150void nf_ct_gre_keymap_destroy(struct nf_conn *ct) 157void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
151{ 158{
152 struct net *net = nf_ct_net(ct); 159 struct net *net = nf_ct_net(ct);
153 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 160 struct netns_proto_gre *net_gre = gre_pernet(net);
154 struct nf_conn_help *help = nfct_help(ct); 161 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
155 enum ip_conntrack_dir dir; 162 enum ip_conntrack_dir dir;
156 163
157 pr_debug("entering for ct %p\n", ct); 164 pr_debug("entering for ct %p\n", ct);
158 165
159 write_lock_bh(&net_gre->keymap_lock); 166 write_lock_bh(&net_gre->keymap_lock);
160 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { 167 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
161 if (help->help.ct_pptp_info.keymap[dir]) { 168 if (ct_pptp_info->keymap[dir]) {
162 pr_debug("removing %p from list\n", 169 pr_debug("removing %p from list\n",
163 help->help.ct_pptp_info.keymap[dir]); 170 ct_pptp_info->keymap[dir]);
164 list_del(&help->help.ct_pptp_info.keymap[dir]->list); 171 list_del(&ct_pptp_info->keymap[dir]->list);
165 kfree(help->help.ct_pptp_info.keymap[dir]); 172 kfree(ct_pptp_info->keymap[dir]);
166 help->help.ct_pptp_info.keymap[dir] = NULL; 173 ct_pptp_info->keymap[dir] = NULL;
167 } 174 }
168 } 175 }
169 write_unlock_bh(&net_gre->keymap_lock); 176 write_unlock_bh(&net_gre->keymap_lock);
@@ -237,7 +244,7 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
237 244
238static unsigned int *gre_get_timeouts(struct net *net) 245static unsigned int *gre_get_timeouts(struct net *net)
239{ 246{
240 return gre_timeouts; 247 return gre_pernet(net)->gre_timeouts;
241} 248}
242 249
243/* Returns verdict for packet, and may modify conntrack */ 250/* Returns verdict for packet, and may modify conntrack */
@@ -297,13 +304,15 @@ static void gre_destroy(struct nf_conn *ct)
297#include <linux/netfilter/nfnetlink.h> 304#include <linux/netfilter/nfnetlink.h>
298#include <linux/netfilter/nfnetlink_cttimeout.h> 305#include <linux/netfilter/nfnetlink_cttimeout.h>
299 306
300static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 307static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
308 struct net *net, void *data)
301{ 309{
302 unsigned int *timeouts = data; 310 unsigned int *timeouts = data;
311 struct netns_proto_gre *net_gre = gre_pernet(net);
303 312
304 /* set default timeouts for GRE. */ 313 /* set default timeouts for GRE. */
305 timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED]; 314 timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
306 timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED]; 315 timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
307 316
308 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) { 317 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
309 timeouts[GRE_CT_UNREPLIED] = 318 timeouts[GRE_CT_UNREPLIED] =
@@ -339,6 +348,19 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
339}; 348};
340#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 349#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
341 350
351static int gre_init_net(struct net *net, u_int16_t proto)
352{
353 struct netns_proto_gre *net_gre = gre_pernet(net);
354 int i;
355
356 rwlock_init(&net_gre->keymap_lock);
357 INIT_LIST_HEAD(&net_gre->keymap_list);
358 for (i = 0; i < GRE_CT_MAX; i++)
359 net_gre->gre_timeouts[i] = gre_timeouts[i];
360
361 return 0;
362}
363
342/* protocol helper struct */ 364/* protocol helper struct */
343static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { 365static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
344 .l3proto = AF_INET, 366 .l3proto = AF_INET,
@@ -368,20 +390,22 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
368 .nla_policy = gre_timeout_nla_policy, 390 .nla_policy = gre_timeout_nla_policy,
369 }, 391 },
370#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 392#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
393 .net_id = &proto_gre_net_id,
394 .init_net = gre_init_net,
371}; 395};
372 396
373static int proto_gre_net_init(struct net *net) 397static int proto_gre_net_init(struct net *net)
374{ 398{
375 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 399 int ret = 0;
376 400 ret = nf_conntrack_l4proto_register(net, &nf_conntrack_l4proto_gre4);
377 rwlock_init(&net_gre->keymap_lock); 401 if (ret < 0)
378 INIT_LIST_HEAD(&net_gre->keymap_list); 402 pr_err("nf_conntrack_l4proto_gre4 :protocol register failed.\n");
379 403 return ret;
380 return 0;
381} 404}
382 405
383static void proto_gre_net_exit(struct net *net) 406static void proto_gre_net_exit(struct net *net)
384{ 407{
408 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_gre4);
385 nf_ct_gre_keymap_flush(net); 409 nf_ct_gre_keymap_flush(net);
386} 410}
387 411
@@ -394,20 +418,11 @@ static struct pernet_operations proto_gre_net_ops = {
394 418
395static int __init nf_ct_proto_gre_init(void) 419static int __init nf_ct_proto_gre_init(void)
396{ 420{
397 int rv; 421 return register_pernet_subsys(&proto_gre_net_ops);
398
399 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
400 if (rv < 0)
401 return rv;
402 rv = register_pernet_subsys(&proto_gre_net_ops);
403 if (rv < 0)
404 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
405 return rv;
406} 422}
407 423
408static void __exit nf_ct_proto_gre_fini(void) 424static void __exit nf_ct_proto_gre_fini(void)
409{ 425{
410 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
411 unregister_pernet_subsys(&proto_gre_net_ops); 426 unregister_pernet_subsys(&proto_gre_net_ops);
412} 427}
413 428
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 996db2fa21f7..c746d61f83ed 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -127,6 +127,17 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
127 } 127 }
128}; 128};
129 129
130static int sctp_net_id __read_mostly;
131struct sctp_net {
132 struct nf_proto_net pn;
133 unsigned int timeouts[SCTP_CONNTRACK_MAX];
134};
135
136static inline struct sctp_net *sctp_pernet(struct net *net)
137{
138 return net_generic(net, sctp_net_id);
139}
140
130static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 141static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
131 struct nf_conntrack_tuple *tuple) 142 struct nf_conntrack_tuple *tuple)
132{ 143{
@@ -281,7 +292,7 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
281 292
282static unsigned int *sctp_get_timeouts(struct net *net) 293static unsigned int *sctp_get_timeouts(struct net *net)
283{ 294{
284 return sctp_timeouts; 295 return sctp_pernet(net)->timeouts;
285} 296}
286 297
287/* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 298/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
@@ -551,14 +562,16 @@ static int sctp_nlattr_size(void)
551#include <linux/netfilter/nfnetlink.h> 562#include <linux/netfilter/nfnetlink.h>
552#include <linux/netfilter/nfnetlink_cttimeout.h> 563#include <linux/netfilter/nfnetlink_cttimeout.h>
553 564
554static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 565static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
566 struct net *net, void *data)
555{ 567{
556 unsigned int *timeouts = data; 568 unsigned int *timeouts = data;
569 struct sctp_net *sn = sctp_pernet(net);
557 int i; 570 int i;
558 571
559 /* set default SCTP timeouts. */ 572 /* set default SCTP timeouts. */
560 for (i=0; i<SCTP_CONNTRACK_MAX; i++) 573 for (i=0; i<SCTP_CONNTRACK_MAX; i++)
561 timeouts[i] = sctp_timeouts[i]; 574 timeouts[i] = sn->timeouts[i];
562 575
563 /* there's a 1:1 mapping between attributes and protocol states. */ 576 /* there's a 1:1 mapping between attributes and protocol states. */
564 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) { 577 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
@@ -599,54 +612,45 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
599 612
600 613
601#ifdef CONFIG_SYSCTL 614#ifdef CONFIG_SYSCTL
602static unsigned int sctp_sysctl_table_users;
603static struct ctl_table_header *sctp_sysctl_header;
604static struct ctl_table sctp_sysctl_table[] = { 615static struct ctl_table sctp_sysctl_table[] = {
605 { 616 {
606 .procname = "nf_conntrack_sctp_timeout_closed", 617 .procname = "nf_conntrack_sctp_timeout_closed",
607 .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
608 .maxlen = sizeof(unsigned int), 618 .maxlen = sizeof(unsigned int),
609 .mode = 0644, 619 .mode = 0644,
610 .proc_handler = proc_dointvec_jiffies, 620 .proc_handler = proc_dointvec_jiffies,
611 }, 621 },
612 { 622 {
613 .procname = "nf_conntrack_sctp_timeout_cookie_wait", 623 .procname = "nf_conntrack_sctp_timeout_cookie_wait",
614 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
615 .maxlen = sizeof(unsigned int), 624 .maxlen = sizeof(unsigned int),
616 .mode = 0644, 625 .mode = 0644,
617 .proc_handler = proc_dointvec_jiffies, 626 .proc_handler = proc_dointvec_jiffies,
618 }, 627 },
619 { 628 {
620 .procname = "nf_conntrack_sctp_timeout_cookie_echoed", 629 .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
621 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
622 .maxlen = sizeof(unsigned int), 630 .maxlen = sizeof(unsigned int),
623 .mode = 0644, 631 .mode = 0644,
624 .proc_handler = proc_dointvec_jiffies, 632 .proc_handler = proc_dointvec_jiffies,
625 }, 633 },
626 { 634 {
627 .procname = "nf_conntrack_sctp_timeout_established", 635 .procname = "nf_conntrack_sctp_timeout_established",
628 .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
629 .maxlen = sizeof(unsigned int), 636 .maxlen = sizeof(unsigned int),
630 .mode = 0644, 637 .mode = 0644,
631 .proc_handler = proc_dointvec_jiffies, 638 .proc_handler = proc_dointvec_jiffies,
632 }, 639 },
633 { 640 {
634 .procname = "nf_conntrack_sctp_timeout_shutdown_sent", 641 .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
635 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
636 .maxlen = sizeof(unsigned int), 642 .maxlen = sizeof(unsigned int),
637 .mode = 0644, 643 .mode = 0644,
638 .proc_handler = proc_dointvec_jiffies, 644 .proc_handler = proc_dointvec_jiffies,
639 }, 645 },
640 { 646 {
641 .procname = "nf_conntrack_sctp_timeout_shutdown_recd", 647 .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
642 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
643 .maxlen = sizeof(unsigned int), 648 .maxlen = sizeof(unsigned int),
644 .mode = 0644, 649 .mode = 0644,
645 .proc_handler = proc_dointvec_jiffies, 650 .proc_handler = proc_dointvec_jiffies,
646 }, 651 },
647 { 652 {
648 .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", 653 .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
649 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
650 .maxlen = sizeof(unsigned int), 654 .maxlen = sizeof(unsigned int),
651 .mode = 0644, 655 .mode = 0644,
652 .proc_handler = proc_dointvec_jiffies, 656 .proc_handler = proc_dointvec_jiffies,
@@ -658,49 +662,42 @@ static struct ctl_table sctp_sysctl_table[] = {
658static struct ctl_table sctp_compat_sysctl_table[] = { 662static struct ctl_table sctp_compat_sysctl_table[] = {
659 { 663 {
660 .procname = "ip_conntrack_sctp_timeout_closed", 664 .procname = "ip_conntrack_sctp_timeout_closed",
661 .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
662 .maxlen = sizeof(unsigned int), 665 .maxlen = sizeof(unsigned int),
663 .mode = 0644, 666 .mode = 0644,
664 .proc_handler = proc_dointvec_jiffies, 667 .proc_handler = proc_dointvec_jiffies,
665 }, 668 },
666 { 669 {
667 .procname = "ip_conntrack_sctp_timeout_cookie_wait", 670 .procname = "ip_conntrack_sctp_timeout_cookie_wait",
668 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
669 .maxlen = sizeof(unsigned int), 671 .maxlen = sizeof(unsigned int),
670 .mode = 0644, 672 .mode = 0644,
671 .proc_handler = proc_dointvec_jiffies, 673 .proc_handler = proc_dointvec_jiffies,
672 }, 674 },
673 { 675 {
674 .procname = "ip_conntrack_sctp_timeout_cookie_echoed", 676 .procname = "ip_conntrack_sctp_timeout_cookie_echoed",
675 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
676 .maxlen = sizeof(unsigned int), 677 .maxlen = sizeof(unsigned int),
677 .mode = 0644, 678 .mode = 0644,
678 .proc_handler = proc_dointvec_jiffies, 679 .proc_handler = proc_dointvec_jiffies,
679 }, 680 },
680 { 681 {
681 .procname = "ip_conntrack_sctp_timeout_established", 682 .procname = "ip_conntrack_sctp_timeout_established",
682 .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
683 .maxlen = sizeof(unsigned int), 683 .maxlen = sizeof(unsigned int),
684 .mode = 0644, 684 .mode = 0644,
685 .proc_handler = proc_dointvec_jiffies, 685 .proc_handler = proc_dointvec_jiffies,
686 }, 686 },
687 { 687 {
688 .procname = "ip_conntrack_sctp_timeout_shutdown_sent", 688 .procname = "ip_conntrack_sctp_timeout_shutdown_sent",
689 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
690 .maxlen = sizeof(unsigned int), 689 .maxlen = sizeof(unsigned int),
691 .mode = 0644, 690 .mode = 0644,
692 .proc_handler = proc_dointvec_jiffies, 691 .proc_handler = proc_dointvec_jiffies,
693 }, 692 },
694 { 693 {
695 .procname = "ip_conntrack_sctp_timeout_shutdown_recd", 694 .procname = "ip_conntrack_sctp_timeout_shutdown_recd",
696 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
697 .maxlen = sizeof(unsigned int), 695 .maxlen = sizeof(unsigned int),
698 .mode = 0644, 696 .mode = 0644,
699 .proc_handler = proc_dointvec_jiffies, 697 .proc_handler = proc_dointvec_jiffies,
700 }, 698 },
701 { 699 {
702 .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent", 700 .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
703 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
704 .maxlen = sizeof(unsigned int), 701 .maxlen = sizeof(unsigned int),
705 .mode = 0644, 702 .mode = 0644,
706 .proc_handler = proc_dointvec_jiffies, 703 .proc_handler = proc_dointvec_jiffies,
@@ -710,6 +707,80 @@ static struct ctl_table sctp_compat_sysctl_table[] = {
710#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 707#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
711#endif 708#endif
712 709
710static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
711 struct sctp_net *sn)
712{
713#ifdef CONFIG_SYSCTL
714 if (pn->ctl_table)
715 return 0;
716
717 pn->ctl_table = kmemdup(sctp_sysctl_table,
718 sizeof(sctp_sysctl_table),
719 GFP_KERNEL);
720 if (!pn->ctl_table)
721 return -ENOMEM;
722
723 pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
724 pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
725 pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
726 pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
727 pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
728 pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
729 pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
730#endif
731 return 0;
732}
733
734static int sctp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
735 struct sctp_net *sn)
736{
737#ifdef CONFIG_SYSCTL
738#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
739 pn->ctl_compat_table = kmemdup(sctp_compat_sysctl_table,
740 sizeof(sctp_compat_sysctl_table),
741 GFP_KERNEL);
742 if (!pn->ctl_compat_table)
743 return -ENOMEM;
744
745 pn->ctl_compat_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
746 pn->ctl_compat_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
747 pn->ctl_compat_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
748 pn->ctl_compat_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
749 pn->ctl_compat_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
750 pn->ctl_compat_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
751 pn->ctl_compat_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
752#endif
753#endif
754 return 0;
755}
756
757static int sctp_init_net(struct net *net, u_int16_t proto)
758{
759 int ret;
760 struct sctp_net *sn = sctp_pernet(net);
761 struct nf_proto_net *pn = &sn->pn;
762
763 if (!pn->users) {
764 int i;
765
766 for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
767 sn->timeouts[i] = sctp_timeouts[i];
768 }
769
770 if (proto == AF_INET) {
771 ret = sctp_kmemdup_compat_sysctl_table(pn, sn);
772 if (ret < 0)
773 return ret;
774
775 ret = sctp_kmemdup_sysctl_table(pn, sn);
776 if (ret < 0)
777 nf_ct_kfree_compat_sysctl_table(pn);
778 } else
779 ret = sctp_kmemdup_sysctl_table(pn, sn);
780
781 return ret;
782}
783
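sctp_init_net(), like tcp_init_net() further down, registers the legacy compat table only when called for AF_INET, since the old ip_conntrack_* names are an IPv4-only interface; the AF_INET6 path duplicates just the native table. In outline, assuming a mock dup_table() helper in place of the kmemdup routines:

#include <stdio.h>

#define AF_INET   2
#define AF_INET6 10

static int dup_table(const char *which)	/* stand-in for *_kmemdup_*() */
{
	printf("dup %s table\n", which);
	return 0;
}

static int init_net(int family)
{
	if (family == AF_INET) {
		int ret = dup_table("compat");	/* legacy ip_conntrack_* names */
		if (ret < 0)
			return ret;
	}
	return dup_table("native");		/* nf_conntrack_* names */
}

int main(void)
{
	init_net(AF_INET);	/* both tables */
	init_net(AF_INET6);	/* native table only */
	return 0;
}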
713static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { 784static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
714 .l3proto = PF_INET, 785 .l3proto = PF_INET,
715 .l4proto = IPPROTO_SCTP, 786 .l4proto = IPPROTO_SCTP,
@@ -740,14 +811,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
740 .nla_policy = sctp_timeout_nla_policy, 811 .nla_policy = sctp_timeout_nla_policy,
741 }, 812 },
742#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 813#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
743#ifdef CONFIG_SYSCTL 814 .net_id = &sctp_net_id,
744 .ctl_table_users = &sctp_sysctl_table_users, 815 .init_net = sctp_init_net,
745 .ctl_table_header = &sctp_sysctl_header,
746 .ctl_table = sctp_sysctl_table,
747#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
748 .ctl_compat_table = sctp_compat_sysctl_table,
749#endif
750#endif
751}; 816};
752 817
753static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { 818static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
@@ -780,40 +845,58 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
780 }, 845 },
781#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 846#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
782#endif 847#endif
783#ifdef CONFIG_SYSCTL 848 .net_id = &sctp_net_id,
784 .ctl_table_users = &sctp_sysctl_table_users, 849 .init_net = sctp_init_net,
785 .ctl_table_header = &sctp_sysctl_header,
786 .ctl_table = sctp_sysctl_table,
787#endif
788}; 850};
789 851
790static int __init nf_conntrack_proto_sctp_init(void) 852static int sctp_net_init(struct net *net)
791{ 853{
792 int ret; 854 int ret = 0;
793 855
794 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); 856 ret = nf_conntrack_l4proto_register(net,
795 if (ret) { 857 &nf_conntrack_l4proto_sctp4);
796 pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n"); 858 if (ret < 0) {
 859 pr_err("nf_conntrack_l4proto_sctp4: protocol register failed.\n");
797 goto out; 860 goto out;
798 } 861 }
799 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); 862 ret = nf_conntrack_l4proto_register(net,
800 if (ret) { 863 &nf_conntrack_l4proto_sctp6);
801 pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n"); 864 if (ret < 0) {
 865 pr_err("nf_conntrack_l4proto_sctp6: protocol register failed.\n");
802 goto cleanup_sctp4; 866 goto cleanup_sctp4;
803 } 867 }
868 return 0;
804 869
870cleanup_sctp4:
871 nf_conntrack_l4proto_unregister(net,
872 &nf_conntrack_l4proto_sctp4);
873out:
805 return ret; 874 return ret;
875}
806 876
807 cleanup_sctp4: 877static void sctp_net_exit(struct net *net)
808 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 878{
809 out: 879 nf_conntrack_l4proto_unregister(net,
810 return ret; 880 &nf_conntrack_l4proto_sctp6);
881 nf_conntrack_l4proto_unregister(net,
882 &nf_conntrack_l4proto_sctp4);
883}
884
885static struct pernet_operations sctp_net_ops = {
886 .init = sctp_net_init,
887 .exit = sctp_net_exit,
888 .id = &sctp_net_id,
889 .size = sizeof(struct sctp_net),
890};
891
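sctp_net_ops shows the full pernet_operations contract used throughout this series: with .id and .size set, the core allocates a zeroed per-net area of that size for each namespace before calling .init, and .exit runs on every namespace teardown. A toy model of that lifecycle (netns, pernet_ops and setup_net are simplifications, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct netns { void *priv; };

struct pernet_ops {
	int (*init)(struct netns *);
	void (*exit)(struct netns *);
	size_t size;	/* per-net area the core allocates up front */
};

static int setup_net(struct netns *ns, const struct pernet_ops *ops)
{
	ns->priv = calloc(1, ops->size);	/* zeroed, like the kernel's */
	if (!ns->priv)
		return -1;
	return ops->init(ns);
}

static int sctp_like_init(struct netns *ns)
{
	printf("init, priv=%p\n", ns->priv);
	return 0;
}

static void sctp_like_exit(struct netns *ns)
{
	free(ns->priv);	/* mirror of unregistering both trackers */
}

int main(void)
{
	struct pernet_ops ops = { sctp_like_init, sctp_like_exit, 64 };
	struct netns ns = { NULL };

	if (setup_net(&ns, &ops) == 0)
		ops.exit(&ns);
	return 0;
}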
892static int __init nf_conntrack_proto_sctp_init(void)
893{
894 return register_pernet_subsys(&sctp_net_ops);
811} 895}
812 896
813static void __exit nf_conntrack_proto_sctp_fini(void) 897static void __exit nf_conntrack_proto_sctp_fini(void)
814{ 898{
815 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); 899 unregister_pernet_subsys(&sctp_net_ops);
816 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
817} 900}
818 901
819module_init(nf_conntrack_proto_sctp_init); 902module_init(nf_conntrack_proto_sctp_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 21ff1a99f534..a5ac11ebef33 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -270,6 +270,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
270 } 270 }
271}; 271};
272 272
273static inline struct nf_tcp_net *tcp_pernet(struct net *net)
274{
275 return &net->ct.nf_ct_proto.tcp;
276}
277
273static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 278static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
274 struct nf_conntrack_tuple *tuple) 279 struct nf_conntrack_tuple *tuple)
275{ 280{
@@ -516,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
516 u_int8_t pf) 521 u_int8_t pf)
517{ 522{
518 struct net *net = nf_ct_net(ct); 523 struct net *net = nf_ct_net(ct);
524 struct nf_tcp_net *tn = tcp_pernet(net);
519 struct ip_ct_tcp_state *sender = &state->seen[dir]; 525 struct ip_ct_tcp_state *sender = &state->seen[dir];
520 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 526 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
521 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 527 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -720,7 +726,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
720 } else { 726 } else {
721 res = false; 727 res = false;
722 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || 728 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
723 nf_ct_tcp_be_liberal) 729 tn->tcp_be_liberal)
724 res = true; 730 res = true;
725 if (!res && LOG_INVALID(net, IPPROTO_TCP)) 731 if (!res && LOG_INVALID(net, IPPROTO_TCP))
726 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 732 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -815,7 +821,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
815 821
816static unsigned int *tcp_get_timeouts(struct net *net) 822static unsigned int *tcp_get_timeouts(struct net *net)
817{ 823{
818 return tcp_timeouts; 824 return tcp_pernet(net)->timeouts;
819} 825}
820 826
821/* Returns verdict for packet, or -1 for invalid. */ 827/* Returns verdict for packet, or -1 for invalid. */
@@ -828,6 +834,7 @@ static int tcp_packet(struct nf_conn *ct,
828 unsigned int *timeouts) 834 unsigned int *timeouts)
829{ 835{
830 struct net *net = nf_ct_net(ct); 836 struct net *net = nf_ct_net(ct);
837 struct nf_tcp_net *tn = tcp_pernet(net);
831 struct nf_conntrack_tuple *tuple; 838 struct nf_conntrack_tuple *tuple;
832 enum tcp_conntrack new_state, old_state; 839 enum tcp_conntrack new_state, old_state;
833 enum ip_conntrack_dir dir; 840 enum ip_conntrack_dir dir;
@@ -1020,7 +1027,7 @@ static int tcp_packet(struct nf_conn *ct,
1020 && new_state == TCP_CONNTRACK_FIN_WAIT) 1027 && new_state == TCP_CONNTRACK_FIN_WAIT)
1021 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1028 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1022 1029
1023 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans && 1030 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1024 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) 1031 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1025 timeout = timeouts[TCP_CONNTRACK_RETRANS]; 1032 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1026 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & 1033 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
@@ -1065,6 +1072,8 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1065 enum tcp_conntrack new_state; 1072 enum tcp_conntrack new_state;
1066 const struct tcphdr *th; 1073 const struct tcphdr *th;
1067 struct tcphdr _tcph; 1074 struct tcphdr _tcph;
1075 struct net *net = nf_ct_net(ct);
1076 struct nf_tcp_net *tn = tcp_pernet(net);
1068 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; 1077 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
1069 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; 1078 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
1070 1079
@@ -1093,7 +1102,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1093 ct->proto.tcp.seen[0].td_end; 1102 ct->proto.tcp.seen[0].td_end;
1094 1103
1095 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); 1104 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
1096 } else if (nf_ct_tcp_loose == 0) { 1105 } else if (tn->tcp_loose == 0) {
1097 /* Don't try to pick up connections. */ 1106 /* Don't try to pick up connections. */
1098 return false; 1107 return false;
1099 } else { 1108 } else {
@@ -1251,14 +1260,16 @@ static int tcp_nlattr_tuple_size(void)
1251#include <linux/netfilter/nfnetlink.h> 1260#include <linux/netfilter/nfnetlink.h>
1252#include <linux/netfilter/nfnetlink_cttimeout.h> 1261#include <linux/netfilter/nfnetlink_cttimeout.h>
1253 1262
1254static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 1263static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1264 struct net *net, void *data)
1255{ 1265{
1256 unsigned int *timeouts = data; 1266 unsigned int *timeouts = data;
1267 struct nf_tcp_net *tn = tcp_pernet(net);
1257 int i; 1268 int i;
1258 1269
1259 /* set default TCP timeouts. */ 1270 /* set default TCP timeouts. */
1260 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++) 1271 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1261 timeouts[i] = tcp_timeouts[i]; 1272 timeouts[i] = tn->timeouts[i];
1262 1273
1263 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) { 1274 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1264 timeouts[TCP_CONNTRACK_SYN_SENT] = 1275 timeouts[TCP_CONNTRACK_SYN_SENT] =
@@ -1355,96 +1366,81 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1355#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1366#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1356 1367
1357#ifdef CONFIG_SYSCTL 1368#ifdef CONFIG_SYSCTL
1358static unsigned int tcp_sysctl_table_users;
1359static struct ctl_table_header *tcp_sysctl_header;
1360static struct ctl_table tcp_sysctl_table[] = { 1369static struct ctl_table tcp_sysctl_table[] = {
1361 { 1370 {
1362 .procname = "nf_conntrack_tcp_timeout_syn_sent", 1371 .procname = "nf_conntrack_tcp_timeout_syn_sent",
1363 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
1364 .maxlen = sizeof(unsigned int), 1372 .maxlen = sizeof(unsigned int),
1365 .mode = 0644, 1373 .mode = 0644,
1366 .proc_handler = proc_dointvec_jiffies, 1374 .proc_handler = proc_dointvec_jiffies,
1367 }, 1375 },
1368 { 1376 {
1369 .procname = "nf_conntrack_tcp_timeout_syn_recv", 1377 .procname = "nf_conntrack_tcp_timeout_syn_recv",
1370 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
1371 .maxlen = sizeof(unsigned int), 1378 .maxlen = sizeof(unsigned int),
1372 .mode = 0644, 1379 .mode = 0644,
1373 .proc_handler = proc_dointvec_jiffies, 1380 .proc_handler = proc_dointvec_jiffies,
1374 }, 1381 },
1375 { 1382 {
1376 .procname = "nf_conntrack_tcp_timeout_established", 1383 .procname = "nf_conntrack_tcp_timeout_established",
1377 .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
1378 .maxlen = sizeof(unsigned int), 1384 .maxlen = sizeof(unsigned int),
1379 .mode = 0644, 1385 .mode = 0644,
1380 .proc_handler = proc_dointvec_jiffies, 1386 .proc_handler = proc_dointvec_jiffies,
1381 }, 1387 },
1382 { 1388 {
1383 .procname = "nf_conntrack_tcp_timeout_fin_wait", 1389 .procname = "nf_conntrack_tcp_timeout_fin_wait",
1384 .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
1385 .maxlen = sizeof(unsigned int), 1390 .maxlen = sizeof(unsigned int),
1386 .mode = 0644, 1391 .mode = 0644,
1387 .proc_handler = proc_dointvec_jiffies, 1392 .proc_handler = proc_dointvec_jiffies,
1388 }, 1393 },
1389 { 1394 {
1390 .procname = "nf_conntrack_tcp_timeout_close_wait", 1395 .procname = "nf_conntrack_tcp_timeout_close_wait",
1391 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
1392 .maxlen = sizeof(unsigned int), 1396 .maxlen = sizeof(unsigned int),
1393 .mode = 0644, 1397 .mode = 0644,
1394 .proc_handler = proc_dointvec_jiffies, 1398 .proc_handler = proc_dointvec_jiffies,
1395 }, 1399 },
1396 { 1400 {
1397 .procname = "nf_conntrack_tcp_timeout_last_ack", 1401 .procname = "nf_conntrack_tcp_timeout_last_ack",
1398 .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
1399 .maxlen = sizeof(unsigned int), 1402 .maxlen = sizeof(unsigned int),
1400 .mode = 0644, 1403 .mode = 0644,
1401 .proc_handler = proc_dointvec_jiffies, 1404 .proc_handler = proc_dointvec_jiffies,
1402 }, 1405 },
1403 { 1406 {
1404 .procname = "nf_conntrack_tcp_timeout_time_wait", 1407 .procname = "nf_conntrack_tcp_timeout_time_wait",
1405 .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
1406 .maxlen = sizeof(unsigned int), 1408 .maxlen = sizeof(unsigned int),
1407 .mode = 0644, 1409 .mode = 0644,
1408 .proc_handler = proc_dointvec_jiffies, 1410 .proc_handler = proc_dointvec_jiffies,
1409 }, 1411 },
1410 { 1412 {
1411 .procname = "nf_conntrack_tcp_timeout_close", 1413 .procname = "nf_conntrack_tcp_timeout_close",
1412 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
1413 .maxlen = sizeof(unsigned int), 1414 .maxlen = sizeof(unsigned int),
1414 .mode = 0644, 1415 .mode = 0644,
1415 .proc_handler = proc_dointvec_jiffies, 1416 .proc_handler = proc_dointvec_jiffies,
1416 }, 1417 },
1417 { 1418 {
1418 .procname = "nf_conntrack_tcp_timeout_max_retrans", 1419 .procname = "nf_conntrack_tcp_timeout_max_retrans",
1419 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1420 .maxlen = sizeof(unsigned int), 1420 .maxlen = sizeof(unsigned int),
1421 .mode = 0644, 1421 .mode = 0644,
1422 .proc_handler = proc_dointvec_jiffies, 1422 .proc_handler = proc_dointvec_jiffies,
1423 }, 1423 },
1424 { 1424 {
1425 .procname = "nf_conntrack_tcp_timeout_unacknowledged", 1425 .procname = "nf_conntrack_tcp_timeout_unacknowledged",
1426 .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
1427 .maxlen = sizeof(unsigned int), 1426 .maxlen = sizeof(unsigned int),
1428 .mode = 0644, 1427 .mode = 0644,
1429 .proc_handler = proc_dointvec_jiffies, 1428 .proc_handler = proc_dointvec_jiffies,
1430 }, 1429 },
1431 { 1430 {
1432 .procname = "nf_conntrack_tcp_loose", 1431 .procname = "nf_conntrack_tcp_loose",
1433 .data = &nf_ct_tcp_loose,
1434 .maxlen = sizeof(unsigned int), 1432 .maxlen = sizeof(unsigned int),
1435 .mode = 0644, 1433 .mode = 0644,
1436 .proc_handler = proc_dointvec, 1434 .proc_handler = proc_dointvec,
1437 }, 1435 },
1438 { 1436 {
1439 .procname = "nf_conntrack_tcp_be_liberal", 1437 .procname = "nf_conntrack_tcp_be_liberal",
1440 .data = &nf_ct_tcp_be_liberal,
1441 .maxlen = sizeof(unsigned int), 1438 .maxlen = sizeof(unsigned int),
1442 .mode = 0644, 1439 .mode = 0644,
1443 .proc_handler = proc_dointvec, 1440 .proc_handler = proc_dointvec,
1444 }, 1441 },
1445 { 1442 {
1446 .procname = "nf_conntrack_tcp_max_retrans", 1443 .procname = "nf_conntrack_tcp_max_retrans",
1447 .data = &nf_ct_tcp_max_retrans,
1448 .maxlen = sizeof(unsigned int), 1444 .maxlen = sizeof(unsigned int),
1449 .mode = 0644, 1445 .mode = 0644,
1450 .proc_handler = proc_dointvec, 1446 .proc_handler = proc_dointvec,
@@ -1456,91 +1452,78 @@ static struct ctl_table tcp_sysctl_table[] = {
1456static struct ctl_table tcp_compat_sysctl_table[] = { 1452static struct ctl_table tcp_compat_sysctl_table[] = {
1457 { 1453 {
1458 .procname = "ip_conntrack_tcp_timeout_syn_sent", 1454 .procname = "ip_conntrack_tcp_timeout_syn_sent",
1459 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
1460 .maxlen = sizeof(unsigned int), 1455 .maxlen = sizeof(unsigned int),
1461 .mode = 0644, 1456 .mode = 0644,
1462 .proc_handler = proc_dointvec_jiffies, 1457 .proc_handler = proc_dointvec_jiffies,
1463 }, 1458 },
1464 { 1459 {
1465 .procname = "ip_conntrack_tcp_timeout_syn_sent2", 1460 .procname = "ip_conntrack_tcp_timeout_syn_sent2",
1466 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
1467 .maxlen = sizeof(unsigned int), 1461 .maxlen = sizeof(unsigned int),
1468 .mode = 0644, 1462 .mode = 0644,
1469 .proc_handler = proc_dointvec_jiffies, 1463 .proc_handler = proc_dointvec_jiffies,
1470 }, 1464 },
1471 { 1465 {
1472 .procname = "ip_conntrack_tcp_timeout_syn_recv", 1466 .procname = "ip_conntrack_tcp_timeout_syn_recv",
1473 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
1474 .maxlen = sizeof(unsigned int), 1467 .maxlen = sizeof(unsigned int),
1475 .mode = 0644, 1468 .mode = 0644,
1476 .proc_handler = proc_dointvec_jiffies, 1469 .proc_handler = proc_dointvec_jiffies,
1477 }, 1470 },
1478 { 1471 {
1479 .procname = "ip_conntrack_tcp_timeout_established", 1472 .procname = "ip_conntrack_tcp_timeout_established",
1480 .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
1481 .maxlen = sizeof(unsigned int), 1473 .maxlen = sizeof(unsigned int),
1482 .mode = 0644, 1474 .mode = 0644,
1483 .proc_handler = proc_dointvec_jiffies, 1475 .proc_handler = proc_dointvec_jiffies,
1484 }, 1476 },
1485 { 1477 {
 		.procname = "ip_conntrack_tcp_timeout_fin_wait",
-		.data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_timeout_close_wait",
-		.data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_timeout_last_ack",
-		.data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_timeout_time_wait",
-		.data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_timeout_close",
-		.data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_timeout_max_retrans",
-		.data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_tcp_loose",
-		.data = &nf_ct_tcp_loose,
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec,
 	},
 	{
 		.procname = "ip_conntrack_tcp_be_liberal",
-		.data = &nf_ct_tcp_be_liberal,
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec,
 	},
 	{
 		.procname = "ip_conntrack_tcp_max_retrans",
-		.data = &nf_ct_tcp_max_retrans,
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec,
@@ -1550,6 +1533,101 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
 #endif /* CONFIG_SYSCTL */
 
+static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+				    struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+	if (pn->ctl_table)
+		return 0;
+
+	pn->ctl_table = kmemdup(tcp_sysctl_table,
+				sizeof(tcp_sysctl_table),
+				GFP_KERNEL);
+	if (!pn->ctl_table)
+		return -ENOMEM;
+
+	pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+	pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+	pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+	pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+	pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+	pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+	pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+	pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+	pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+	pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
+	pn->ctl_table[10].data = &tn->tcp_loose;
+	pn->ctl_table[11].data = &tn->tcp_be_liberal;
+	pn->ctl_table[12].data = &tn->tcp_max_retrans;
+#endif
+	return 0;
+}
+
+static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+					   struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
+				       sizeof(tcp_compat_sysctl_table),
+				       GFP_KERNEL);
+	if (!pn->ctl_compat_table)
+		return -ENOMEM;
+
+	pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+	pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
+	pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+	pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+	pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+	pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+	pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+	pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+	pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+	pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+	pn->ctl_compat_table[10].data = &tn->tcp_loose;
+	pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
+	pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
+#endif
+#endif
+	return 0;
+}
+
+static int tcp_init_net(struct net *net, u_int16_t proto)
+{
+	int ret;
+	struct nf_tcp_net *tn = tcp_pernet(net);
+	struct nf_proto_net *pn = &tn->pn;
+
+	if (!pn->users) {
+		int i;
+
+		for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+			tn->timeouts[i] = tcp_timeouts[i];
+
+		tn->tcp_loose = nf_ct_tcp_loose;
+		tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
+		tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
+	}
+
+	if (proto == AF_INET) {
+		ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
+		if (ret < 0)
+			return ret;
+
+		ret = tcp_kmemdup_sysctl_table(pn, tn);
+		if (ret < 0)
+			nf_ct_kfree_compat_sysctl_table(pn);
+	} else
+		ret = tcp_kmemdup_sysctl_table(pn, tn);
+
+	return ret;
+}
+
+static struct nf_proto_net *tcp_get_net_proto(struct net *net)
+{
+	return &net->ct.nf_ct_proto.tcp.pn;
+}
+
 struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 {
 	.l3proto = PF_INET,
@@ -1582,14 +1660,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 		.nla_policy = tcp_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &tcp_sysctl_table_users,
-	.ctl_table_header = &tcp_sysctl_header,
-	.ctl_table = tcp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-	.ctl_compat_table = tcp_compat_sysctl_table,
-#endif
-#endif
+	.init_net = tcp_init_net,
+	.get_net_proto = tcp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
 
@@ -1625,10 +1697,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
 		.nla_policy = tcp_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &tcp_sysctl_table_users,
-	.ctl_table_header = &tcp_sysctl_header,
-	.ctl_table = tcp_sysctl_table,
-#endif
+	.init_net = tcp_init_net,
+	.get_net_proto = tcp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7259a6bdeb49..59623cc56e8d 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,17 +25,16 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
-enum udp_conntrack {
-	UDP_CT_UNREPLIED,
-	UDP_CT_REPLIED,
-	UDP_CT_MAX
-};
-
 static unsigned int udp_timeouts[UDP_CT_MAX] = {
 	[UDP_CT_UNREPLIED] = 30*HZ,
 	[UDP_CT_REPLIED] = 180*HZ,
 };
 
+static inline struct nf_udp_net *udp_pernet(struct net *net)
+{
+	return &net->ct.nf_ct_proto.udp;
+}
+
 static bool udp_pkt_to_tuple(const struct sk_buff *skb,
 			     unsigned int dataoff,
 			     struct nf_conntrack_tuple *tuple)
@@ -73,7 +72,7 @@ static int udp_print_tuple(struct seq_file *s,
 
 static unsigned int *udp_get_timeouts(struct net *net)
 {
-	return udp_timeouts;
+	return udp_pernet(net)->timeouts;
 }
 
 /* Returns verdict for packet, and may modify conntracktype */
@@ -157,13 +156,15 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
 
-static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
+				     struct net *net, void *data)
 {
 	unsigned int *timeouts = data;
+	struct nf_udp_net *un = udp_pernet(net);
 
 	/* set default timeouts for UDP. */
-	timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
-	timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
+	timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
+	timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
 
 	if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
 		timeouts[UDP_CT_UNREPLIED] =
@@ -200,19 +201,15 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
-static unsigned int udp_sysctl_table_users;
-static struct ctl_table_header *udp_sysctl_header;
 static struct ctl_table udp_sysctl_table[] = {
 	{
 		.procname = "nf_conntrack_udp_timeout",
-		.data = &udp_timeouts[UDP_CT_UNREPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "nf_conntrack_udp_timeout_stream",
-		.data = &udp_timeouts[UDP_CT_REPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
@@ -223,14 +220,12 @@ static struct ctl_table udp_sysctl_table[] = {
 static struct ctl_table udp_compat_sysctl_table[] = {
 	{
 		.procname = "ip_conntrack_udp_timeout",
-		.data = &udp_timeouts[UDP_CT_UNREPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "ip_conntrack_udp_timeout_stream",
-		.data = &udp_timeouts[UDP_CT_REPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
@@ -240,6 +235,73 @@ static struct ctl_table udp_compat_sysctl_table[] = {
 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
 #endif /* CONFIG_SYSCTL */
 
+static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+				    struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+	if (pn->ctl_table)
+		return 0;
+	pn->ctl_table = kmemdup(udp_sysctl_table,
+				sizeof(udp_sysctl_table),
+				GFP_KERNEL);
+	if (!pn->ctl_table)
+		return -ENOMEM;
+	pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+	pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+	return 0;
+}
+
+static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+					   struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table,
+				       sizeof(udp_compat_sysctl_table),
+				       GFP_KERNEL);
+	if (!pn->ctl_compat_table)
+		return -ENOMEM;
+
+	pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+	pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+#endif
+	return 0;
+}
+
+static int udp_init_net(struct net *net, u_int16_t proto)
+{
+	int ret;
+	struct nf_udp_net *un = udp_pernet(net);
+	struct nf_proto_net *pn = &un->pn;
+
+	if (!pn->users) {
+		int i;
+
+		for (i = 0; i < UDP_CT_MAX; i++)
+			un->timeouts[i] = udp_timeouts[i];
+	}
+
+	if (proto == AF_INET) {
+		ret = udp_kmemdup_compat_sysctl_table(pn, un);
+		if (ret < 0)
+			return ret;
+
+		ret = udp_kmemdup_sysctl_table(pn, un);
+		if (ret < 0)
+			nf_ct_kfree_compat_sysctl_table(pn);
+	} else
+		ret = udp_kmemdup_sysctl_table(pn, un);
+
+	return ret;
+}
+
+static struct nf_proto_net *udp_get_net_proto(struct net *net)
+{
+	return &net->ct.nf_ct_proto.udp.pn;
+}
+
 struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 {
 	.l3proto = PF_INET,
@@ -267,14 +329,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 		.nla_policy = udp_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &udp_sysctl_table_users,
-	.ctl_table_header = &udp_sysctl_header,
-	.ctl_table = udp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-	.ctl_compat_table = udp_compat_sysctl_table,
-#endif
-#endif
+	.init_net = udp_init_net,
+	.get_net_proto = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
@@ -305,10 +361,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 		.nla_policy = udp_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &udp_sysctl_table_users,
-	.ctl_table_header = &udp_sysctl_header,
-	.ctl_table = udp_sysctl_table,
-#endif
+	.init_net = udp_init_net,
+	.get_net_proto = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4d60a5376aa6..4b66df209286 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -35,6 +35,17 @@ static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
 	[UDPLITE_CT_REPLIED] = 180*HZ,
 };
 
+static int udplite_net_id __read_mostly;
+struct udplite_net {
+	struct nf_proto_net pn;
+	unsigned int timeouts[UDPLITE_CT_MAX];
+};
+
+static inline struct udplite_net *udplite_pernet(struct net *net)
+{
+	return net_generic(net, udplite_net_id);
+}
+
 static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
 				 unsigned int dataoff,
 				 struct nf_conntrack_tuple *tuple)
@@ -70,7 +81,7 @@ static int udplite_print_tuple(struct seq_file *s,
 
 static unsigned int *udplite_get_timeouts(struct net *net)
 {
-	return udplite_timeouts;
+	return udplite_pernet(net)->timeouts;
 }
 
 /* Returns verdict for packet, and may modify conntracktype */
@@ -161,13 +172,15 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
 
-static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
+					 struct net *net, void *data)
 {
 	unsigned int *timeouts = data;
+	struct udplite_net *un = udplite_pernet(net);
 
 	/* set default timeouts for UDPlite. */
-	timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
-	timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
+	timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
+	timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
 
 	if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
 		timeouts[UDPLITE_CT_UNREPLIED] =
@@ -204,19 +217,15 @@ udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
-static unsigned int udplite_sysctl_table_users;
-static struct ctl_table_header *udplite_sysctl_header;
 static struct ctl_table udplite_sysctl_table[] = {
 	{
 		.procname = "nf_conntrack_udplite_timeout",
-		.data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
 		.procname = "nf_conntrack_udplite_timeout_stream",
-		.data = &udplite_timeouts[UDPLITE_CT_REPLIED],
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
@@ -225,6 +234,40 @@ static struct ctl_table udplite_sysctl_table[] = {
 };
 #endif /* CONFIG_SYSCTL */
 
+static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
+					struct udplite_net *un)
+{
+#ifdef CONFIG_SYSCTL
+	if (pn->ctl_table)
+		return 0;
+
+	pn->ctl_table = kmemdup(udplite_sysctl_table,
+				sizeof(udplite_sysctl_table),
+				GFP_KERNEL);
+	if (!pn->ctl_table)
+		return -ENOMEM;
+
+	pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
+	pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
+#endif
+	return 0;
+}
+
+static int udplite_init_net(struct net *net, u_int16_t proto)
+{
+	struct udplite_net *un = udplite_pernet(net);
+	struct nf_proto_net *pn = &un->pn;
+
+	if (!pn->users) {
+		int i;
+
+		for (i = 0 ; i < UDPLITE_CT_MAX; i++)
+			un->timeouts[i] = udplite_timeouts[i];
+	}
+
+	return udplite_kmemdup_sysctl_table(pn, un);
+}
+
 static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
 {
 	.l3proto = PF_INET,
@@ -253,11 +296,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
 		.nla_policy = udplite_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &udplite_sysctl_table_users,
-	.ctl_table_header = &udplite_sysctl_header,
-	.ctl_table = udplite_sysctl_table,
-#endif
+	.net_id = &udplite_net_id,
+	.init_net = udplite_init_net,
 };
 
 static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
@@ -288,34 +328,55 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
 		.nla_policy = udplite_timeout_nla_policy,
 	},
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-	.ctl_table_users = &udplite_sysctl_table_users,
-	.ctl_table_header = &udplite_sysctl_header,
-	.ctl_table = udplite_sysctl_table,
-#endif
+	.net_id = &udplite_net_id,
+	.init_net = udplite_init_net,
 };
 
-static int __init nf_conntrack_proto_udplite_init(void)
+static int udplite_net_init(struct net *net)
 {
-	int err;
+	int ret = 0;
 
-	err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite4);
-	if (err < 0)
-		goto err1;
-	err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite6);
-	if (err < 0)
-		goto err2;
+	ret = nf_conntrack_l4proto_register(net,
+					    &nf_conntrack_l4proto_udplite4);
+	if (ret < 0) {
+		pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+		goto out;
+	}
+	ret = nf_conntrack_l4proto_register(net,
+					    &nf_conntrack_l4proto_udplite6);
+	if (ret < 0) {
+		pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+		goto cleanup_udplite4;
+	}
 	return 0;
-err2:
-	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
-err1:
-	return err;
+
+cleanup_udplite4:
+	nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+out:
+	return ret;
+}
+
+static void udplite_net_exit(struct net *net)
+{
+	nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite6);
+	nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+}
+
+static struct pernet_operations udplite_net_ops = {
+	.init = udplite_net_init,
+	.exit = udplite_net_exit,
+	.id   = &udplite_net_id,
+	.size = sizeof(struct udplite_net),
+};
+
+static int __init nf_conntrack_proto_udplite_init(void)
+{
+	return register_pernet_subsys(&udplite_net_ops);
 }
 
 static void __exit nf_conntrack_proto_udplite_exit(void)
 {
-	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
-	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
+	unregister_pernet_subsys(&udplite_net_ops);
 }
 
 module_init(nf_conntrack_proto_udplite_init);
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 8501823b3f9b..295429f39088 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -69,13 +69,12 @@ static int help(struct sk_buff *skb,
 	void *sb_ptr;
 	int ret = NF_ACCEPT;
 	int dir = CTINFO2DIR(ctinfo);
-	struct nf_ct_sane_master *ct_sane_info;
+	struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
 	struct nf_conntrack_expect *exp;
 	struct nf_conntrack_tuple *tuple;
 	struct sane_request *req;
 	struct sane_reply_net_start *reply;
 
-	ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
 	/* Until there's been traffic both ways, don't look in packets. */
 	if (ctinfo != IP_CT_ESTABLISHED &&
 	    ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -163,7 +162,6 @@ out:
 }
 
 static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
-static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
 
 static const struct nf_conntrack_expect_policy sane_exp_policy = {
 	.max_expected = 1,
@@ -190,7 +188,6 @@ static void nf_conntrack_sane_fini(void)
 static int __init nf_conntrack_sane_init(void)
 {
 	int i, j = -1, ret = 0;
-	char *tmpname;
 
 	sane_buffer = kmalloc(65536, GFP_KERNEL);
 	if (!sane_buffer)
@@ -205,17 +202,16 @@ static int __init nf_conntrack_sane_init(void)
 		sane[i][0].tuple.src.l3num = PF_INET;
 		sane[i][1].tuple.src.l3num = PF_INET6;
 		for (j = 0; j < 2; j++) {
+			sane[i][j].data_len = sizeof(struct nf_ct_sane_master);
 			sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
 			sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
 			sane[i][j].expect_policy = &sane_exp_policy;
 			sane[i][j].me = THIS_MODULE;
 			sane[i][j].help = help;
-			tmpname = &sane_names[i][j][0];
 			if (ports[i] == SANE_PORT)
-				sprintf(tmpname, "sane");
+				sprintf(sane[i][j].name, "sane");
 			else
-				sprintf(tmpname, "sane-%d", ports[i]);
-			sane[i][j].name = tmpname;
+				sprintf(sane[i][j].name, "sane-%d", ports[i]);
 
 			pr_debug("nf_ct_sane: registering helper for pf: %d "
 				 "port: %d\n",
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3a637..758a1bacc126 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1075,12 +1075,12 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 
 	if ((code >= 100 && code <= 199) ||
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, dataoff, dptr, datalen, cseq);
-	else if (help->help.ct_sip_info.invite_cseq == cseq)
+	else if (ct_sip_info->invite_cseq == cseq)
 		flush_expectations(ct, true);
 	return NF_ACCEPT;
 }
@@ -1091,12 +1091,12 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 
 	if ((code >= 100 && code <= 199) ||
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, dataoff, dptr, datalen, cseq);
-	else if (help->help.ct_sip_info.invite_cseq == cseq)
+	else if (ct_sip_info->invite_cseq == cseq)
 		flush_expectations(ct, true);
 	return NF_ACCEPT;
 }
@@ -1107,12 +1107,12 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 
 	if ((code >= 100 && code <= 199) ||
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, dataoff, dptr, datalen, cseq);
-	else if (help->help.ct_sip_info.invite_cseq == cseq)
+	else if (ct_sip_info->invite_cseq == cseq)
 		flush_expectations(ct, true);
 	return NF_ACCEPT;
 }
@@ -1123,13 +1123,13 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	unsigned int ret;
 
 	flush_expectations(ct, true);
 	ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
 	if (ret == NF_ACCEPT)
-		help->help.ct_sip_info.invite_cseq = cseq;
+		ct_sip_info->invite_cseq = cseq;
 	return ret;
 }
 
@@ -1154,7 +1154,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 	unsigned int matchoff, matchlen;
 	struct nf_conntrack_expect *exp;
@@ -1235,7 +1235,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
 
 store_cseq:
 	if (ret == NF_ACCEPT)
-		help->help.ct_sip_info.register_cseq = cseq;
+		ct_sip_info->register_cseq = cseq;
 	return ret;
 }
 
@@ -1245,7 +1245,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 	union nf_inet_addr addr;
 	__be16 port;
@@ -1262,7 +1262,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
 	 * responses, so we store the sequence number of the last valid
 	 * request and compare it here.
 	 */
-	if (help->help.ct_sip_info.register_cseq != cseq)
+	if (ct_sip_info->register_cseq != cseq)
 		return NF_ACCEPT;
 
 	if (code >= 100 && code <= 199)
@@ -1556,7 +1556,6 @@ static void nf_conntrack_sip_fini(void)
 static int __init nf_conntrack_sip_init(void)
 {
 	int i, j, ret;
-	char *tmpname;
 
 	if (ports_c == 0)
 		ports[ports_c++] = SIP_PORT;
@@ -1579,17 +1578,16 @@ static int __init nf_conntrack_sip_init(void)
 		sip[i][3].help = sip_help_tcp;
 
 		for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
+			sip[i][j].data_len = sizeof(struct nf_ct_sip_master);
 			sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
 			sip[i][j].expect_policy = sip_exp_policy;
 			sip[i][j].expect_class_max = SIP_EXPECT_MAX;
 			sip[i][j].me = THIS_MODULE;
 
-			tmpname = &sip_names[i][j][0];
 			if (ports[i] == SIP_PORT)
-				sprintf(tmpname, "sip");
+				sprintf(sip_names[i][j], "sip");
 			else
-				sprintf(tmpname, "sip-%u", i);
-			sip[i][j].name = tmpname;
+				sprintf(sip_names[i][j], "sip-%u", i);
 
 			pr_debug("port #%u: %u\n", i, ports[i]);
 
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 75466fd72f4f..81fc61c05263 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -92,7 +92,6 @@ static int tftp_help(struct sk_buff *skb,
 }
 
 static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
-static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
 
 static const struct nf_conntrack_expect_policy tftp_exp_policy = {
 	.max_expected = 1,
@@ -112,7 +111,6 @@ static void nf_conntrack_tftp_fini(void)
 static int __init nf_conntrack_tftp_init(void)
 {
 	int i, j, ret;
-	char *tmpname;
 
 	if (ports_c == 0)
 		ports[ports_c++] = TFTP_PORT;
@@ -129,12 +127,10 @@ static int __init nf_conntrack_tftp_init(void)
 			tftp[i][j].me = THIS_MODULE;
 			tftp[i][j].help = tftp_help;
 
-			tmpname = &tftp_names[i][j][0];
 			if (ports[i] == TFTP_PORT)
-				sprintf(tmpname, "tftp");
+				sprintf(tftp[i][j].name, "tftp");
 			else
-				sprintf(tmpname, "tftp-%u", i);
-			tftp[i][j].name = tmpname;
+				sprintf(tftp[i][j].name, "tftp-%u", i);
 
 			ret = nf_conntrack_helper_register(&tftp[i][j]);
 			if (ret) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 3e797d1fcb94..a26503342e71 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -39,6 +39,15 @@ static char __initdata nfversion[] = "0.30";
 static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT];
 static DEFINE_MUTEX(nfnl_mutex);
 
+static const int nfnl_group2type[NFNLGRP_MAX+1] = {
+	[NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
+	[NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
+	[NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
+	[NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
+	[NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
+	[NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
+};
+
 void nfnl_lock(void)
 {
 	mutex_lock(&nfnl_mutex);
@@ -169,8 +178,10 @@ replay:
 
 		err = nla_parse(cda, ss->cb[cb_id].attr_count,
 				attr, attrlen, ss->cb[cb_id].policy);
-		if (err < 0)
+		if (err < 0) {
+			rcu_read_unlock();
 			return err;
+		}
 
 		if (nc->call_rcu) {
 			err = nc->call_rcu(net->nfnl, skb, nlh,
@@ -184,9 +195,11 @@ replay:
 					   lockdep_is_held(&nfnl_mutex)) != ss ||
 			    nfnetlink_find_client(type, ss) != nc)
 				err = -EAGAIN;
-			else
+			else if (nc->call)
 				err = nc->call(net->nfnl, skb, nlh,
 					       (const struct nlattr **)cda);
+			else
+				err = -EINVAL;
 			nfnl_unlock();
 		}
 		if (err == -EAGAIN)
@@ -200,12 +213,35 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 	netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
 }
 
+#ifdef CONFIG_MODULES
+static void nfnetlink_bind(int group)
+{
+	const struct nfnetlink_subsystem *ss;
+	int type = nfnl_group2type[group];
+
+	rcu_read_lock();
+	ss = nfnetlink_get_subsys(type);
+	if (!ss) {
+		rcu_read_unlock();
+		request_module("nfnetlink-subsys-%d", type);
+		return;
+	}
+	rcu_read_unlock();
+}
+#endif
+
 static int __net_init nfnetlink_net_init(struct net *net)
 {
 	struct sock *nfnl;
+	struct netlink_kernel_cfg cfg = {
+		.groups = NFNLGRP_MAX,
+		.input = nfnetlink_rcv,
+#ifdef CONFIG_MODULES
+		.bind = nfnetlink_bind,
+#endif
+	};
 
-	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
-				     nfnetlink_rcv, NULL, THIS_MODULE);
+	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
 	if (!nfnl)
 		return -ENOMEM;
 	net->nfnl_stash = nfnl;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
new file mode 100644
index 000000000000..d6836193d479
--- /dev/null
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -0,0 +1,672 @@
+/*
+ * (C) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ *
+ * This software has been sponsored by Vyatta Inc. <http://www.vyatta.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/netfilter/nfnetlink_cthelper.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+
+static int
+nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	const struct nf_conn_help *help;
+	struct nf_conntrack_helper *helper;
+
+	help = nfct_help(ct);
+	if (help == NULL)
+		return NF_DROP;
+
+	/* rcu_read_lock()ed by nf_hook_slow */
+	helper = rcu_dereference(help->helper);
+	if (helper == NULL)
+		return NF_DROP;
+
+	/* This is an user-space helper not yet configured, skip. */
+	if ((helper->flags &
+	    (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) ==
+	     NF_CT_HELPER_F_USERSPACE)
+		return NF_ACCEPT;
+
+	/* If the user-space helper is not available, don't block traffic. */
+	return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS;
+}
+
+static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = {
+	[NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, },
+	[NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, },
+};
+
+static int
+nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
+			  const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_TUPLE_MAX+1];
+
+	nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol);
+
+	if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
+		return -EINVAL;
+
+	tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+	tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
+
+	return 0;
+}
+
+static int
+nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+{
+	const struct nf_conn_help *help = nfct_help(ct);
+
+	if (help->helper->data_len == 0)
+		return -EINVAL;
+
+	memcpy(&help->data, nla_data(attr), help->helper->data_len);
+	return 0;
+}
+
+static int
+nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
+{
+	const struct nf_conn_help *help = nfct_help(ct);
+
+	if (help->helper->data_len &&
+	    nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -ENOSPC;
+}
+
+static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = {
+	[NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING,
+				.len = NF_CT_HELPER_NAME_LEN-1 },
+	[NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, },
+	[NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
+			    const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_MAX+1];
+
+	nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol);
+
+	if (!tb[NFCTH_POLICY_NAME] ||
+	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
+	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+		return -EINVAL;
+
+	strncpy(expect_policy->name,
+		nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
+	expect_policy->max_expected =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+	expect_policy->timeout =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+	return 0;
+}
+
+static const struct nla_policy
+nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = {
+	[NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+				  const struct nlattr *attr)
+{
+	int i, ret;
+	struct nf_conntrack_expect_policy *expect_policy;
+	struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+
+	nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+			 nfnl_cthelper_expect_policy_set);
+
+	if (!tb[NFCTH_POLICY_SET_NUM])
+		return -EINVAL;
+
+	helper->expect_class_max =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+
+	if (helper->expect_class_max != 0 &&
+	    helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+		return -EOVERFLOW;
+
+	expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
+				helper->expect_class_max, GFP_KERNEL);
+	if (expect_policy == NULL)
+		return -ENOMEM;
+
+	for (i=0; i<helper->expect_class_max; i++) {
+		if (!tb[NFCTH_POLICY_SET+i])
+			goto err;
+
+		ret = nfnl_cthelper_expect_policy(&expect_policy[i],
+						  tb[NFCTH_POLICY_SET+i]);
+		if (ret < 0)
+			goto err;
+	}
+	helper->expect_policy = expect_policy;
+	return 0;
+err:
+	kfree(expect_policy);
+	return -EINVAL;
+}
+
+static int
+nfnl_cthelper_create(const struct nlattr * const tb[],
+		     struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_helper *helper;
+	int ret;
+
+	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
+		return -EINVAL;
+
+	helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
+	if (helper == NULL)
+		return -ENOMEM;
+
+	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
+	if (ret < 0)
+		goto err;
+
+	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+	helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+	helper->flags |= NF_CT_HELPER_F_USERSPACE;
+	memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
+
+	helper->me = THIS_MODULE;
+	helper->help = nfnl_userspace_cthelper;
+	helper->from_nlattr = nfnl_cthelper_from_nlattr;
+	helper->to_nlattr = nfnl_cthelper_to_nlattr;
+
+	/* Default to queue number zero, this can be updated at any time. */
+	if (tb[NFCTH_QUEUE_NUM])
+		helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+	if (tb[NFCTH_STATUS]) {
+		int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+		switch(status) {
+		case NFCT_HELPER_STATUS_ENABLED:
+			helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+			break;
+		case NFCT_HELPER_STATUS_DISABLED:
+			helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+			break;
+		}
+	}
+
+	ret = nf_conntrack_helper_register(helper);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(helper);
+	return ret;
+}
+
+static int
+nfnl_cthelper_update(const struct nlattr * const tb[],
+		     struct nf_conntrack_helper *helper)
+{
+	int ret;
+
+	if (tb[NFCTH_PRIV_DATA_LEN])
+		return -EBUSY;
+
+	if (tb[NFCTH_POLICY]) {
+		ret = nfnl_cthelper_parse_expect_policy(helper,
+							tb[NFCTH_POLICY]);
+		if (ret < 0)
+			return ret;
+	}
+	if (tb[NFCTH_QUEUE_NUM])
+		helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+	if (tb[NFCTH_STATUS]) {
+		int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+		switch(status) {
+		case NFCT_HELPER_STATUS_ENABLED:
+			helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+			break;
+		case NFCT_HELPER_STATUS_DISABLED:
+			helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+		  const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	const char *helper_name;
+	struct nf_conntrack_helper *cur, *helper = NULL;
+	struct nf_conntrack_tuple tuple;
+	struct hlist_node *n;
+	int ret = 0, i;
+
+	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+		return -EINVAL;
+
+	helper_name = nla_data(tb[NFCTH_NAME]);
+
+	ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+	if (ret < 0)
+		return ret;
+
+	rcu_read_lock();
+	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
+		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+			/* skip non-userspace conntrack helpers. */
+			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+				continue;
+
+			if (strncmp(cur->name, helper_name,
+					NF_CT_HELPER_NAME_LEN) != 0)
+				continue;
+
+			if ((tuple.src.l3num != cur->tuple.src.l3num ||
+			     tuple.dst.protonum != cur->tuple.dst.protonum))
+				continue;
+
+			if (nlh->nlmsg_flags & NLM_F_EXCL) {
+				ret = -EEXIST;
+				goto err;
+			}
+			helper = cur;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (helper == NULL)
+		ret = nfnl_cthelper_create(tb, &tuple);
+	else
+		ret = nfnl_cthelper_update(tb, helper);
+
+	return ret;
+err:
+	rcu_read_unlock();
+	return ret;
+}
+
+static int
+nfnl_cthelper_dump_tuple(struct sk_buff *skb,
+			 struct nf_conntrack_helper *helper)
+{
+	struct nlattr *nest_parms;
+
+	nest_parms = nla_nest_start(skb, NFCTH_TUPLE | NLA_F_NESTED);
+	if (nest_parms == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
+			 htons(helper->tuple.src.l3num)))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest_parms);
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int
+nfnl_cthelper_dump_policy(struct sk_buff *skb,
+			  struct nf_conntrack_helper *helper)
+{
+	int i;
+	struct nlattr *nest_parms1, *nest_parms2;
+
+	nest_parms1 = nla_nest_start(skb, NFCTH_POLICY | NLA_F_NESTED);
+	if (nest_parms1 == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
+			 htonl(helper->expect_class_max)))
+		goto nla_put_failure;
+
+	for (i=0; i<helper->expect_class_max; i++) {
+		nest_parms2 = nla_nest_start(skb,
+				(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
+		if (nest_parms2 == NULL)
+			goto nla_put_failure;
+
+		if (nla_put_string(skb, NFCTH_POLICY_NAME,
+				   helper->expect_policy[i].name))
+			goto nla_put_failure;
+
+		if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
+				 htonl(helper->expect_policy[i].max_expected)))
+			goto nla_put_failure;
+
+		if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
+				 htonl(helper->expect_policy[i].timeout)))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, nest_parms2);
+	}
+	nla_nest_end(skb, nest_parms1);
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int
+nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+			int event, struct nf_conntrack_helper *helper)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = pid ? NLM_F_MULTI : 0;
+	int status;
+
+	event |= NFNL_SUBSYS_CTHELPER << 8;
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	if (nla_put_string(skb, NFCTH_NAME, helper->name))
+		goto nla_put_failure;
+
+	if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
+		goto nla_put_failure;
+
+	if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
+		goto nla_put_failure;
+
+	if (nfnl_cthelper_dump_policy(skb, helper) < 0)
+		goto nla_put_failure;
+
+	if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
+		goto nla_put_failure;
+
+	if (helper->flags & NF_CT_HELPER_F_CONFIGURED)
+		status = NFCT_HELPER_STATUS_ENABLED;
+	else
+		status = NFCT_HELPER_STATUS_DISABLED;
+
+	if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nf_conntrack_helper *cur, *last;
+	struct hlist_node *n;
+
+	rcu_read_lock();
+	last = (struct nf_conntrack_helper *)cb->args[1];
+	for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
+restart:
+		hlist_for_each_entry_rcu(cur, n,
+				&nf_ct_helper_hash[cb->args[0]], hnode) {
+
+			/* skip non-userspace conntrack helpers. */
+			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+				continue;
+
+			if (cb->args[1]) {
+				if (cur != last)
+					continue;
+				cb->args[1] = 0;
+			}
+			if (nfnl_cthelper_fill_info(skb,
+					    NETLINK_CB(cb->skb).pid,
+					    cb->nlh->nlmsg_seq,
+					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+					    NFNL_MSG_CTHELPER_NEW, cur) < 0) {
+				cb->args[1] = (unsigned long)cur;
+				goto out;
+			}
+		}
+	}
+	if (cb->args[1]) {
+		cb->args[1] = 0;
+		goto restart;
+	}
+out:
+	rcu_read_unlock();
+	return skb->len;
+}
+
+static int
+nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+		  const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	int ret = -ENOENT, i;
+	struct nf_conntrack_helper *cur;
+	struct hlist_node *n;
+	struct sk_buff *skb2;
+	char *helper_name = NULL;
+	struct nf_conntrack_tuple tuple;
+	bool tuple_set = false;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nfnl_cthelper_dump_table,
+		};
+		return netlink_dump_start(nfnl, skb, nlh, &c);
+	}
+
+	if (tb[NFCTH_NAME])
+		helper_name = nla_data(tb[NFCTH_NAME]);
+
+	if (tb[NFCTH_TUPLE]) {
+		ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+		if (ret < 0)
+			return ret;
+
+		tuple_set = true;
+	}
+
+	for (i = 0; i < nf_ct_helper_hsize; i++) {
+		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+			/* skip non-userspace conntrack helpers. */
+			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+				continue;
+
+			if (helper_name && strncmp(cur->name, helper_name,
+						NF_CT_HELPER_NAME_LEN) != 0) {
+				continue;
+			}
+			if (tuple_set &&
+			    (tuple.src.l3num != cur->tuple.src.l3num ||
+			     tuple.dst.protonum != cur->tuple.dst.protonum))
+				continue;
+
+			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+			if (skb2 == NULL) {
+				ret = -ENOMEM;
+				break;
+			}
+
+			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
+						nlh->nlmsg_seq,
+						NFNL_MSG_TYPE(nlh->nlmsg_type),
+						NFNL_MSG_CTHELPER_NEW, cur);
+			if (ret <= 0) {
+				kfree_skb(skb2);
+				break;
+			}
+
+			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+					      MSG_DONTWAIT);
+			if (ret > 0)
+				ret = 0;
+
+			/* this avoids a loop in nfnetlink. */
+			return ret == -EAGAIN ? -ENOBUFS : ret;
+		}
+	}
+	return ret;
+}
+
+static int
+nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+		  const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	char *helper_name = NULL;
+	struct nf_conntrack_helper *cur;
+	struct hlist_node *n, *tmp;
+	struct nf_conntrack_tuple tuple;
+	bool tuple_set = false, found = false;
+	int i, j = 0, ret;
+
+	if (tb[NFCTH_NAME])
+		helper_name = nla_data(tb[NFCTH_NAME]);
+
+	if (tb[NFCTH_TUPLE]) {
+		ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+		if (ret < 0)
+			return ret;
+
+		tuple_set = true;
+	}
+
+	for (i = 0; i < nf_ct_helper_hsize; i++) {
+		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+								hnode) {
+			/* skip non-userspace conntrack helpers. */
+			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+				continue;
+
+			j++;
+
+			if (helper_name && strncmp(cur->name, helper_name,
+						NF_CT_HELPER_NAME_LEN) != 0) {
+				continue;
+			}
+			if (tuple_set &&
+			    (tuple.src.l3num != cur->tuple.src.l3num ||
+			     tuple.dst.protonum != cur->tuple.dst.protonum))
+				continue;
+
+			found = true;
+			nf_conntrack_helper_unregister(cur);
+		}
+	}
+	/* Make sure we return success if we flush and there is no helpers */
+	return (found || j == 0) ? 0 : -ENOENT;
+}
+
+static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
+	[NFCTH_NAME] = { .type = NLA_NUL_STRING,
+			 .len = NF_CT_HELPER_NAME_LEN-1 },
+	[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+};
+
+static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
+	[NFNL_MSG_CTHELPER_NEW] = { .call = nfnl_cthelper_new,
+				    .attr_count = NFCTH_MAX,
+				    .policy = nfnl_cthelper_policy },
+	[NFNL_MSG_CTHELPER_GET] = { .call = nfnl_cthelper_get,
+				    .attr_count = NFCTH_MAX,
+				    .policy = nfnl_cthelper_policy },
+	[NFNL_MSG_CTHELPER_DEL] = { .call = nfnl_cthelper_del,
+				    .attr_count = NFCTH_MAX,
+				    .policy = nfnl_cthelper_policy },
+};
+
+static const struct nfnetlink_subsystem nfnl_cthelper_subsys = {
+	.name = "cthelper",
+	.subsys_id = NFNL_SUBSYS_CTHELPER,
+	.cb_count = NFNL_MSG_CTHELPER_MAX,
+	.cb = nfnl_cthelper_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER);
+
+static int __init nfnl_cthelper_init(void)
+{
+	int ret;
+
+	ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys);
+	if (ret < 0) {
+		pr_err("nfnl_cthelper: cannot register with nfnetlink.\n");
+		goto err_out;
+	}
+	return 0;
+err_out:
+	return ret;
+}
+
+static void __exit nfnl_cthelper_exit(void)
+{
+	struct nf_conntrack_helper *cur;
+	struct hlist_node *n, *tmp;
+	int i;
+
+	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
+
+	for (i=0; i<nf_ct_helper_hsize; i++) {
+		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+								hnode) {
+			/* skip non-userspace conntrack helpers. */
+			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+				continue;
+
+			nf_conntrack_helper_unregister(cur);
+		}
+	}
+}
+
+module_init(nfnl_cthelper_init);
+module_exit(nfnl_cthelper_exit);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 3e655288d1d6..cdecbc8fe965 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,8 +49,9 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
49 49
50static int 50static int
51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout, 51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
52 struct nf_conntrack_l4proto *l4proto, 52 struct nf_conntrack_l4proto *l4proto,
53 const struct nlattr *attr) 53 struct net *net,
54 const struct nlattr *attr)
54{ 55{
55 int ret = 0; 56 int ret = 0;
56 57
@@ -60,7 +61,8 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
60 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max, 61 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
61 attr, l4proto->ctnl_timeout.nla_policy); 62 attr, l4proto->ctnl_timeout.nla_policy);
62 63
63 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data); 64 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
65 &timeout->data);
64 } 66 }
65 return ret; 67 return ret;
66} 68}
@@ -74,6 +76,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
74 __u8 l4num; 76 __u8 l4num;
75 struct nf_conntrack_l4proto *l4proto; 77 struct nf_conntrack_l4proto *l4proto;
76 struct ctnl_timeout *timeout, *matching = NULL; 78 struct ctnl_timeout *timeout, *matching = NULL;
79 struct net *net = sock_net(skb->sk);
77 char *name; 80 char *name;
78 int ret; 81 int ret;
79 82
@@ -117,7 +120,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
 			goto err_proto_put;
 		}
 
-		ret = ctnl_timeout_parse_policy(matching, l4proto,
+		ret = ctnl_timeout_parse_policy(matching, l4proto, net,
 						cda[CTA_TIMEOUT_DATA]);
 		return ret;
 	}
@@ -132,7 +135,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
 		goto err_proto_put;
 	}
 
-	ret = ctnl_timeout_parse_policy(timeout, l4proto,
+	ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
 					cda[CTA_TIMEOUT_DATA]);
 	if (ret < 0)
 		goto err;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c3cfc0cc9b5..169ab59ed9d4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -326,18 +326,20 @@ __nfulnl_send(struct nfulnl_instance *inst)
 {
 	int status = -1;
 
-	if (inst->qlen > 1)
-		NLMSG_PUT(inst->skb, 0, 0,
-			  NLMSG_DONE,
-			  sizeof(struct nfgenmsg));
-
+	if (inst->qlen > 1) {
+		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
+						 NLMSG_DONE,
+						 sizeof(struct nfgenmsg),
+						 0);
+		if (!nlh)
+			goto out;
+	}
 	status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
 				   MSG_DONTWAIT);
 
 	inst->qlen = 0;
 	inst->skb = NULL;
-
-nlmsg_failure:
+out:
 	return status;
 }
 
@@ -380,10 +382,12 @@ __build_packet_message(struct nfulnl_instance *inst,
 	struct nfgenmsg *nfmsg;
 	sk_buff_data_t old_tail = inst->skb->tail;
 
-	nlh = NLMSG_PUT(inst->skb, 0, 0,
-			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
-			sizeof(struct nfgenmsg));
-	nfmsg = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(inst->skb, 0, 0,
+			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
+			sizeof(struct nfgenmsg), 0);
+	if (!nlh)
+		return -1;
+	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = pf;
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(inst->group_num);
@@ -526,7 +530,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 
 	if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
 		printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
-		goto nlmsg_failure;
+		return -1;
 	}
 
 	nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
@@ -540,7 +544,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 	nlh->nlmsg_len = inst->skb->tail - old_tail;
 	return 0;
 
-nlmsg_failure:
 nla_put_failure:
 	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
 	return -1;
@@ -745,7 +748,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		   const struct nlmsghdr *nlh,
 		   const struct nlattr * const nfula[])
 {
-	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int16_t group_num = ntohs(nfmsg->res_id);
 	struct nfulnl_instance *inst;
 	struct nfulnl_msg_config_cmd *cmd = NULL;
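The nfnetlink_log conversion replaces the NLMSG_PUT() macro, which hid a `goto nlmsg_failure` and forced every caller to define that label, with nlmsg_put(), which simply returns NULL on overflow. A minimal illustration of the resulting caller pattern, not taken from the diff; -EMSGSIZE is a typical error-code choice:

static int put_header(struct sk_buff *skb, u32 portid, u32 seq, int type)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -EMSGSIZE;	/* handled locally, no shared goto label */

	return 0;
}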
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue_core.c
index 4162437b8361..c0496a55ad0c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
 #include <linux/list.h>
 #include <net/sock.h>
 #include <net/netfilter/nf_queue.h>
+#include <net/netfilter/nfnetlink_queue.h>
 
 #include <linux/atomic.h>
 
@@ -52,6 +53,7 @@ struct nfqnl_instance {
 
 	u_int16_t queue_num;			/* number of this queue */
 	u_int8_t copy_mode;
+	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
 /*
  * Following fields are dirtied for each queued packet,
  * keep them in same cache line if possible.
@@ -232,6 +234,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	struct sk_buff *entskb = entry->skb;
 	struct net_device *indev;
 	struct net_device *outdev;
+	struct nf_conn *ct = NULL;
+	enum ip_conntrack_info uninitialized_var(ctinfo);
 
 	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -265,16 +269,22 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 		break;
 	}
 
+	if (queue->flags & NFQA_CFG_F_CONNTRACK)
+		ct = nfqnl_ct_get(entskb, &size, &ctinfo);
 
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
-		goto nlmsg_failure;
+		return NULL;
 
 	old_tail = skb->tail;
-	nlh = NLMSG_PUT(skb, 0, 0,
-			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
-			sizeof(struct nfgenmsg));
-	nfmsg = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, 0, 0,
+			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
+			sizeof(struct nfgenmsg), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		return NULL;
+	}
+	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = entry->pf;
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(queue->queue_num);
@@ -377,7 +387,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 
 	if (skb_tailroom(skb) < nla_total_size(data_len)) {
 		printk(KERN_WARNING "nf_queue: no tailroom!\n");
-		goto nlmsg_failure;
+		kfree_skb(skb);
+		return NULL;
 	}
 
 	nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
@@ -388,10 +399,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 		BUG();
 	}
 
+	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+		goto nla_put_failure;
+
 	nlh->nlmsg_len = skb->tail - old_tail;
 	return skb;
 
-nlmsg_failure:
 nla_put_failure:
 	if (skb)
 		kfree_skb(skb);
@@ -406,6 +419,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	struct nfqnl_instance *queue;
 	int err = -ENOBUFS;
 	__be32 *packet_id_ptr;
+	int failopen = 0;
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
 	queue = instance_lookup(queuenum);
@@ -431,9 +445,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 		goto err_out_free_nskb;
 	}
 	if (queue->queue_total >= queue->queue_maxlen) {
-		queue->queue_dropped++;
-		net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
-				     queue->queue_total);
+		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+			failopen = 1;
+			err = 0;
+		} else {
+			queue->queue_dropped++;
+			net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+					     queue->queue_total);
+		}
 		goto err_out_free_nskb;
 	}
 	entry->id = ++queue->id_sequence;
@@ -455,17 +474,17 @@ err_out_free_nskb:
 	kfree_skb(nskb);
 err_out_unlock:
 	spin_unlock_bh(&queue->lock);
+	if (failopen)
+		nf_reinject(entry, NF_ACCEPT);
 err_out:
 	return err;
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
+nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
 {
 	struct sk_buff *nskb;
-	int diff;
 
-	diff = data_len - e->skb->len;
 	if (diff < 0) {
 		if (pskb_trim(e->skb, data_len))
 			return -ENOMEM;
@@ -623,6 +642,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
 	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
 	[NFQA_MARK]		= { .type = NLA_U32 },
 	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
+	[NFQA_CT]		= { .type = NLA_UNSPEC },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -670,7 +690,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
 			const struct nlmsghdr *nlh,
 			const struct nlattr * const nfqa[])
 {
-	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct nf_queue_entry *entry, *tmp;
 	unsigned int verdict, maxid;
 	struct nfqnl_msg_verdict_hdr *vhdr;
@@ -716,13 +736,15 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
 		  const struct nlmsghdr *nlh,
 		  const struct nlattr * const nfqa[])
 {
-	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int16_t queue_num = ntohs(nfmsg->res_id);
 
 	struct nfqnl_msg_verdict_hdr *vhdr;
 	struct nfqnl_instance *queue;
 	unsigned int verdict;
 	struct nf_queue_entry *entry;
+	enum ip_conntrack_info uninitialized_var(ctinfo);
+	struct nf_conn *ct = NULL;
 
 	queue = instance_lookup(queue_num);
 	if (!queue)
@@ -741,11 +763,22 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
 	if (entry == NULL)
 		return -ENOENT;
 
+	rcu_read_lock();
+	if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+		ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+
 	if (nfqa[NFQA_PAYLOAD]) {
+		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
+		int diff = payload_len - entry->skb->len;
+
 		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
-				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
+				 payload_len, entry, diff) < 0)
 			verdict = NF_DROP;
+
+		if (ct)
+			nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
 	}
+	rcu_read_unlock();
 
 	if (nfqa[NFQA_MARK])
 		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
@@ -777,7 +810,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		  const struct nlmsghdr *nlh,
 		  const struct nlattr * const nfqa[])
 {
-	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int16_t queue_num = ntohs(nfmsg->res_id);
 	struct nfqnl_instance *queue;
 	struct nfqnl_msg_config_cmd *cmd = NULL;
@@ -858,6 +891,36 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		spin_unlock_bh(&queue->lock);
 	}
 
+	if (nfqa[NFQA_CFG_FLAGS]) {
+		__u32 flags, mask;
+
+		if (!queue) {
+			ret = -ENODEV;
+			goto err_out_unlock;
+		}
+
+		if (!nfqa[NFQA_CFG_MASK]) {
+			/* A mask is needed to specify which flags are being
+			 * changed.
+			 */
+			ret = -EINVAL;
+			goto err_out_unlock;
+		}
+
+		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
+		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
+
+		if (flags >= NFQA_CFG_F_MAX) {
+			ret = -EOPNOTSUPP;
+			goto err_out_unlock;
+		}
+
+		spin_lock_bh(&queue->lock);
+		queue->flags &= ~mask;
+		queue->flags |= flags & mask;
+		spin_unlock_bh(&queue->lock);
+	}
+
 err_out_unlock:
 	rcu_read_unlock();
 	return ret;
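The new NFQA_CFG_FLAGS/NFQA_CFG_MASK pair lets userspace toggle per-queue behaviour such as fail-open (accept rather than drop when the queue is full) and conntrack attachment. A hedged userspace sketch, assuming libmnl, that requests fail-open inside an NFQNL_MSG_CONFIG message; both attributes are big-endian u32s, matching the ntohl(nla_get_be32(...)) parsing above:

#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_queue.h>

/* Append the fail-open flag to a prepared NFQNL_MSG_CONFIG message;
 * the mask says which flag bits this request is changing. */
static void nfq_request_fail_open(struct nlmsghdr *nlh)
{
	mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(NFQA_CFG_F_FAIL_OPEN));
	mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(NFQA_CFG_F_FAIL_OPEN));
}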
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
new file mode 100644
index 000000000000..ab61d66bc0b9
--- /dev/null
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -0,0 +1,98 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_queue.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nfnetlink_queue.h>
+
+struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
+			     enum ip_conntrack_info *ctinfo)
+{
+	struct nfq_ct_hook *nfq_ct;
+	struct nf_conn *ct;
+
+	/* rcu_read_lock()ed by __nf_queue already. */
+	nfq_ct = rcu_dereference(nfq_ct_hook);
+	if (nfq_ct == NULL)
+		return NULL;
+
+	ct = nf_ct_get(entskb, ctinfo);
+	if (ct) {
+		if (!nf_ct_is_untracked(ct))
+			*size += nfq_ct->build_size(ct);
+		else
+			ct = NULL;
+	}
+	return ct;
+}
+
+struct nf_conn *
+nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
+	       enum ip_conntrack_info *ctinfo)
+{
+	struct nfq_ct_hook *nfq_ct;
+	struct nf_conn *ct;
+
+	/* rcu_read_lock()ed by __nf_queue already. */
+	nfq_ct = rcu_dereference(nfq_ct_hook);
+	if (nfq_ct == NULL)
+		return NULL;
+
+	ct = nf_ct_get(skb, ctinfo);
+	if (ct && !nf_ct_is_untracked(ct))
+		nfq_ct->parse(attr, ct);
+
+	return ct;
+}
+
+int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
+		 enum ip_conntrack_info ctinfo)
+{
+	struct nfq_ct_hook *nfq_ct;
+	struct nlattr *nest_parms;
+	u_int32_t tmp;
+
+	nfq_ct = rcu_dereference(nfq_ct_hook);
+	if (nfq_ct == NULL)
+		return 0;
+
+	nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
+	if (!nest_parms)
+		goto nla_put_failure;
+
+	if (nfq_ct->build(skb, ct) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest_parms);
+
+	tmp = ctinfo;
+	if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+			 enum ip_conntrack_info ctinfo, int diff)
+{
+	struct nfq_ct_nat_hook *nfq_nat_ct;
+
+	nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
+	if (nfq_nat_ct == NULL)
+		return;
+
+	if ((ct->status & IPS_NAT_MASK) && diff)
+		nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
+}
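nfnetlink_queue_ct.c deliberately reaches conntrack only through the nfq_ct_hook and nfq_ct_nat_hook RCU pointers, so nfnetlink_queue need not link against nf_conntrack. A sketch of the provider side, which this diff does not show; the ctnetlink_nfqueue_* names are assumed for illustration, and only the hook field names are fixed by the calls above:

/* Assumed provider registration in the conntrack netlink code. */
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
	.build_size	= ctnetlink_nfqueue_build_size,
	.build		= ctnetlink_nfqueue_build,
	.parse		= ctnetlink_nfqueue_parse,
};

static int __init ctnetlink_register_nfqueue_hook(void)
{
	rcu_assign_pointer(nfq_ct_hook, &ctnetlink_nfqueue_hook);
	return 0;
}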
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index a51de9b052be..116018560c60 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -112,6 +112,8 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
 		goto err3;
 
 	if (info->helper[0]) {
+		struct nf_conntrack_helper *helper;
+
 		ret = -ENOENT;
 		proto = xt_ct_find_proto(par);
 		if (!proto) {
@@ -120,19 +122,21 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
 			goto err3;
 		}
 
-		ret = -ENOMEM;
-		help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
-		if (help == NULL)
-			goto err3;
-
 		ret = -ENOENT;
-		help->helper = nf_conntrack_helper_try_module_get(info->helper,
-								  par->family,
-								  proto);
-		if (help->helper == NULL) {
+		helper = nf_conntrack_helper_try_module_get(info->helper,
+							    par->family,
+							    proto);
+		if (helper == NULL) {
 			pr_info("No such helper \"%s\"\n", info->helper);
 			goto err3;
 		}
+
+		ret = -ENOMEM;
+		help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+		if (help == NULL)
+			goto err3;
+
+		help->helper = helper;
 	}
 
 	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -202,6 +206,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 		goto err3;
 
 	if (info->helper[0]) {
+		struct nf_conntrack_helper *helper;
+
 		ret = -ENOENT;
 		proto = xt_ct_find_proto(par);
 		if (!proto) {
@@ -210,19 +216,21 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 			goto err3;
 		}
 
-		ret = -ENOMEM;
-		help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
-		if (help == NULL)
-			goto err3;
-
 		ret = -ENOENT;
-		help->helper = nf_conntrack_helper_try_module_get(info->helper,
-								  par->family,
-								  proto);
-		if (help->helper == NULL) {
+		helper = nf_conntrack_helper_try_module_get(info->helper,
+							    par->family,
+							    proto);
+		if (helper == NULL) {
 			pr_info("No such helper \"%s\"\n", info->helper);
 			goto err3;
 		}
+
+		ret = -ENOMEM;
+		help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+		if (help == NULL)
+			goto err3;
+
+		help->helper = helper;
 	}
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 95237c89607a..7babe7d68716 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -41,26 +41,36 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static u32 hash_v4(const struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	__be32 ipaddr;
 
 	/* packets in either direction go into same queue */
-	ipaddr = iph->saddr ^ iph->daddr;
+	if (iph->saddr < iph->daddr)
+		return jhash_3words((__force u32)iph->saddr,
+			(__force u32)iph->daddr, iph->protocol, jhash_initval);
 
-	return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
+	return jhash_3words((__force u32)iph->daddr,
+			(__force u32)iph->saddr, iph->protocol, jhash_initval);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 hash_v6(const struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	__be32 addr[4];
+	u32 a, b, c;
+
+	if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+		a = (__force u32) ip6h->saddr.s6_addr32[3];
+		b = (__force u32) ip6h->daddr.s6_addr32[3];
+	} else {
+		b = (__force u32) ip6h->saddr.s6_addr32[3];
+		a = (__force u32) ip6h->daddr.s6_addr32[3];
+	}
 
-	addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
-	addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
-	addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
-	addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
+	if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+		c = (__force u32) ip6h->saddr.s6_addr32[1];
+	else
+		c = (__force u32) ip6h->daddr.s6_addr32[1];
 
-	return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
+	return jhash_3words(a, b, c, jhash_initval);
 }
 #endif
 
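The hashing rework deserves a word of motivation: XOR-folding saddr and daddr made the hash direction-independent, but it also collapsed unrelated flows whose address pairs XOR to the same value. Ordering the pair before hashing keeps the symmetry while preserving the full input entropy. A standalone demonstration of the collision (plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* flow 1: 10.0.0.1 -> 10.0.0.2, flow 2: 10.0.0.5 -> 10.0.0.6 */
	uint32_t f1_s = 0x0a000001, f1_d = 0x0a000002;
	uint32_t f2_s = 0x0a000005, f2_d = 0x0a000006;

	/* both XOR to 0x3: distinct flows, identical hash input */
	printf("xor:  %#x vs %#x\n",
	       (unsigned)(f1_s ^ f1_d), (unsigned)(f2_s ^ f2_d));

	/* order-normalized pairs stay distinct */
	printf("pair: (%#x,%#x) vs (%#x,%#x)\n",
	       (unsigned)(f1_s < f1_d ? f1_s : f1_d),
	       (unsigned)(f1_s < f1_d ? f1_d : f1_s),
	       (unsigned)(f2_s < f2_d ? f2_s : f2_d),
	       (unsigned)(f2_s < f2_d ? f2_d : f2_s));
	return 0;
}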
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 146033a86de8..d7f195388f66 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -69,7 +69,7 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 }
 
 /**
- * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
  * @skb:	The skb being processed.
  * @laddr:	IPv4 address to redirect to or zero.
  * @lport:	TCP port to redirect to or zero.
@@ -220,7 +220,7 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 }
 
 /**
- * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
  * @skb:	The skb being processed.
  * @tproto:	Transport protocol.
  * @thoff:	Transport protocol header offset.
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index c6d5a83450c9..70b5591a2586 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -274,38 +274,25 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 	kfree(info->data);
 }
 
-static struct xt_match connlimit_mt_reg[] __read_mostly = {
-	{
-		.name       = "connlimit",
-		.revision   = 0,
-		.family     = NFPROTO_UNSPEC,
-		.checkentry = connlimit_mt_check,
-		.match      = connlimit_mt,
-		.matchsize  = sizeof(struct xt_connlimit_info),
-		.destroy    = connlimit_mt_destroy,
-		.me         = THIS_MODULE,
-	},
-	{
-		.name       = "connlimit",
-		.revision   = 1,
-		.family     = NFPROTO_UNSPEC,
-		.checkentry = connlimit_mt_check,
-		.match      = connlimit_mt,
-		.matchsize  = sizeof(struct xt_connlimit_info),
-		.destroy    = connlimit_mt_destroy,
-		.me         = THIS_MODULE,
-	},
+static struct xt_match connlimit_mt_reg __read_mostly = {
+	.name       = "connlimit",
+	.revision   = 1,
+	.family     = NFPROTO_UNSPEC,
+	.checkentry = connlimit_mt_check,
+	.match      = connlimit_mt,
+	.matchsize  = sizeof(struct xt_connlimit_info),
+	.destroy    = connlimit_mt_destroy,
+	.me         = THIS_MODULE,
 };
 
 static int __init connlimit_mt_init(void)
 {
-	return xt_register_matches(connlimit_mt_reg,
-				   ARRAY_SIZE(connlimit_mt_reg));
+	return xt_register_match(&connlimit_mt_reg);
 }
 
 static void __exit connlimit_mt_exit(void)
 {
-	xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
+	xt_unregister_match(&connlimit_mt_reg);
 }
 
 module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc0d6dbe5d17..ae2ad1eec8d0 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -75,6 +75,7 @@ struct recent_entry {
 struct recent_table {
 	struct list_head	list;
 	char			name[XT_RECENT_NAME_LEN];
+	union nf_inet_addr	mask;
 	unsigned int		refcnt;
 	unsigned int		entries;
 	struct list_head	lru_list;
@@ -228,10 +229,10 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct net *net = dev_net(par->in ? par->in : par->out);
 	struct recent_net *recent_net = recent_pernet(net);
-	const struct xt_recent_mtinfo *info = par->matchinfo;
+	const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
 	struct recent_table *t;
 	struct recent_entry *e;
-	union nf_inet_addr addr = {};
+	union nf_inet_addr addr = {}, addr_mask;
 	u_int8_t ttl;
 	bool ret = info->invert;
 
@@ -261,12 +262,15 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 	spin_lock_bh(&recent_lock);
 	t = recent_table_lookup(recent_net, info->name);
-	e = recent_entry_lookup(t, &addr, par->family,
+
+	nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
+
+	e = recent_entry_lookup(t, &addr_mask, par->family,
 				(info->check_set & XT_RECENT_TTL) ? ttl : 0);
 	if (e == NULL) {
 		if (!(info->check_set & XT_RECENT_SET))
 			goto out;
-		e = recent_entry_init(t, &addr, par->family, ttl);
+		e = recent_entry_init(t, &addr_mask, par->family, ttl);
 		if (e == NULL)
 			par->hotdrop = true;
 		ret = !ret;
@@ -306,10 +310,10 @@ out:
 	return ret;
 }
 
-static int recent_mt_check(const struct xt_mtchk_param *par)
+static int recent_mt_check(const struct xt_mtchk_param *par,
+			   const struct xt_recent_mtinfo_v1 *info)
 {
 	struct recent_net *recent_net = recent_pernet(par->net);
-	const struct xt_recent_mtinfo *info = par->matchinfo;
 	struct recent_table *t;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *pde;
@@ -361,6 +365,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
 		goto out;
 	}
 	t->refcnt = 1;
+
+	memcpy(&t->mask, &info->mask, sizeof(t->mask));
 	strcpy(t->name, info->name);
 	INIT_LIST_HEAD(&t->lru_list);
 	for (i = 0; i < ip_list_hash_size; i++)
@@ -385,10 +391,28 @@ out:
 	return ret;
 }
 
+static int recent_mt_check_v0(const struct xt_mtchk_param *par)
+{
+	const struct xt_recent_mtinfo_v0 *info_v0 = par->matchinfo;
+	struct xt_recent_mtinfo_v1 info_v1;
+
+	/* Copy revision 0 structure to revision 1 */
+	memcpy(&info_v1, info_v0, sizeof(struct xt_recent_mtinfo));
+	/* Set default mask to ensure backward compatible behaviour */
+	memset(info_v1.mask.all, 0xFF, sizeof(info_v1.mask.all));
+
+	return recent_mt_check(par, &info_v1);
+}
+
+static int recent_mt_check_v1(const struct xt_mtchk_param *par)
+{
+	return recent_mt_check(par, par->matchinfo);
+}
+
 static void recent_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	struct recent_net *recent_net = recent_pernet(par->net);
-	const struct xt_recent_mtinfo *info = par->matchinfo;
+	const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
 	struct recent_table *t;
 
 	mutex_lock(&recent_mutex);
@@ -625,7 +649,7 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
 		.family		= NFPROTO_IPV4,
 		.match		= recent_mt,
 		.matchsize	= sizeof(struct xt_recent_mtinfo),
-		.checkentry	= recent_mt_check,
+		.checkentry	= recent_mt_check_v0,
 		.destroy	= recent_mt_destroy,
 		.me		= THIS_MODULE,
 	},
@@ -635,10 +659,30 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
 		.family		= NFPROTO_IPV6,
 		.match		= recent_mt,
 		.matchsize	= sizeof(struct xt_recent_mtinfo),
-		.checkentry	= recent_mt_check,
+		.checkentry	= recent_mt_check_v0,
+		.destroy	= recent_mt_destroy,
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "recent",
+		.revision	= 1,
+		.family		= NFPROTO_IPV4,
+		.match		= recent_mt,
+		.matchsize	= sizeof(struct xt_recent_mtinfo_v1),
+		.checkentry	= recent_mt_check_v1,
 		.destroy	= recent_mt_destroy,
 		.me		= THIS_MODULE,
 	},
+	{
+		.name		= "recent",
+		.revision	= 1,
+		.family		= NFPROTO_IPV6,
+		.match		= recent_mt,
+		.matchsize	= sizeof(struct xt_recent_mtinfo_v1),
+		.checkentry	= recent_mt_check_v1,
+		.destroy	= recent_mt_destroy,
+		.me		= THIS_MODULE,
+	}
 };
 
 static int __init recent_mt_init(void)
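Revision 1 of xt_recent stores a per-table mask and looks entries up by the masked address, so one entry can cover a whole subnet; old revision-0 binaries keep working because recent_mt_check_v0() synthesizes a v1 info with an all-ones mask. The diff does not show nf_inet_addr_mask() itself; its presumed semantics, a per-32-bit-word AND, would be:

/* Presumed semantics of nf_inet_addr_mask() (helper not shown in this
 * diff): result = addr & mask, word by word, covering both the IPv4
 * and IPv6 views of union nf_inet_addr. */
static inline void nf_inet_addr_mask_sketch(const union nf_inet_addr *a1,
					    union nf_inet_addr *result,
					    const union nf_inet_addr *mask)
{
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
}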
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 		       info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b3025a603d56..5463969da45b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -80,6 +80,7 @@ struct netlink_sock {
 	struct mutex		*cb_mutex;
 	struct mutex		cb_def_mutex;
 	void			(*netlink_rcv)(struct sk_buff *skb);
+	void			(*netlink_bind)(int group);
 	struct module		*module;
 };
 
@@ -124,6 +125,7 @@ struct netlink_table {
 	unsigned int		groups;
 	struct mutex		*cb_mutex;
 	struct module		*module;
+	void			(*bind)(int group);
 	int			registered;
 };
 
@@ -444,6 +446,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
 	struct netlink_sock *nlk;
+	void (*bind)(int group);
 	int err = 0;
 
 	sock->state = SS_UNCONNECTED;
@@ -468,6 +471,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	else
 		err = -EPROTONOSUPPORT;
 	cb_mutex = nl_table[protocol].cb_mutex;
+	bind = nl_table[protocol].bind;
 	netlink_unlock_table();
 
 	if (err < 0)
@@ -483,6 +487,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 
 	nlk = nlk_sk(sock->sk);
 	nlk->module = module;
+	nlk->netlink_bind = bind;
 out:
 	return err;
 
@@ -683,6 +688,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	netlink_update_listeners(sk);
 	netlink_table_ungrab();
 
+	if (nlk->netlink_bind && nlk->groups[0]) {
+		int i;
+
+		for (i=0; i<nlk->ngroups; i++) {
+			if (test_bit(i, nlk->groups))
+				nlk->netlink_bind(i);
+		}
+	}
+
 	return 0;
 }
 
@@ -1239,6 +1253,10 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		netlink_update_socket_mc(nlk, val,
 					 optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
+
+		if (nlk->netlink_bind)
+			nlk->netlink_bind(val);
+
 		err = 0;
 		break;
 	}
@@ -1503,14 +1521,16 @@ static void netlink_data_ready(struct sock *sk, int len)
  */
 
 struct sock *
-netlink_kernel_create(struct net *net, int unit, unsigned int groups,
-		      void (*input)(struct sk_buff *skb),
-		      struct mutex *cb_mutex, struct module *module)
+netlink_kernel_create(struct net *net, int unit,
+		      struct module *module,
+		      struct netlink_kernel_cfg *cfg)
 {
 	struct socket *sock;
 	struct sock *sk;
 	struct netlink_sock *nlk;
 	struct listeners *listeners = NULL;
+	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
+	unsigned int groups;
 
 	BUG_ON(!nl_table);
 
@@ -1532,16 +1552,18 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 	sk = sock->sk;
 	sk_change_net(sk, net);
 
-	if (groups < 32)
+	if (!cfg || cfg->groups < 32)
 		groups = 32;
+	else
+		groups = cfg->groups;
 
 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
 	if (!listeners)
 		goto out_sock_release;
 
 	sk->sk_data_ready = netlink_data_ready;
-	if (input)
-		nlk_sk(sk)->netlink_rcv = input;
+	if (cfg && cfg->input)
+		nlk_sk(sk)->netlink_rcv = cfg->input;
 
 	if (netlink_insert(sk, net, 0))
 		goto out_sock_release;
@@ -1555,6 +1577,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
 		nl_table[unit].cb_mutex = cb_mutex;
 		nl_table[unit].module = module;
+		nl_table[unit].bind = cfg ? cfg->bind : NULL;
 		nl_table[unit].registered = 1;
 	} else {
 		kfree(listeners);
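netlink_kernel_create() now takes a single struct netlink_kernel_cfg instead of a growing list of parameters, and gains an optional per-group bind() callback invoked when a socket joins a multicast group. A hedged caller sketch under the new signature; the my_* names are illustrative, and only .groups, .input, .cb_mutex and .bind are established by the hunks above:

static void my_input(struct sk_buff *skb)
{
	/* process one netlink request */
}

static void my_bind(int group)
{
	/* invoked for each multicast group a socket joins */
}

static struct netlink_kernel_cfg my_cfg = {
	.groups	= 32,
	.input	= my_input,
	.bind	= my_bind,
};

static struct sock *my_create_socket(struct net *net)
{
	return netlink_kernel_create(net, NETLINK_USERSOCK,
				     THIS_MODULE, &my_cfg);
}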
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2cc7c1ee7690..fda497412fc3 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,7 +33,7 @@ void genl_unlock(void)
 }
 EXPORT_SYMBOL(genl_unlock);
 
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
 int lockdep_genl_is_held(void)
 {
 	return lockdep_is_held(&genl_mutex);
@@ -504,7 +504,7 @@ EXPORT_SYMBOL(genl_unregister_family);
  * @pid: netlink pid the message is addressed to
  * @seq: sequence number (usually the one of the sender)
  * @family: generic netlink family
- * @flags netlink message flags
+ * @flags: netlink message flags
  * @cmd: generic netlink command
  *
  * Returns pointer to user specific header
@@ -915,10 +915,14 @@ static struct genl_multicast_group notify_grp = {
 
 static int __net_init genl_pernet_init(struct net *net)
 {
+	struct netlink_kernel_cfg cfg = {
+		.input		= genl_rcv,
+		.cb_mutex	= &genl_mutex,
+	};
+
 	/* we'll bump the group number right afterwards */
-	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
-					       genl_rcv, &genl_mutex,
-					       THIS_MODULE);
+	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
+					       THIS_MODULE, &cfg);
 
 	if (!net->genl_sock && net_eq(net, &init_net))
 		panic("GENL: Cannot initialize generic netlink\n");
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 9f6ce011d35d..ff749794bc5b 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -29,6 +29,8 @@
 #include <linux/slab.h>
 #include <linux/nfc.h>
 
+#include <net/genetlink.h>
+
 #include "nfc.h"
 
 #define VERSION "0.1"
@@ -121,14 +123,14 @@ error:
  * The device remains polling for targets until a target is found or
  * the nfc_stop_poll function is called.
  */
-int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
+int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
 {
 	int rc;
 
-	pr_debug("dev_name=%s protocols=0x%x\n",
-		 dev_name(&dev->dev), protocols);
+	pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n",
+		 dev_name(&dev->dev), im_protocols, tm_protocols);
 
-	if (!protocols)
+	if (!im_protocols && !tm_protocols)
 		return -EINVAL;
 
 	device_lock(&dev->dev);
@@ -143,9 +145,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
 		goto error;
 	}
 
-	rc = dev->ops->start_poll(dev, protocols);
-	if (!rc)
+	rc = dev->ops->start_poll(dev, im_protocols, tm_protocols);
+	if (!rc) {
 		dev->polling = true;
+		dev->rf_mode = NFC_RF_NONE;
+	}
 
 error:
 	device_unlock(&dev->dev);
@@ -235,8 +239,10 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
 	}
 
 	rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
-	if (!rc)
+	if (!rc) {
 		dev->active_target = target;
+		dev->rf_mode = NFC_RF_INITIATOR;
+	}
 
 error:
 	device_unlock(&dev->dev);
@@ -264,11 +270,6 @@ int nfc_dep_link_down(struct nfc_dev *dev)
 		goto error;
 	}
 
-	if (dev->dep_rf_mode == NFC_RF_TARGET) {
-		rc = -EOPNOTSUPP;
-		goto error;
-	}
-
 	rc = dev->ops->dep_link_down(dev);
 	if (!rc) {
 		dev->dep_link_up = false;
@@ -286,7 +287,6 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 		      u8 comm_mode, u8 rf_mode)
 {
 	dev->dep_link_up = true;
-	dev->dep_rf_mode = rf_mode;
 
 	nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
 
@@ -330,6 +330,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
 	rc = dev->ops->activate_target(dev, target, protocol);
 	if (!rc) {
 		dev->active_target = target;
+		dev->rf_mode = NFC_RF_INITIATOR;
 
 		if (dev->ops->check_presence)
 			mod_timer(&dev->check_pres_timer, jiffies +
@@ -409,27 +410,30 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
 		goto error;
 	}
 
-	if (dev->active_target == NULL) {
-		rc = -ENOTCONN;
-		kfree_skb(skb);
-		goto error;
-	}
+	if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) {
+		if (dev->active_target->idx != target_idx) {
+			rc = -EADDRNOTAVAIL;
+			kfree_skb(skb);
+			goto error;
+		}
 
-	if (dev->active_target->idx != target_idx) {
-		rc = -EADDRNOTAVAIL;
+		if (dev->ops->check_presence)
+			del_timer_sync(&dev->check_pres_timer);
+
+		rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
+					     cb_context);
+
+		if (!rc && dev->ops->check_presence)
+			mod_timer(&dev->check_pres_timer, jiffies +
+				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+	} else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
+		rc = dev->ops->tm_send(dev, skb);
+	} else {
+		rc = -ENOTCONN;
 		kfree_skb(skb);
 		goto error;
 	}
 
-	if (dev->ops->check_presence)
-		del_timer_sync(&dev->check_pres_timer);
-
-	rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
-				     cb_context);
-
-	if (!rc && dev->ops->check_presence)
-		mod_timer(&dev->check_pres_timer, jiffies +
-			  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
 
 error:
 	device_unlock(&dev->dev);
@@ -447,6 +451,63 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
 }
 EXPORT_SYMBOL(nfc_set_remote_general_bytes);
 
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len)
+{
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+	return nfc_llcp_general_bytes(dev, gb_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+{
+	/* Only LLCP target mode for now */
+	if (dev->dep_link_up == false) {
+		kfree_skb(skb);
+		return -ENOLINK;
+	}
+
+	return nfc_llcp_data_received(dev, skb);
+}
+EXPORT_SYMBOL(nfc_tm_data_received);
+
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+		     u8 *gb, size_t gb_len)
+{
+	int rc;
+
+	device_lock(&dev->dev);
+
+	dev->polling = false;
+
+	if (gb != NULL) {
+		rc = nfc_set_remote_general_bytes(dev, gb, gb_len);
+		if (rc < 0)
+			goto out;
+	}
+
+	dev->rf_mode = NFC_RF_TARGET;
+
+	if (protocol == NFC_PROTO_NFC_DEP_MASK)
+		nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET);
+
+	rc = nfc_genl_tm_activated(dev, protocol);
+
+out:
+	device_unlock(&dev->dev);
+
+	return rc;
+}
+EXPORT_SYMBOL(nfc_tm_activated);
+
+int nfc_tm_deactivated(struct nfc_dev *dev)
+{
+	dev->dep_link_up = false;
+
+	return nfc_genl_tm_deactivated(dev);
+}
+EXPORT_SYMBOL(nfc_tm_deactivated);
+
 /**
  * nfc_alloc_send_skb - allocate a skb for data exchange responses
  *
@@ -501,6 +562,8 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
  * The device driver must call this function when one or many nfc targets
  * are found. After calling this function, the device driver must stop
  * polling for targets.
+ * NOTE: This function can be called with targets=NULL and n_targets=0 to
+ * notify a driver error, meaning that the polling operation cannot complete.
  * IMPORTANT: this function must not be called from an atomic context.
  * In addition, it must also not be called from a context that would prevent
  * the NFC Core to call other nfc ops entry point concurrently.
@@ -512,23 +575,33 @@ int nfc_targets_found(struct nfc_dev *dev,
 
 	pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
 
-	dev->polling = false;
-
 	for (i = 0; i < n_targets; i++)
 		targets[i].idx = dev->target_next_idx++;
 
 	device_lock(&dev->dev);
 
+	if (dev->polling == false) {
+		device_unlock(&dev->dev);
+		return 0;
+	}
+
+	dev->polling = false;
+
 	dev->targets_generation++;
 
 	kfree(dev->targets);
-	dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
-			       GFP_ATOMIC);
+	dev->targets = NULL;
 
-	if (!dev->targets) {
-		dev->n_targets = 0;
-		device_unlock(&dev->dev);
-		return -ENOMEM;
+	if (targets) {
+		dev->targets = kmemdup(targets,
+				       n_targets * sizeof(struct nfc_target),
+				       GFP_ATOMIC);
+
+		if (!dev->targets) {
+			dev->n_targets = 0;
+			device_unlock(&dev->dev);
+			return -ENOMEM;
+		}
 	}
 
 	dev->n_targets = n_targets;
@@ -592,6 +665,12 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
 }
 EXPORT_SYMBOL(nfc_target_lost);
 
+inline void nfc_driver_failure(struct nfc_dev *dev, int err)
+{
+	nfc_targets_found(dev, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_driver_failure);
+
 static void nfc_release(struct device *d)
 {
 	struct nfc_dev *dev = to_nfc_dev(d);
@@ -678,7 +757,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 	struct nfc_dev *dev;
 
 	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
-	    !ops->deactivate_target || !ops->data_exchange)
+	    !ops->deactivate_target || !ops->im_transceive)
 		return NULL;
 
 	if (!supported_protocols)
@@ -847,3 +926,5 @@ MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
 MODULE_DESCRIPTION("NFC Core ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_NFC);
+MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
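The NFC core rework splits the old bidirectional data_exchange op into an initiator path (im_transceive) and a target path (tm_send), with dev->rf_mode recording which side of the RF link the device currently plays. An illustrative driver ops table under the new contract; the my_* functions are hypothetical stubs, not from this diff:

static struct nfc_ops my_nfc_ops = {
	.start_poll		= my_start_poll,   /* (dev, im_protocols, tm_protocols) */
	.stop_poll		= my_stop_poll,
	.activate_target	= my_activate_target,
	.deactivate_target	= my_deactivate_target,
	.im_transceive		= my_im_transceive, /* mandatory, was .data_exchange */
	.tm_send		= my_tm_send,       /* optional, target-mode transmit */
};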
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 8729abf5f18b..46362ef979db 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -28,26 +28,14 @@
 
 #include "hci.h"
 
-static int nfc_hci_result_to_errno(u8 result)
-{
-	switch (result) {
-	case NFC_HCI_ANY_OK:
-		return 0;
-	case NFC_HCI_ANY_E_TIMEOUT:
-		return -ETIMEDOUT;
-	default:
-		return -1;
-	}
-}
-
-static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
+static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err,
 			       struct sk_buff *skb, void *cb_data)
 {
 	struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
 
-	pr_debug("HCI Cmd completed with HCI result=%d\n", result);
+	pr_debug("HCI Cmd completed with result=%d\n", err);
 
-	hcp_ew->exec_result = nfc_hci_result_to_errno(result);
+	hcp_ew->exec_result = err;
 	if (hcp_ew->exec_result == 0)
 		hcp_ew->result_skb = skb;
 	else
@@ -311,9 +299,9 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
 }
 EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
 
-int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+			 u8 pipe)
 {
-	u8 pipe = NFC_HCI_INVALID_PIPE;
 	bool pipe_created = false;
 	int r;
 
@@ -322,6 +310,9 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
 	if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
 		return -EADDRINUSE;
 
+	if (pipe != NFC_HCI_INVALID_PIPE)
+		goto pipe_is_open;
+
 	switch (dest_gate) {
 	case NFC_HCI_LINK_MGMT_GATE:
 		pipe = NFC_HCI_LINK_MGMT_PIPE;
@@ -347,6 +338,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
 		return r;
 	}
 
+pipe_is_open:
 	hdev->gate2pipe[dest_gate] = pipe;
 
 	return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index e1a640d2b588..1ac7b3fac6c9 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -32,6 +32,18 @@
 /* Largest headroom needed for outgoing HCI commands */
 #define HCI_CMDS_HEADROOM	1
 
+static int nfc_hci_result_to_errno(u8 result)
+{
+	switch (result) {
+	case NFC_HCI_ANY_OK:
+		return 0;
+	case NFC_HCI_ANY_E_TIMEOUT:
+		return -ETIME;
+	default:
+		return -1;
+	}
+}
+
 static void nfc_hci_msg_tx_work(struct work_struct *work)
 {
 	struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
@@ -46,7 +58,7 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
 	if (timer_pending(&hdev->cmd_timer) == 0) {
 		if (hdev->cmd_pending_msg->cb)
 			hdev->cmd_pending_msg->cb(hdev,
-						  NFC_HCI_ANY_E_TIMEOUT,
+						  -ETIME,
 						  NULL,
 						  hdev->
 						  cmd_pending_msg->
@@ -71,8 +83,7 @@ next_msg:
 		kfree_skb(skb);
 		skb_queue_purge(&msg->msg_frags);
 		if (msg->cb)
-			msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
-				msg->cb_context);
+			msg->cb(hdev, r, NULL, msg->cb_context);
 		kfree(msg);
 		break;
 	}
@@ -116,20 +127,13 @@ static void nfc_hci_msg_rx_work(struct work_struct *work)
 	}
 }
 
-void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
-			   struct sk_buff *skb)
+static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
+				     struct sk_buff *skb)
 {
-	mutex_lock(&hdev->msg_tx_mutex);
-
-	if (hdev->cmd_pending_msg == NULL) {
-		kfree_skb(skb);
-		goto exit;
-	}
-
 	del_timer_sync(&hdev->cmd_timer);
 
 	if (hdev->cmd_pending_msg->cb)
-		hdev->cmd_pending_msg->cb(hdev, result, skb,
+		hdev->cmd_pending_msg->cb(hdev, err, skb,
 					  hdev->cmd_pending_msg->cb_context);
 	else
 		kfree_skb(skb);
@@ -138,6 +142,19 @@ void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
 	hdev->cmd_pending_msg = NULL;
 
 	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+			   struct sk_buff *skb)
+{
+	mutex_lock(&hdev->msg_tx_mutex);
+
+	if (hdev->cmd_pending_msg == NULL) {
+		kfree_skb(skb);
+		goto exit;
+	}
+
+	__nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
 
 exit:
 	mutex_unlock(&hdev->msg_tx_mutex);
@@ -170,6 +187,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
 	struct nfc_target *targets;
 	struct sk_buff *atqa_skb = NULL;
 	struct sk_buff *sak_skb = NULL;
+	struct sk_buff *uid_skb = NULL;
 	int r;
 
 	pr_debug("from gate %d\n", gate);
@@ -205,6 +223,19 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
 		targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
 		targets->sel_res = sak_skb->data[0];
 
+		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+				      NFC_HCI_RF_READER_A_UID, &uid_skb);
+		if (r < 0)
+			goto exit;
+
+		if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		memcpy(targets->nfcid1, uid_skb->data, uid_skb->len);
+		targets->nfcid1_len = uid_skb->len;
+
 		if (hdev->ops->complete_target_discovered) {
 			r = hdev->ops->complete_target_discovered(hdev, gate,
 								  targets);
@@ -213,7 +244,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
 		}
 		break;
 	case NFC_HCI_RF_READER_B_GATE:
-		targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
+		targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
 		break;
 	default:
 		if (hdev->ops->target_from_gate)
@@ -240,6 +271,7 @@ exit:
 	kfree(targets);
 	kfree_skb(atqa_skb);
 	kfree_skb(sak_skb);
+	kfree_skb(uid_skb);
 
 	return r;
 }
@@ -298,15 +330,15 @@ static void nfc_hci_cmd_timeout(unsigned long data)
 }
 
 static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
-				 u8 gates[])
+				 struct nfc_hci_gate *gates)
 {
 	int r;
-	u8 *p = gates;
 	while (gate_count--) {
-		r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
+		r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+					 gates->gate, gates->pipe);
 		if (r < 0)
 			return r;
-		p++;
+		gates++;
 	}
 
 	return 0;
@@ -316,14 +348,13 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
316{ 348{
317 struct sk_buff *skb = NULL; 349 struct sk_buff *skb = NULL;
318 int r; 350 int r;
319 u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */ 351
320 NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE, 352 if (hdev->init_data.gates[0].gate != NFC_HCI_ADMIN_GATE)
321 NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE, 353 return -EPROTO;
322 NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
323 };
324 354
325 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, 355 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
326 NFC_HCI_ADMIN_GATE); 356 hdev->init_data.gates[0].gate,
357 hdev->init_data.gates[0].pipe);
327 if (r < 0) 358 if (r < 0)
328 goto exit; 359 goto exit;
329 360
@@ -351,10 +382,6 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
351 if (r < 0) 382 if (r < 0)
352 goto exit; 383 goto exit;
353 384
354 r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
355 if (r < 0)
356 goto disconnect_all;
357
358 r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count, 385 r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
359 hdev->init_data.gates); 386 hdev->init_data.gates);
360 if (r < 0) 387 if (r < 0)
@@ -481,12 +508,13 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
481 return 0; 508 return 0;
482} 509}
483 510
484static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 511static int hci_start_poll(struct nfc_dev *nfc_dev,
512 u32 im_protocols, u32 tm_protocols)
485{ 513{
486 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 514 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
487 515
488 if (hdev->ops->start_poll) 516 if (hdev->ops->start_poll)
489 return hdev->ops->start_poll(hdev, protocols); 517 return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
490 else 518 else
491 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 519 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
492 NFC_HCI_EVT_READER_REQUESTED, NULL, 0); 520 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
@@ -511,9 +539,9 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
511{ 539{
512} 540}
513 541
514static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, 542static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
515 struct sk_buff *skb, data_exchange_cb_t cb, 543 struct sk_buff *skb, data_exchange_cb_t cb,
516 void *cb_context) 544 void *cb_context)
517{ 545{
518 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 546 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
519 int r; 547 int r;
@@ -579,7 +607,7 @@ static struct nfc_ops hci_nfc_ops = {
579 .stop_poll = hci_stop_poll, 607 .stop_poll = hci_stop_poll,
580 .activate_target = hci_activate_target, 608 .activate_target = hci_activate_target,
581 .deactivate_target = hci_deactivate_target, 609 .deactivate_target = hci_deactivate_target,
582 .data_exchange = hci_data_exchange, 610 .im_transceive = hci_transceive,
583 .check_presence = hci_check_presence, 611 .check_presence = hci_check_presence,
584}; 612};
585 613
@@ -682,13 +710,12 @@ EXPORT_SYMBOL(nfc_hci_register_device);
682 710
683void nfc_hci_unregister_device(struct nfc_hci_dev *hdev) 711void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
684{ 712{
685 struct hci_msg *msg; 713 struct hci_msg *msg, *n;
686 714
687 skb_queue_purge(&hdev->rx_hcp_frags); 715 skb_queue_purge(&hdev->rx_hcp_frags);
688 skb_queue_purge(&hdev->msg_rx_queue); 716 skb_queue_purge(&hdev->msg_rx_queue);
689 717
690 while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, 718 list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) {
691 msg_l)) != NULL) {
692 list_del(&msg->msg_l); 719 list_del(&msg->msg_l);
693 skb_queue_purge(&msg->msg_frags); 720 skb_queue_purge(&msg->msg_frags);
694 kfree(msg); 721 kfree(msg);
@@ -716,6 +743,27 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
716} 743}
717EXPORT_SYMBOL(nfc_hci_get_clientdata); 744EXPORT_SYMBOL(nfc_hci_get_clientdata);
718 745
746static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
747{
748 mutex_lock(&hdev->msg_tx_mutex);
749
750 if (hdev->cmd_pending_msg == NULL) {
751 nfc_driver_failure(hdev->ndev, err);
752 goto exit;
753 }
754
755 __nfc_hci_cmd_completion(hdev, err, NULL);
756
757exit:
758 mutex_unlock(&hdev->msg_tx_mutex);
759}
760
761void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
762{
763 nfc_hci_failure(hdev, err);
764}
765EXPORT_SYMBOL(nfc_hci_driver_failure);
766
719void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) 767void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
720{ 768{
721 struct hcp_packet *packet; 769 struct hcp_packet *packet;
@@ -726,16 +774,6 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
726 struct sk_buff *frag_skb; 774 struct sk_buff *frag_skb;
727 int msg_len; 775 int msg_len;
728 776
729 if (skb == NULL) {
730 /* TODO ELa: lower layer had permanent failure, need to
731 * propagate that up
732 */
733
734 skb_queue_purge(&hdev->rx_hcp_frags);
735
736 return;
737 }
738
739 packet = (struct hcp_packet *)skb->data; 777 packet = (struct hcp_packet *)skb->data;
740 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) { 778 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
741 skb_queue_tail(&hdev->rx_hcp_frags, skb); 779 skb_queue_tail(&hdev->rx_hcp_frags, skb);
@@ -756,9 +794,8 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
756 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN + 794 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
757 msg_len, GFP_KERNEL); 795 msg_len, GFP_KERNEL);
758 if (hcp_skb == NULL) { 796 if (hcp_skb == NULL) {
759 /* TODO ELa: cannot deliver HCP message. How to 797 nfc_hci_failure(hdev, -ENOMEM);
760 * propagate error up? 798 return;
761 */
762 } 799 }
763 800
764 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe; 801 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
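The hci/core.c hunks above converge all HCI failure paths on one helper: __nfc_hci_cmd_completion() does the mutex-free work, while nfc_hci_resp_received() and the new nfc_hci_failure() take msg_tx_mutex and decide whether a command is in flight. Below is a minimal standalone sketch of that shape, using pthreads and illustrative names; nothing in it is taken from the patch itself.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct pending_cmd {
        void (*cb)(int err, void *ctx);  /* completion callback */
        void *ctx;
};

struct dev_model {
        pthread_mutex_t lock;            /* plays the role of msg_tx_mutex */
        struct pending_cmd *pending;     /* plays the role of cmd_pending_msg */
};

/* Mutex-free completion helper: caller must hold dev->lock. */
static void __cmd_completion(struct dev_model *dev, int err)
{
        dev->pending->cb(err, dev->pending->ctx);
        dev->pending = NULL;
}

/* Single failure entry point: complete the in-flight command with the
 * error, or escalate to a device-wide failure report, mirroring what
 * nfc_hci_failure() does with nfc_driver_failure(). */
static void dev_failure(struct dev_model *dev, int err)
{
        pthread_mutex_lock(&dev->lock);
        if (dev->pending == NULL)
                printf("device-wide failure: %d\n", err);
        else
                __cmd_completion(dev, err);
        pthread_mutex_unlock(&dev->lock);
}

static void sample_cb(int err, void *ctx)
{
        printf("command completed: %d (%s)\n", err, (const char *)ctx);
}

int main(void)
{
        struct pending_cmd cmd = { .cb = sample_cb, .ctx = "read-uid" };
        struct dev_model dev = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                 .pending = &cmd };

        dev_failure(&dev, -12);  /* -ENOMEM: completes the pending command */
        dev_failure(&dev, -5);   /* -EIO: nothing pending, device-wide path */
        return 0;
}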
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
index 45f2fe4fd486..fa9a21e92239 100644
--- a/net/nfc/hci/hci.h
+++ b/net/nfc/hci/hci.h
@@ -37,10 +37,11 @@ struct hcp_packet {
37 37
38/* 38/*
39 * HCI command execution completion callback. 39 * HCI command execution completion callback.
40 * result will be one of the HCI response codes. 40 * result will be a standard linux error (may be converted from HCI response)
41 * skb contains the response data and must be disposed. 41 * skb contains the response data and must be disposed, or may be NULL if
42 * an error occurred
42 */ 43 */
43typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result, 44typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
44 struct sk_buff *skb, void *cb_data); 45 struct sk_buff *skb, void *cb_data);
45 46
46struct hcp_exec_waiter { 47struct hcp_exec_waiter {
@@ -131,9 +132,4 @@ void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
131#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a 132#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
132#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b 133#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
133 134
134/* Pipes */
135#define NFC_HCI_INVALID_PIPE 0x80
136#define NFC_HCI_LINK_MGMT_PIPE 0x00
137#define NFC_HCI_ADMIN_PIPE 0x01
138
139#endif /* __LOCAL_HCI_H */ 135#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index 7212cf2c5785..f4dad1a89740 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -105,7 +105,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
105 } 105 }
106 106
107 mutex_lock(&hdev->msg_tx_mutex); 107 mutex_lock(&hdev->msg_tx_mutex);
108 list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l); 108 list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
109 mutex_unlock(&hdev->msg_tx_mutex); 109 mutex_unlock(&hdev->msg_tx_mutex);
110 110
111 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); 111 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
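The one-line hcp.c fix above corrects inverted list_add_tail() arguments: in the kernel list API the new node comes first and the list head second. With the arguments swapped, the queue head itself was effectively spliced into each command's private node, orphaning earlier entries instead of queueing them. The standalone program below re-implements those list semantics to show the fixed argument order; it is an illustration, not kernel code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same semantics as the kernel's list_add_tail(): NEW node first,
 * list HEAD second. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct msg { int id; struct list_head msg_l; };

int main(void)
{
        struct list_head queue = LIST_HEAD_INIT(queue);
        struct msg a = { .id = 1 }, b = { .id = 2 };

        /* Correct order, as in the fixed hcp.c line: node, then head. */
        list_add_tail(&a.msg_l, &queue);
        list_add_tail(&b.msg_l, &queue);

        for (struct list_head *p = queue.next; p != &queue; p = p->next) {
                struct msg *m = (struct msg *)((char *)p -
                                offsetof(struct msg, msg_l));
                printf("msg %d\n", m->id);  /* prints msg 1, msg 2 */
        }
        return 0;
}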
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 5665dc6d893a..6f840c18c892 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -340,15 +340,6 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
340 shdlc->state = SHDLC_CONNECTED; 340 shdlc->state = SHDLC_CONNECTED;
341 } else { 341 } else {
342 shdlc->state = SHDLC_DISCONNECTED; 342 shdlc->state = SHDLC_DISCONNECTED;
343
344 /*
345 * TODO: Could it be possible that there are pending
346 * executing commands that are waiting for connect to complete
347 * before they can be carried? As connect is a blocking
348 * operation, it would require that the userspace process can
349 * send commands on the same device from a second thread before
350 * the device is up. I don't think that is possible, is it?
351 */
352 } 343 }
353 344
354 shdlc->connect_result = r; 345 shdlc->connect_result = r;
@@ -413,12 +404,12 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
413 r = nfc_shdlc_connect_send_ua(shdlc); 404 r = nfc_shdlc_connect_send_ua(shdlc);
414 nfc_shdlc_connect_complete(shdlc, r); 405 nfc_shdlc_connect_complete(shdlc, r);
415 } 406 }
416 } else if (shdlc->state > SHDLC_NEGOCIATING) { 407 } else if (shdlc->state == SHDLC_CONNECTED) {
417 /* 408 /*
418 * TODO: Chip wants to reset link 409 * Chip wants to reset link. This is unexpected and
419 * send ua, empty skb lists, reset counters 410 * unsupported.
420 * propagate info to HCI layer
421 */ 411 */
412 shdlc->hard_fault = -ECONNRESET;
422 } 413 }
423 break; 414 break;
424 case U_FRAME_UA: 415 case U_FRAME_UA:
@@ -523,10 +514,6 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
523 514
524 r = shdlc->ops->xmit(shdlc, skb); 515 r = shdlc->ops->xmit(shdlc, skb);
525 if (r < 0) { 516 if (r < 0) {
526 /*
527 * TODO: Cannot send, shdlc machine is dead, we
528 * must propagate the information up to HCI.
529 */
530 shdlc->hard_fault = r; 517 shdlc->hard_fault = r;
531 break; 518 break;
532 } 519 }
@@ -590,6 +577,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
590 skb_queue_purge(&shdlc->ack_pending_q); 577 skb_queue_purge(&shdlc->ack_pending_q);
591 break; 578 break;
592 case SHDLC_CONNECTING: 579 case SHDLC_CONNECTING:
580 if (shdlc->hard_fault) {
581 nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
582 break;
583 }
584
593 if (shdlc->connect_tries++ < 5) 585 if (shdlc->connect_tries++ < 5)
594 r = nfc_shdlc_connect_initiate(shdlc); 586 r = nfc_shdlc_connect_initiate(shdlc);
595 else 587 else
@@ -610,6 +602,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
610 } 602 }
611 603
612 nfc_shdlc_handle_rcv_queue(shdlc); 604 nfc_shdlc_handle_rcv_queue(shdlc);
605
606 if (shdlc->hard_fault) {
607 nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
608 break;
609 }
613 break; 610 break;
614 case SHDLC_CONNECTED: 611 case SHDLC_CONNECTED:
615 nfc_shdlc_handle_rcv_queue(shdlc); 612 nfc_shdlc_handle_rcv_queue(shdlc);
@@ -637,10 +634,7 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
637 } 634 }
638 635
639 if (shdlc->hard_fault) { 636 if (shdlc->hard_fault) {
640 /* 637 nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault);
641 * TODO: Handle hard_fault that occured during
642 * this invocation of the shdlc worker
643 */
644 } 638 }
645 break; 639 break;
646 default: 640 default:
@@ -765,14 +759,16 @@ static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
765 return 0; 759 return 0;
766} 760}
767 761
768static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols) 762static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
763 u32 im_protocols, u32 tm_protocols)
769{ 764{
770 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); 765 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
771 766
772 pr_debug("\n"); 767 pr_debug("\n");
773 768
774 if (shdlc->ops->start_poll) 769 if (shdlc->ops->start_poll)
775 return shdlc->ops->start_poll(shdlc, protocols); 770 return shdlc->ops->start_poll(shdlc,
771 im_protocols, tm_protocols);
776 772
777 return 0; 773 return 0;
778} 774}
@@ -921,8 +917,6 @@ void nfc_shdlc_free(struct nfc_shdlc *shdlc)
921{ 917{
922 pr_debug("\n"); 918 pr_debug("\n");
923 919
924 /* TODO: Check that this cannot be called while still in use */
925
926 nfc_hci_unregister_device(shdlc->hdev); 920 nfc_hci_unregister_device(shdlc->hdev);
927 nfc_hci_free_device(shdlc->hdev); 921 nfc_hci_free_device(shdlc->hdev);
928 922
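Across the shdlc.c hunks the TODO comments become real error propagation: xmit failures and unexpected link resets latch shdlc->hard_fault, the connect path completes with that error, and the worker finally forwards it via nfc_hci_driver_failure(). A standalone sketch of that latch-then-report pattern follows, with illustrative names and values.

#include <stdio.h>

/* Frame processing only records the first fault; the state machine acts
 * on it at a well-defined point of the work loop, as nfc_shdlc_sm_work()
 * now does, rather than reporting from the middle of rx handling. */
struct sm_model {
        int connected;
        int hard_fault;   /* first errno seen, 0 if none */
};

static void rcv_frame(struct sm_model *m, int err)
{
        if (err && !m->hard_fault)
                m->hard_fault = err;  /* latch, do not report from here */
}

static void sm_work(struct sm_model *m)
{
        /* ... rcv/send queue handling ... */
        if (m->hard_fault) {
                printf("driver failure %d, link down\n", m->hard_fault);
                m->connected = 0;
                m->hard_fault = 0;
        }
}

int main(void)
{
        struct sm_model m = { .connected = 1 };

        rcv_frame(&m, -104 /* -ECONNRESET: chip asked to reset the link */);
        sm_work(&m);      /* reports the latched fault exactly once */
        return 0;
}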
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index bf8ae4f0b90c..b982b5b890d7 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -51,7 +51,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type)
51 return tlv[2]; 51 return tlv[2];
52} 52}
53 53
54static u8 llcp_tlv16(u8 *tlv, u8 type) 54static u16 llcp_tlv16(u8 *tlv, u8 type)
55{ 55{
56 if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) 56 if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
57 return 0; 57 return 0;
@@ -67,7 +67,7 @@ static u8 llcp_tlv_version(u8 *tlv)
67 67
68static u16 llcp_tlv_miux(u8 *tlv) 68static u16 llcp_tlv_miux(u8 *tlv)
69{ 69{
70 return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f; 70 return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
71} 71}
72 72
73static u16 llcp_tlv_wks(u8 *tlv) 73static u16 llcp_tlv_wks(u8 *tlv)
@@ -117,8 +117,8 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
117 return tlv; 117 return tlv;
118} 118}
119 119
120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 120int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
121 u8 *tlv_array, u16 tlv_array_len) 121 u8 *tlv_array, u16 tlv_array_len)
122{ 122{
123 u8 *tlv = tlv_array, type, length, offset = 0; 123 u8 *tlv = tlv_array, type, length, offset = 0;
124 124
@@ -149,8 +149,45 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
149 case LLCP_TLV_OPT: 149 case LLCP_TLV_OPT:
150 local->remote_opt = llcp_tlv_opt(tlv); 150 local->remote_opt = llcp_tlv_opt(tlv);
151 break; 151 break;
152 default:
153 pr_err("Invalid gt tlv value 0x%x\n", type);
154 break;
155 }
156
157 offset += length + 2;
158 tlv += length + 2;
159 }
160
161 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n",
162 local->remote_version, local->remote_miu,
163 local->remote_lto, local->remote_opt,
164 local->remote_wks);
165
166 return 0;
167}
168
169int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
170 u8 *tlv_array, u16 tlv_array_len)
171{
172 u8 *tlv = tlv_array, type, length, offset = 0;
173
174 pr_debug("TLV array length %d\n", tlv_array_len);
175
176 if (sock == NULL)
177 return -ENOTCONN;
178
179 while (offset < tlv_array_len) {
180 type = tlv[0];
181 length = tlv[1];
182
183 pr_debug("type 0x%x length %d\n", type, length);
184
185 switch (type) {
186 case LLCP_TLV_MIUX:
187 sock->miu = llcp_tlv_miux(tlv) + 128;
188 break;
152 case LLCP_TLV_RW: 189 case LLCP_TLV_RW:
153 local->remote_rw = llcp_tlv_rw(tlv); 190 sock->rw = llcp_tlv_rw(tlv);
154 break; 191 break;
155 case LLCP_TLV_SN: 192 case LLCP_TLV_SN:
156 break; 193 break;
@@ -163,10 +200,7 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
163 tlv += length + 2; 200 tlv += length + 2;
164 } 201 }
165 202
166 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", 203 pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu);
167 local->remote_version, local->remote_miu,
168 local->remote_lto, local->remote_opt,
169 local->remote_wks, local->remote_rw);
170 204
171 return 0; 205 return 0;
172} 206}
@@ -474,7 +508,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
474 508
475 while (remaining_len > 0) { 509 while (remaining_len > 0) {
476 510
477 frag_len = min_t(size_t, local->remote_miu, remaining_len); 511 frag_len = min_t(size_t, sock->miu, remaining_len);
478 512
479 pr_debug("Fragment %zd bytes remaining %zd", 513 pr_debug("Fragment %zd bytes remaining %zd",
480 frag_len, remaining_len); 514 frag_len, remaining_len);
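Two small commands.c fixes above are easy to miss: llcp_tlv16() must return u16 (it was truncating to u8), and the MIUX mask must be 0x7ff rather than 0x7f, because the MIUX field of the TLV is 11 bits wide. The usable MIU is then 128 + MIUX, which nfc_llcp_parse_connection_tlv() now stores per socket instead of per link. A standalone check of the arithmetic, with a made-up TLV value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone equivalent of llcp_tlv16(): the 16-bit value
 * is big-endian in the TLV, and only its low 11 bits carry MIUX. */
static uint16_t tlv16(const uint8_t *tlv)
{
        return (uint16_t)((tlv[2] << 8) | tlv[3]);
}

int main(void)
{
        /* TLV: type=0x02 (MIUX), len=2, value=0x0480 -> MIUX=1152 */
        const uint8_t miux_tlv[] = { 0x02, 0x02, 0x04, 0x80 };
        uint16_t miux = tlv16(miux_tlv) & 0x7ff;

        /* With the old 0x7f mask this would have read back 0, capping
         * the link at the 128-byte default. */
        printf("MIU = %u\n", 128 + miux);  /* prints: MIU = 1280 */
        return 0;
}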
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 42994fac26d6..82f0f7588b46 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -31,47 +31,41 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
31 31
32static struct list_head llcp_devices; 32static struct list_head llcp_devices;
33 33
34static void nfc_llcp_socket_release(struct nfc_llcp_local *local) 34void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
35{ 35{
36 struct nfc_llcp_sock *parent, *s, *n; 36 write_lock(&l->lock);
37 struct sock *sk, *parent_sk; 37 sk_add_node(sk, &l->head);
38 int i; 38 write_unlock(&l->lock);
39 39}
40 mutex_lock(&local->socket_lock);
41
42 for (i = 0; i < LLCP_MAX_SAP; i++) {
43 parent = local->sockets[i];
44 if (parent == NULL)
45 continue;
46
47 /* Release all child sockets */
48 list_for_each_entry_safe(s, n, &parent->list, list) {
49 list_del_init(&s->list);
50 sk = &s->sk;
51
52 lock_sock(sk);
53
54 if (sk->sk_state == LLCP_CONNECTED)
55 nfc_put_device(s->dev);
56 40
57 sk->sk_state = LLCP_CLOSED; 41void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
42{
43 write_lock(&l->lock);
44 sk_del_node_init(sk);
45 write_unlock(&l->lock);
46}
58 47
59 release_sock(sk); 48static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
49{
50 struct sock *sk;
51 struct hlist_node *node, *tmp;
52 struct nfc_llcp_sock *llcp_sock;
60 53
61 sock_orphan(sk); 54 write_lock(&local->sockets.lock);
62 55
63 s->local = NULL; 56 sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
64 } 57 llcp_sock = nfc_llcp_sock(sk);
65 58
66 parent_sk = &parent->sk; 59 lock_sock(sk);
67 60
68 lock_sock(parent_sk); 61 if (sk->sk_state == LLCP_CONNECTED)
62 nfc_put_device(llcp_sock->dev);
69 63
70 if (parent_sk->sk_state == LLCP_LISTEN) { 64 if (sk->sk_state == LLCP_LISTEN) {
71 struct nfc_llcp_sock *lsk, *n; 65 struct nfc_llcp_sock *lsk, *n;
72 struct sock *accept_sk; 66 struct sock *accept_sk;
73 67
74 list_for_each_entry_safe(lsk, n, &parent->accept_queue, 68 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
75 accept_queue) { 69 accept_queue) {
76 accept_sk = &lsk->sk; 70 accept_sk = &lsk->sk;
77 lock_sock(accept_sk); 71 lock_sock(accept_sk);
@@ -83,35 +77,94 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
83 release_sock(accept_sk); 77 release_sock(accept_sk);
84 78
85 sock_orphan(accept_sk); 79 sock_orphan(accept_sk);
80 }
86 81
87 lsk->local = NULL; 82 if (listen == true) {
83 release_sock(sk);
84 continue;
88 } 85 }
89 } 86 }
90 87
91 if (parent_sk->sk_state == LLCP_CONNECTED) 88 sk->sk_state = LLCP_CLOSED;
92 nfc_put_device(parent->dev);
93
94 parent_sk->sk_state = LLCP_CLOSED;
95 89
96 release_sock(parent_sk); 90 release_sock(sk);
97 91
98 sock_orphan(parent_sk); 92 sock_orphan(sk);
99 93
100 parent->local = NULL; 94 sk_del_node_init(sk);
101 } 95 }
102 96
103 mutex_unlock(&local->socket_lock); 97 write_unlock(&local->sockets.lock);
104} 98}
105 99
106static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local) 100struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
107{ 101{
108 mutex_lock(&local->sdp_lock); 102 kref_get(&local->ref);
109 103
110 local->local_wks = 0; 104 return local;
111 local->local_sdp = 0; 105}
112 local->local_sap = 0;
113 106
114 mutex_unlock(&local->sdp_lock); 107static void local_release(struct kref *ref)
108{
109 struct nfc_llcp_local *local;
110
111 local = container_of(ref, struct nfc_llcp_local, ref);
112
113 list_del(&local->list);
114 nfc_llcp_socket_release(local, false);
115 del_timer_sync(&local->link_timer);
116 skb_queue_purge(&local->tx_queue);
117 destroy_workqueue(local->tx_wq);
118 destroy_workqueue(local->rx_wq);
119 destroy_workqueue(local->timeout_wq);
120 kfree_skb(local->rx_pending);
121 kfree(local);
122}
123
124int nfc_llcp_local_put(struct nfc_llcp_local *local)
125{
126 if (local == NULL)
127 return 0;
128
129 return kref_put(&local->ref, local_release);
130}
131
132static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
133 u8 ssap, u8 dsap)
134{
135 struct sock *sk;
136 struct hlist_node *node;
137 struct nfc_llcp_sock *llcp_sock;
138
139 pr_debug("ssap dsap %d %d\n", ssap, dsap);
140
141 if (ssap == 0 && dsap == 0)
142 return NULL;
143
144 read_lock(&local->sockets.lock);
145
146 llcp_sock = NULL;
147
148 sk_for_each(sk, node, &local->sockets.head) {
149 llcp_sock = nfc_llcp_sock(sk);
150
151 if (llcp_sock->ssap == ssap && llcp_sock->dsap == dsap)
152 break;
153 }
154
155 read_unlock(&local->sockets.lock);
156
157 if (llcp_sock == NULL)
158 return NULL;
159
160 sock_hold(&llcp_sock->sk);
161
162 return llcp_sock;
163}
164
165static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
166{
167 sock_put(&sock->sk);
115} 168}
116 169
117static void nfc_llcp_timeout_work(struct work_struct *work) 170static void nfc_llcp_timeout_work(struct work_struct *work)
@@ -174,6 +227,51 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
174 return -EINVAL; 227 return -EINVAL;
175} 228}
176 229
230static
231struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
232 u8 *sn, size_t sn_len)
233{
234 struct sock *sk;
235 struct hlist_node *node;
236 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
237
238 pr_debug("sn %zd %p\n", sn_len, sn);
239
240 if (sn == NULL || sn_len == 0)
241 return NULL;
242
243 read_lock(&local->sockets.lock);
244
245 llcp_sock = NULL;
246
247 sk_for_each(sk, node, &local->sockets.head) {
248 tmp_sock = nfc_llcp_sock(sk);
249
250 pr_debug("llcp sock %p\n", tmp_sock);
251
252 if (tmp_sock->sk.sk_state != LLCP_LISTEN)
253 continue;
254
255 if (tmp_sock->service_name == NULL ||
256 tmp_sock->service_name_len == 0)
257 continue;
258
259 if (tmp_sock->service_name_len != sn_len)
260 continue;
261
262 if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
263 llcp_sock = tmp_sock;
264 break;
265 }
266 }
267
268 read_unlock(&local->sockets.lock);
269
270 pr_debug("Found llcp sock %p\n", llcp_sock);
271
272 return llcp_sock;
273}
274
177u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 275u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
178 struct nfc_llcp_sock *sock) 276 struct nfc_llcp_sock *sock)
179{ 277{
@@ -200,41 +298,26 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
200 } 298 }
201 299
202 /* 300 /*
203 * This is not a well known service, 301 * Check if there already is a non WKS socket bound
204 * we should try to find a local SDP free spot 302 * to this service name.
205 */ 303 */
206 ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP); 304 if (nfc_llcp_sock_from_sn(local, sock->service_name,
207 if (ssap == LLCP_SDP_NUM_SAP) { 305 sock->service_name_len) != NULL) {
208 mutex_unlock(&local->sdp_lock); 306 mutex_unlock(&local->sdp_lock);
209 307
210 return LLCP_SAP_MAX; 308 return LLCP_SAP_MAX;
211 } 309 }
212 310
213 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
214
215 set_bit(ssap, &local->local_sdp);
216 mutex_unlock(&local->sdp_lock); 311 mutex_unlock(&local->sdp_lock);
217 312
218 return LLCP_WKS_NUM_SAP + ssap; 313 return LLCP_SDP_UNBOUND;
219 314
220 } else if (sock->ssap != 0) { 315 } else if (sock->ssap != 0 && sock->ssap < LLCP_WKS_NUM_SAP) {
221 if (sock->ssap < LLCP_WKS_NUM_SAP) { 316 if (!test_bit(sock->ssap, &local->local_wks)) {
222 if (!test_bit(sock->ssap, &local->local_wks)) { 317 set_bit(sock->ssap, &local->local_wks);
223 set_bit(sock->ssap, &local->local_wks); 318 mutex_unlock(&local->sdp_lock);
224 mutex_unlock(&local->sdp_lock);
225
226 return sock->ssap;
227 }
228
229 } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
230 if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
231 &local->local_sdp)) {
232 set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
233 &local->local_sdp);
234 mutex_unlock(&local->sdp_lock);
235 319
236 return sock->ssap; 320 return sock->ssap;
237 }
238 } 321 }
239 } 322 }
240 323
@@ -271,8 +354,34 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
271 local_ssap = ssap; 354 local_ssap = ssap;
272 sdp = &local->local_wks; 355 sdp = &local->local_wks;
273 } else if (ssap < LLCP_LOCAL_NUM_SAP) { 356 } else if (ssap < LLCP_LOCAL_NUM_SAP) {
357 atomic_t *client_cnt;
358
274 local_ssap = ssap - LLCP_WKS_NUM_SAP; 359 local_ssap = ssap - LLCP_WKS_NUM_SAP;
275 sdp = &local->local_sdp; 360 sdp = &local->local_sdp;
361 client_cnt = &local->local_sdp_cnt[local_ssap];
362
363 pr_debug("%d clients\n", atomic_read(client_cnt));
364
365 mutex_lock(&local->sdp_lock);
366
367 if (atomic_dec_and_test(client_cnt)) {
368 struct nfc_llcp_sock *l_sock;
369
370 pr_debug("No more clients for SAP %d\n", ssap);
371
372 clear_bit(local_ssap, sdp);
373
374 /* Find the listening sock and set it back to UNBOUND */
375 l_sock = nfc_llcp_sock_get(local, ssap, LLCP_SAP_SDP);
376 if (l_sock) {
377 l_sock->ssap = LLCP_SDP_UNBOUND;
378 nfc_llcp_sock_put(l_sock);
379 }
380 }
381
382 mutex_unlock(&local->sdp_lock);
383
384 return;
276 } else if (ssap < LLCP_MAX_SAP) { 385 } else if (ssap < LLCP_MAX_SAP) {
277 local_ssap = ssap - LLCP_LOCAL_NUM_SAP; 386 local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
278 sdp = &local->local_sap; 387 sdp = &local->local_sap;
@@ -287,19 +396,26 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
287 mutex_unlock(&local->sdp_lock); 396 mutex_unlock(&local->sdp_lock);
288} 397}
289 398
290u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) 399static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
291{ 400{
292 struct nfc_llcp_local *local; 401 u8 ssap;
293 402
294 local = nfc_llcp_find_local(dev); 403 mutex_lock(&local->sdp_lock);
295 if (local == NULL) { 404
296 *general_bytes_len = 0; 405 ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
297 return NULL; 406 if (ssap == LLCP_SDP_NUM_SAP) {
407 mutex_unlock(&local->sdp_lock);
408
409 return LLCP_SAP_MAX;
298 } 410 }
299 411
300 *general_bytes_len = local->gb_len; 412 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
301 413
302 return local->gb; 414 set_bit(ssap, &local->local_sdp);
415
416 mutex_unlock(&local->sdp_lock);
417
418 return LLCP_WKS_NUM_SAP + ssap;
303} 419}
304 420
305static int nfc_llcp_build_gb(struct nfc_llcp_local *local) 421static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
@@ -363,6 +479,23 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
363 return 0; 479 return 0;
364} 480}
365 481
482u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
483{
484 struct nfc_llcp_local *local;
485
486 local = nfc_llcp_find_local(dev);
487 if (local == NULL) {
488 *general_bytes_len = 0;
489 return NULL;
490 }
491
492 nfc_llcp_build_gb(local);
493
494 *general_bytes_len = local->gb_len;
495
496 return local->gb;
497}
498
366int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) 499int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
367{ 500{
368 struct nfc_llcp_local *local = nfc_llcp_find_local(dev); 501 struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
@@ -384,31 +517,9 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
384 return -EINVAL; 517 return -EINVAL;
385 } 518 }
386 519
387 return nfc_llcp_parse_tlv(local, 520 return nfc_llcp_parse_gb_tlv(local,
388 &local->remote_gb[3], 521 &local->remote_gb[3],
389 local->remote_gb_len - 3); 522 local->remote_gb_len - 3);
390}
391
392static void nfc_llcp_tx_work(struct work_struct *work)
393{
394 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
395 tx_work);
396 struct sk_buff *skb;
397
398 skb = skb_dequeue(&local->tx_queue);
399 if (skb != NULL) {
400 pr_debug("Sending pending skb\n");
401 print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
402 16, 1, skb->data, skb->len, true);
403
404 nfc_data_exchange(local->dev, local->target_idx,
405 skb, nfc_llcp_recv, local);
406 } else {
407 nfc_llcp_send_symm(local->dev);
408 }
409
410 mod_timer(&local->link_timer,
411 jiffies + msecs_to_jiffies(local->remote_lto));
412} 523}
413 524
414static u8 nfc_llcp_dsap(struct sk_buff *pdu) 525static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -443,51 +554,84 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
443 sock->recv_ack_n = (sock->recv_n - 1) % 16; 554 sock->recv_ack_n = (sock->recv_n - 1) % 16;
444} 555}
445 556
446static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, 557static void nfc_llcp_tx_work(struct work_struct *work)
447 u8 ssap, u8 dsap)
448{ 558{
449 struct nfc_llcp_sock *sock, *llcp_sock, *n; 559 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
560 tx_work);
561 struct sk_buff *skb;
562 struct sock *sk;
563 struct nfc_llcp_sock *llcp_sock;
450 564
451 pr_debug("ssap dsap %d %d\n", ssap, dsap); 565 skb = skb_dequeue(&local->tx_queue);
566 if (skb != NULL) {
567 sk = skb->sk;
568 llcp_sock = nfc_llcp_sock(sk);
569 if (llcp_sock != NULL) {
570 int ret;
571
572 pr_debug("Sending pending skb\n");
573 print_hex_dump(KERN_DEBUG, "LLCP Tx: ",
574 DUMP_PREFIX_OFFSET, 16, 1,
575 skb->data, skb->len, true);
576
577 ret = nfc_data_exchange(local->dev, local->target_idx,
578 skb, nfc_llcp_recv, local);
579
580 if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
581 skb = skb_get(skb);
582 skb_queue_tail(&llcp_sock->tx_pending_queue,
583 skb);
584 }
585 } else {
586 nfc_llcp_send_symm(local->dev);
587 }
588 } else {
589 nfc_llcp_send_symm(local->dev);
590 }
452 591
453 if (ssap == 0 && dsap == 0) 592 mod_timer(&local->link_timer,
454 return NULL; 593 jiffies + msecs_to_jiffies(2 * local->remote_lto));
594}
455 595
456 mutex_lock(&local->socket_lock); 596static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local,
457 sock = local->sockets[ssap]; 597 u8 ssap)
458 if (sock == NULL) { 598{
459 mutex_unlock(&local->socket_lock); 599 struct sock *sk;
460 return NULL; 600 struct nfc_llcp_sock *llcp_sock;
461 } 601 struct hlist_node *node;
462 602
463 pr_debug("root dsap %d (%d)\n", sock->dsap, dsap); 603 read_lock(&local->connecting_sockets.lock);
464 604
465 if (sock->dsap == dsap) { 605 sk_for_each(sk, node, &local->connecting_sockets.head) {
466 sock_hold(&sock->sk); 606 llcp_sock = nfc_llcp_sock(sk);
467 mutex_unlock(&local->socket_lock);
468 return sock;
469 }
470 607
471 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { 608 if (llcp_sock->ssap == ssap) {
472 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
473 &llcp_sock->sk, llcp_sock->dsap);
474 if (llcp_sock->dsap == dsap) {
475 sock_hold(&llcp_sock->sk); 609 sock_hold(&llcp_sock->sk);
476 mutex_unlock(&local->socket_lock); 610 goto out;
477 return llcp_sock;
478 } 611 }
479 } 612 }
480 613
481 pr_err("Could not find socket for %d %d\n", ssap, dsap); 614 llcp_sock = NULL;
482 615
483 mutex_unlock(&local->socket_lock); 616out:
617 read_unlock(&local->connecting_sockets.lock);
484 618
485 return NULL; 619 return llcp_sock;
486} 620}
487 621
488static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) 622static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
623 u8 *sn, size_t sn_len)
489{ 624{
490 sock_put(&sock->sk); 625 struct nfc_llcp_sock *llcp_sock;
626
627 llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
628
629 if (llcp_sock == NULL)
630 return NULL;
631
632 sock_hold(&llcp_sock->sk);
633
634 return llcp_sock;
491} 635}
492 636
493static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len) 637static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
@@ -518,35 +662,19 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
518{ 662{
519 struct sock *new_sk, *parent; 663 struct sock *new_sk, *parent;
520 struct nfc_llcp_sock *sock, *new_sock; 664 struct nfc_llcp_sock *sock, *new_sock;
521 u8 dsap, ssap, bound_sap, reason; 665 u8 dsap, ssap, reason;
522 666
523 dsap = nfc_llcp_dsap(skb); 667 dsap = nfc_llcp_dsap(skb);
524 ssap = nfc_llcp_ssap(skb); 668 ssap = nfc_llcp_ssap(skb);
525 669
526 pr_debug("%d %d\n", dsap, ssap); 670 pr_debug("%d %d\n", dsap, ssap);
527 671
528 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
529 skb->len - LLCP_HEADER_SIZE);
530
531 if (dsap != LLCP_SAP_SDP) { 672 if (dsap != LLCP_SAP_SDP) {
532 bound_sap = dsap; 673 sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
533 674 if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) {
534 mutex_lock(&local->socket_lock);
535 sock = local->sockets[dsap];
536 if (sock == NULL) {
537 mutex_unlock(&local->socket_lock);
538 reason = LLCP_DM_NOBOUND; 675 reason = LLCP_DM_NOBOUND;
539 goto fail; 676 goto fail;
540 } 677 }
541
542 sock_hold(&sock->sk);
543 mutex_unlock(&local->socket_lock);
544
545 lock_sock(&sock->sk);
546
547 if (sock->dsap == LLCP_SAP_SDP &&
548 sock->sk.sk_state == LLCP_LISTEN)
549 goto enqueue;
550 } else { 678 } else {
551 u8 *sn; 679 u8 *sn;
552 size_t sn_len; 680 size_t sn_len;
@@ -559,40 +687,15 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
559 687
560 pr_debug("Service name length %zu\n", sn_len); 688 pr_debug("Service name length %zu\n", sn_len);
561 689
562 mutex_lock(&local->socket_lock); 690 sock = nfc_llcp_sock_get_sn(local, sn, sn_len);
563 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; 691 if (sock == NULL) {
564 bound_sap++) { 692 reason = LLCP_DM_NOBOUND;
565 sock = local->sockets[bound_sap]; 693 goto fail;
566 if (sock == NULL)
567 continue;
568
569 if (sock->service_name == NULL ||
570 sock->service_name_len == 0)
571 continue;
572
573 if (sock->service_name_len != sn_len)
574 continue;
575
576 if (sock->dsap == LLCP_SAP_SDP &&
577 sock->sk.sk_state == LLCP_LISTEN &&
578 !memcmp(sn, sock->service_name, sn_len)) {
579 pr_debug("Found service name at SAP %d\n",
580 bound_sap);
581 sock_hold(&sock->sk);
582 mutex_unlock(&local->socket_lock);
583
584 lock_sock(&sock->sk);
585
586 goto enqueue;
587 }
588 } 694 }
589 mutex_unlock(&local->socket_lock);
590 } 695 }
591 696
592 reason = LLCP_DM_NOBOUND; 697 lock_sock(&sock->sk);
593 goto fail;
594 698
595enqueue:
596 parent = &sock->sk; 699 parent = &sock->sk;
597 700
598 if (sk_acceptq_is_full(parent)) { 701 if (sk_acceptq_is_full(parent)) {
@@ -602,6 +705,21 @@ enqueue:
602 goto fail; 705 goto fail;
603 } 706 }
604 707
708 if (sock->ssap == LLCP_SDP_UNBOUND) {
709 u8 ssap = nfc_llcp_reserve_sdp_ssap(local);
710
711 pr_debug("First client, reserving %d\n", ssap);
712
713 if (ssap == LLCP_SAP_MAX) {
714 reason = LLCP_DM_REJ;
715 release_sock(&sock->sk);
716 sock_put(&sock->sk);
717 goto fail;
718 }
719
720 sock->ssap = ssap;
721 }
722
605 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC); 723 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
606 if (new_sk == NULL) { 724 if (new_sk == NULL) {
607 reason = LLCP_DM_REJ; 725 reason = LLCP_DM_REJ;
@@ -612,15 +730,31 @@ enqueue:
612 730
613 new_sock = nfc_llcp_sock(new_sk); 731 new_sock = nfc_llcp_sock(new_sk);
614 new_sock->dev = local->dev; 732 new_sock->dev = local->dev;
615 new_sock->local = local; 733 new_sock->local = nfc_llcp_local_get(local);
734 new_sock->miu = local->remote_miu;
616 new_sock->nfc_protocol = sock->nfc_protocol; 735 new_sock->nfc_protocol = sock->nfc_protocol;
617 new_sock->ssap = bound_sap;
618 new_sock->dsap = ssap; 736 new_sock->dsap = ssap;
737 new_sock->target_idx = local->target_idx;
619 new_sock->parent = parent; 738 new_sock->parent = parent;
739 new_sock->ssap = sock->ssap;
740 if (sock->ssap < LLCP_LOCAL_NUM_SAP && sock->ssap >= LLCP_WKS_NUM_SAP) {
741 atomic_t *client_count;
742
743 pr_debug("reserved_ssap %d for %p\n", sock->ssap, new_sock);
744
745 client_count =
746 &local->local_sdp_cnt[sock->ssap - LLCP_WKS_NUM_SAP];
747
748 atomic_inc(client_count);
749 new_sock->reserved_ssap = sock->ssap;
750 }
751
752 nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE],
753 skb->len - LLCP_HEADER_SIZE);
620 754
621 pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); 755 pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
622 756
623 list_add_tail(&new_sock->list, &sock->list); 757 nfc_llcp_sock_link(&local->sockets, new_sk);
624 758
625 nfc_llcp_accept_enqueue(&sock->sk, new_sk); 759 nfc_llcp_accept_enqueue(&sock->sk, new_sk);
626 760
@@ -654,12 +788,12 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
654 788
655 pr_debug("Remote ready %d tx queue len %d remote rw %d", 789 pr_debug("Remote ready %d tx queue len %d remote rw %d",
656 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), 790 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
657 local->remote_rw); 791 sock->rw);
658 792
659 /* Try to queue some I frames for transmission */ 793 /* Try to queue some I frames for transmission */
660 while (sock->remote_ready && 794 while (sock->remote_ready &&
661 skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) { 795 skb_queue_len(&sock->tx_pending_queue) < sock->rw) {
662 struct sk_buff *pdu, *pending_pdu; 796 struct sk_buff *pdu;
663 797
664 pdu = skb_dequeue(&sock->tx_queue); 798 pdu = skb_dequeue(&sock->tx_queue);
665 if (pdu == NULL) 799 if (pdu == NULL)
@@ -668,10 +802,7 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
668 /* Update N(S)/N(R) */ 802 /* Update N(S)/N(R) */
669 nfc_llcp_set_nrns(sock, pdu); 803 nfc_llcp_set_nrns(sock, pdu);
670 804
671 pending_pdu = skb_clone(pdu, GFP_KERNEL);
672
673 skb_queue_tail(&local->tx_queue, pdu); 805 skb_queue_tail(&local->tx_queue, pdu);
674 skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
675 nr_frames++; 806 nr_frames++;
676 } 807 }
677 808
@@ -728,11 +859,21 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
728 859
729 llcp_sock->send_ack_n = nr; 860 llcp_sock->send_ack_n = nr;
730 861
731 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) 862 /* Remove and free all skbs until ns == nr */
732 if (nfc_llcp_ns(s) <= nr) { 863 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
733 skb_unlink(s, &llcp_sock->tx_pending_queue); 864 skb_unlink(s, &llcp_sock->tx_pending_queue);
734 kfree_skb(s); 865 kfree_skb(s);
735 } 866
867 if (nfc_llcp_ns(s) == nr)
868 break;
869 }
870
871 /* Re-queue the remaining skbs for transmission */
872 skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue,
873 s, tmp) {
874 skb_unlink(s, &llcp_sock->tx_pending_queue);
875 skb_queue_head(&local->tx_queue, s);
876 }
736 } 877 }
737 878
738 if (ptype == LLCP_PDU_RR) 879 if (ptype == LLCP_PDU_RR)
@@ -740,7 +881,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
740 else if (ptype == LLCP_PDU_RNR) 881 else if (ptype == LLCP_PDU_RNR)
741 llcp_sock->remote_ready = false; 882 llcp_sock->remote_ready = false;
742 883
743 if (nfc_llcp_queue_i_frames(llcp_sock) == 0) 884 if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I)
744 nfc_llcp_send_rr(llcp_sock); 885 nfc_llcp_send_rr(llcp_sock);
745 886
746 release_sock(sk); 887 release_sock(sk);
@@ -791,11 +932,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
791 dsap = nfc_llcp_dsap(skb); 932 dsap = nfc_llcp_dsap(skb);
792 ssap = nfc_llcp_ssap(skb); 933 ssap = nfc_llcp_ssap(skb);
793 934
794 llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); 935 llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
795
796 if (llcp_sock == NULL)
797 llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
798
799 if (llcp_sock == NULL) { 936 if (llcp_sock == NULL) {
800 pr_err("Invalid CC\n"); 937 pr_err("Invalid CC\n");
801 nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); 938 nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
@@ -803,11 +940,15 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
803 return; 940 return;
804 } 941 }
805 942
806 llcp_sock->dsap = ssap;
807 sk = &llcp_sock->sk; 943 sk = &llcp_sock->sk;
808 944
809 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 945 /* Unlink from connecting and link to the client array */
810 skb->len - LLCP_HEADER_SIZE); 946 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
947 nfc_llcp_sock_link(&local->sockets, sk);
948 llcp_sock->dsap = ssap;
949
950 nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE],
951 skb->len - LLCP_HEADER_SIZE);
811 952
812 sk->sk_state = LLCP_CONNECTED; 953 sk->sk_state = LLCP_CONNECTED;
813 sk->sk_state_change(sk); 954 sk->sk_state_change(sk);
@@ -815,6 +956,45 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
815 nfc_llcp_sock_put(llcp_sock); 956 nfc_llcp_sock_put(llcp_sock);
816} 957}
817 958
959static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
960{
961 struct nfc_llcp_sock *llcp_sock;
962 struct sock *sk;
963 u8 dsap, ssap, reason;
964
965 dsap = nfc_llcp_dsap(skb);
966 ssap = nfc_llcp_ssap(skb);
967 reason = skb->data[2];
968
969 pr_debug("%d %d reason %d\n", ssap, dsap, reason);
970
971 switch (reason) {
972 case LLCP_DM_NOBOUND:
973 case LLCP_DM_REJ:
974 llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
975 break;
976
977 default:
978 llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
979 break;
980 }
981
982 if (llcp_sock == NULL) {
983 pr_err("Invalid DM\n");
984 return;
985 }
986
987 sk = &llcp_sock->sk;
988
989 sk->sk_err = ENXIO;
990 sk->sk_state = LLCP_CLOSED;
991 sk->sk_state_change(sk);
992
993 nfc_llcp_sock_put(llcp_sock);
994
995 return;
996}
997
818static void nfc_llcp_rx_work(struct work_struct *work) 998static void nfc_llcp_rx_work(struct work_struct *work)
819{ 999{
820 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 1000 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -858,6 +1038,11 @@ static void nfc_llcp_rx_work(struct work_struct *work)
858 nfc_llcp_recv_cc(local, skb); 1038 nfc_llcp_recv_cc(local, skb);
859 break; 1039 break;
860 1040
1041 case LLCP_PDU_DM:
1042 pr_debug("DM\n");
1043 nfc_llcp_recv_dm(local, skb);
1044 break;
1045
861 case LLCP_PDU_I: 1046 case LLCP_PDU_I:
862 case LLCP_PDU_RR: 1047 case LLCP_PDU_RR:
863 case LLCP_PDU_RNR: 1048 case LLCP_PDU_RNR:
@@ -891,6 +1076,21 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
891 return; 1076 return;
892} 1077}
893 1078
1079int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
1080{
1081 struct nfc_llcp_local *local;
1082
1083 local = nfc_llcp_find_local(dev);
1084 if (local == NULL)
1085 return -ENODEV;
1086
1087 local->rx_pending = skb_get(skb);
1088 del_timer(&local->link_timer);
1089 queue_work(local->rx_wq, &local->rx_work);
1090
1091 return 0;
1092}
1093
894void nfc_llcp_mac_is_down(struct nfc_dev *dev) 1094void nfc_llcp_mac_is_down(struct nfc_dev *dev)
895{ 1095{
896 struct nfc_llcp_local *local; 1096 struct nfc_llcp_local *local;
@@ -899,10 +1099,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
899 if (local == NULL) 1099 if (local == NULL)
900 return; 1100 return;
901 1101
902 nfc_llcp_clear_sdp(local);
903
904 /* Close and purge all existing sockets */ 1102 /* Close and purge all existing sockets */
905 nfc_llcp_socket_release(local); 1103 nfc_llcp_socket_release(local, true);
906} 1104}
907 1105
908void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, 1106void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -943,8 +1141,8 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
943 1141
944 local->dev = ndev; 1142 local->dev = ndev;
945 INIT_LIST_HEAD(&local->list); 1143 INIT_LIST_HEAD(&local->list);
1144 kref_init(&local->ref);
946 mutex_init(&local->sdp_lock); 1145 mutex_init(&local->sdp_lock);
947 mutex_init(&local->socket_lock);
948 init_timer(&local->link_timer); 1146 init_timer(&local->link_timer);
949 local->link_timer.data = (unsigned long) local; 1147 local->link_timer.data = (unsigned long) local;
950 local->link_timer.function = nfc_llcp_symm_timer; 1148 local->link_timer.function = nfc_llcp_symm_timer;
@@ -984,11 +1182,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
984 goto err_rx_wq; 1182 goto err_rx_wq;
985 } 1183 }
986 1184
1185 local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock);
1186 local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock);
1187
987 nfc_llcp_build_gb(local); 1188 nfc_llcp_build_gb(local);
988 1189
989 local->remote_miu = LLCP_DEFAULT_MIU; 1190 local->remote_miu = LLCP_DEFAULT_MIU;
990 local->remote_lto = LLCP_DEFAULT_LTO; 1191 local->remote_lto = LLCP_DEFAULT_LTO;
991 local->remote_rw = LLCP_DEFAULT_RW;
992 1192
993 list_add(&llcp_devices, &local->list); 1193 list_add(&llcp_devices, &local->list);
994 1194
@@ -1015,14 +1215,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
1015 return; 1215 return;
1016 } 1216 }
1017 1217
1018 list_del(&local->list); 1218 nfc_llcp_local_put(local);
1019 nfc_llcp_socket_release(local);
1020 del_timer_sync(&local->link_timer);
1021 skb_queue_purge(&local->tx_queue);
1022 destroy_workqueue(local->tx_wq);
1023 destroy_workqueue(local->rx_wq);
1024 kfree_skb(local->rx_pending);
1025 kfree(local);
1026} 1219}
1027 1220
1028int __init nfc_llcp_init(void) 1221int __init nfc_llcp_init(void)
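The llcp.c changes above put nfc_llcp_local on a kref: every bound or connected socket takes a reference through nfc_llcp_local_get(), and the teardown that nfc_llcp_unregister_device() used to do inline now runs from local_release() on the final put. A standalone model of that ownership scheme, with illustrative names and C11 atomics standing in for kref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct local_model {
        atomic_int ref;
};

static struct local_model *local_get(struct local_model *l)
{
        atomic_fetch_add(&l->ref, 1);
        return l;
}

static void local_put(struct local_model *l)
{
        if (atomic_fetch_sub(&l->ref, 1) == 1) {
                /* last reference: release sockets, timers, queues... */
                printf("releasing local\n");
                free(l);
        }
}

int main(void)
{
        struct local_model *l = malloc(sizeof(*l));
        if (l == NULL)
                return 1;
        atomic_init(&l->ref, 1);       /* kref_init() equivalent */

        local_get(l);                  /* e.g. socket bind */
        local_put(l);                  /* socket freed */
        local_put(l);                  /* device unregister: final free */
        return 0;
}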
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 50680ce5ae43..83b8bba5a280 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -37,15 +37,22 @@ enum llcp_state {
37#define LLCP_LOCAL_NUM_SAP 32 37#define LLCP_LOCAL_NUM_SAP 32
38#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP) 38#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
39#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP) 39#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
40#define LLCP_SDP_UNBOUND (LLCP_MAX_SAP + 1)
40 41
41struct nfc_llcp_sock; 42struct nfc_llcp_sock;
42 43
44struct llcp_sock_list {
45 struct hlist_head head;
46 rwlock_t lock;
47};
48
43struct nfc_llcp_local { 49struct nfc_llcp_local {
44 struct list_head list; 50 struct list_head list;
45 struct nfc_dev *dev; 51 struct nfc_dev *dev;
46 52
53 struct kref ref;
54
47 struct mutex sdp_lock; 55 struct mutex sdp_lock;
48 struct mutex socket_lock;
49 56
50 struct timer_list link_timer; 57 struct timer_list link_timer;
51 struct sk_buff_head tx_queue; 58 struct sk_buff_head tx_queue;
@@ -63,6 +70,7 @@ struct nfc_llcp_local {
63 unsigned long local_wks; /* Well known services */ 70 unsigned long local_wks; /* Well known services */
64 unsigned long local_sdp; /* Local services */ 71 unsigned long local_sdp; /* Local services */
65 unsigned long local_sap; /* Local SAPs, not available for discovery */ 72 unsigned long local_sap; /* Local SAPs, not available for discovery */
73 atomic_t local_sdp_cnt[LLCP_SDP_NUM_SAP];
66 74
67 /* local */ 75 /* local */
68 u8 gb[NFC_MAX_GT_LEN]; 76 u8 gb[NFC_MAX_GT_LEN];
@@ -77,24 +85,26 @@ struct nfc_llcp_local {
77 u16 remote_lto; 85 u16 remote_lto;
78 u8 remote_opt; 86 u8 remote_opt;
79 u16 remote_wks; 87 u16 remote_wks;
80 u8 remote_rw;
81 88
82 /* sockets array */ 89 /* sockets array */
83 struct nfc_llcp_sock *sockets[LLCP_MAX_SAP]; 90 struct llcp_sock_list sockets;
91 struct llcp_sock_list connecting_sockets;
84}; 92};
85 93
86struct nfc_llcp_sock { 94struct nfc_llcp_sock {
87 struct sock sk; 95 struct sock sk;
88 struct list_head list;
89 struct nfc_dev *dev; 96 struct nfc_dev *dev;
90 struct nfc_llcp_local *local; 97 struct nfc_llcp_local *local;
91 u32 target_idx; 98 u32 target_idx;
92 u32 nfc_protocol; 99 u32 nfc_protocol;
93 100
101 /* Link parameters */
94 u8 ssap; 102 u8 ssap;
95 u8 dsap; 103 u8 dsap;
96 char *service_name; 104 char *service_name;
97 size_t service_name_len; 105 size_t service_name_len;
106 u8 rw;
107 u16 miu;
98 108
99 /* Link variables */ 109 /* Link variables */
100 u8 send_n; 110 u8 send_n;
@@ -105,6 +115,9 @@ struct nfc_llcp_sock {
105 /* Is the remote peer ready to receive */ 115 /* Is the remote peer ready to receive */
106 u8 remote_ready; 116 u8 remote_ready;
107 117
118 /* Reserved source SAP */
119 u8 reserved_ssap;
120
108 struct sk_buff_head tx_queue; 121 struct sk_buff_head tx_queue;
109 struct sk_buff_head tx_pending_queue; 122 struct sk_buff_head tx_pending_queue;
110 struct sk_buff_head tx_backlog_queue; 123 struct sk_buff_head tx_backlog_queue;
@@ -164,7 +177,11 @@ struct nfc_llcp_sock {
164#define LLCP_DM_REJ 0x03 177#define LLCP_DM_REJ 0x03
165 178
166 179
180void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
181void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
167struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); 182struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
183struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
184int nfc_llcp_local_put(struct nfc_llcp_local *local);
168u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 185u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
169 struct nfc_llcp_sock *sock); 186 struct nfc_llcp_sock *sock);
170u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); 187u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
@@ -179,8 +196,10 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
179struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock); 196struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
180 197
181/* TLV API */ 198/* TLV API */
182int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 199int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
183 u8 *tlv_array, u16 tlv_array_len); 200 u8 *tlv_array, u16 tlv_array_len);
201int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
202 u8 *tlv_array, u16 tlv_array_len);
184 203
185/* Commands API */ 204/* Commands API */
186void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 205void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
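llcp.h above swaps the fixed sockets[LLCP_MAX_SAP] array for rwlock-protected hlists and introduces LLCP_SDP_UNBOUND: a socket bound to a service name no longer consumes one of the scarce SDP SAPs at bind time. The SAP is reserved when the first client connects and returned once local_sdp_cnt for it drops to zero. The sketch below models that deferred reservation in standalone form; the bitmap-plus-atomic bookkeeping of the patch is simplified to plain counters, and all names are illustrative.

#include <stdio.h>

#define SDP_SAPS    16
#define SAP_UNBOUND 0xff

static int clients[SDP_SAPS];   /* stands in for local_sdp_cnt[] */

static int sap_connect(int *sap)
{
        if (*sap == SAP_UNBOUND) {
                /* first client: reserve a real SAP now, not at bind time */
                for (int s = 0; s < SDP_SAPS; s++)
                        if (clients[s] == 0) { *sap = s; break; }
                if (*sap == SAP_UNBOUND)
                        return -1;      /* no SAP left: reject (LLCP_DM_REJ) */
        }
        clients[*sap]++;
        return 0;
}

static void sap_put(int *sap)
{
        if (--clients[*sap] == 0)
                *sap = SAP_UNBOUND;     /* last client gone: SAP free again */
}

int main(void)
{
        int listener = SAP_UNBOUND;

        sap_connect(&listener);  /* first client reserves a SAP */
        sap_connect(&listener);  /* second client shares it */
        sap_put(&listener);
        sap_put(&listener);      /* back to unbound */
        printf("listener sap: %#x\n", listener);
        return 0;
}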
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 17a707db40eb..ddeb9aa398f0 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -78,11 +78,11 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
78 struct sockaddr_nfc_llcp llcp_addr; 78 struct sockaddr_nfc_llcp llcp_addr;
79 int len, ret = 0; 79 int len, ret = 0;
80 80
81 pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
82
83 if (!addr || addr->sa_family != AF_NFC) 81 if (!addr || addr->sa_family != AF_NFC)
84 return -EINVAL; 82 return -EINVAL;
85 83
84 pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
85
86 memset(&llcp_addr, 0, sizeof(llcp_addr)); 86 memset(&llcp_addr, 0, sizeof(llcp_addr));
87 len = min_t(unsigned int, sizeof(llcp_addr), alen); 87 len = min_t(unsigned int, sizeof(llcp_addr), alen);
88 memcpy(&llcp_addr, addr, len); 88 memcpy(&llcp_addr, addr, len);
@@ -111,7 +111,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
111 } 111 }
112 112
113 llcp_sock->dev = dev; 113 llcp_sock->dev = dev;
114 llcp_sock->local = local; 114 llcp_sock->local = nfc_llcp_local_get(local);
115 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; 115 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
116 llcp_sock->service_name_len = min_t(unsigned int, 116 llcp_sock->service_name_len = min_t(unsigned int,
117 llcp_addr.service_name_len, 117 llcp_addr.service_name_len,
@@ -121,10 +121,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
121 GFP_KERNEL); 121 GFP_KERNEL);
122 122
123 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); 123 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
124 if (llcp_sock->ssap == LLCP_MAX_SAP) 124 if (llcp_sock->ssap == LLCP_SAP_MAX) {
125 ret = -EADDRINUSE;
125 goto put_dev; 126 goto put_dev;
127 }
128
129 llcp_sock->reserved_ssap = llcp_sock->ssap;
126 130
127 local->sockets[llcp_sock->ssap] = llcp_sock; 131 nfc_llcp_sock_link(&local->sockets, sk);
128 132
129 pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); 133 pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
130 134
@@ -283,22 +287,28 @@ error:
283 return ret; 287 return ret;
284} 288}
285 289
286static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, 290static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
287 int *len, int peer) 291 int *len, int peer)
288{ 292{
289 struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
290 struct sock *sk = sock->sk; 293 struct sock *sk = sock->sk;
291 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 294 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
295 DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr);
292 296
293 pr_debug("%p\n", sk); 297 if (llcp_sock == NULL || llcp_sock->dev == NULL)
298 return -EBADFD;
299
300 pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
301 llcp_sock->dsap, llcp_sock->ssap);
294 302
295 if (llcp_sock == NULL) 303 if (llcp_sock == NULL || llcp_sock->dev == NULL)
296 return -EBADFD; 304 return -EBADFD;
297 305
298 addr->sa_family = AF_NFC; 306 uaddr->sa_family = AF_NFC;
307
299 *len = sizeof(struct sockaddr_nfc_llcp); 308 *len = sizeof(struct sockaddr_nfc_llcp);
300 309
301 llcp_addr->dev_idx = llcp_sock->dev->idx; 310 llcp_addr->dev_idx = llcp_sock->dev->idx;
311 llcp_addr->target_idx = llcp_sock->target_idx;
302 llcp_addr->dsap = llcp_sock->dsap; 312 llcp_addr->dsap = llcp_sock->dsap;
303 llcp_addr->ssap = llcp_sock->ssap; 313 llcp_addr->ssap = llcp_sock->ssap;
304 llcp_addr->service_name_len = llcp_sock->service_name_len; 314 llcp_addr->service_name_len = llcp_sock->service_name_len;
@@ -382,15 +392,6 @@ static int llcp_sock_release(struct socket *sock)
382 goto out; 392 goto out;
383 } 393 }
384 394
385 mutex_lock(&local->socket_lock);
386
387 if (llcp_sock == local->sockets[llcp_sock->ssap])
388 local->sockets[llcp_sock->ssap] = NULL;
389 else
390 list_del_init(&llcp_sock->list);
391
392 mutex_unlock(&local->socket_lock);
393
394 lock_sock(sk); 395 lock_sock(sk);
395 396
396 /* Send a DISC */ 397 /* Send a DISC */
@@ -415,14 +416,13 @@ static int llcp_sock_release(struct socket *sock)
415 } 416 }
416 } 417 }
417 418
418 /* Freeing the SAP */ 419 if (llcp_sock->reserved_ssap < LLCP_SAP_MAX)
419 if ((sk->sk_state == LLCP_CONNECTED
420 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
421 sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
422 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); 420 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
423 421
424 release_sock(sk); 422 release_sock(sk);
425 423
424 nfc_llcp_sock_unlink(&local->sockets, sk);
425
426out: 426out:
427 sock_orphan(sk); 427 sock_orphan(sk);
428 sock_put(sk); 428 sock_put(sk);
@@ -490,12 +490,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
490 } 490 }
491 491
492 llcp_sock->dev = dev; 492 llcp_sock->dev = dev;
493 llcp_sock->local = local; 493 llcp_sock->local = nfc_llcp_local_get(local);
494 llcp_sock->miu = llcp_sock->local->remote_miu;
494 llcp_sock->ssap = nfc_llcp_get_local_ssap(local); 495 llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
495 if (llcp_sock->ssap == LLCP_SAP_MAX) { 496 if (llcp_sock->ssap == LLCP_SAP_MAX) {
496 ret = -ENOMEM; 497 ret = -ENOMEM;
497 goto put_dev; 498 goto put_dev;
498 } 499 }
500
501 llcp_sock->reserved_ssap = llcp_sock->ssap;
502
499 if (addr->service_name_len == 0) 503 if (addr->service_name_len == 0)
500 llcp_sock->dsap = addr->dsap; 504 llcp_sock->dsap = addr->dsap;
501 else 505 else
@@ -508,21 +512,26 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
508 llcp_sock->service_name_len, 512 llcp_sock->service_name_len,
509 GFP_KERNEL); 513 GFP_KERNEL);
510 514
511 local->sockets[llcp_sock->ssap] = llcp_sock; 515 nfc_llcp_sock_link(&local->connecting_sockets, sk);
512 516
513 ret = nfc_llcp_send_connect(llcp_sock); 517 ret = nfc_llcp_send_connect(llcp_sock);
514 if (ret) 518 if (ret)
515 goto put_dev; 519 goto sock_unlink;
516 520
517 ret = sock_wait_state(sk, LLCP_CONNECTED, 521 ret = sock_wait_state(sk, LLCP_CONNECTED,
518 sock_sndtimeo(sk, flags & O_NONBLOCK)); 522 sock_sndtimeo(sk, flags & O_NONBLOCK));
519 if (ret) 523 if (ret)
520 goto put_dev; 524 goto sock_unlink;
521 525
522 release_sock(sk); 526 release_sock(sk);
523 527
524 return 0; 528 return 0;
525 529
530sock_unlink:
531 nfc_llcp_put_ssap(local, llcp_sock->ssap);
532
533 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
534
526put_dev: 535put_dev:
527 nfc_put_device(dev); 536 nfc_put_device(dev);
528 537
@@ -687,13 +696,15 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
687 696
688 llcp_sock->ssap = 0; 697 llcp_sock->ssap = 0;
689 llcp_sock->dsap = LLCP_SAP_SDP; 698 llcp_sock->dsap = LLCP_SAP_SDP;
699 llcp_sock->rw = LLCP_DEFAULT_RW;
700 llcp_sock->miu = LLCP_DEFAULT_MIU;
690 llcp_sock->send_n = llcp_sock->send_ack_n = 0; 701 llcp_sock->send_n = llcp_sock->send_ack_n = 0;
691 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; 702 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
692 llcp_sock->remote_ready = 1; 703 llcp_sock->remote_ready = 1;
704 llcp_sock->reserved_ssap = LLCP_SAP_MAX;
693 skb_queue_head_init(&llcp_sock->tx_queue); 705 skb_queue_head_init(&llcp_sock->tx_queue);
694 skb_queue_head_init(&llcp_sock->tx_pending_queue); 706 skb_queue_head_init(&llcp_sock->tx_pending_queue);
695 skb_queue_head_init(&llcp_sock->tx_backlog_queue); 707 skb_queue_head_init(&llcp_sock->tx_backlog_queue);
696 INIT_LIST_HEAD(&llcp_sock->list);
697 INIT_LIST_HEAD(&llcp_sock->accept_queue); 708 INIT_LIST_HEAD(&llcp_sock->accept_queue);
698 709
699 if (sock != NULL) 710 if (sock != NULL)
@@ -704,8 +715,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
704 715
705void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) 716void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
706{ 717{
707 struct nfc_llcp_local *local = sock->local;
708
709 kfree(sock->service_name); 718 kfree(sock->service_name);
710 719
711 skb_queue_purge(&sock->tx_queue); 720 skb_queue_purge(&sock->tx_queue);
@@ -714,12 +723,9 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
714 723
715 list_del_init(&sock->accept_queue); 724 list_del_init(&sock->accept_queue);
716 725
717 if (local != NULL && sock == local->sockets[sock->ssap])
718 local->sockets[sock->ssap] = NULL;
719 else
720 list_del_init(&sock->list);
721
722 sock->parent = NULL; 726 sock->parent = NULL;
727
728 nfc_llcp_local_put(sock->local);
723} 729}
724 730
725static int llcp_sock_create(struct net *net, struct socket *sock, 731static int llcp_sock_create(struct net *net, struct socket *sock,
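Note on the llcp_sock.c hunks above: release and free paths now give a SAP back only when the socket recorded one in reserved_ssap (initialised to LLCP_SAP_MAX, meaning "nothing reserved"), replacing the fragile state-machine test and the open-coded socket-table bookkeeping. A minimal userspace sketch of that reserve/release discipline follows; toy_sock, sap_get, sap_put and the 32-entry bitmap are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdint.h>

#define SAP_MAX 32                    /* sentinel: "no SAP reserved" */

static uint32_t sap_bitmap;           /* one bit per local SAP */

struct toy_sock {
    unsigned int ssap;
    unsigned int reserved_ssap;       /* SAP_MAX when nothing to free */
};

static unsigned int sap_get(void)
{
    unsigned int i;

    for (i = 0; i < SAP_MAX; i++)
        if (!(sap_bitmap & (1u << i))) {
            sap_bitmap |= 1u << i;
            return i;
        }
    return SAP_MAX;                   /* exhausted */
}

static void sap_put(unsigned int sap)
{
    if (sap < SAP_MAX)
        sap_bitmap &= ~(1u << sap);
}

static void toy_release(struct toy_sock *sk)
{
    /* Free the SAP only if this socket actually reserved one. */
    if (sk->reserved_ssap < SAP_MAX)
        sap_put(sk->reserved_ssap);
    sk->reserved_ssap = SAP_MAX;
}

int main(void)
{
    struct toy_sock sk = { 0, SAP_MAX };

    sk.ssap = sap_get();
    sk.reserved_ssap = sk.ssap;       /* remember what to give back */
    printf("reserved SAP %u\n", sk.ssap);

    toy_release(&sk);
    toy_release(&sk);                 /* second call is a harmless no-op */
    return 0;
}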
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index d560e6f13072..f81efe13985a 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -27,6 +27,7 @@
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ 28#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
29 29
30#include <linux/module.h>
30#include <linux/types.h> 31#include <linux/types.h>
31#include <linux/workqueue.h> 32#include <linux/workqueue.h>
32#include <linux/completion.h> 33#include <linux/completion.h>
@@ -194,7 +195,7 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
194 } 195 }
195 196
196 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 197 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
197 (protocols & NFC_PROTO_ISO14443_MASK)) { 198 (protocols & NFC_PROTO_ISO14443_B_MASK)) {
198 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 199 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
199 NCI_NFC_B_PASSIVE_POLL_MODE; 200 NCI_NFC_B_PASSIVE_POLL_MODE;
200 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 201 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -387,7 +388,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
387 return nci_close_device(ndev); 388 return nci_close_device(ndev);
388} 389}
389 390
390static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) 391static int nci_start_poll(struct nfc_dev *nfc_dev,
392 __u32 im_protocols, __u32 tm_protocols)
391{ 393{
392 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 394 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
393 int rc; 395 int rc;
@@ -413,11 +415,11 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
413 return -EBUSY; 415 return -EBUSY;
414 } 416 }
415 417
416 rc = nci_request(ndev, nci_rf_discover_req, protocols, 418 rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
417 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 419 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
418 420
419 if (!rc) 421 if (!rc)
420 ndev->poll_prots = protocols; 422 ndev->poll_prots = im_protocols;
421 423
422 return rc; 424 return rc;
423} 425}
@@ -485,7 +487,8 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
485 param.rf_protocol = NCI_RF_PROTOCOL_T2T; 487 param.rf_protocol = NCI_RF_PROTOCOL_T2T;
486 else if (protocol == NFC_PROTO_FELICA) 488 else if (protocol == NFC_PROTO_FELICA)
487 param.rf_protocol = NCI_RF_PROTOCOL_T3T; 489 param.rf_protocol = NCI_RF_PROTOCOL_T3T;
488 else if (protocol == NFC_PROTO_ISO14443) 490 else if (protocol == NFC_PROTO_ISO14443 ||
491 protocol == NFC_PROTO_ISO14443_B)
489 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; 492 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
490 else 493 else
491 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; 494 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
@@ -521,9 +524,9 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
521 } 524 }
522} 525}
523 526
524static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, 527static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
525 struct sk_buff *skb, 528 struct sk_buff *skb,
526 data_exchange_cb_t cb, void *cb_context) 529 data_exchange_cb_t cb, void *cb_context)
527{ 530{
528 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 531 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
529 int rc; 532 int rc;
@@ -556,7 +559,7 @@ static struct nfc_ops nci_nfc_ops = {
556 .stop_poll = nci_stop_poll, 559 .stop_poll = nci_stop_poll,
557 .activate_target = nci_activate_target, 560 .activate_target = nci_activate_target,
558 .deactivate_target = nci_deactivate_target, 561 .deactivate_target = nci_deactivate_target,
559 .data_exchange = nci_data_exchange, 562 .im_transceive = nci_transceive,
560}; 563};
561 564
562/* ---- Interface to NCI drivers ---- */ 565/* ---- Interface to NCI drivers ---- */
@@ -878,3 +881,5 @@ static void nci_cmd_work(struct work_struct *work)
878 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); 881 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
879 } 882 }
880} 883}
884
885MODULE_LICENSE("GPL");
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index cb2646179e5f..af7a93b04393 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
106 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); 106 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
107 data += 2; 107 data += 2;
108 108
109 nfca_poll->nfcid1_len = *data++; 109 nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
110 110
111 pr_debug("sens_res 0x%x, nfcid1_len %d\n", 111 pr_debug("sens_res 0x%x, nfcid1_len %d\n",
112 nfca_poll->sens_res, nfca_poll->nfcid1_len); 112 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
130 struct rf_tech_specific_params_nfcb_poll *nfcb_poll, 130 struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
131 __u8 *data) 131 __u8 *data)
132{ 132{
133 nfcb_poll->sensb_res_len = *data++; 133 nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
134 134
135 pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len); 135 pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
136 136
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
145 __u8 *data) 145 __u8 *data)
146{ 146{
147 nfcf_poll->bit_rate = *data++; 147 nfcf_poll->bit_rate = *data++;
148 nfcf_poll->sensf_res_len = *data++; 148 nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
149 149
150 pr_debug("bit_rate %d, sensf_res_len %d\n", 150 pr_debug("bit_rate %d, sensf_res_len %d\n",
151 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len); 151 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -170,7 +170,10 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
170 if (rf_protocol == NCI_RF_PROTOCOL_T2T) 170 if (rf_protocol == NCI_RF_PROTOCOL_T2T)
171 protocol = NFC_PROTO_MIFARE_MASK; 171 protocol = NFC_PROTO_MIFARE_MASK;
172 else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) 172 else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
173 protocol = NFC_PROTO_ISO14443_MASK; 173 if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE)
174 protocol = NFC_PROTO_ISO14443_MASK;
175 else
176 protocol = NFC_PROTO_ISO14443_B_MASK;
174 else if (rf_protocol == NCI_RF_PROTOCOL_T3T) 177 else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
175 protocol = NFC_PROTO_FELICA_MASK; 178 protocol = NFC_PROTO_FELICA_MASK;
176 else 179 else
@@ -331,7 +334,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
331 switch (ntf->activation_rf_tech_and_mode) { 334 switch (ntf->activation_rf_tech_and_mode) {
332 case NCI_NFC_A_PASSIVE_POLL_MODE: 335 case NCI_NFC_A_PASSIVE_POLL_MODE:
333 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; 336 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
334 nfca_poll->rats_res_len = *data++; 337 nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
335 pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len); 338 pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
336 if (nfca_poll->rats_res_len > 0) { 339 if (nfca_poll->rats_res_len > 0) {
337 memcpy(nfca_poll->rats_res, 340 memcpy(nfca_poll->rats_res,
@@ -341,7 +344,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
341 344
342 case NCI_NFC_B_PASSIVE_POLL_MODE: 345 case NCI_NFC_B_PASSIVE_POLL_MODE:
343 nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep; 346 nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
344 nfcb_poll->attrib_res_len = *data++; 347 nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
345 pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len); 348 pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
346 if (nfcb_poll->attrib_res_len > 0) { 349 if (nfcb_poll->attrib_res_len > 0) {
347 memcpy(nfcb_poll->attrib_res, 350 memcpy(nfcb_poll->attrib_res,
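The ntf.c fixes above all follow one pattern: a length byte taken from the radio is clamped with min_t() to the size of the destination array before it is used as a memcpy() count, so a malicious or broken peer cannot overflow the parameter structs. A standalone sketch of the idiom; the buffer names are invented and min_u8() stands in for the kernel's min_t().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NFCID1_MAXSIZE 10             /* mirrors NFC_NFCID1_MAXSIZE */

struct poll_params {
    uint8_t nfcid1_len;
    uint8_t nfcid1[NFCID1_MAXSIZE];
};

static uint8_t min_u8(uint8_t a, uint8_t b)
{
    return a < b ? a : b;
}

static const uint8_t *parse_nfcid1(struct poll_params *p, const uint8_t *data)
{
    /* Untrusted length byte: clamp it before using it as a copy size. */
    uint8_t claimed = *data++;

    p->nfcid1_len = min_u8(claimed, NFCID1_MAXSIZE);
    memcpy(p->nfcid1, data, p->nfcid1_len);
    return data + p->nfcid1_len;
}

int main(void)
{
    /* Claims 0xFF payload bytes; the clamp caps the copy at 10. */
    uint8_t wire[17] = { 0xFF, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    struct poll_params p;

    parse_nfcid1(&p, wire);
    printf("copied %u bytes\n", p.nfcid1_len);
    return 0;
}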
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 581d419083aa..4c51714ee741 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -49,6 +49,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, 49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, 50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, 51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
52 [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
53 [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
52}; 54};
53 55
54static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 56static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -165,7 +167,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
165 167
166 dev->genl_data.poll_req_pid = 0; 168 dev->genl_data.poll_req_pid = 0;
167 169
168 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 170 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
169 if (!msg) 171 if (!msg)
170 return -ENOMEM; 172 return -ENOMEM;
171 173
@@ -193,7 +195,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
193 struct sk_buff *msg; 195 struct sk_buff *msg;
194 void *hdr; 196 void *hdr;
195 197
196 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 198 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
197 if (!msg) 199 if (!msg)
198 return -ENOMEM; 200 return -ENOMEM;
199 201
@@ -219,12 +221,74 @@ free_msg:
219 return -EMSGSIZE; 221 return -EMSGSIZE;
220} 222}
221 223
224int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol)
225{
226 struct sk_buff *msg;
227 void *hdr;
228
229 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
230 if (!msg)
231 return -ENOMEM;
232
233 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
234 NFC_EVENT_TM_ACTIVATED);
235 if (!hdr)
236 goto free_msg;
237
238 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
239 goto nla_put_failure;
240 if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol))
241 goto nla_put_failure;
242
243 genlmsg_end(msg, hdr);
244
245 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
246
247 return 0;
248
249nla_put_failure:
250 genlmsg_cancel(msg, hdr);
251free_msg:
252 nlmsg_free(msg);
253 return -EMSGSIZE;
254}
255
256int nfc_genl_tm_deactivated(struct nfc_dev *dev)
257{
258 struct sk_buff *msg;
259 void *hdr;
260
261 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
262 if (!msg)
263 return -ENOMEM;
264
265 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
266 NFC_EVENT_TM_DEACTIVATED);
267 if (!hdr)
268 goto free_msg;
269
270 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
271 goto nla_put_failure;
272
273 genlmsg_end(msg, hdr);
274
275 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
276
277 return 0;
278
279nla_put_failure:
280 genlmsg_cancel(msg, hdr);
281free_msg:
282 nlmsg_free(msg);
283 return -EMSGSIZE;
284}
285
222int nfc_genl_device_added(struct nfc_dev *dev) 286int nfc_genl_device_added(struct nfc_dev *dev)
223{ 287{
224 struct sk_buff *msg; 288 struct sk_buff *msg;
225 void *hdr; 289 void *hdr;
226 290
227 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 291 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
228 if (!msg) 292 if (!msg)
229 return -ENOMEM; 293 return -ENOMEM;
230 294
@@ -257,7 +321,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
257 struct sk_buff *msg; 321 struct sk_buff *msg;
258 void *hdr; 322 void *hdr;
259 323
260 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 324 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
261 if (!msg) 325 if (!msg)
262 return -ENOMEM; 326 return -ENOMEM;
263 327
@@ -370,7 +434,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
370 434
371 pr_debug("DEP link is up\n"); 435 pr_debug("DEP link is up\n");
372 436
373 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 437 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
374 if (!msg) 438 if (!msg)
375 return -ENOMEM; 439 return -ENOMEM;
376 440
@@ -409,7 +473,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
409 473
410 pr_debug("DEP link is down\n"); 474 pr_debug("DEP link is down\n");
411 475
412 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 476 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
413 if (!msg) 477 if (!msg)
414 return -ENOMEM; 478 return -ENOMEM;
415 479
@@ -450,7 +514,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
450 if (!dev) 514 if (!dev)
451 return -ENODEV; 515 return -ENODEV;
452 516
453 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 517 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
454 if (!msg) { 518 if (!msg) {
455 rc = -ENOMEM; 519 rc = -ENOMEM;
456 goto out_putdev; 520 goto out_putdev;
@@ -519,16 +583,25 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
519 struct nfc_dev *dev; 583 struct nfc_dev *dev;
520 int rc; 584 int rc;
521 u32 idx; 585 u32 idx;
522 u32 protocols; 586 u32 im_protocols = 0, tm_protocols = 0;
523 587
524 pr_debug("Poll start\n"); 588 pr_debug("Poll start\n");
525 589
526 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 590 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
527 !info->attrs[NFC_ATTR_PROTOCOLS]) 591 ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
592 !info->attrs[NFC_ATTR_PROTOCOLS]) &&
593 !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
528 return -EINVAL; 594 return -EINVAL;
529 595
530 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 596 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
531 protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); 597
598 if (info->attrs[NFC_ATTR_TM_PROTOCOLS])
599 tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]);
600
601 if (info->attrs[NFC_ATTR_IM_PROTOCOLS])
602 im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]);
603 else if (info->attrs[NFC_ATTR_PROTOCOLS])
604 im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
532 605
533 dev = nfc_get_device(idx); 606 dev = nfc_get_device(idx);
534 if (!dev) 607 if (!dev)
@@ -536,7 +609,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
536 609
537 mutex_lock(&dev->genl_data.genl_data_mutex); 610 mutex_lock(&dev->genl_data.genl_data_mutex);
538 611
539 rc = nfc_start_poll(dev, protocols); 612 rc = nfc_start_poll(dev, im_protocols, tm_protocols);
540 if (!rc) 613 if (!rc)
541 dev->genl_data.poll_req_pid = info->snd_pid; 614 dev->genl_data.poll_req_pid = info->snd_pid;
542 615
@@ -561,6 +634,15 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
561 if (!dev) 634 if (!dev)
562 return -ENODEV; 635 return -ENODEV;
563 636
637 device_lock(&dev->dev);
638
639 if (!dev->polling) {
640 device_unlock(&dev->dev);
641 return -EINVAL;
642 }
643
644 device_unlock(&dev->dev);
645
564 mutex_lock(&dev->genl_data.genl_data_mutex); 646 mutex_lock(&dev->genl_data.genl_data_mutex);
565 647
566 if (dev->genl_data.poll_req_pid != info->snd_pid) { 648 if (dev->genl_data.poll_req_pid != info->snd_pid) {
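The start-poll change above splits the single protocol mask into initiator-mode (IM) and target-mode (TM) masks while staying compatible with userspace that still sends the legacy NFC_ATTR_PROTOCOLS attribute: the new attribute wins when present, the old one is a fallback, and at least one of the masks must be supplied. A plain-C sketch of that fallback logic; struct attrs and start_poll are invented stand-ins for the genl plumbing.

#include <stdio.h>

struct attrs {
    const unsigned int *im_protocols;   /* NULL when attribute absent */
    const unsigned int *protocols;      /* legacy name */
    const unsigned int *tm_protocols;
};

static int start_poll(const struct attrs *a)
{
    unsigned int im = 0, tm = 0;

    if (!a->im_protocols && !a->protocols && !a->tm_protocols)
        return -22;                     /* -EINVAL: nothing to poll for */

    if (a->tm_protocols)
        tm = *a->tm_protocols;

    if (a->im_protocols)
        im = *a->im_protocols;          /* new attribute takes precedence */
    else if (a->protocols)
        im = *a->protocols;             /* fall back to the legacy one */

    printf("polling im=0x%x tm=0x%x\n", im, tm);
    return 0;
}

int main(void)
{
    unsigned int legacy = 0x4;
    struct attrs a = { 0, &legacy, 0 };

    return start_poll(&a) ? 1 : 0;
}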
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 3dd4232ae664..c5e42b79a418 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -55,6 +55,7 @@ int nfc_llcp_register_device(struct nfc_dev *dev);
55void nfc_llcp_unregister_device(struct nfc_dev *dev); 55void nfc_llcp_unregister_device(struct nfc_dev *dev);
56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); 56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); 57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
58int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
58int __init nfc_llcp_init(void); 59int __init nfc_llcp_init(void);
59void nfc_llcp_exit(void); 60void nfc_llcp_exit(void);
60 61
@@ -90,6 +91,12 @@ static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
90 return NULL; 91 return NULL;
91} 92}
92 93
94static inline int nfc_llcp_data_received(struct nfc_dev *dev,
95 struct sk_buff *skb)
96{
97 return 0;
98}
99
93static inline int nfc_llcp_init(void) 100static inline int nfc_llcp_init(void)
94{ 101{
95 return 0; 102 return 0;
@@ -128,6 +135,9 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
128 u8 comm_mode, u8 rf_mode); 135 u8 comm_mode, u8 rf_mode);
129int nfc_genl_dep_link_down_event(struct nfc_dev *dev); 136int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
130 137
138int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
139int nfc_genl_tm_deactivated(struct nfc_dev *dev);
140
131struct nfc_dev *nfc_get_device(unsigned int idx); 141struct nfc_dev *nfc_get_device(unsigned int idx);
132 142
133static inline void nfc_put_device(struct nfc_dev *dev) 143static inline void nfc_put_device(struct nfc_dev *dev)
@@ -158,7 +168,7 @@ int nfc_dev_up(struct nfc_dev *dev);
158 168
159int nfc_dev_down(struct nfc_dev *dev); 169int nfc_dev_down(struct nfc_dev *dev);
160 170
161int nfc_start_poll(struct nfc_dev *dev, u32 protocols); 171int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols);
162 172
163int nfc_stop_poll(struct nfc_dev *dev); 173int nfc_stop_poll(struct nfc_dev *dev);
164 174
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index ec1134c9e07f..8b8a6a2b2bad 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
54{ 54{
55 struct sock *sk = sock->sk; 55 struct sock *sk = sock->sk;
56 56
57 pr_debug("sock=%p\n", sock); 57 pr_debug("sock=%p sk=%p\n", sock, sk);
58
59 if (!sk)
60 return 0;
58 61
59 sock_orphan(sk); 62 sock_orphan(sk);
60 sock_put(sk); 63 sock_put(sk);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 48badffaafc1..f3f96badf5aa 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2012 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2c74daa5aca5..d8277d29e710 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2012 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -263,14 +263,15 @@ err:
263static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb, 263static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
264 const struct dp_upcall_info *upcall_info) 264 const struct dp_upcall_info *upcall_info)
265{ 265{
266 unsigned short gso_type = skb_shinfo(skb)->gso_type;
266 struct dp_upcall_info later_info; 267 struct dp_upcall_info later_info;
267 struct sw_flow_key later_key; 268 struct sw_flow_key later_key;
268 struct sk_buff *segs, *nskb; 269 struct sk_buff *segs, *nskb;
269 int err; 270 int err;
270 271
271 segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); 272 segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
272 if (IS_ERR(skb)) 273 if (IS_ERR(segs))
273 return PTR_ERR(skb); 274 return PTR_ERR(segs);
274 275
275 /* Queue all of the segments. */ 276 /* Queue all of the segments. */
276 skb = segs; 277 skb = segs;
@@ -279,7 +280,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
279 if (err) 280 if (err)
280 break; 281 break;
281 282
282 if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) { 283 if (skb == segs && gso_type & SKB_GSO_UDP) {
283 /* The initial flow key extracted by ovs_flow_extract() 284 /* The initial flow key extracted by ovs_flow_extract()
284 * in this case is for a first fragment, so we need to 285 * in this case is for a first fragment, so we need to
285 * properly mark later fragments. 286 * properly mark later fragments.
@@ -1649,7 +1650,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1649 1650
1650 if (!err && a[OVS_VPORT_ATTR_OPTIONS]) 1651 if (!err && a[OVS_VPORT_ATTR_OPTIONS])
1651 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); 1652 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1652 if (!err && a[OVS_VPORT_ATTR_UPCALL_PID]) 1653 if (err)
1654 goto exit_unlock;
1655 if (a[OVS_VPORT_ATTR_UPCALL_PID])
1653 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); 1656 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1654 1657
1655 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1658 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
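Two related fixes in queue_gso_packets() above: gso_type is snapshotted before skb_gso_segment() because the original skb may be consumed by segmentation, and the error test now inspects segs, the pointer the call actually returned, instead of the stale skb. A self-contained sketch of both points using the usual ERR_PTR encoding; segment(), struct buf and the fail flag are invented.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(uintptr_t)(p))

struct buf { unsigned short gso_type; };

static struct buf segmented;

/* Stand-in for skb_gso_segment(): may fail with an ERR_PTR, and on
 * success the original buffer must be treated as gone. */
static struct buf *segment(struct buf *b, int fail)
{
    if (fail)
        return (struct buf *)(uintptr_t)-ENOMEM;
    segmented = *b;
    b->gso_type = 0;                  /* original no longer meaningful */
    return &segmented;
}

static int queue_segments(struct buf *skb, int fail)
{
    unsigned short gso_type = skb->gso_type;  /* snapshot first */
    struct buf *segs = segment(skb, fail);

    if (IS_ERR(segs))                 /* test segs, not the stale skb */
        return (int)PTR_ERR(segs);

    printf("queued, gso_type was 0x%x\n", gso_type);
    return 0;
}

int main(void)
{
    struct buf b = { 0x2 };

    printf("fail path -> %d\n", queue_segments(&b, 1));
    b.gso_type = 0x2;
    printf("ok   path -> %d\n", queue_segments(&b, 0));
    return 0;
}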
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c73370cc1f02..c1105c147531 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 46736518c453..36dcee8fc84a 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 6d4d8097cf96..b7f38b161909 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2011 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -182,7 +182,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
182{ 182{
183 u8 tcp_flags = 0; 183 u8 tcp_flags = 0;
184 184
185 if (flow->key.eth.type == htons(ETH_P_IP) && 185 if ((flow->key.eth.type == htons(ETH_P_IP) ||
186 flow->key.eth.type == htons(ETH_P_IPV6)) &&
186 flow->key.ip.proto == IPPROTO_TCP && 187 flow->key.ip.proto == IPPROTO_TCP &&
187 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { 188 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
188 u8 *tcp = (u8 *)tcp_hdr(skb); 189 u8 *tcp = (u8 *)tcp_hdr(skb);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2747dc2c4ac1..9b75617ca4e0 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2011 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index b6b1d7daa3cb..4061b9ee07f7 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -24,6 +24,9 @@
24#include <linux/ethtool.h> 24#include <linux/ethtool.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26 26
27#include <net/dst.h>
28#include <net/xfrm.h>
29
27#include "datapath.h" 30#include "datapath.h"
28#include "vport-internal_dev.h" 31#include "vport-internal_dev.h"
29#include "vport-netdev.h" 32#include "vport-netdev.h"
@@ -209,6 +212,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
209 int len; 212 int len;
210 213
211 len = skb->len; 214 len = skb->len;
215
216 skb_dst_drop(skb);
217 nf_reset(skb);
218 secpath_reset(skb);
219
212 skb->dev = netdev; 220 skb->dev = netdev;
213 skb->pkt_type = PACKET_HOST; 221 skb->pkt_type = PACKET_HOST;
214 skb->protocol = eth_type_trans(skb, netdev); 222 skb->protocol = eth_type_trans(skb, netdev);
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
index 3454447c5f11..9a7d30ecc6a2 100644
--- a/net/openvswitch/vport-internal_dev.h
+++ b/net/openvswitch/vport-internal_dev.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2011 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 3fd6c0d88e12..6ea3551cc78c 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index fd9b008a0e6e..f7072a25c604 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2011 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6c066ba25dc7..6140336e79d7 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 19609629dabd..aac680ca2b06 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2011 Nicira Networks. 2 * Copyright (c) 2007-2012 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0f661745df0f..ceaca7c134a0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -531,6 +531,7 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; 531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
532 struct ethtool_cmd ecmd; 532 struct ethtool_cmd ecmd;
533 int err; 533 int err;
534 u32 speed;
534 535
535 rtnl_lock(); 536 rtnl_lock();
536 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 537 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
@@ -539,25 +540,18 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
539 return DEFAULT_PRB_RETIRE_TOV; 540 return DEFAULT_PRB_RETIRE_TOV;
540 } 541 }
541 err = __ethtool_get_settings(dev, &ecmd); 542 err = __ethtool_get_settings(dev, &ecmd);
543 speed = ethtool_cmd_speed(&ecmd);
542 rtnl_unlock(); 544 rtnl_unlock();
543 if (!err) { 545 if (!err) {
544 switch (ecmd.speed) {
545 case SPEED_10000:
546 msec = 1;
547 div = 10000/1000;
548 break;
549 case SPEED_1000:
550 msec = 1;
551 div = 1000/1000;
552 break;
553 /* 546 /*
554 * If the link speed is so slow you don't really 547 * If the link speed is so slow you don't really
555 * need to worry about perf anyways 548 * need to worry about perf anyways
556 */ 549 */
557 case SPEED_100: 550 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
558 case SPEED_10:
559 default:
560 return DEFAULT_PRB_RETIRE_TOV; 551 return DEFAULT_PRB_RETIRE_TOV;
552 } else {
553 msec = 1;
554 div = speed / 1000;
561 } 555 }
562 } 556 }
563 557
@@ -592,7 +586,7 @@ static void init_prb_bdqc(struct packet_sock *po,
592 p1->knxt_seq_num = 1; 586 p1->knxt_seq_num = 1;
593 p1->pkbdq = pg_vec; 587 p1->pkbdq = pg_vec;
594 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 588 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
595 p1->pkblk_start = (char *)pg_vec[0].buffer; 589 p1->pkblk_start = pg_vec[0].buffer;
596 p1->kblk_size = req_u->req3.tp_block_size; 590 p1->kblk_size = req_u->req3.tp_block_size;
597 p1->knum_blocks = req_u->req3.tp_block_nr; 591 p1->knum_blocks = req_u->req3.tp_block_nr;
598 p1->hdrlen = po->tp_hdrlen; 592 p1->hdrlen = po->tp_hdrlen;
@@ -824,8 +818,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
824 h1->ts_first_pkt.ts_sec = ts.tv_sec; 818 h1->ts_first_pkt.ts_sec = ts.tv_sec;
825 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 819 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
826 pkc1->pkblk_start = (char *)pbd1; 820 pkc1->pkblk_start = (char *)pbd1;
827 pkc1->nxt_offset = (char *)(pkc1->pkblk_start + 821 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
828 BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
829 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 822 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
830 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 823 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
831 pbd1->version = pkc1->version; 824 pbd1->version = pkc1->version;
@@ -1018,7 +1011,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1018 struct tpacket_block_desc *pbd; 1011 struct tpacket_block_desc *pbd;
1019 char *curr, *end; 1012 char *curr, *end;
1020 1013
1021 pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring)); 1014 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1022 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1015 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1023 1016
1024 /* Queue is frozen when user space is lagging behind */ 1017 /* Queue is frozen when user space is lagging behind */
@@ -1044,7 +1037,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1044 smp_mb(); 1037 smp_mb();
1045 curr = pkc->nxt_offset; 1038 curr = pkc->nxt_offset;
1046 pkc->skb = skb; 1039 pkc->skb = skb;
1047 end = (char *) ((char *)pbd + pkc->kblk_size); 1040 end = (char *)pbd + pkc->kblk_size;
1048 1041
1049 /* first try the current block */ 1042 /* first try the current block */
1050 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1043 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
@@ -1476,7 +1469,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1476 * Find the device first to size check it 1469 * Find the device first to size check it
1477 */ 1470 */
1478 1471
1479 saddr->spkt_device[13] = 0; 1472 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1480retry: 1473retry:
1481 rcu_read_lock(); 1474 rcu_read_lock();
1482 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1475 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
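prb_calc_retire_blk_tmo() above replaces the per-speed switch with a single computation: links below 1 Gb/s, or of unknown speed, keep the default block-retire timeout, and faster links scale it by speed/1000. A rough standalone sketch of that shape; the block-size arithmetic and the constants are simplified stand-ins for the kernel's, not the real formula.

#include <stdio.h>

#define SPEED_UNKNOWN       (-1)
#define SPEED_1000          1000
#define DEFAULT_RETIRE_TOV  60          /* ms, illustrative */

static unsigned int calc_retire_tov(int speed, unsigned int blk_bytes)
{
    unsigned int msec, div, mbits;

    if (speed < SPEED_1000 || speed == SPEED_UNKNOWN)
        return DEFAULT_RETIRE_TOV;      /* slow/unknown link: default */

    msec  = 1;                          /* ~1 ms per Mbit, illustrative */
    div   = speed / 1000;               /* 1 for 1G, 10 for 10G, ... */
    mbits = (blk_bytes * 8) / (1024 * 1024);
    return (mbits * msec) / div + 1;    /* time to fill one block */
}

int main(void)
{
    printf("100M : %u ms\n", calc_retire_tov(100, 1 << 22));
    printf("1G   : %u ms\n", calc_retire_tov(1000, 1 << 22));
    printf("10G  : %u ms\n", calc_retire_tov(10000, 1 << 22));
    return 0;
}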
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 779ce4ff92ec..5a940dbd74a3 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com> 9 * Rémi Denis-Courmont
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index bf35b4e1a14c..12c30f3e643e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com> 9 * Rémi Denis-Courmont
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d01208968c83..a2fba7edfd1f 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Author: Rémi Denis-Courmont
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9dd4f926f7d1..576f22c9c76e 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Author: Rémi Denis-Courmont
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 36f75a9e2c3d..5bf6341e2dd4 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com> 9 * Rémi Denis-Courmont
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index cfdf135fcd69..7dd762a464e5 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com> 9 * Remi Denis-Courmont
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 89cfa9ce4939..0acc943f713a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com> 9 * Rémi Denis-Courmont
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 696348fd31a1..d6bbbbd0af18 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Author: Rémi Denis-Courmont
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/net/rds/page.c b/net/rds/page.c
index 2499cd108421..9005a2c920ee 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -74,11 +74,12 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
74} 74}
75EXPORT_SYMBOL_GPL(rds_page_copy_user); 75EXPORT_SYMBOL_GPL(rds_page_copy_user);
76 76
77/* 77/**
78 * Message allocation uses this to build up regions of a message. 78 * rds_page_remainder_alloc - build up regions of a message.
79 * 79 *
80 * @bytes - the number of bytes needed. 80 * @scat: Scatter list for message
81 * @gfp - the waiting behaviour of the allocation 81 * @bytes: the number of bytes needed.
82 * @gfp: the waiting behaviour of the allocation
82 * 83 *
83 * @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to 84 * @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to
84 * kmap the pages, etc. 85 * kmap the pages, etc.
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 5c6e9f132026..9f0f17cf6bf9 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
410 410
411 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo); 411 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
412 412
413 msg->msg_namelen = 0;
414
413 if (msg_flags & MSG_OOB) 415 if (msg_flags & MSG_OOB)
414 goto out; 416 goto out;
415 417
@@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
485 sin->sin_port = inc->i_hdr.h_sport; 487 sin->sin_port = inc->i_hdr.h_sport;
486 sin->sin_addr.s_addr = inc->i_saddr; 488 sin->sin_addr.s_addr = inc->i_saddr;
487 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 489 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
490 msg->msg_namelen = sizeof(*sin);
488 } 491 }
489 break; 492 break;
490 } 493 }
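The rds_recvmsg() fix above zeroes msg_namelen on entry and sets it only on the path that actually writes a source address, so early-exit and error paths can never report a valid-looking length for a sockaddr that was never filled in (otherwise an uninitialized-stack infoleak to userspace). A toy sketch of the discipline, with simplified types that are not the kernel msghdr.

#include <stdio.h>
#include <string.h>

struct toy_addr { unsigned short port; unsigned int ip; };
struct toy_msg  { struct toy_addr name; unsigned int namelen; };

static int toy_recvmsg(struct toy_msg *msg, int have_data)
{
    msg->namelen = 0;                    /* nothing valid yet */

    if (!have_data)
        return -11;                      /* -EAGAIN: namelen stays 0 */

    msg->name.port = 4321;
    msg->name.ip   = 0x7f000001;
    msg->namelen   = sizeof(msg->name);  /* only now is it meaningful */
    return 0;
}

int main(void)
{
    struct toy_msg m;

    memset(&m, 0, sizeof(m));
    printf("empty: rc=%d namelen=%u\n", toy_recvmsg(&m, 0), m.namelen);
    printf("data : rc=%d namelen=%u\n", toy_recvmsg(&m, 1), m.namelen);
    return 0;
}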
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f974961754ca..752b72360ebc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -325,7 +325,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
325 325
326 rfkill_global_states[type].cur = blocked; 326 rfkill_global_states[type].cur = blocked;
327 list_for_each_entry(rfkill, &rfkill_list, node) { 327 list_for_each_entry(rfkill, &rfkill_list, node) {
328 if (rfkill->type != type) 328 if (rfkill->type != type && type != RFKILL_TYPE_ALL)
329 continue; 329 continue;
330 330
331 rfkill_set_block(rfkill, blocked); 331 rfkill_set_block(rfkill, blocked);
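The one-line rfkill fix above makes RFKILL_TYPE_ALL behave as a true wildcard: the per-type skip now applies only when a specific type was requested. A tiny sketch of the predicate, with invented enum values standing in for the rfkill types.

#include <stdio.h>

enum toy_type { TYPE_ALL = 0, TYPE_WLAN, TYPE_BT };

static int matches(enum toy_type entry, enum toy_type requested)
{
    /* Skip only when a specific type was requested and differs. */
    return !(entry != requested && requested != TYPE_ALL);
}

int main(void)
{
    printf("wlan vs wlan: %d\n", matches(TYPE_WLAN, TYPE_WLAN)); /* 1 */
    printf("bt   vs wlan: %d\n", matches(TYPE_BT,  TYPE_WLAN));  /* 0 */
    printf("bt   vs ALL : %d\n", matches(TYPE_BT,  TYPE_ALL));   /* 1 */
    return 0;
}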
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5d6b572a6704..a9206087b4d7 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -81,10 +81,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
81 _net("I/F MTU %u", mtu); 81 _net("I/F MTU %u", mtu);
82 } 82 }
83 83
84 /* ip_rt_frag_needed() may have eaten the info */
85 if (mtu == 0)
86 mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
87
88 if (mtu == 0) { 84 if (mtu == 0) {
89 /* they didn't give us a size, estimate one */ 85 /* they didn't give us a size, estimate one */
90 if (mtu > 1500) { 86 if (mtu > 1500) {
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 16ae88762d00..e1ac183d50bb 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -242,7 +242,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
242 242
243EXPORT_SYMBOL(rxrpc_kernel_send_data); 243EXPORT_SYMBOL(rxrpc_kernel_send_data);
244 244
245/* 245/**
246 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call 246 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
247 * @call: The call to be aborted 247 * @call: The call to be aborted
248 * @abort_code: The abort code to stick into the ABORT packet 248 * @abort_code: The abort code to stick into the ABORT packet
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
229 return peer; 229 return peer;
230 230
231new_UDP_peer: 231new_UDP_peer:
232 _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id); 232 _net("Rx UDP DGRAM from NEW peer");
233 read_unlock_bh(&rxrpc_peer_lock); 233 read_unlock_bh(&rxrpc_peer_lock);
234 _leave(" = -EBUSY [new]"); 234 _leave(" = -EBUSY [new]");
235 return ERR_PTR(-EBUSY); 235 return ERR_PTR(-EBUSY);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e7a8976bf25c..62fb51face8a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -507,6 +507,26 @@ config NET_EMATCH_TEXT
507 To compile this code as a module, choose M here: the 507 To compile this code as a module, choose M here: the
508 module will be called em_text. 508 module will be called em_text.
509 509
510config NET_EMATCH_CANID
511 tristate "CAN Identifier"
512 depends on NET_EMATCH && CAN
513 ---help---
514 Say Y here if you want to be able to classify CAN frames based
515 on CAN Identifier.
516
517 To compile this code as a module, choose M here: the
518 module will be called em_canid.
519
520config NET_EMATCH_IPSET
521 tristate "IPset"
522 depends on NET_EMATCH && IP_SET
523 ---help---
524 Say Y here if you want to be able to classify packets based on
525 ipset membership.
526
527 To compile this code as a module, choose M here: the
528 module will be called em_ipset.
529
510config NET_CLS_ACT 530config NET_CLS_ACT
511 bool "Actions" 531 bool "Actions"
512 ---help--- 532 ---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 5940a1992f0d..978cbf004e80 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -55,3 +55,5 @@ obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
55obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o 55obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o
56obj-$(CONFIG_NET_EMATCH_META) += em_meta.o 56obj-$(CONFIG_NET_EMATCH_META) += em_meta.o
57obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o 57obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o
58obj-$(CONFIG_NET_EMATCH_CANID) += em_canid.o
59obj-$(CONFIG_NET_EMATCH_IPSET) += em_ipset.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5cfb160df063..e3d2c78cb52c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -652,27 +652,27 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
652 unsigned char *b = skb_tail_pointer(skb); 652 unsigned char *b = skb_tail_pointer(skb);
653 struct nlattr *nest; 653 struct nlattr *nest;
654 654
655 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 655 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
656 656 if (!nlh)
657 t = NLMSG_DATA(nlh); 657 goto out_nlmsg_trim;
658 t = nlmsg_data(nlh);
658 t->tca_family = AF_UNSPEC; 659 t->tca_family = AF_UNSPEC;
659 t->tca__pad1 = 0; 660 t->tca__pad1 = 0;
660 t->tca__pad2 = 0; 661 t->tca__pad2 = 0;
661 662
662 nest = nla_nest_start(skb, TCA_ACT_TAB); 663 nest = nla_nest_start(skb, TCA_ACT_TAB);
663 if (nest == NULL) 664 if (nest == NULL)
664 goto nla_put_failure; 665 goto out_nlmsg_trim;
665 666
666 if (tcf_action_dump(skb, a, bind, ref) < 0) 667 if (tcf_action_dump(skb, a, bind, ref) < 0)
667 goto nla_put_failure; 668 goto out_nlmsg_trim;
668 669
669 nla_nest_end(skb, nest); 670 nla_nest_end(skb, nest);
670 671
671 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 672 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
672 return skb->len; 673 return skb->len;
673 674
674nla_put_failure: 675out_nlmsg_trim:
675nlmsg_failure:
676 nlmsg_trim(skb, b); 676 nlmsg_trim(skb, b);
677 return -1; 677 return -1;
678} 678}
@@ -799,19 +799,21 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
799 if (a->ops == NULL) 799 if (a->ops == NULL)
800 goto err_out; 800 goto err_out;
801 801
802 nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); 802 nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
803 t = NLMSG_DATA(nlh); 803 if (!nlh)
804 goto out_module_put;
805 t = nlmsg_data(nlh);
804 t->tca_family = AF_UNSPEC; 806 t->tca_family = AF_UNSPEC;
805 t->tca__pad1 = 0; 807 t->tca__pad1 = 0;
806 t->tca__pad2 = 0; 808 t->tca__pad2 = 0;
807 809
808 nest = nla_nest_start(skb, TCA_ACT_TAB); 810 nest = nla_nest_start(skb, TCA_ACT_TAB);
809 if (nest == NULL) 811 if (nest == NULL)
810 goto nla_put_failure; 812 goto out_module_put;
811 813
812 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); 814 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
813 if (err < 0) 815 if (err < 0)
814 goto nla_put_failure; 816 goto out_module_put;
815 if (err == 0) 817 if (err == 0)
816 goto noflush_out; 818 goto noflush_out;
817 819
@@ -828,8 +830,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
828 830
829 return err; 831 return err;
830 832
831nla_put_failure: 833out_module_put:
832nlmsg_failure:
833 module_put(a->ops->owner); 834 module_put(a->ops->owner);
834err_out: 835err_out:
835noflush_out: 836noflush_out:
@@ -919,18 +920,20 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
919 920
920 b = skb_tail_pointer(skb); 921 b = skb_tail_pointer(skb);
921 922
922 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 923 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
923 t = NLMSG_DATA(nlh); 924 if (!nlh)
925 goto out_kfree_skb;
926 t = nlmsg_data(nlh);
924 t->tca_family = AF_UNSPEC; 927 t->tca_family = AF_UNSPEC;
925 t->tca__pad1 = 0; 928 t->tca__pad1 = 0;
926 t->tca__pad2 = 0; 929 t->tca__pad2 = 0;
927 930
928 nest = nla_nest_start(skb, TCA_ACT_TAB); 931 nest = nla_nest_start(skb, TCA_ACT_TAB);
929 if (nest == NULL) 932 if (nest == NULL)
930 goto nla_put_failure; 933 goto out_kfree_skb;
931 934
932 if (tcf_action_dump(skb, a, 0, 0) < 0) 935 if (tcf_action_dump(skb, a, 0, 0) < 0)
933 goto nla_put_failure; 936 goto out_kfree_skb;
934 937
935 nla_nest_end(skb, nest); 938 nla_nest_end(skb, nest);
936 939
@@ -942,8 +945,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
942 err = 0; 945 err = 0;
943 return err; 946 return err;
944 947
945nla_put_failure: 948out_kfree_skb:
946nlmsg_failure:
947 kfree_skb(skb); 949 kfree_skb(skb);
948 return -1; 950 return -1;
949} 951}
@@ -1062,7 +1064,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1062 struct tc_action_ops *a_o; 1064 struct tc_action_ops *a_o;
1063 struct tc_action a; 1065 struct tc_action a;
1064 int ret = 0; 1066 int ret = 0;
1065 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 1067 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1066 struct nlattr *kind = find_dump_kind(cb->nlh); 1068 struct nlattr *kind = find_dump_kind(cb->nlh);
1067 1069
1068 if (kind == NULL) { 1070 if (kind == NULL) {
@@ -1080,23 +1082,25 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1080 if (a_o->walk == NULL) { 1082 if (a_o->walk == NULL) {
1081 WARN(1, "tc_dump_action: %s !capable of dumping table\n", 1083 WARN(1, "tc_dump_action: %s !capable of dumping table\n",
1082 a_o->kind); 1084 a_o->kind);
1083 goto nla_put_failure; 1085 goto out_module_put;
1084 } 1086 }
1085 1087
1086 nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 1088 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
1087 cb->nlh->nlmsg_type, sizeof(*t)); 1089 cb->nlh->nlmsg_type, sizeof(*t), 0);
1088 t = NLMSG_DATA(nlh); 1090 if (!nlh)
1091 goto out_module_put;
1092 t = nlmsg_data(nlh);
1089 t->tca_family = AF_UNSPEC; 1093 t->tca_family = AF_UNSPEC;
1090 t->tca__pad1 = 0; 1094 t->tca__pad1 = 0;
1091 t->tca__pad2 = 0; 1095 t->tca__pad2 = 0;
1092 1096
1093 nest = nla_nest_start(skb, TCA_ACT_TAB); 1097 nest = nla_nest_start(skb, TCA_ACT_TAB);
1094 if (nest == NULL) 1098 if (nest == NULL)
1095 goto nla_put_failure; 1099 goto out_module_put;
1096 1100
1097 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); 1101 ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
1098 if (ret < 0) 1102 if (ret < 0)
1099 goto nla_put_failure; 1103 goto out_module_put;
1100 1104
1101 if (ret > 0) { 1105 if (ret > 0) {
1102 nla_nest_end(skb, nest); 1106 nla_nest_end(skb, nest);
@@ -1110,8 +1114,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1110 module_put(a_o->owner); 1114 module_put(a_o->owner);
1111 return skb->len; 1115 return skb->len;
1112 1116
1113nla_put_failure: 1117out_module_put:
1114nlmsg_failure:
1115 module_put(a_o->owner); 1118 module_put(a_o->owner);
1116 nlmsg_trim(skb, b); 1119 nlmsg_trim(skb, b);
1117 return skb->len; 1120 return skb->len;
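The act_api.c conversion above swaps the NLMSG_NEW()/NLMSG_PUT() macros, which hid a goto to an implicit nlmsg_failure label inside what looked like an assignment, for nlmsg_put(), which returns NULL and lets each caller branch to one explicit, greppable cleanup label. A sketch of the control-flow difference with stand-in types; struct hdr, struct buf and put_hdr() are invented, not the netlink API.

#include <stdio.h>
#include <stddef.h>

struct hdr { size_t len; };
struct buf { char data[64]; size_t used; };

static struct hdr *put_hdr(struct buf *b, size_t payload)
{
    if (b->used + sizeof(struct hdr) + payload > sizeof(b->data))
        return NULL;                     /* caller decides what to do */

    struct hdr *h = (struct hdr *)(b->data + b->used);
    h->len = sizeof(*h) + payload;
    b->used += h->len;
    return h;
}

static int fill(struct buf *b)
{
    struct hdr *h = put_hdr(b, 48);

    if (!h)
        goto out_trim;                   /* explicit, visible label */
    return 0;

out_trim:
    b->used = 0;                         /* roll the message back */
    return -1;
}

int main(void)
{
    struct buf b = { { 0 }, 40 };        /* not enough room for 48 more */

    printf("fill -> %d (used=%zu)\n", fill(&b), b.used);
    return 0;
}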
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f452f696b4b3..6dd1131f2ec1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -140,7 +140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
140 int tp_created = 0; 140 int tp_created = 0;
141 141
142replay: 142replay:
143 t = NLMSG_DATA(n); 143 t = nlmsg_data(n);
144 protocol = TC_H_MIN(t->tcm_info); 144 protocol = TC_H_MIN(t->tcm_info);
145 prio = TC_H_MAJ(t->tcm_info); 145 prio = TC_H_MAJ(t->tcm_info);
146 nprio = prio; 146 nprio = prio;
@@ -349,8 +349,10 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
349 struct nlmsghdr *nlh; 349 struct nlmsghdr *nlh;
350 unsigned char *b = skb_tail_pointer(skb); 350 unsigned char *b = skb_tail_pointer(skb);
351 351
352 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 352 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
353 tcm = NLMSG_DATA(nlh); 353 if (!nlh)
354 goto out_nlmsg_trim;
355 tcm = nlmsg_data(nlh);
354 tcm->tcm_family = AF_UNSPEC; 356 tcm->tcm_family = AF_UNSPEC;
355 tcm->tcm__pad1 = 0; 357 tcm->tcm__pad1 = 0;
356 tcm->tcm__pad2 = 0; 358 tcm->tcm__pad2 = 0;
@@ -368,7 +370,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
368 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 370 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
369 return skb->len; 371 return skb->len;
370 372
371nlmsg_failure: 373out_nlmsg_trim:
372nla_put_failure: 374nla_put_failure:
373 nlmsg_trim(skb, b); 375 nlmsg_trim(skb, b);
374 return -1; 376 return -1;
@@ -418,7 +420,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
418 struct net_device *dev; 420 struct net_device *dev;
419 struct Qdisc *q; 421 struct Qdisc *q;
420 struct tcf_proto *tp, **chain; 422 struct tcf_proto *tp, **chain;
421 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); 423 struct tcmsg *tcm = nlmsg_data(cb->nlh);
422 unsigned long cl = 0; 424 unsigned long cl = 0;
423 const struct Qdisc_class_ops *cops; 425 const struct Qdisc_class_ops *cops;
424 struct tcf_dump_args arg; 426 struct tcf_dump_args arg;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 36fec4227401..44f405cb9aaf 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
143 if (head == NULL) 143 if (head == NULL)
144 goto old_method; 144 goto old_method;
145 145
146 iif = ((struct rtable *)dst)->rt_iif; 146 iif = inet_iif(skb);
147 147
148 h = route4_fastmap_hash(id, iif); 148 h = route4_fastmap_hash(id, iif);
149 if (id == head->fastmap[h].id && 149 if (id == head->fastmap[h].id &&
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
new file mode 100644
index 000000000000..bfd34e4c1afc
--- /dev/null
+++ b/net/sched/em_canid.c
@@ -0,0 +1,240 @@
1/*
2 * em_canid.c Ematch rule to match CAN frames according to their CAN IDs
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Idea: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
10 * Copyright: (c) 2011 Czech Technical University in Prague
11 * (c) 2011 Volkswagen Group Research
12 * Authors: Michal Sojka <sojkam1@fel.cvut.cz>
13 * Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Rostislav Lisovy <lisovy@gmail.cz>
15 * Funded by: Volkswagen Group Research
16 */
17
18#include <linux/slab.h>
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/skbuff.h>
24#include <net/pkt_cls.h>
25#include <linux/can.h>
26
27#define EM_CAN_RULES_MAX 500
28
29struct canid_match {
30 /* For each SFF CAN ID (11 bit) there is one record in this bitfield */
31 DECLARE_BITMAP(match_sff, (1 << CAN_SFF_ID_BITS));
32
33 int rules_count;
34 int sff_rules_count;
35 int eff_rules_count;
36
37 /*
38 * Raw rules copied from netlink message; Used for sending
39 * information to userspace (when 'tc filter show' is invoked)
40 * AND when matching EFF frames
41 */
42 struct can_filter rules_raw[];
43};
44
45/**
 46 * em_canid_get_id() - Extracts the CAN ID out of the sk_buff structure.
47 */
48static canid_t em_canid_get_id(struct sk_buff *skb)
49{
50 /* CAN ID is stored within the data field */
51 struct can_frame *cf = (struct can_frame *)skb->data;
52
53 return cf->can_id;
54}
55
56static void em_canid_sff_match_add(struct canid_match *cm, u32 can_id,
57 u32 can_mask)
58{
59 int i;
60
61 /*
62 * Limit can_mask and can_id to SFF range to
 63 * protect against a write past the end of the array
64 */
65 can_mask &= CAN_SFF_MASK;
66 can_id &= can_mask;
67
68 /* Single frame */
69 if (can_mask == CAN_SFF_MASK) {
70 set_bit(can_id, cm->match_sff);
71 return;
72 }
73
74 /* All frames */
75 if (can_mask == 0) {
76 bitmap_fill(cm->match_sff, (1 << CAN_SFF_ID_BITS));
77 return;
78 }
79
80 /*
81 * Individual frame filter.
82 * Add record (set bit to 1) for each ID that
 83	 * conforms to the particular rule
84 */
85 for (i = 0; i < (1 << CAN_SFF_ID_BITS); i++) {
86 if ((i & can_mask) == can_id)
87 set_bit(i, cm->match_sff);
88 }
89}
90
91static inline struct canid_match *em_canid_priv(struct tcf_ematch *m)
92{
93 return (struct canid_match *)m->data;
94}
95
96static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
97 struct tcf_pkt_info *info)
98{
99 struct canid_match *cm = em_canid_priv(m);
100 canid_t can_id;
101 int match = 0;
102 int i;
103 const struct can_filter *lp;
104
105 can_id = em_canid_get_id(skb);
106
107 if (can_id & CAN_EFF_FLAG) {
108 for (i = 0, lp = cm->rules_raw;
109 i < cm->eff_rules_count; i++, lp++) {
110 if (!(((lp->can_id ^ can_id) & lp->can_mask))) {
111 match = 1;
112 break;
113 }
114 }
115 } else { /* SFF */
116 can_id &= CAN_SFF_MASK;
117 match = (test_bit(can_id, cm->match_sff) ? 1 : 0);
118 }
119
120 return match;
121}
122
123static int em_canid_change(struct tcf_proto *tp, void *data, int len,
124 struct tcf_ematch *m)
125{
126 struct can_filter *conf = data; /* Array with rules */
127 struct canid_match *cm;
128 struct canid_match *cm_old = (struct canid_match *)m->data;
129 int i;
130
131 if (!len)
132 return -EINVAL;
133
134 if (len % sizeof(struct can_filter))
135 return -EINVAL;
136
137 if (len > sizeof(struct can_filter) * EM_CAN_RULES_MAX)
138 return -EINVAL;
139
140 cm = kzalloc(sizeof(struct canid_match) + len, GFP_KERNEL);
141 if (!cm)
142 return -ENOMEM;
143
144 cm->rules_count = len / sizeof(struct can_filter);
145
146 /*
147 * We need two for() loops for copying rules into two contiguous
148 * areas in rules_raw to process all eff rules with a simple loop.
149 * NB: The configuration interface supports sff and eff rules.
150 * We do not support filters here that match for the same can_id
 151	 * provided in an SFF and an EFF frame (e.g. 0x123 / 0x80000123).
 152	 * For this (unusual) case two filters have to be specified. The
153 * SFF/EFF separation is done with the CAN_EFF_FLAG in the can_id.
154 */
155
156 /* Fill rules_raw with EFF rules first */
157 for (i = 0; i < cm->rules_count; i++) {
158 if (conf[i].can_id & CAN_EFF_FLAG) {
159 memcpy(cm->rules_raw + cm->eff_rules_count,
160 &conf[i],
161 sizeof(struct can_filter));
162
163 cm->eff_rules_count++;
164 }
165 }
166
167 /* append SFF frame rules */
168 for (i = 0; i < cm->rules_count; i++) {
169 if (!(conf[i].can_id & CAN_EFF_FLAG)) {
170 memcpy(cm->rules_raw
171 + cm->eff_rules_count
172 + cm->sff_rules_count,
173 &conf[i], sizeof(struct can_filter));
174
175 cm->sff_rules_count++;
176
177 em_canid_sff_match_add(cm,
178 conf[i].can_id, conf[i].can_mask);
179 }
180 }
181
182 m->datalen = sizeof(struct canid_match) + len;
183 m->data = (unsigned long)cm;
184
185 if (cm_old != NULL) {
186 pr_err("canid: Configuring an existing ematch!\n");
187 kfree(cm_old);
188 }
189
190 return 0;
191}
192
193static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
194{
195 struct canid_match *cm = em_canid_priv(m);
196
197 kfree(cm);
198}
199
200static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
201{
202 struct canid_match *cm = em_canid_priv(m);
203
204 /*
 205	 * When configuring this ematch, 'rules_count' is set not to exceed
206 * 'rules_raw' array size
207 */
208 if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
209 &cm->rules_raw) < 0)
210 return -EMSGSIZE;
211
212 return 0;
213}
214
215static struct tcf_ematch_ops em_canid_ops = {
216 .kind = TCF_EM_CANID,
217 .change = em_canid_change,
218 .match = em_canid_match,
219 .destroy = em_canid_destroy,
220 .dump = em_canid_dump,
221 .owner = THIS_MODULE,
222 .link = LIST_HEAD_INIT(em_canid_ops.link)
223};
224
225static int __init init_em_canid(void)
226{
227 return tcf_em_register(&em_canid_ops);
228}
229
230static void __exit exit_em_canid(void)
231{
232 tcf_em_unregister(&em_canid_ops);
233}
234
235MODULE_LICENSE("GPL");
236
237module_init(init_em_canid);
238module_exit(exit_em_canid);
239
240MODULE_ALIAS_TCF_EMATCH(TCF_EM_CANID);
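A note on the SFF technique this new file uses: em_canid_change() expands every standard-frame (id, mask) rule into a 2048-bit bitmap, so em_canid_match() reduces to a single test_bit() per SFF frame while EFF frames fall back to a linear scan of rules_raw. A minimal userspace sketch of that precomputation, assuming nothing beyond the constants visible above (CAN_SFF_ID_BITS = 11, CAN_SFF_MASK = 0x7FF); the helper names here are hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SFF_ID_BITS 11
    #define SFF_MASK    0x7FFu
    #define SFF_IDS     (1u << SFF_ID_BITS)

    static uint8_t match_sff[SFF_IDS / 8];      /* one bit per 11-bit CAN ID */

    static void set_id(uint32_t id)  { match_sff[id / 8] |= 1u << (id % 8); }
    static int  test_id(uint32_t id) { return match_sff[id / 8] >> (id % 8) & 1; }

    /* Expand one (id, mask) rule into the bitmap, as em_canid_sff_match_add()
     * does: an exact mask sets one bit, a zero mask sets all of them, and a
     * partial mask enumerates every ID satisfying (i & mask) == id.
     */
    static void add_rule(uint32_t can_id, uint32_t can_mask)
    {
            uint32_t i;

            can_mask &= SFF_MASK;
            can_id &= can_mask;

            if (can_mask == SFF_MASK) {
                    set_id(can_id);
            } else if (can_mask == 0) {
                    memset(match_sff, 0xff, sizeof(match_sff));
            } else {
                    for (i = 0; i < SFF_IDS; i++)
                            if ((i & can_mask) == can_id)
                                    set_id(i);
            }
    }

    int main(void)
    {
            add_rule(0x120, 0x7F0);             /* matches IDs 0x120..0x12F */
            printf("0x123 -> %d, 0x130 -> %d\n", test_id(0x123), test_id(0x130));
            return 0;
    }

The bitmap cost is fixed (256 bytes) regardless of rule count, which is why the module caps only the raw rule array (EM_CAN_RULES_MAX), not the SFF expansion.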
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
new file mode 100644
index 000000000000..3130320997e2
--- /dev/null
+++ b/net/sched/em_ipset.c
@@ -0,0 +1,135 @@
1/*
2 * net/sched/em_ipset.c ipset ematch
3 *
4 * Copyright (c) 2012 Florian Westphal <fw@strlen.de>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/gfp.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/skbuff.h>
17#include <linux/netfilter/xt_set.h>
18#include <linux/ipv6.h>
19#include <net/ip.h>
20#include <net/pkt_cls.h>
21
22static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
23 struct tcf_ematch *em)
24{
25 struct xt_set_info *set = data;
26 ip_set_id_t index;
27
28 if (data_len != sizeof(*set))
29 return -EINVAL;
30
31 index = ip_set_nfnl_get_byindex(set->index);
32 if (index == IPSET_INVALID_ID)
33 return -ENOENT;
34
35 em->datalen = sizeof(*set);
36 em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
37 if (em->data)
38 return 0;
39
40 ip_set_nfnl_put(index);
41 return -ENOMEM;
42}
43
44static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
45{
46 const struct xt_set_info *set = (const void *) em->data;
47 if (set) {
48 ip_set_nfnl_put(set->index);
49 kfree((void *) em->data);
50 }
51}
52
53static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
54 struct tcf_pkt_info *info)
55{
56 struct ip_set_adt_opt opt;
57 struct xt_action_param acpar;
58 const struct xt_set_info *set = (const void *) em->data;
59 struct net_device *dev, *indev = NULL;
60 int ret, network_offset;
61
62 switch (skb->protocol) {
63 case htons(ETH_P_IP):
64 acpar.family = NFPROTO_IPV4;
65 if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
66 return 0;
67 acpar.thoff = ip_hdrlen(skb);
68 break;
69 case htons(ETH_P_IPV6):
70 acpar.family = NFPROTO_IPV6;
71 if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
72 return 0;
73 /* doesn't call ipv6_find_hdr() because ipset doesn't use thoff, yet */
74 acpar.thoff = sizeof(struct ipv6hdr);
75 break;
76 default:
77 return 0;
78 }
79
80 acpar.hooknum = 0;
81
82 opt.family = acpar.family;
83 opt.dim = set->dim;
84 opt.flags = set->flags;
85 opt.cmdflags = 0;
86 opt.timeout = ~0u;
87
88 network_offset = skb_network_offset(skb);
89 skb_pull(skb, network_offset);
90
91 dev = skb->dev;
92
93 rcu_read_lock();
94
95 if (dev && skb->skb_iif)
96 indev = dev_get_by_index_rcu(dev_net(dev), skb->skb_iif);
97
98 acpar.in = indev ? indev : dev;
99 acpar.out = dev;
100
101 ret = ip_set_test(set->index, skb, &acpar, &opt);
102
103 rcu_read_unlock();
104
105 skb_push(skb, network_offset);
106 return ret;
107}
108
109static struct tcf_ematch_ops em_ipset_ops = {
110 .kind = TCF_EM_IPSET,
111 .change = em_ipset_change,
112 .destroy = em_ipset_destroy,
113 .match = em_ipset_match,
114 .owner = THIS_MODULE,
115 .link = LIST_HEAD_INIT(em_ipset_ops.link)
116};
117
118static int __init init_em_ipset(void)
119{
120 return tcf_em_register(&em_ipset_ops);
121}
122
123static void __exit exit_em_ipset(void)
124{
125 tcf_em_unregister(&em_ipset_ops);
126}
127
128MODULE_LICENSE("GPL");
129MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
130MODULE_DESCRIPTION("TC extended match for IP sets");
131
132module_init(init_em_ipset);
133module_exit(exit_em_ipset);
134
135MODULE_ALIAS_TCF_EMATCH(TCF_EM_IPSET);
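One pattern worth calling out in em_ipset_change()/em_ipset_destroy() above: the change handler pins the set via ip_set_nfnl_get_byindex(), then duplicates the userspace config with kmemdup(), and drops the reference again if the duplication fails, so the reference and the private copy always live and die together. A standalone userspace sketch of that acquire/copy/undo-on-failure idiom; resource_get()/resource_put() are toy stand-ins, not real ipset API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int refcnt;                          /* toy stand-in for the set refcount */
    static int  resource_get(void) { refcnt++; return 0; }
    static void resource_put(void) { refcnt--; }

    struct em_state { void *conf; size_t len; };

    /* Mirrors em_ipset_change(): take the reference first, then copy the
     * config; a failed copy must undo the get before reporting -ENOMEM.
     */
    static int em_change(struct em_state *st, const void *data, size_t len)
    {
            if (resource_get())
                    return -ENOENT;

            st->conf = malloc(len);
            if (!st->conf) {
                    resource_put();             /* roll back on the failure path */
                    return -ENOMEM;
            }
            memcpy(st->conf, data, len);
            st->len = len;
            return 0;
    }

    /* Mirrors em_ipset_destroy(): release both halves together. */
    static void em_destroy(struct em_state *st)
    {
            if (st->conf) {
                    resource_put();
                    free(st->conf);
                    st->conf = NULL;
            }
    }

    int main(void)
    {
            struct em_state st = { 0 };
            const char cfg[] = "example-config";

            if (em_change(&st, cfg, sizeof(cfg)) == 0)
                    em_destroy(&st);
            printf("refcnt=%d\n", refcnt);      /* 0: fully balanced */
            return 0;
    }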
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 4790c696cbce..4ab6e3325573 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif)
264 if (unlikely(skb_rtable(skb) == NULL)) 264 if (unlikely(skb_rtable(skb) == NULL))
265 *err = -1; 265 *err = -1;
266 else 266 else
267 dst->value = skb_rtable(skb)->rt_iif; 267 dst->value = inet_iif(skb);
268} 268}
269 269
270/************************************************************************** 270/**************************************************************************
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 085ce53d570a..a08b4ab3e421 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -973,7 +973,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
973static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 973static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
974{ 974{
975 struct net *net = sock_net(skb->sk); 975 struct net *net = sock_net(skb->sk);
976 struct tcmsg *tcm = NLMSG_DATA(n); 976 struct tcmsg *tcm = nlmsg_data(n);
977 struct nlattr *tca[TCA_MAX + 1]; 977 struct nlattr *tca[TCA_MAX + 1];
978 struct net_device *dev; 978 struct net_device *dev;
979 u32 clid = tcm->tcm_parent; 979 u32 clid = tcm->tcm_parent;
@@ -1046,7 +1046,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1046 1046
1047replay: 1047replay:
1048 /* Reinit, just in case something touches this. */ 1048 /* Reinit, just in case something touches this. */
1049 tcm = NLMSG_DATA(n); 1049 tcm = nlmsg_data(n);
1050 clid = tcm->tcm_parent; 1050 clid = tcm->tcm_parent;
1051 q = p = NULL; 1051 q = p = NULL;
1052 1052
@@ -1193,8 +1193,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1193 struct gnet_dump d; 1193 struct gnet_dump d;
1194 struct qdisc_size_table *stab; 1194 struct qdisc_size_table *stab;
1195 1195
1196 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1196 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
1197 tcm = NLMSG_DATA(nlh); 1197 if (!nlh)
1198 goto out_nlmsg_trim;
1199 tcm = nlmsg_data(nlh);
1198 tcm->tcm_family = AF_UNSPEC; 1200 tcm->tcm_family = AF_UNSPEC;
1199 tcm->tcm__pad1 = 0; 1201 tcm->tcm__pad1 = 0;
1200 tcm->tcm__pad2 = 0; 1202 tcm->tcm__pad2 = 0;
@@ -1230,7 +1232,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1230 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1232 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1231 return skb->len; 1233 return skb->len;
1232 1234
1233nlmsg_failure: 1235out_nlmsg_trim:
1234nla_put_failure: 1236nla_put_failure:
1235 nlmsg_trim(skb, b); 1237 nlmsg_trim(skb, b);
1236 return -1; 1238 return -1;
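The pattern behind these sch_api.c conversions, condensed: NLMSG_NEW() hid a goto to a local nlmsg_failure label inside the macro, whereas nlmsg_put() simply returns NULL when the skb lacks tailroom, so each fill function now tests the result and jumps to the renamed out_nlmsg_trim label itself. A kernel-style sketch of the resulting shape (not a complete fill function; the tcm setup is abbreviated):

    static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, int event)
    {
            struct nlmsghdr *nlh;
            struct tcmsg *tcm;
            unsigned char *b = skb_tail_pointer(skb);

            nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), 0);
            if (!nlh)                       /* NLMSG_NEW() used to goto for us */
                    goto out_nlmsg_trim;
            tcm = nlmsg_data(nlh);
            tcm->tcm_family = AF_UNSPEC;
            /* ... fill the rest of *tcm, append nlattrs; nla_put() failures
             * take the second label below ...
             */

            nlh->nlmsg_len = skb_tail_pointer(skb) - b;
            return skb->len;

    out_nlmsg_trim:
    nla_put_failure:
            nlmsg_trim(skb, b);             /* roll the skb back to its entry state */
            return -1;
    }

Both labels share one trim because the recovery is identical; only the label name changed, since NLMSG_NEW() no longer supplies the jump.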
@@ -1366,7 +1368,7 @@ done:
1366static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1368static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1367{ 1369{
1368 struct net *net = sock_net(skb->sk); 1370 struct net *net = sock_net(skb->sk);
1369 struct tcmsg *tcm = NLMSG_DATA(n); 1371 struct tcmsg *tcm = nlmsg_data(n);
1370 struct nlattr *tca[TCA_MAX + 1]; 1372 struct nlattr *tca[TCA_MAX + 1];
1371 struct net_device *dev; 1373 struct net_device *dev;
1372 struct Qdisc *q = NULL; 1374 struct Qdisc *q = NULL;
@@ -1498,8 +1500,10 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1498 struct gnet_dump d; 1500 struct gnet_dump d;
1499 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1501 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1500 1502
1501 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1503 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
1502 tcm = NLMSG_DATA(nlh); 1504 if (!nlh)
1505 goto out_nlmsg_trim;
1506 tcm = nlmsg_data(nlh);
1503 tcm->tcm_family = AF_UNSPEC; 1507 tcm->tcm_family = AF_UNSPEC;
1504 tcm->tcm__pad1 = 0; 1508 tcm->tcm__pad1 = 0;
1505 tcm->tcm__pad2 = 0; 1509 tcm->tcm__pad2 = 0;
@@ -1525,7 +1529,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1525 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1529 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1526 return skb->len; 1530 return skb->len;
1527 1531
1528nlmsg_failure: 1532out_nlmsg_trim:
1529nla_put_failure: 1533nla_put_failure:
1530 nlmsg_trim(skb, b); 1534 nlmsg_trim(skb, b);
1531 return -1; 1535 return -1;
@@ -1616,7 +1620,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1616 1620
1617static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) 1621static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1618{ 1622{
1619 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); 1623 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1620 struct net *net = sock_net(skb->sk); 1624 struct net *net = sock_net(skb->sk);
1621 struct netdev_queue *dev_queue; 1625 struct netdev_queue *dev_queue;
1622 struct net_device *dev; 1626 struct net_device *dev;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..298c0ddfb57e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
331 return PSCHED_NS2TICKS(ticks); 331 return PSCHED_NS2TICKS(ticks);
332} 332}
333 333
334static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 334static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
335{ 335{
336 struct sk_buff_head *list = &sch->q; 336 struct sk_buff_head *list = &sch->q;
337 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; 337 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
338 struct sk_buff *skb; 338 struct sk_buff *skb = skb_peek_tail(list);
339
340 if (likely(skb_queue_len(list) < sch->limit)) {
341 skb = skb_peek_tail(list);
342 /* Optimize for add at tail */
343 if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
344 return qdisc_enqueue_tail(nskb, sch);
345 339
346 skb_queue_reverse_walk(list, skb) { 340 /* Optimize for add at tail */
347 if (tnext >= netem_skb_cb(skb)->time_to_send) 341 if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
348 break; 342 return __skb_queue_tail(list, nskb);
349 }
350 343
351 __skb_queue_after(list, skb, nskb); 344 skb_queue_reverse_walk(list, skb) {
352 sch->qstats.backlog += qdisc_pkt_len(nskb); 345 if (tnext >= netem_skb_cb(skb)->time_to_send)
353 return NET_XMIT_SUCCESS; 346 break;
354 } 347 }
355 348
356 return qdisc_reshape_fail(nskb, sch); 349 __skb_queue_after(list, skb, nskb);
357} 350}
358 351
359/* 352/*
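The rewritten tfifo_enqueue() above keeps the delay queue sorted by time_to_send and scans from the tail, because new packets usually carry the newest timestamps, so the common case is one comparison followed by a tail append. The same insertion technique on a plain circular doubly-linked list, as a self-contained userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt {
            long time_to_send;
            struct pkt *prev, *next;
    };

    static struct pkt head = { 0, &head, &head };   /* sentinel, like sk_buff_head */

    /* Insert sorted by time_to_send, walking backwards from the tail:
     * monotonic arrivals terminate after a single comparison (the fast
     * path the kernel code labels "Optimize for add at tail").
     */
    static void tfifo_enqueue(struct pkt *nskb)
    {
            struct pkt *skb = head.prev;                /* skb_peek_tail() */

            while (skb != &head && nskb->time_to_send < skb->time_to_send)
                    skb = skb->prev;                    /* skb_queue_reverse_walk() */

            nskb->prev = skb;                           /* __skb_queue_after() */
            nskb->next = skb->next;
            skb->next->prev = nskb;
            skb->next = nskb;
    }

    int main(void)
    {
            long t[] = { 10, 30, 20, 40 };
            for (int i = 0; i < 4; i++) {
                    struct pkt *p = malloc(sizeof(*p));
                    p->time_to_send = t[i];
                    tfifo_enqueue(p);
            }
            for (struct pkt *p = head.next; p != &head; p = p->next)
                    printf("%ld ", p->time_to_send);    /* prints: 10 20 30 40 */
            printf("\n");
            return 0;
    }

Making the function void is what lets netem_enqueue() (next hunks) hoist the sch->limit check and the backlog accounting to a single place.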
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
368 /* We don't fill cb now as skb_unshare() may invalidate it */ 361 /* We don't fill cb now as skb_unshare() may invalidate it */
369 struct netem_skb_cb *cb; 362 struct netem_skb_cb *cb;
370 struct sk_buff *skb2; 363 struct sk_buff *skb2;
371 int ret;
372 int count = 1; 364 int count = 1;
373 365
374 /* Random duplication */ 366 /* Random duplication */
@@ -388,7 +380,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
388 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 380 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
389 } 381 }
390 382
391 skb_orphan(skb); 383 /* If a delay is expected, orphan the skb. (orphaning usually takes
384 * place at TX completion time, so _before_ the link transit delay)
385 * Ideally, this orphaning should be done after the rate limiting
 386 * module, because this breaks TCP Small Queues and other mechanisms
387 * based on socket sk_wmem_alloc.
388 */
389 if (q->latency || q->jitter)
390 skb_orphan(skb);
392 391
393 /* 392 /*
394 * If we need to duplicate packet, then re-insert at top of the 393 * If we need to duplicate packet, then re-insert at top of the
@@ -419,6 +418,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
419 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 418 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
420 } 419 }
421 420
421 if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
422 return qdisc_reshape_fail(skb, sch);
423
424 sch->qstats.backlog += qdisc_pkt_len(skb);
425
422 cb = netem_skb_cb(skb); 426 cb = netem_skb_cb(skb);
423 if (q->gap == 0 || /* not doing reordering */ 427 if (q->gap == 0 || /* not doing reordering */
424 q->counter < q->gap - 1 || /* inside last reordering gap */ 428 q->counter < q->gap - 1 || /* inside last reordering gap */
@@ -450,7 +454,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
450 454
451 cb->time_to_send = now + delay; 455 cb->time_to_send = now + delay;
452 ++q->counter; 456 ++q->counter;
453 ret = tfifo_enqueue(skb, sch); 457 tfifo_enqueue(skb, sch);
454 } else { 458 } else {
455 /* 459 /*
456 * Do re-ordering by putting one out of N packets at the front 460 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +464,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
460 q->counter = 0; 464 q->counter = 0;
461 465
462 __skb_queue_head(&sch->q, skb); 466 __skb_queue_head(&sch->q, skb);
463 sch->qstats.backlog += qdisc_pkt_len(skb);
464 sch->qstats.requeues++; 467 sch->qstats.requeues++;
465 ret = NET_XMIT_SUCCESS;
466 }
467
468 if (ret != NET_XMIT_SUCCESS) {
469 if (net_xmit_drop_count(ret)) {
470 sch->qstats.drops++;
471 return ret;
472 }
473 } 468 }
474 469
475 return NET_XMIT_SUCCESS; 470 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 74305c883bd3..30ea4674cabd 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
570 570
571 sch->qstats.backlog = q->qdisc->qstats.backlog; 571 sch->qstats.backlog = q->qdisc->qstats.backlog;
572 opts = nla_nest_start(skb, TCA_OPTIONS); 572 opts = nla_nest_start(skb, TCA_OPTIONS);
573 if (opts == NULL)
574 goto nla_put_failure;
573 if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) 575 if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
574 goto nla_put_failure; 576 goto nla_put_failure;
575 return nla_nest_end(skb, opts); 577 return nla_nest_end(skb, opts);
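A reminder of why this two-line guard matters: nla_nest_start() returns NULL when the skb has no tailroom for the nest header, and the later nla_nest_end(skb, opts) would dereference that NULL. The surrounding sfb_dump() shape, condensed into a sketch (assuming the usual tc convention where nla_put_failure trims the skb back to a tail pointer saved on entry):

    opts = nla_nest_start(skb, TCA_OPTIONS);
    if (opts == NULL)
            goto nla_put_failure;           /* no room for the nest header */
    if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
            goto nla_put_failure;           /* parameter blob did not fit */
    return nla_nest_end(skb, opts);         /* patches the nest length in */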
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ca0c29695d51..474167162947 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -67,7 +67,6 @@ struct teql_master {
67struct teql_sched_data { 67struct teql_sched_data {
68 struct Qdisc *next; 68 struct Qdisc *next;
69 struct teql_master *m; 69 struct teql_master *m;
70 struct neighbour *ncache;
71 struct sk_buff_head q; 70 struct sk_buff_head q;
72}; 71};
73 72
@@ -134,7 +133,6 @@ teql_reset(struct Qdisc *sch)
134 133
135 skb_queue_purge(&dat->q); 134 skb_queue_purge(&dat->q);
136 sch->q.qlen = 0; 135 sch->q.qlen = 0;
137 teql_neigh_release(xchg(&dat->ncache, NULL));
138} 136}
139 137
140static void 138static void
@@ -166,7 +164,6 @@ teql_destroy(struct Qdisc *sch)
166 } 164 }
167 } 165 }
168 skb_queue_purge(&dat->q); 166 skb_queue_purge(&dat->q);
169 teql_neigh_release(xchg(&dat->ncache, NULL));
170 break; 167 break;
171 } 168 }
172 169
@@ -225,21 +222,25 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
225static int 222static int
226__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, 223__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
227 struct net_device *dev, struct netdev_queue *txq, 224 struct net_device *dev, struct netdev_queue *txq,
228 struct neighbour *mn) 225 struct dst_entry *dst)
229{ 226{
230 struct teql_sched_data *q = qdisc_priv(txq->qdisc); 227 struct neighbour *n;
231 struct neighbour *n = q->ncache; 228 int err = 0;
232 229
233 if (mn->tbl == NULL) 230 n = dst_neigh_lookup_skb(dst, skb);
234 return -EINVAL; 231 if (!n)
235 if (n && n->tbl == mn->tbl && 232 return -ENOENT;
236 memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) { 233
237 atomic_inc(&n->refcnt); 234 if (dst->dev != dev) {
238 } else { 235 struct neighbour *mn;
239 n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev); 236
240 if (IS_ERR(n)) 237 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
241 return PTR_ERR(n); 238 neigh_release(n);
239 if (IS_ERR(mn))
240 return PTR_ERR(mn);
241 n = mn;
242 } 242 }
243
243 if (neigh_event_send(n, skb_res) == 0) { 244 if (neigh_event_send(n, skb_res) == 0) {
244 int err; 245 int err;
245 char haddr[MAX_ADDR_LEN]; 246 char haddr[MAX_ADDR_LEN];
@@ -248,15 +249,13 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
248 err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr, 249 err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
249 NULL, skb->len); 250 NULL, skb->len);
250 251
251 if (err < 0) { 252 if (err < 0)
252 neigh_release(n); 253 err = -EINVAL;
253 return -EINVAL; 254 } else {
254 } 255 err = (skb_res == NULL) ? -EAGAIN : 1;
255 teql_neigh_release(xchg(&q->ncache, n));
256 return 0;
257 } 256 }
258 neigh_release(n); 257 neigh_release(n);
259 return (skb_res == NULL) ? -EAGAIN : 1; 258 return err;
260} 259}
261 260
262static inline int teql_resolve(struct sk_buff *skb, 261static inline int teql_resolve(struct sk_buff *skb,
@@ -265,7 +264,6 @@ static inline int teql_resolve(struct sk_buff *skb,
265 struct netdev_queue *txq) 264 struct netdev_queue *txq)
266{ 265{
267 struct dst_entry *dst = skb_dst(skb); 266 struct dst_entry *dst = skb_dst(skb);
268 struct neighbour *mn;
269 int res; 267 int res;
270 268
271 if (txq->qdisc == &noop_qdisc) 269 if (txq->qdisc == &noop_qdisc)
@@ -275,8 +273,7 @@ static inline int teql_resolve(struct sk_buff *skb,
275 return 0; 273 return 0;
276 274
277 rcu_read_lock(); 275 rcu_read_lock();
278 mn = dst_get_neighbour_noref(dst); 276 res = __teql_resolve(skb, skb_res, dev, txq, dst);
279 res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
280 rcu_read_unlock(); 277 rcu_read_unlock();
281 278
282 return res; 279 return res;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5bc9ab161b37..ebaef3ed6065 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -124,6 +124,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
124 * socket values. 124 * socket values.
125 */ 125 */
126 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; 126 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
127 asoc->pf_retrans = sctp_pf_retrans;
128
127 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); 129 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
128 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); 130 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
129 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); 131 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
@@ -271,6 +273,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
271 */ 273 */
272 asoc->peer.sack_needed = 1; 274 asoc->peer.sack_needed = 1;
273 asoc->peer.sack_cnt = 0; 275 asoc->peer.sack_cnt = 0;
276 asoc->peer.sack_generation = 1;
274 277
275 /* Assume that the peer will tell us if he recognizes ASCONF 278 /* Assume that the peer will tell us if he recognizes ASCONF
276 * as part of INIT exchange. 279 * as part of INIT exchange.
@@ -685,6 +688,9 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
685 /* Set the path max_retrans. */ 688 /* Set the path max_retrans. */
686 peer->pathmaxrxt = asoc->pathmaxrxt; 689 peer->pathmaxrxt = asoc->pathmaxrxt;
687 690
 691	/* And the partial failure retrans threshold */
692 peer->pf_retrans = asoc->pf_retrans;
693
688 /* Initialize the peer's SACK delay timeout based on the 694 /* Initialize the peer's SACK delay timeout based on the
689 * association configured value. 695 * association configured value.
690 */ 696 */
@@ -840,6 +846,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
840 struct sctp_ulpevent *event; 846 struct sctp_ulpevent *event;
841 struct sockaddr_storage addr; 847 struct sockaddr_storage addr;
842 int spc_state = 0; 848 int spc_state = 0;
849 bool ulp_notify = true;
843 850
844 /* Record the transition on the transport. */ 851 /* Record the transition on the transport. */
845 switch (command) { 852 switch (command) {
@@ -853,6 +860,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
853 spc_state = SCTP_ADDR_CONFIRMED; 860 spc_state = SCTP_ADDR_CONFIRMED;
854 else 861 else
855 spc_state = SCTP_ADDR_AVAILABLE; 862 spc_state = SCTP_ADDR_AVAILABLE;
 863	/* Don't inform the ULP about the transition from PF to
 864	 * the active state, and set cwnd to 1; see the SCTP
 865	 * Quick Failover draft, section 5.1, point 5.
 866	 */
867 if (transport->state == SCTP_PF) {
868 ulp_notify = false;
869 transport->cwnd = 1;
870 }
856 transport->state = SCTP_ACTIVE; 871 transport->state = SCTP_ACTIVE;
857 break; 872 break;
858 873
@@ -871,6 +886,11 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
871 spc_state = SCTP_ADDR_UNREACHABLE; 886 spc_state = SCTP_ADDR_UNREACHABLE;
872 break; 887 break;
873 888
889 case SCTP_TRANSPORT_PF:
890 transport->state = SCTP_PF;
891 ulp_notify = false;
892 break;
893
874 default: 894 default:
875 return; 895 return;
876 } 896 }
@@ -878,12 +898,15 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
878 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the 898 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
879 * user. 899 * user.
880 */ 900 */
881 memset(&addr, 0, sizeof(struct sockaddr_storage)); 901 if (ulp_notify) {
882 memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); 902 memset(&addr, 0, sizeof(struct sockaddr_storage));
883 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 903 memcpy(&addr, &transport->ipaddr,
884 0, spc_state, error, GFP_ATOMIC); 904 transport->af_specific->sockaddr_len);
885 if (event) 905 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
886 sctp_ulpq_tail_event(&asoc->ulpq, event); 906 0, spc_state, error, GFP_ATOMIC);
907 if (event)
908 sctp_ulpq_tail_event(&asoc->ulpq, event);
909 }
887 910
888 /* Select new active and retran paths. */ 911 /* Select new active and retran paths. */
889 912
@@ -899,7 +922,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
899 transports) { 922 transports) {
900 923
901 if ((t->state == SCTP_INACTIVE) || 924 if ((t->state == SCTP_INACTIVE) ||
902 (t->state == SCTP_UNCONFIRMED)) 925 (t->state == SCTP_UNCONFIRMED) ||
926 (t->state == SCTP_PF))
903 continue; 927 continue;
904 if (!first || t->last_time_heard > first->last_time_heard) { 928 if (!first || t->last_time_heard > first->last_time_heard) {
905 second = first; 929 second = first;
@@ -1359,7 +1383,7 @@ struct sctp_transport *sctp_assoc_choose_alter_transport(
1359/* Update the association's pmtu and frag_point by going through all the 1383/* Update the association's pmtu and frag_point by going through all the
1360 * transports. This routine is called when a transport's PMTU has changed. 1384 * transports. This routine is called when a transport's PMTU has changed.
1361 */ 1385 */
1362void sctp_assoc_sync_pmtu(struct sctp_association *asoc) 1386void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1363{ 1387{
1364 struct sctp_transport *t; 1388 struct sctp_transport *t;
1365 __u32 pmtu = 0; 1389 __u32 pmtu = 0;
@@ -1371,7 +1395,7 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1371 list_for_each_entry(t, &asoc->peer.transport_addr_list, 1395 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1372 transports) { 1396 transports) {
1373 if (t->pmtu_pending && t->dst) { 1397 if (t->pmtu_pending && t->dst) {
1374 sctp_transport_update_pmtu(t, dst_mtu(t->dst)); 1398 sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1375 t->pmtu_pending = 0; 1399 t->pmtu_pending = 0;
1376 } 1400 }
1377 if (!pmtu || (t->pathmtu < pmtu)) 1401 if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80564fe03024..e64d5210ed13 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -408,10 +408,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
408 408
409 if (t->param_flags & SPP_PMTUD_ENABLE) { 409 if (t->param_flags & SPP_PMTUD_ENABLE) {
410 /* Update transports view of the MTU */ 410 /* Update transports view of the MTU */
411 sctp_transport_update_pmtu(t, pmtu); 411 sctp_transport_update_pmtu(sk, t, pmtu);
412 412
413 /* Update association pmtu. */ 413 /* Update association pmtu. */
414 sctp_assoc_sync_pmtu(asoc); 414 sctp_assoc_sync_pmtu(sk, asoc);
415 } 415 }
416 416
417 /* Retransmit with the new pmtu setting. 417 /* Retransmit with the new pmtu setting.
@@ -423,6 +423,18 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
423 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); 423 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
424} 424}
425 425
426void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
427 struct sk_buff *skb)
428{
429 struct dst_entry *dst;
430
431 if (!t)
432 return;
433 dst = sctp_transport_dst_check(t);
434 if (dst)
435 dst->ops->redirect(dst, sk, skb);
436}
437
426/* 438/*
427 * SCTP Implementer's Guide, 2.37 ICMP handling procedures 439 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
428 * 440 *
@@ -628,6 +640,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
628 640
629 err = EHOSTUNREACH; 641 err = EHOSTUNREACH;
630 break; 642 break;
643 case ICMP_REDIRECT:
644 sctp_icmp_redirect(sk, transport, skb);
645 err = 0;
646 break;
631 default: 647 default:
632 goto out_unlock; 648 goto out_unlock;
633 } 649 }
@@ -736,15 +752,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
736 752
737 epb = &ep->base; 753 epb = &ep->base;
738 754
739 if (hlist_unhashed(&epb->node))
740 return;
741
742 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); 755 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
743 756
744 head = &sctp_ep_hashtable[epb->hashent]; 757 head = &sctp_ep_hashtable[epb->hashent];
745 758
746 sctp_write_lock(&head->lock); 759 sctp_write_lock(&head->lock);
747 __hlist_del(&epb->node); 760 hlist_del_init(&epb->node);
748 sctp_write_unlock(&head->lock); 761 sctp_write_unlock(&head->lock);
749} 762}
750 763
@@ -825,7 +838,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
825 head = &sctp_assoc_hashtable[epb->hashent]; 838 head = &sctp_assoc_hashtable[epb->hashent];
826 839
827 sctp_write_lock(&head->lock); 840 sctp_write_lock(&head->lock);
828 __hlist_del(&epb->node); 841 hlist_del_init(&epb->node);
829 sctp_write_unlock(&head->lock); 842 sctp_write_unlock(&head->lock);
830} 843}
831 844
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 91f479121c55..ed7139ea7978 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -185,6 +185,9 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
185 goto out_unlock; 185 goto out_unlock;
186 } 186 }
187 break; 187 break;
188 case NDISC_REDIRECT:
189 sctp_icmp_redirect(sk, transport, skb);
190 break;
188 default: 191 default:
189 break; 192 break;
190 } 193 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index f1b7d4bb591e..838e18b4d7ea 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -64,6 +64,8 @@
64#include <net/sctp/checksum.h> 64#include <net/sctp/checksum.h>
65 65
66/* Forward declarations for private helpers. */ 66/* Forward declarations for private helpers. */
67static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
68 struct sctp_chunk *chunk);
67static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, 69static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
68 struct sctp_chunk *chunk); 70 struct sctp_chunk *chunk);
69static void sctp_packet_append_data(struct sctp_packet *packet, 71static void sctp_packet_append_data(struct sctp_packet *packet,
@@ -224,7 +226,10 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
224 if (!auth) 226 if (!auth)
225 return retval; 227 return retval;
226 228
227 retval = sctp_packet_append_chunk(pkt, auth); 229 retval = __sctp_packet_append_chunk(pkt, auth);
230
231 if (retval != SCTP_XMIT_OK)
232 sctp_chunk_free(auth);
228 233
229 return retval; 234 return retval;
230} 235}
@@ -248,51 +253,39 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
248 /* If the SACK timer is running, we have a pending SACK */ 253 /* If the SACK timer is running, we have a pending SACK */
249 if (timer_pending(timer)) { 254 if (timer_pending(timer)) {
250 struct sctp_chunk *sack; 255 struct sctp_chunk *sack;
256
257 if (pkt->transport->sack_generation !=
258 pkt->transport->asoc->peer.sack_generation)
259 return retval;
260
251 asoc->a_rwnd = asoc->rwnd; 261 asoc->a_rwnd = asoc->rwnd;
252 sack = sctp_make_sack(asoc); 262 sack = sctp_make_sack(asoc);
253 if (sack) { 263 if (sack) {
254 retval = sctp_packet_append_chunk(pkt, sack); 264 retval = __sctp_packet_append_chunk(pkt, sack);
265 if (retval != SCTP_XMIT_OK) {
266 sctp_chunk_free(sack);
267 goto out;
268 }
255 asoc->peer.sack_needed = 0; 269 asoc->peer.sack_needed = 0;
256 if (del_timer(timer)) 270 if (del_timer(timer))
257 sctp_association_put(asoc); 271 sctp_association_put(asoc);
258 } 272 }
259 } 273 }
260 } 274 }
275out:
261 return retval; 276 return retval;
262} 277}
263 278
279
264/* Append a chunk to the offered packet reporting back any inability to do 280/* Append a chunk to the offered packet reporting back any inability to do
265 * so. 281 * so.
266 */ 282 */
267sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, 283static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
268 struct sctp_chunk *chunk) 284 struct sctp_chunk *chunk)
269{ 285{
270 sctp_xmit_t retval = SCTP_XMIT_OK; 286 sctp_xmit_t retval = SCTP_XMIT_OK;
271 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); 287 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
272 288
273 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
274 chunk);
275
276 /* Data chunks are special. Before seeing what else we can
277 * bundle into this packet, check to see if we are allowed to
278 * send this DATA.
279 */
280 if (sctp_chunk_is_data(chunk)) {
281 retval = sctp_packet_can_append_data(packet, chunk);
282 if (retval != SCTP_XMIT_OK)
283 goto finish;
284 }
285
286 /* Try to bundle AUTH chunk */
287 retval = sctp_packet_bundle_auth(packet, chunk);
288 if (retval != SCTP_XMIT_OK)
289 goto finish;
290
291 /* Try to bundle SACK chunk */
292 retval = sctp_packet_bundle_sack(packet, chunk);
293 if (retval != SCTP_XMIT_OK)
294 goto finish;
295
296 /* Check to see if this chunk will fit into the packet */ 289 /* Check to see if this chunk will fit into the packet */
297 retval = sctp_packet_will_fit(packet, chunk, chunk_len); 290 retval = sctp_packet_will_fit(packet, chunk, chunk_len);
298 if (retval != SCTP_XMIT_OK) 291 if (retval != SCTP_XMIT_OK)
@@ -334,6 +327,43 @@ finish:
334 return retval; 327 return retval;
335} 328}
336 329
330/* Append a chunk to the offered packet reporting back any inability to do
331 * so.
332 */
333sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
334 struct sctp_chunk *chunk)
335{
336 sctp_xmit_t retval = SCTP_XMIT_OK;
337
338 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
339 chunk);
340
341 /* Data chunks are special. Before seeing what else we can
342 * bundle into this packet, check to see if we are allowed to
343 * send this DATA.
344 */
345 if (sctp_chunk_is_data(chunk)) {
346 retval = sctp_packet_can_append_data(packet, chunk);
347 if (retval != SCTP_XMIT_OK)
348 goto finish;
349 }
350
351 /* Try to bundle AUTH chunk */
352 retval = sctp_packet_bundle_auth(packet, chunk);
353 if (retval != SCTP_XMIT_OK)
354 goto finish;
355
356 /* Try to bundle SACK chunk */
357 retval = sctp_packet_bundle_sack(packet, chunk);
358 if (retval != SCTP_XMIT_OK)
359 goto finish;
360
361 retval = __sctp_packet_append_chunk(packet, chunk);
362
363finish:
364 return retval;
365}
366
337/* All packets are sent to the network through this function from 367/* All packets are sent to the network through this function from
338 * sctp_outq_tail(). 368 * sctp_outq_tail().
339 * 369 *
@@ -380,7 +410,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
380 if (!sctp_transport_dst_check(tp)) { 410 if (!sctp_transport_dst_check(tp)) {
381 sctp_transport_route(tp, NULL, sctp_sk(sk)); 411 sctp_transport_route(tp, NULL, sctp_sk(sk));
382 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { 412 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
383 sctp_assoc_sync_pmtu(asoc); 413 sctp_assoc_sync_pmtu(sk, asoc);
384 } 414 }
385 } 415 }
386 dst = dst_clone(tp->dst); 416 dst = dst_clone(tp->dst);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a0fa19f5650c..e7aa177c9522 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -792,7 +792,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
792 if (!new_transport) 792 if (!new_transport)
793 new_transport = asoc->peer.active_path; 793 new_transport = asoc->peer.active_path;
794 } else if ((new_transport->state == SCTP_INACTIVE) || 794 } else if ((new_transport->state == SCTP_INACTIVE) ||
795 (new_transport->state == SCTP_UNCONFIRMED)) { 795 (new_transport->state == SCTP_UNCONFIRMED) ||
796 (new_transport->state == SCTP_PF)) {
796 /* If the chunk is Heartbeat or Heartbeat Ack, 797 /* If the chunk is Heartbeat or Heartbeat Ack,
797 * send it to chunk->transport, even if it's 798 * send it to chunk->transport, even if it's
798 * inactive. 799 * inactive.
@@ -987,7 +988,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
987 new_transport = chunk->transport; 988 new_transport = chunk->transport;
988 if (!new_transport || 989 if (!new_transport ||
989 ((new_transport->state == SCTP_INACTIVE) || 990 ((new_transport->state == SCTP_INACTIVE) ||
990 (new_transport->state == SCTP_UNCONFIRMED))) 991 (new_transport->state == SCTP_UNCONFIRMED) ||
992 (new_transport->state == SCTP_PF)))
991 new_transport = asoc->peer.active_path; 993 new_transport = asoc->peer.active_path;
992 if (new_transport->state == SCTP_UNCONFIRMED) 994 if (new_transport->state == SCTP_UNCONFIRMED)
993 continue; 995 continue;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5942d27b1444..1f89c4e69645 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -568,7 +568,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
568/* What interface did this skb arrive on? */ 568/* What interface did this skb arrive on? */
569static int sctp_v4_skb_iif(const struct sk_buff *skb) 569static int sctp_v4_skb_iif(const struct sk_buff *skb)
570{ 570{
571 return skb_rtable(skb)->rt_iif; 571 return inet_iif(skb);
572} 572}
573 573
574/* Was this packet marked by Explicit Congestion Notification? */ 574/* Was this packet marked by Explicit Congestion Notification? */
@@ -673,7 +673,9 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
673 SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n"); 673 SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
674 sctp_bh_unlock_sock(sk); 674 sctp_bh_unlock_sock(sk);
675 } 675 }
676#if IS_ENABLED(CONFIG_IPV6)
676free_next: 677free_next:
678#endif
677 list_del(&addrw->list); 679 list_del(&addrw->list);
678 kfree(addrw); 680 kfree(addrw);
679 } 681 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a85eeeb55dd0..479a70ef6ff8 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -132,7 +132,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
132 * abort chunk. Differs from sctp_init_cause in that it won't oops 132 * abort chunk. Differs from sctp_init_cause in that it won't oops
133 * if there isn't enough space in the op error chunk 133 * if there isn't enough space in the op error chunk
134 */ 134 */
135int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, 135static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
136 size_t paylen) 136 size_t paylen)
137{ 137{
138 sctp_errhdr_t err; 138 sctp_errhdr_t err;
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
734 int len; 734 int len;
735 __u32 ctsn; 735 __u32 ctsn;
736 __u16 num_gabs, num_dup_tsns; 736 __u16 num_gabs, num_dup_tsns;
737 struct sctp_association *aptr = (struct sctp_association *)asoc;
737 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; 738 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
738 struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; 739 struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
740 struct sctp_transport *trans;
739 741
740 memset(gabs, 0, sizeof(gabs)); 742 memset(gabs, 0, sizeof(gabs));
741 ctsn = sctp_tsnmap_get_ctsn(map); 743 ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
805 sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, 807 sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
806 sctp_tsnmap_get_dups(map)); 808 sctp_tsnmap_get_dups(map));
807 809
 810	/* Once we have a sack generated, check to see what our sack
 811	 * generation is; if it's 0, reset the transports to 0 and reset
 812	 * the association generation to 1.
 813	 *
 814	 * The idea is that zero is never used as a valid generation for the
 815	 * association, so no transport will match after a wrap event like
 816	 * this until the next sack.
 817	 */
818 if (++aptr->peer.sack_generation == 0) {
819 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
820 transports)
821 trans->sack_generation = 0;
822 aptr->peer.sack_generation = 1;
823 }
808nodata: 824nodata:
809 return retval; 825 return retval;
810} 826}
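The generation counter added here is what lets sctp_packet_bundle_sack() (output.c hunk above) skip bundling a SACK on a transport whose sack_generation already matches the association's; transports are re-stamped via sctp_tsnmap_mark() (tsnmap.c hunk below). Since zero is reserved as the never-matches value, the wrap must step over it, exactly as the new block does. A standalone sketch of that reserve-zero wrap technique (unsigned char is used here only to make the wrap easy to demonstrate; the kernel field may be wider):

    #include <stdio.h>

    struct assoc     { unsigned char sack_generation; };
    struct transport { unsigned char sack_generation; };

    /* On wrap, stamp every cached copy with the reserved value 0 so that
     * nothing matches until freshly re-stamped, then restart at 1.
     */
    static void bump_generation(struct assoc *a, struct transport *t, int n)
    {
            if (++a->sack_generation == 0) {
                    for (int i = 0; i < n; i++)
                            t[i].sack_generation = 0;
                    a->sack_generation = 1;     /* 0 is never a valid generation */
            }
    }

    int main(void)
    {
            struct assoc a = { .sack_generation = 255 };
            struct transport t[2] = { { 255 }, { 7 } };

            bump_generation(&a, t, 2);          /* 255 wraps to 0 -> reset */
            printf("assoc=%u t0=%u t1=%u\n",    /* assoc=1 t0=0 t1=0 */
                   a.sack_generation, t[0].sack_generation, t[1].sack_generation);
            return 0;
    }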
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c96d1a81cf42..fe99628e1257 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -76,6 +76,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
76 sctp_cmd_seq_t *commands, 76 sctp_cmd_seq_t *commands,
77 gfp_t gfp); 77 gfp_t gfp);
78 78
79static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
80 struct sctp_transport *t);
79/******************************************************************** 81/********************************************************************
80 * Helper functions 82 * Helper functions
81 ********************************************************************/ 83 ********************************************************************/
@@ -470,7 +472,8 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
470 * notification SHOULD be sent to the upper layer. 472 * notification SHOULD be sent to the upper layer.
471 * 473 *
472 */ 474 */
473static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, 475static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
476 struct sctp_association *asoc,
474 struct sctp_transport *transport, 477 struct sctp_transport *transport,
475 int is_hb) 478 int is_hb)
476{ 479{
@@ -495,6 +498,23 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
495 transport->error_count++; 498 transport->error_count++;
496 } 499 }
497 500
501 /* If the transport error count is greater than the pf_retrans
 502	 * threshold, and less than pathmaxrxt, then mark this transport
 503	 * as Partially Failed; see SCTP Quick Failover Draft, section 5.1,
 504	 * point 1
505 */
506 if ((transport->state != SCTP_PF) &&
507 (asoc->pf_retrans < transport->pathmaxrxt) &&
508 (transport->error_count > asoc->pf_retrans)) {
509
510 sctp_assoc_control_transport(asoc, transport,
511 SCTP_TRANSPORT_PF,
512 0);
513
514 /* Update the hb timer to resend a heartbeat every rto */
515 sctp_cmd_hb_timer_update(commands, transport);
516 }
517
498 if (transport->state != SCTP_INACTIVE && 518 if (transport->state != SCTP_INACTIVE &&
499 (transport->error_count > transport->pathmaxrxt)) { 519 (transport->error_count > transport->pathmaxrxt)) {
500 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", 520 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
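Combined with the pathmaxrxt check in the lines that follow, the strike logic now walks a transport through three states: ACTIVE until error_count exceeds pf_retrans, then PF (data migrates to another path while heartbeats continue every RTO), then INACTIVE once error_count exceeds pathmaxrxt. A compact sketch of just the threshold arithmetic, ignoring the state-transition guards and notifications (example thresholds, not defaults):

    #include <stdio.h>

    enum t_state { T_ACTIVE, T_PF, T_INACTIVE };

    /* Mirrors the two comparisons in sctp_do_8_2_transport_strike():
     * PF is only reachable when pf_retrans sits below pathmaxrxt.
     */
    static enum t_state classify(int error_count, int pf_retrans, int pathmaxrxt)
    {
            if (error_count > pathmaxrxt)
                    return T_INACTIVE;
            if (pf_retrans < pathmaxrxt && error_count > pf_retrans)
                    return T_PF;
            return T_ACTIVE;
    }

    int main(void)
    {
            for (int e = 0; e <= 6; e++)        /* pf_retrans=2, pathmaxrxt=5 */
                    printf("errors=%d -> state=%d\n", e, classify(e, 2, 5));
            return 0;
    }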
@@ -699,6 +719,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
699 SCTP_HEARTBEAT_SUCCESS); 719 SCTP_HEARTBEAT_SUCCESS);
700 } 720 }
701 721
722 if (t->state == SCTP_PF)
723 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
724 SCTP_HEARTBEAT_SUCCESS);
725
702 /* The receiver of the HEARTBEAT ACK should also perform an 726 /* The receiver of the HEARTBEAT ACK should also perform an
703 * RTT measurement for that destination transport address 727 * RTT measurement for that destination transport address
704 * using the time value carried in the HEARTBEAT ACK chunk. 728 * using the time value carried in the HEARTBEAT ACK chunk.
@@ -1268,7 +1292,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1268 case SCTP_CMD_REPORT_TSN: 1292 case SCTP_CMD_REPORT_TSN:
1269 /* Record the arrival of a TSN. */ 1293 /* Record the arrival of a TSN. */
1270 error = sctp_tsnmap_mark(&asoc->peer.tsn_map, 1294 error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1271 cmd->obj.u32); 1295 cmd->obj.u32, NULL);
1272 break; 1296 break;
1273 1297
1274 case SCTP_CMD_REPORT_FWDTSN: 1298 case SCTP_CMD_REPORT_FWDTSN:
@@ -1565,8 +1589,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1565 1589
1566 case SCTP_CMD_STRIKE: 1590 case SCTP_CMD_STRIKE:
1567 /* Mark one strike against a transport. */ 1591 /* Mark one strike against a transport. */
1568 sctp_do_8_2_transport_strike(asoc, cmd->obj.transport, 1592 sctp_do_8_2_transport_strike(commands, asoc,
1569 0); 1593 cmd->obj.transport, 0);
1570 break; 1594 break;
1571 1595
1572 case SCTP_CMD_TRANSPORT_IDLE: 1596 case SCTP_CMD_TRANSPORT_IDLE:
@@ -1576,7 +1600,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1576 1600
1577 case SCTP_CMD_TRANSPORT_HB_SENT: 1601 case SCTP_CMD_TRANSPORT_HB_SENT:
1578 t = cmd->obj.transport; 1602 t = cmd->obj.transport;
1579 sctp_do_8_2_transport_strike(asoc, t, 1); 1603 sctp_do_8_2_transport_strike(commands, asoc,
1604 t, 1);
1580 t->hb_sent = 1; 1605 t->hb_sent = 1;
1581 break; 1606 break;
1582 1607
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b3b8a8d813eb..5e259817a7f3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1231,8 +1231,14 @@ out_free:
1231 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" 1231 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
1232 " kaddrs: %p err: %d\n", 1232 " kaddrs: %p err: %d\n",
1233 asoc, kaddrs, err); 1233 asoc, kaddrs, err);
1234 if (asoc) 1234 if (asoc) {
1235 /* sctp_primitive_ASSOCIATE may have added this association
 1236	 * to the hash table; try to unhash it, just in case. It's a no-op
 1237	 * if it wasn't hashed, so we're safe
1238 */
1239 sctp_unhash_established(asoc);
1235 sctp_association_free(asoc); 1240 sctp_association_free(asoc);
1241 }
1236 return err; 1242 return err;
1237} 1243}
1238 1244
@@ -1853,7 +1859,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1853 } 1859 }
1854 1860
1855 if (asoc->pmtu_pending) 1861 if (asoc->pmtu_pending)
1856 sctp_assoc_pending_pmtu(asoc); 1862 sctp_assoc_pending_pmtu(sk, asoc);
1857 1863
1858 /* If fragmentation is disabled and the message length exceeds the 1864 /* If fragmentation is disabled and the message length exceeds the
1859 * association fragmentation point, return EMSGSIZE. The I-D 1865 * association fragmentation point, return EMSGSIZE. The I-D
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1942 goto out_unlock; 1948 goto out_unlock;
1943 1949
1944out_free: 1950out_free:
1945 if (new_asoc) 1951 if (new_asoc) {
1952 sctp_unhash_established(asoc);
1946 sctp_association_free(asoc); 1953 sctp_association_free(asoc);
1954 }
1947out_unlock: 1955out_unlock:
1948 sctp_release_sock(sk); 1956 sctp_release_sock(sk);
1949 1957
@@ -2365,7 +2373,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2365 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2373 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2366 if (trans) { 2374 if (trans) {
2367 trans->pathmtu = params->spp_pathmtu; 2375 trans->pathmtu = params->spp_pathmtu;
2368 sctp_assoc_sync_pmtu(asoc); 2376 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2369 } else if (asoc) { 2377 } else if (asoc) {
2370 asoc->pathmtu = params->spp_pathmtu; 2378 asoc->pathmtu = params->spp_pathmtu;
2371 sctp_frag_point(asoc, params->spp_pathmtu); 2379 sctp_frag_point(asoc, params->spp_pathmtu);
@@ -2382,7 +2390,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2382 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2390 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2383 if (update) { 2391 if (update) {
2384 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2392 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2385 sctp_assoc_sync_pmtu(asoc); 2393 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2386 } 2394 }
2387 } else if (asoc) { 2395 } else if (asoc) {
2388 asoc->param_flags = 2396 asoc->param_flags =
@@ -3470,6 +3478,56 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3470} 3478}
3471 3479
3472 3480
3481/*
3482 * SCTP_PEER_ADDR_THLDS
3483 *
3484 * This option allows us to alter the partially failed threshold for one or all
3485 * transports in an association. See Section 6.1 of:
3486 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3487 */
3488static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3489 char __user *optval,
3490 unsigned int optlen)
3491{
3492 struct sctp_paddrthlds val;
3493 struct sctp_transport *trans;
3494 struct sctp_association *asoc;
3495
3496 if (optlen < sizeof(struct sctp_paddrthlds))
3497 return -EINVAL;
3498 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3499 sizeof(struct sctp_paddrthlds)))
3500 return -EFAULT;
3501
3502
3503 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3504 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3505 if (!asoc)
3506 return -ENOENT;
3507 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3508 transports) {
3509 if (val.spt_pathmaxrxt)
3510 trans->pathmaxrxt = val.spt_pathmaxrxt;
3511 trans->pf_retrans = val.spt_pathpfthld;
3512 }
3513
3514 if (val.spt_pathmaxrxt)
3515 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3516 asoc->pf_retrans = val.spt_pathpfthld;
3517 } else {
3518 trans = sctp_addr_id2transport(sk, &val.spt_address,
3519 val.spt_assoc_id);
3520 if (!trans)
3521 return -ENOENT;
3522
3523 if (val.spt_pathmaxrxt)
3524 trans->pathmaxrxt = val.spt_pathmaxrxt;
3525 trans->pf_retrans = val.spt_pathpfthld;
3526 }
3527
3528 return 0;
3529}
3530
3473/* API 6.2 setsockopt(), getsockopt() 3531/* API 6.2 setsockopt(), getsockopt()
3474 * 3532 *
3475 * Applications use setsockopt() and getsockopt() to set or retrieve 3533 * Applications use setsockopt() and getsockopt() to set or retrieve
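From userspace the new option is exercised through the standard socket-option path. A hedged usage sketch: it assumes headers exporting SCTP_PEER_ADDR_THLDS and struct sctp_paddrthlds with the four fields used above, and relies on the sctp_is_any() branch, under which a wildcard spt_address applies the thresholds to every transport in the association:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int set_pf_thresholds(int sd, sctp_assoc_t assoc_id)
    {
            struct sctp_paddrthlds val;
            struct sockaddr_in *sin = (struct sockaddr_in *)&val.spt_address;

            memset(&val, 0, sizeof(val));
            val.spt_assoc_id = assoc_id;
            sin->sin_family = AF_INET;                  /* INADDR_ANY: all paths */
            sin->sin_addr.s_addr = htonl(INADDR_ANY);
            val.spt_pathpfthld = 2;     /* enter Partially Failed after 2 errors */
            val.spt_pathmaxrxt = 5;     /* declare the path down after 5 errors */

            if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
                           &val, sizeof(val)) < 0) {
                    perror("setsockopt(SCTP_PEER_ADDR_THLDS)");
                    return -1;
            }
            return 0;
    }

Note the asymmetry the kernel code above enforces: spt_pathmaxrxt is applied only when non-zero, while spt_pathpfthld is always written, so a caller can adjust the PF threshold without touching pathmaxrxt by leaving spt_pathmaxrxt at 0.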
@@ -3619,6 +3677,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3619 case SCTP_AUTO_ASCONF: 3677 case SCTP_AUTO_ASCONF:
3620 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3678 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
3621 break; 3679 break;
3680 case SCTP_PEER_ADDR_THLDS:
3681 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
3682 break;
3622 default: 3683 default:
3623 retval = -ENOPROTOOPT; 3684 retval = -ENOPROTOOPT;
3624 break; 3685 break;
@@ -5490,6 +5551,51 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5490 return 0; 5551 return 0;
5491} 5552}
5492 5553
5554/*
5555 * SCTP_PEER_ADDR_THLDS
5556 *
5557 * This option allows us to fetch the partially failed threshold for one or all
5558 * transports in an association. See Section 6.1 of:
5559 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
5560 */
5561static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
5562 char __user *optval,
5563 int len,
5564 int __user *optlen)
5565{
5566 struct sctp_paddrthlds val;
5567 struct sctp_transport *trans;
5568 struct sctp_association *asoc;
5569
5570 if (len < sizeof(struct sctp_paddrthlds))
5571 return -EINVAL;
5572 len = sizeof(struct sctp_paddrthlds);
5573 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
5574 return -EFAULT;
5575
5576 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
5577 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
5578 if (!asoc)
5579 return -ENOENT;
5580
5581 val.spt_pathpfthld = asoc->pf_retrans;
5582 val.spt_pathmaxrxt = asoc->pathmaxrxt;
5583 } else {
5584 trans = sctp_addr_id2transport(sk, &val.spt_address,
5585 val.spt_assoc_id);
5586 if (!trans)
5587 return -ENOENT;
5588
5589 val.spt_pathmaxrxt = trans->pathmaxrxt;
5590 val.spt_pathpfthld = trans->pf_retrans;
5591 }
5592
5593 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
5594 return -EFAULT;
5595
5596 return 0;
5597}
5598
5493SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, 5599SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5494 char __user *optval, int __user *optlen) 5600 char __user *optval, int __user *optlen)
5495{ 5601{
@@ -5628,6 +5734,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5628 case SCTP_AUTO_ASCONF: 5734 case SCTP_AUTO_ASCONF:
5629 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5735 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
5630 break; 5736 break;
5737 case SCTP_PEER_ADDR_THLDS:
5738 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
5739 break;
5631 default: 5740 default:
5632 retval = -ENOPROTOOPT; 5741 retval = -ENOPROTOOPT;
5633 break; 5742 break;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e5fe639c89e7..2b2bfe933ff1 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -141,6 +141,15 @@ static ctl_table sctp_table[] = {
141 .extra2 = &int_max 141 .extra2 = &int_max
142 }, 142 },
143 { 143 {
144 .procname = "pf_retrans",
145 .data = &sctp_pf_retrans,
146 .maxlen = sizeof(int),
147 .mode = 0644,
148 .proc_handler = proc_dointvec_minmax,
149 .extra1 = &zero,
150 .extra2 = &int_max
151 },
152 {
144 .procname = "max_init_retransmits", 153 .procname = "max_init_retransmits",
145 .data = &sctp_max_retrans_init, 154 .data = &sctp_max_retrans_init,
146 .maxlen = sizeof(int), 155 .maxlen = sizeof(int),
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index b026ba0c6992..c97472b248a2 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
68 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); 68 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
69 memset(&peer->saddr, 0, sizeof(union sctp_addr)); 69 memset(&peer->saddr, 0, sizeof(union sctp_addr));
70 70
71 peer->sack_generation = 0;
72
71 /* From 6.3.1 RTO Calculation: 73 /* From 6.3.1 RTO Calculation:
72 * 74 *
73 * C1) Until an RTT measurement has been made for a packet sent to the 75 * C1) Until an RTT measurement has been made for a packet sent to the
@@ -85,6 +87,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
85 87
86 /* Initialize the default path max_retrans. */ 88 /* Initialize the default path max_retrans. */
87 peer->pathmaxrxt = sctp_max_retrans_path; 89 peer->pathmaxrxt = sctp_max_retrans_path;
90 peer->pf_retrans = sctp_pf_retrans;
88 91
89 INIT_LIST_HEAD(&peer->transmitted); 92 INIT_LIST_HEAD(&peer->transmitted);
90 INIT_LIST_HEAD(&peer->send_ready); 93 INIT_LIST_HEAD(&peer->send_ready);
@@ -214,7 +217,7 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
214void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) 217void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
215{ 218{
216 /* If we don't have a fresh route, look one up */ 219 /* If we don't have a fresh route, look one up */
217 if (!transport->dst || transport->dst->obsolete > 1) { 220 if (!transport->dst || transport->dst->obsolete) {
218 dst_release(transport->dst); 221 dst_release(transport->dst);
219 transport->af_specific->get_dst(transport, &transport->saddr, 222 transport->af_specific->get_dst(transport, &transport->saddr,
220 &transport->fl, sk); 223 &transport->fl, sk);
@@ -226,7 +229,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 229 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
227} 230}
228 231
229void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 232void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
230{ 233{
231 struct dst_entry *dst; 234 struct dst_entry *dst;
232 235
@@ -243,8 +246,16 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
243 } 246 }
244 247
245 dst = sctp_transport_dst_check(t); 248 dst = sctp_transport_dst_check(t);
246 if (dst) 249 if (!dst)
247 dst->ops->update_pmtu(dst, pmtu); 250 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
251
252 if (dst) {
253 dst->ops->update_pmtu(dst, sk, NULL, pmtu);
254
255 dst = sctp_transport_dst_check(t);
256 if (!dst)
257 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
258 }
248} 259}
249 260
250/* Caches the dst entry and source address for a transport's destination 261/* Caches the dst entry and source address for a transport's destination
@@ -585,7 +596,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t)
585{ 596{
586 unsigned long timeout; 597 unsigned long timeout;
587 timeout = t->rto + sctp_jitter(t->rto); 598 timeout = t->rto + sctp_jitter(t->rto);
588 if (t->state != SCTP_UNCONFIRMED) 599 if ((t->state != SCTP_UNCONFIRMED) &&
600 (t->state != SCTP_PF))
589 timeout += t->hbinterval; 601 timeout += t->hbinterval;
590 timeout += jiffies; 602 timeout += jiffies;
591 return timeout; 603 return timeout;
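
Two patterns in the transport.c hunks are worth calling out: update_pmtu() now takes the socket and may invalidate the cached route, so the transport re-validates its dst afterwards; and PF transports, like unconfirmed ones, are timed at RTO plus jitter without the heartbeat interval, so a suspect path is probed sooner. A kernel-context sketch of the re-validation pattern, mirroring the hunk above:

	static void transport_update_pmtu_sketch(struct sock *sk,
						 struct sctp_transport *t,
						 u32 pmtu)
	{
		struct dst_entry *dst = sctp_transport_dst_check(t);

		if (!dst)	/* stale route: refresh it */
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);

		if (dst) {
			/* update_pmtu() may drop the route from under us... */
			dst->ops->update_pmtu(dst, sk, NULL, pmtu);

			/* ...so check again and re-route if it did. */
			dst = sctp_transport_dst_check(t);
			if (!dst)
				t->af_specific->get_dst(t, &t->saddr,
							&t->fl, sk);
		}
	}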
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index f1e40cebc981..b5fb7c409023 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
114 114
115 115
116/* Mark this TSN as seen. */ 116/* Mark this TSN as seen. */
117int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn) 117int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
118 struct sctp_transport *trans)
118{ 119{
119 u16 gap; 120 u16 gap;
120 121
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
133 */ 134 */
134 map->max_tsn_seen++; 135 map->max_tsn_seen++;
135 map->cumulative_tsn_ack_point++; 136 map->cumulative_tsn_ack_point++;
137 if (trans)
138 trans->sack_generation =
139 trans->asoc->peer.sack_generation;
136 map->base_tsn++; 140 map->base_tsn++;
137 } else { 141 } else {
138 /* Either we already have a gap, or about to record a gap, so 142 /* Either we already have a gap, or about to record a gap, so
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8a84017834c2..10c018a5b9fe 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,7 +702,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
702 if (rx_count >= asoc->base.sk->sk_rcvbuf) { 702 if (rx_count >= asoc->base.sk->sk_rcvbuf) {
703 703
704 if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || 704 if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
705 (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize))) 705 (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
706 chunk->skb->truesize)))
706 goto fail; 707 goto fail;
707 } 708 }
708 709
@@ -715,7 +716,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
715 * can mark it as received so the tsn_map is updated correctly. 716 * can mark it as received so the tsn_map is updated correctly.
716 */ 717 */
717 if (sctp_tsnmap_mark(&asoc->peer.tsn_map, 718 if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
718 ntohl(chunk->subh.data_hdr->tsn))) 719 ntohl(chunk->subh.data_hdr->tsn),
720 chunk->transport))
719 goto fail_mark; 721 goto fail_mark;
720 722
721 /* First calculate the padding, so we don't inadvertently 723 /* First calculate the padding, so we don't inadvertently
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f2d1de7f2ffb..f5a6a4f4faf7 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1051 if (chunk && (freed >= needed)) { 1051 if (chunk && (freed >= needed)) {
1052 __u32 tsn; 1052 __u32 tsn;
1053 tsn = ntohl(chunk->subh.data_hdr->tsn); 1053 tsn = ntohl(chunk->subh.data_hdr->tsn);
1054 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn); 1054 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
1055 sctp_ulpq_tail_data(ulpq, chunk, gfp); 1055 sctp_ulpq_tail_data(ulpq, chunk, gfp);
1056 1056
1057 sctp_ulpq_partial_delivery(ulpq, chunk, gfp); 1057 sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
diff --git a/net/socket.c b/net/socket.c
index 6e0ccc09b313..dfe5b66c97e0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -398,7 +398,7 @@ int sock_map_fd(struct socket *sock, int flags)
398} 398}
399EXPORT_SYMBOL(sock_map_fd); 399EXPORT_SYMBOL(sock_map_fd);
400 400
401static struct socket *sock_from_file(struct file *file, int *err) 401struct socket *sock_from_file(struct file *file, int *err)
402{ 402{
403 if (file->f_op == &socket_file_ops) 403 if (file->f_op == &socket_file_ops)
404 return file->private_data; /* set in sock_map_fd */ 404 return file->private_data; /* set in sock_map_fd */
@@ -406,6 +406,7 @@ static struct socket *sock_from_file(struct file *file, int *err)
406 *err = -ENOTSOCK; 406 *err = -ENOTSOCK;
407 return NULL; 407 return NULL;
408} 408}
409EXPORT_SYMBOL(sock_from_file);
409 410
410/** 411/**
411 * sockfd_lookup - Go from a file number to its socket slot 412 * sockfd_lookup - Go from a file number to its socket slot
@@ -522,6 +523,9 @@ void sock_release(struct socket *sock)
522 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) 523 if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
523 printk(KERN_ERR "sock_release: fasync list not empty!\n"); 524 printk(KERN_ERR "sock_release: fasync list not empty!\n");
524 525
526 if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
527 return;
528
525 this_cpu_sub(sockets_in_use, 1); 529 this_cpu_sub(sockets_in_use, 1);
526 if (!sock->file) { 530 if (!sock->file) {
527 iput(SOCK_INODE(sock)); 531 iput(SOCK_INODE(sock));
@@ -551,8 +555,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
551 555
552 sock_update_classid(sock->sk); 556 sock_update_classid(sock->sk);
553 557
554 sock_update_netprioidx(sock->sk);
555
556 si->sock = sock; 558 si->sock = sock;
557 si->scm = NULL; 559 si->scm = NULL;
558 si->msg = msg; 560 si->msg = msg;
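
With sock_from_file() exported, code outside net/socket.c can map a struct file to its socket without reimplementing the f_op check. A hedged kernel-context sketch follows; socket_of_fd() is a hypothetical helper, essentially what sockfd_lookup() already does.

	#include <linux/net.h>
	#include <linux/file.h>

	static struct socket *socket_of_fd(int fd, int *err)
	{
		struct file *file = fget(fd);
		struct socket *sock;

		if (!file) {
			*err = -EBADF;
			return NULL;
		}
		sock = sock_from_file(file, err);	/* exported above */
		if (!sock)
			fput(file);
		return sock;
	}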
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 9fe8857d8d59..03d03e37a7d5 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -21,6 +21,11 @@ config SUNRPC_XPRT_RDMA
21 21
22 If unsure, say N. 22 If unsure, say N.
23 23
24config SUNRPC_SWAP
25 bool
26 depends on SUNRPC
27 select NETVM
28
24config RPCSEC_GSS_KRB5 29config RPCSEC_GSS_KRB5
25 tristate "Secure RPC: Kerberos V mechanism" 30 tristate "Secure RPC: Kerberos V mechanism"
26 depends on SUNRPC && CRYPTO 31 depends on SUNRPC && CRYPTO
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 727e506cacda..b5c067bccc45 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -13,6 +13,7 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/hash.h> 14#include <linux/hash.h>
15#include <linux/sunrpc/clnt.h> 15#include <linux/sunrpc/clnt.h>
16#include <linux/sunrpc/gss_api.h>
16#include <linux/spinlock.h> 17#include <linux/spinlock.h>
17 18
18#ifdef RPC_DEBUG 19#ifdef RPC_DEBUG
@@ -122,6 +123,59 @@ rpcauth_unregister(const struct rpc_authops *ops)
122} 123}
123EXPORT_SYMBOL_GPL(rpcauth_unregister); 124EXPORT_SYMBOL_GPL(rpcauth_unregister);
124 125
126/**
127 * rpcauth_list_flavors - discover registered flavors and pseudoflavors
128 * @array: array to fill in
129 * @size: size of "array"
130 *
131 * Returns the number of array items filled in, or a negative errno.
132 *
133 * The returned array is not sorted by any policy. Callers should not
134 * rely on the order of the items in the returned array.
135 */
136int
137rpcauth_list_flavors(rpc_authflavor_t *array, int size)
138{
139 rpc_authflavor_t flavor;
140 int result = 0;
141
142 spin_lock(&rpc_authflavor_lock);
143 for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
144 const struct rpc_authops *ops = auth_flavors[flavor];
145 rpc_authflavor_t pseudos[4];
146 int i, len;
147
148 if (result >= size) {
149 result = -ENOMEM;
150 break;
151 }
152
153 if (ops == NULL)
154 continue;
155 if (ops->list_pseudoflavors == NULL) {
156 array[result++] = ops->au_flavor;
157 continue;
158 }
159 len = ops->list_pseudoflavors(pseudos, ARRAY_SIZE(pseudos));
160 if (len < 0) {
161 result = len;
162 break;
163 }
164 for (i = 0; i < len; i++) {
165 if (result >= size) {
166 result = -ENOMEM;
167 break;
168 }
169 array[result++] = pseudos[i];
170 }
171 }
172 spin_unlock(&rpc_authflavor_lock);
173
174 dprintk("RPC: %s returns %d\n", __func__, result);
175 return result;
176}
177EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
178
125struct rpc_auth * 179struct rpc_auth *
126rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) 180rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
127{ 181{
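
rpcauth_list_flavors() gives RPC consumers one call to enumerate both plain flavors and GSS pseudoflavors. A kernel-context sketch of a caller; the array size is arbitrary for illustration, and the function reports -ENOMEM when it is too small, as the hunk above shows.

	#include <linux/kernel.h>
	#include <linux/sunrpc/auth.h>

	static void dump_flavors(void)
	{
		rpc_authflavor_t flavors[16];	/* arbitrary for the sketch */
		int i, n;

		n = rpcauth_list_flavors(flavors, ARRAY_SIZE(flavors));
		if (n < 0) {
			pr_warn("rpcauth_list_flavors: %d\n", n);
			return;
		}
		for (i = 0; i < n; i++)
			pr_info("flavor[%d] = %u\n", i, flavors[i]);
	}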
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d3ad81f8da5b..34c522021004 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1619,6 +1619,7 @@ static const struct rpc_authops authgss_ops = {
1619 .crcreate = gss_create_cred, 1619 .crcreate = gss_create_cred,
1620 .pipes_create = gss_pipes_dentries_create, 1620 .pipes_create = gss_pipes_dentries_create,
1621 .pipes_destroy = gss_pipes_dentries_destroy, 1621 .pipes_destroy = gss_pipes_dentries_destroy,
1622 .list_pseudoflavors = gss_mech_list_pseudoflavors,
1622}; 1623};
1623 1624
1624static const struct rpc_credops gss_credops = { 1625static const struct rpc_credops gss_credops = {
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 782bfe1b6465..b174fcd9ff4c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -239,14 +239,28 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
239 239
240EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor); 240EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
241 241
242int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) 242/**
243 * gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
244 * @array: array to fill in
245 * @size: size of "array"
246 *
247 * Returns the number of array items filled in, or a negative errno.
248 *
249 * The returned array is not sorted by any policy. Callers should not
250 * rely on the order of the items in the returned array.
251 */
252int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
243{ 253{
244 struct gss_api_mech *pos = NULL; 254 struct gss_api_mech *pos = NULL;
245 int j, i = 0; 255 int j, i = 0;
246 256
247 spin_lock(&registered_mechs_lock); 257 spin_lock(&registered_mechs_lock);
248 list_for_each_entry(pos, &registered_mechs, gm_list) { 258 list_for_each_entry(pos, &registered_mechs, gm_list) {
249 for (j=0; j < pos->gm_pf_num; j++) { 259 for (j = 0; j < pos->gm_pf_num; j++) {
260 if (i >= size) {
261 spin_unlock(&registered_mechs_lock);
262 return -ENOMEM;
263 }
250 array_ptr[i++] = pos->gm_pfs[j].pseudoflavor; 264 array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
251 } 265 }
252 } 266 }
@@ -254,8 +268,6 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
254 return i; 268 return i;
255} 269}
256 270
257EXPORT_SYMBOL_GPL(gss_mech_list_pseudoflavors);
258
259u32 271u32
260gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service) 272gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
261{ 273{
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 31def68a0f6e..5a3d675d2f2f 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -176,13 +176,14 @@ out_free:
176} 176}
177EXPORT_SYMBOL_GPL(xprt_setup_backchannel); 177EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
178 178
179/* 179/**
180 * Destroys the backchannel preallocated structures. 180 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 181 * @xprt: the transport holding the preallocated structures
 182 * @max_reqs: the maximum number of preallocated structures to destroy
183 *
181 * Since these structures may have been allocated by multiple calls 184 * Since these structures may have been allocated by multiple calls
182 * to xprt_setup_backchannel, we only destroy up to the maximum number 185 * to xprt_setup_backchannel, we only destroy up to the maximum number
183 * of reqs specified by the caller. 186 * of reqs specified by the caller.
184 * @xprt: the transport holding the preallocated strucures
185 * @max_reqs the maximum number of preallocated structures to destroy
186 */ 187 */
187void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) 188void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
188{ 189{
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 47ad2666fdf6..2afd2a84dc35 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1349,8 +1349,11 @@ static int c_show(struct seq_file *m, void *p)
1349 if (cache_check(cd, cp, NULL)) 1349 if (cache_check(cd, cp, NULL))
1350 /* cache_check does a cache_put on failure */ 1350 /* cache_check does a cache_put on failure */
1351 seq_printf(m, "# "); 1351 seq_printf(m, "# ");
1352 else 1352 else {
1353 if (cache_is_expired(cd, cp))
1354 seq_printf(m, "# ");
1353 cache_put(cp, cd); 1355 cache_put(cp, cd);
1356 }
1354 1357
1355 return cd->cache_show(m, cd, cp); 1358 return cd->cache_show(m, cd, cp);
1356} 1359}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f56f045778ae..fa48c60aef23 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -385,7 +385,7 @@ out_no_rpciod:
385 return ERR_PTR(err); 385 return ERR_PTR(err);
386} 386}
387 387
388/* 388/**
389 * rpc_create - create an RPC client and transport with one call 389 * rpc_create - create an RPC client and transport with one call
390 * @args: rpc_clnt create argument structure 390 * @args: rpc_clnt create argument structure
391 * 391 *
@@ -717,6 +717,15 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
717 atomic_inc(&clnt->cl_count); 717 atomic_inc(&clnt->cl_count);
718 if (clnt->cl_softrtry) 718 if (clnt->cl_softrtry)
719 task->tk_flags |= RPC_TASK_SOFT; 719 task->tk_flags |= RPC_TASK_SOFT;
720 if (sk_memalloc_socks()) {
721 struct rpc_xprt *xprt;
722
723 rcu_read_lock();
724 xprt = rcu_dereference(clnt->cl_xprt);
725 if (xprt->swapper)
726 task->tk_flags |= RPC_TASK_SWAPPER;
727 rcu_read_unlock();
728 }
720 /* Add to the client's list of all tasks */ 729 /* Add to the client's list of all tasks */
721 spin_lock(&clnt->cl_lock); 730 spin_lock(&clnt->cl_lock);
722 list_add_tail(&task->tk_task, &clnt->cl_tasks); 731 list_add_tail(&task->tk_task, &clnt->cl_tasks);
@@ -1844,12 +1853,13 @@ call_timeout(struct rpc_task *task)
1844 return; 1853 return;
1845 } 1854 }
1846 if (RPC_IS_SOFT(task)) { 1855 if (RPC_IS_SOFT(task)) {
1847 if (clnt->cl_chatty) 1856 if (clnt->cl_chatty) {
1848 rcu_read_lock(); 1857 rcu_read_lock();
1849 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1858 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1850 clnt->cl_protname, 1859 clnt->cl_protname,
1851 rcu_dereference(clnt->cl_xprt)->servername); 1860 rcu_dereference(clnt->cl_xprt)->servername);
1852 rcu_read_unlock(); 1861 rcu_read_unlock();
1862 }
1853 if (task->tk_flags & RPC_TASK_TIMEOUT) 1863 if (task->tk_flags & RPC_TASK_TIMEOUT)
1854 rpc_exit(task, -ETIMEDOUT); 1864 rpc_exit(task, -ETIMEDOUT);
1855 else 1865 else
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 92509ffe15fc..a70acae496e4 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -251,7 +251,7 @@ static int rpcb_create_local_unix(struct net *net)
251 if (IS_ERR(clnt)) { 251 if (IS_ERR(clnt)) {
252 dprintk("RPC: failed to create AF_LOCAL rpcbind " 252 dprintk("RPC: failed to create AF_LOCAL rpcbind "
253 "client (errno %ld).\n", PTR_ERR(clnt)); 253 "client (errno %ld).\n", PTR_ERR(clnt));
254 result = -PTR_ERR(clnt); 254 result = PTR_ERR(clnt);
255 goto out; 255 goto out;
256 } 256 }
257 257
@@ -298,7 +298,7 @@ static int rpcb_create_local_net(struct net *net)
298 if (IS_ERR(clnt)) { 298 if (IS_ERR(clnt)) {
299 dprintk("RPC: failed to create local rpcbind " 299 dprintk("RPC: failed to create local rpcbind "
300 "client (errno %ld).\n", PTR_ERR(clnt)); 300 "client (errno %ld).\n", PTR_ERR(clnt));
301 result = -PTR_ERR(clnt); 301 result = PTR_ERR(clnt);
302 goto out; 302 goto out;
303 } 303 }
304 304
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 994cfea2bad6..128494ec9a64 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -300,8 +300,9 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
300/* 300/*
301 * Make an RPC task runnable. 301 * Make an RPC task runnable.
302 * 302 *
303 * Note: If the task is ASYNC, this must be called with 303 * Note: If the task is ASYNC, and is being made runnable after sitting on an
304 * the spinlock held to protect the wait queue operation. 304 * rpc_wait_queue, this must be called with the queue spinlock held to protect
305 * the wait queue operation.
305 */ 306 */
306static void rpc_make_runnable(struct rpc_task *task) 307static void rpc_make_runnable(struct rpc_task *task)
307{ 308{
@@ -790,7 +791,9 @@ void rpc_execute(struct rpc_task *task)
790 791
791static void rpc_async_schedule(struct work_struct *work) 792static void rpc_async_schedule(struct work_struct *work)
792{ 793{
794 current->flags |= PF_FSTRANS;
793 __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 795 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
796 current->flags &= ~PF_FSTRANS;
794} 797}
795 798
796/** 799/**
@@ -812,7 +815,10 @@ static void rpc_async_schedule(struct work_struct *work)
812void *rpc_malloc(struct rpc_task *task, size_t size) 815void *rpc_malloc(struct rpc_task *task, size_t size)
813{ 816{
814 struct rpc_buffer *buf; 817 struct rpc_buffer *buf;
815 gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; 818 gfp_t gfp = GFP_NOWAIT;
819
820 if (RPC_IS_SWAPPER(task))
821 gfp |= __GFP_MEMALLOC;
816 822
817 size += sizeof(struct rpc_buffer); 823 size += sizeof(struct rpc_buffer);
818 if (size <= RPC_BUFFER_MAXSIZE) 824 if (size <= RPC_BUFFER_MAXSIZE)
@@ -886,7 +892,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
886static struct rpc_task * 892static struct rpc_task *
887rpc_alloc_task(void) 893rpc_alloc_task(void)
888{ 894{
889 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); 895 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
890} 896}
891 897
892/* 898/*
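
The rpc_malloc() change trades GFP_ATOMIC for GFP_NOWAIT plus __GFP_MEMALLOC on swap-bound tasks: ordinary RPCs no longer dip into the atomic reserves, while swap-over-NFS traffic may still use the memalloc reserves it needs to make forward progress. The flag selection in isolation (kernel context):

	#include <linux/gfp.h>

	/* Only tasks servicing swap I/O may touch the memalloc reserves. */
	static gfp_t rpc_buffer_gfp(bool is_swapper)
	{
		gfp_t gfp = GFP_NOWAIT;

		if (is_swapper)
			gfp |= __GFP_MEMALLOC;
		return gfp;
	}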
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2777fa896645..4d0129203733 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -104,23 +104,9 @@ static void ip_map_put(struct kref *kref)
104 kfree(im); 104 kfree(im);
105} 105}
106 106
107#if IP_HASHBITS == 8 107static inline int hash_ip6(const struct in6_addr *ip)
108/* hash_long on a 64 bit machine is currently REALLY BAD for
109 * IP addresses in reverse-endian (i.e. on a little-endian machine).
110 * So use a trivial but reliable hash instead
111 */
112static inline int hash_ip(__be32 ip)
113{
114 int hash = (__force u32)ip ^ ((__force u32)ip>>16);
115 return (hash ^ (hash>>8)) & 0xff;
116}
117#endif
118static inline int hash_ip6(struct in6_addr ip)
119{ 108{
120 return (hash_ip(ip.s6_addr32[0]) ^ 109 return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
121 hash_ip(ip.s6_addr32[1]) ^
122 hash_ip(ip.s6_addr32[2]) ^
123 hash_ip(ip.s6_addr32[3]));
124} 110}
125static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) 111static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
126{ 112{
@@ -301,7 +287,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
301 ip.m_addr = *addr; 287 ip.m_addr = *addr;
302 ch = sunrpc_cache_lookup(cd, &ip.h, 288 ch = sunrpc_cache_lookup(cd, &ip.h,
303 hash_str(class, IP_HASHBITS) ^ 289 hash_str(class, IP_HASHBITS) ^
304 hash_ip6(*addr)); 290 hash_ip6(addr));
305 291
306 if (ch) 292 if (ch)
307 return container_of(ch, struct ip_map, h); 293 return container_of(ch, struct ip_map, h);
@@ -331,7 +317,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
331 ip.h.expiry_time = expiry; 317 ip.h.expiry_time = expiry;
332 ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, 318 ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
333 hash_str(ipm->m_class, IP_HASHBITS) ^ 319 hash_str(ipm->m_class, IP_HASHBITS) ^
334 hash_ip6(ipm->m_addr)); 320 hash_ip6(&ipm->m_addr));
335 if (!ch) 321 if (!ch)
336 return -ENOMEM; 322 return -ENOMEM;
337 cache_put(ch, cd); 323 cache_put(ch, cd);
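
The svcauth hash rewrite drops the endian-sensitive per-word hash_ip() in favour of folding the IPv6 address to 32 bits and letting hash_32() mix it down. A standalone userspace model; the multiplier is the kernel's 32-bit golden-ratio constant of this era and should be treated as illustrative.

	#include <stdint.h>

	#define IP_HASHBITS 8

	/* ipv6_addr_hash() XORs the four 32-bit words of the address. */
	static uint32_t ipv6_addr_hash_model(const uint32_t a[4])
	{
		return a[0] ^ a[1] ^ a[2] ^ a[3];
	}

	/* hash_32(): multiplicative hash down to the requested width. */
	static unsigned int hash_ip6_model(const uint32_t a[4])
	{
		return (ipv6_addr_hash_model(a) * 0x9e370001u)
			>> (32 - IP_HASHBITS);
	}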
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6de09de5d21..18bc130255a7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -43,6 +43,7 @@
43#include <net/tcp_states.h> 43#include <net/tcp_states.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45#include <asm/ioctls.h> 45#include <asm/ioctls.h>
46#include <trace/events/skb.h>
46 47
47#include <linux/sunrpc/types.h> 48#include <linux/sunrpc/types.h>
48#include <linux/sunrpc/clnt.h> 49#include <linux/sunrpc/clnt.h>
@@ -619,6 +620,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
619 if (!svc_udp_get_dest_address(rqstp, cmh)) { 620 if (!svc_udp_get_dest_address(rqstp, cmh)) {
620 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", 621 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
621 cmh->cmsg_level, cmh->cmsg_type); 622 cmh->cmsg_level, cmh->cmsg_type);
623out_free:
624 trace_kfree_skb(skb, svc_udp_recvfrom);
622 skb_free_datagram_locked(svsk->sk_sk, skb); 625 skb_free_datagram_locked(svsk->sk_sk, skb);
623 return 0; 626 return 0;
624 } 627 }
@@ -630,8 +633,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
630 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { 633 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
631 local_bh_enable(); 634 local_bh_enable();
632 /* checksum error */ 635 /* checksum error */
633 skb_free_datagram_locked(svsk->sk_sk, skb); 636 goto out_free;
634 return 0;
635 } 637 }
636 local_bh_enable(); 638 local_bh_enable();
637 skb_free_datagram_locked(svsk->sk_sk, skb); 639 skb_free_datagram_locked(svsk->sk_sk, skb);
@@ -640,10 +642,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
640 rqstp->rq_arg.head[0].iov_base = skb->data + 642 rqstp->rq_arg.head[0].iov_base = skb->data +
641 sizeof(struct udphdr); 643 sizeof(struct udphdr);
642 rqstp->rq_arg.head[0].iov_len = len; 644 rqstp->rq_arg.head[0].iov_len = len;
643 if (skb_checksum_complete(skb)) { 645 if (skb_checksum_complete(skb))
644 skb_free_datagram_locked(svsk->sk_sk, skb); 646 goto out_free;
645 return 0;
646 }
647 rqstp->rq_xprt_ctxt = skb; 647 rqstp->rq_xprt_ctxt = skb;
648 } 648 }
649 649
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fddcccfcdf76..0afba1b4b656 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -129,34 +129,6 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
129EXPORT_SYMBOL_GPL(xdr_terminate_string); 129EXPORT_SYMBOL_GPL(xdr_terminate_string);
130 130
131void 131void
132xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
133 unsigned int len)
134{
135 struct kvec *tail = xdr->tail;
136 u32 *p;
137
138 xdr->pages = pages;
139 xdr->page_base = base;
140 xdr->page_len = len;
141
142 p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
143 tail->iov_base = p;
144 tail->iov_len = 0;
145
146 if (len & 3) {
147 unsigned int pad = 4 - (len & 3);
148
149 *p = 0;
150 tail->iov_base = (char *)p + (len & 3);
151 tail->iov_len = pad;
152 len += pad;
153 }
154 xdr->buflen += len;
155 xdr->len += len;
156}
157EXPORT_SYMBOL_GPL(xdr_encode_pages);
158
159void
160xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, 132xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
161 struct page **pages, unsigned int base, unsigned int len) 133 struct page **pages, unsigned int base, unsigned int len)
162{ 134{
@@ -180,7 +152,9 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
180 152
181/* 153/*
182 * Helper routines for doing 'memmove' like operations on a struct xdr_buf 154 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
183 * 155 */
156
157/**
184 * _shift_data_right_pages 158 * _shift_data_right_pages
185 * @pages: vector of pages containing both the source and dest memory area. 159 * @pages: vector of pages containing both the source and dest memory area.
186 * @pgto_base: page vector address of destination 160 * @pgto_base: page vector address of destination
@@ -242,7 +216,7 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
242 } while ((len -= copy) != 0); 216 } while ((len -= copy) != 0);
243} 217}
244 218
245/* 219/**
246 * _copy_to_pages 220 * _copy_to_pages
247 * @pages: array of pages 221 * @pages: array of pages
248 * @pgbase: page vector address of destination 222 * @pgbase: page vector address of destination
@@ -286,7 +260,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
286 flush_dcache_page(*pgto); 260 flush_dcache_page(*pgto);
287} 261}
288 262
289/* 263/**
290 * _copy_from_pages 264 * _copy_from_pages
291 * @p: pointer to destination 265 * @p: pointer to destination
292 * @pages: array of pages 266 * @pages: array of pages
@@ -326,7 +300,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
326} 300}
327EXPORT_SYMBOL_GPL(_copy_from_pages); 301EXPORT_SYMBOL_GPL(_copy_from_pages);
328 302
329/* 303/**
330 * xdr_shrink_bufhead 304 * xdr_shrink_bufhead
331 * @buf: xdr_buf 305 * @buf: xdr_buf
332 * @len: bytes to remove from buf->head[0] 306 * @len: bytes to remove from buf->head[0]
@@ -399,7 +373,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
399 buf->len = buf->buflen; 373 buf->len = buf->buflen;
400} 374}
401 375
402/* 376/**
403 * xdr_shrink_pagelen 377 * xdr_shrink_pagelen
404 * @buf: xdr_buf 378 * @buf: xdr_buf
405 * @len: bytes to remove from buf->pages 379 * @len: bytes to remove from buf->pages
@@ -455,6 +429,16 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
455EXPORT_SYMBOL_GPL(xdr_shift_buf); 429EXPORT_SYMBOL_GPL(xdr_shift_buf);
456 430
457/** 431/**
432 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
433 * @xdr: pointer to struct xdr_stream
434 */
435unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
436{
437 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
438}
439EXPORT_SYMBOL_GPL(xdr_stream_pos);
440
441/**
458 * xdr_init_encode - Initialize a struct xdr_stream for sending data. 442 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
459 * @xdr: pointer to xdr_stream struct 443 * @xdr: pointer to xdr_stream struct
460 * @buf: pointer to XDR buffer in which to encode data 444 * @buf: pointer to XDR buffer in which to encode data
@@ -554,13 +538,11 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
554EXPORT_SYMBOL_GPL(xdr_write_pages); 538EXPORT_SYMBOL_GPL(xdr_write_pages);
555 539
556static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, 540static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
557 __be32 *p, unsigned int len) 541 unsigned int len)
558{ 542{
559 if (len > iov->iov_len) 543 if (len > iov->iov_len)
560 len = iov->iov_len; 544 len = iov->iov_len;
561 if (p == NULL) 545 xdr->p = (__be32*)iov->iov_base;
562 p = (__be32*)iov->iov_base;
563 xdr->p = p;
564 xdr->end = (__be32*)(iov->iov_base + len); 546 xdr->end = (__be32*)(iov->iov_base + len);
565 xdr->iov = iov; 547 xdr->iov = iov;
566 xdr->page_ptr = NULL; 548 xdr->page_ptr = NULL;
@@ -607,7 +589,7 @@ static void xdr_set_next_page(struct xdr_stream *xdr)
607 newbase -= xdr->buf->page_base; 589 newbase -= xdr->buf->page_base;
608 590
609 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0) 591 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
610 xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); 592 xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
611} 593}
612 594
613static bool xdr_set_next_buffer(struct xdr_stream *xdr) 595static bool xdr_set_next_buffer(struct xdr_stream *xdr)
@@ -616,7 +598,7 @@ static bool xdr_set_next_buffer(struct xdr_stream *xdr)
616 xdr_set_next_page(xdr); 598 xdr_set_next_page(xdr);
617 else if (xdr->iov == xdr->buf->head) { 599 else if (xdr->iov == xdr->buf->head) {
618 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0) 600 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
619 xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); 601 xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
620 } 602 }
621 return xdr->p != xdr->end; 603 return xdr->p != xdr->end;
622} 604}
@@ -632,10 +614,15 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
632 xdr->buf = buf; 614 xdr->buf = buf;
633 xdr->scratch.iov_base = NULL; 615 xdr->scratch.iov_base = NULL;
634 xdr->scratch.iov_len = 0; 616 xdr->scratch.iov_len = 0;
617 xdr->nwords = XDR_QUADLEN(buf->len);
635 if (buf->head[0].iov_len != 0) 618 if (buf->head[0].iov_len != 0)
636 xdr_set_iov(xdr, buf->head, p, buf->len); 619 xdr_set_iov(xdr, buf->head, buf->len);
637 else if (buf->page_len != 0) 620 else if (buf->page_len != 0)
638 xdr_set_page_base(xdr, 0, buf->len); 621 xdr_set_page_base(xdr, 0, buf->len);
622 if (p != NULL && p > xdr->p && xdr->end >= p) {
623 xdr->nwords -= p - xdr->p;
624 xdr->p = p;
625 }
639} 626}
640EXPORT_SYMBOL_GPL(xdr_init_decode); 627EXPORT_SYMBOL_GPL(xdr_init_decode);
641 628
@@ -660,12 +647,14 @@ EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
660 647
661static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 648static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
662{ 649{
650 unsigned int nwords = XDR_QUADLEN(nbytes);
663 __be32 *p = xdr->p; 651 __be32 *p = xdr->p;
664 __be32 *q = p + XDR_QUADLEN(nbytes); 652 __be32 *q = p + nwords;
665 653
666 if (unlikely(q > xdr->end || q < p)) 654 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
667 return NULL; 655 return NULL;
668 xdr->p = q; 656 xdr->p = q;
657 xdr->nwords -= nwords;
669 return p; 658 return p;
670} 659}
671 660
@@ -732,6 +721,31 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
732} 721}
733EXPORT_SYMBOL_GPL(xdr_inline_decode); 722EXPORT_SYMBOL_GPL(xdr_inline_decode);
734 723
724static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
725{
726 struct xdr_buf *buf = xdr->buf;
727 struct kvec *iov;
728 unsigned int nwords = XDR_QUADLEN(len);
729 unsigned int cur = xdr_stream_pos(xdr);
730
731 if (xdr->nwords == 0)
732 return 0;
733 if (nwords > xdr->nwords) {
734 nwords = xdr->nwords;
735 len = nwords << 2;
736 }
737 /* Realign pages to current pointer position */
738 iov = buf->head;
739 if (iov->iov_len > cur)
740 xdr_shrink_bufhead(buf, iov->iov_len - cur);
741
742 /* Truncate page data and move it into the tail */
743 if (buf->page_len > len)
744 xdr_shrink_pagelen(buf, buf->page_len - len);
745 xdr->nwords = XDR_QUADLEN(buf->len - cur);
746 return len;
747}
748
735/** 749/**
736 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position 750 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
737 * @xdr: pointer to xdr_stream struct 751 * @xdr: pointer to xdr_stream struct
@@ -740,39 +754,37 @@ EXPORT_SYMBOL_GPL(xdr_inline_decode);
740 * Moves data beyond the current pointer position from the XDR head[] buffer 754 * Moves data beyond the current pointer position from the XDR head[] buffer
741 * into the page list. Any data that lies beyond current position + "len" 755 * into the page list. Any data that lies beyond current position + "len"
742 * bytes is moved into the XDR tail[]. 756 * bytes is moved into the XDR tail[].
757 *
758 * Returns the number of XDR encoded bytes now contained in the pages
743 */ 759 */
744void xdr_read_pages(struct xdr_stream *xdr, unsigned int len) 760unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
745{ 761{
746 struct xdr_buf *buf = xdr->buf; 762 struct xdr_buf *buf = xdr->buf;
747 struct kvec *iov; 763 struct kvec *iov;
748 ssize_t shift; 764 unsigned int nwords;
749 unsigned int end; 765 unsigned int end;
750 int padding; 766 unsigned int padding;
751
752 /* Realign pages to current pointer position */
753 iov = buf->head;
754 shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
755 if (shift > 0)
756 xdr_shrink_bufhead(buf, shift);
757 767
758 /* Truncate page data and move it into the tail */ 768 len = xdr_align_pages(xdr, len);
759 if (buf->page_len > len) 769 if (len == 0)
760 xdr_shrink_pagelen(buf, buf->page_len - len); 770 return 0;
761 padding = (XDR_QUADLEN(len) << 2) - len; 771 nwords = XDR_QUADLEN(len);
772 padding = (nwords << 2) - len;
762 xdr->iov = iov = buf->tail; 773 xdr->iov = iov = buf->tail;
763 /* Compute remaining message length. */ 774 /* Compute remaining message length. */
764 end = iov->iov_len; 775 end = ((xdr->nwords - nwords) << 2) + padding;
765 shift = buf->buflen - buf->len; 776 if (end > iov->iov_len)
766 if (shift < end) 777 end = iov->iov_len;
767 end -= shift; 778
768 else if (shift > 0)
769 end = 0;
770 /* 779 /*
771 * Position current pointer at beginning of tail, and 780 * Position current pointer at beginning of tail, and
772 * set remaining message length. 781 * set remaining message length.
773 */ 782 */
774 xdr->p = (__be32 *)((char *)iov->iov_base + padding); 783 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
775 xdr->end = (__be32 *)((char *)iov->iov_base + end); 784 xdr->end = (__be32 *)((char *)iov->iov_base + end);
785 xdr->page_ptr = NULL;
786 xdr->nwords = XDR_QUADLEN(end - padding);
787 return len;
776} 788}
777EXPORT_SYMBOL_GPL(xdr_read_pages); 789EXPORT_SYMBOL_GPL(xdr_read_pages);
778 790
@@ -788,12 +800,13 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
788 */ 800 */
789void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) 801void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
790{ 802{
791 xdr_read_pages(xdr, len); 803 len = xdr_align_pages(xdr, len);
792 /* 804 /*
793 * Position current pointer at beginning of tail, and 805 * Position current pointer at beginning of tail, and
794 * set remaining message length. 806 * set remaining message length.
795 */ 807 */
796 xdr_set_page_base(xdr, 0, len); 808 if (len != 0)
809 xdr_set_page_base(xdr, 0, len);
797} 810}
798EXPORT_SYMBOL_GPL(xdr_enter_page); 811EXPORT_SYMBOL_GPL(xdr_enter_page);
799 812
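
xdr_read_pages() now reports how many XDR-encoded bytes actually landed in the page list, and the new nwords accounting makes __xdr_inline_decode() refuse reads past the end of the received message. A kernel-context sketch of a decoder using the return value; decode_opaque_pages() is a hypothetical caller.

	static int decode_opaque_pages(struct xdr_stream *xdr,
				       unsigned int want)
	{
		unsigned int got = xdr_read_pages(xdr, want);

		/* A truncated reply yields fewer bytes than advertised. */
		if (got < want)
			return -EIO;
		return 0;
	}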
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c83035cdaa9..a5a402a7d21f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -531,7 +531,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
531} 531}
532EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def); 532EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
533 533
534/* 534/**
535 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout 535 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
536 * @task: task whose timeout is to be set 536 * @task: task whose timeout is to be set
537 * 537 *
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index b446e100286f..06cdbff79e4a 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
200 int rc = 0; 200 int rc = 0;
201 201
202 if (!xprt->shutdown) { 202 if (!xprt->shutdown) {
203 current->flags |= PF_FSTRANS;
203 xprt_clear_connected(xprt); 204 xprt_clear_connected(xprt);
204 205
205 dprintk("RPC: %s: %sconnect\n", __func__, 206 dprintk("RPC: %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
212 213
213out: 214out:
214 xprt_wake_pending_tasks(xprt, rc); 215 xprt_wake_pending_tasks(xprt, rc);
215
216out_clear: 216out_clear:
217 dprintk("RPC: %s: exit\n", __func__); 217 dprintk("RPC: %s: exit\n", __func__);
218 xprt_clear_connecting(xprt); 218 xprt_clear_connecting(xprt);
219 current->flags &= ~PF_FSTRANS;
219} 220}
220 221
221/* 222/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 890b03f8d877..400567243f84 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1014,9 +1014,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
1014 1014
1015 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 1015 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1016 1016
1017 /* Something worked... */
1018 dst_confirm(skb_dst(skb));
1019
1020 xprt_adjust_cwnd(task, copied); 1017 xprt_adjust_cwnd(task, copied);
1021 xprt_complete_rqst(task, copied); 1018 xprt_complete_rqst(task, copied);
1022 1019
@@ -1895,6 +1892,8 @@ static void xs_local_setup_socket(struct work_struct *work)
1895 if (xprt->shutdown) 1892 if (xprt->shutdown)
1896 goto out; 1893 goto out;
1897 1894
1895 current->flags |= PF_FSTRANS;
1896
1898 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1899 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1898 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1900 SOCK_STREAM, 0, &sock, 1); 1899 SOCK_STREAM, 0, &sock, 1);
@@ -1928,8 +1927,48 @@ static void xs_local_setup_socket(struct work_struct *work)
1928out: 1927out:
1929 xprt_clear_connecting(xprt); 1928 xprt_clear_connecting(xprt);
1930 xprt_wake_pending_tasks(xprt, status); 1929 xprt_wake_pending_tasks(xprt, status);
1930 current->flags &= ~PF_FSTRANS;
1931}
1932
1933#ifdef CONFIG_SUNRPC_SWAP
1934static void xs_set_memalloc(struct rpc_xprt *xprt)
1935{
1936 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1937 xprt);
1938
1939 if (xprt->swapper)
1940 sk_set_memalloc(transport->inet);
1931} 1941}
1932 1942
1943/**
1944 * xs_swapper - Tag this transport as being used for swap.
1945 * @xprt: transport to tag
1946 * @enable: enable/disable
1947 *
1948 */
1949int xs_swapper(struct rpc_xprt *xprt, int enable)
1950{
1951 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1952 xprt);
1953 int err = 0;
1954
1955 if (enable) {
1956 xprt->swapper++;
1957 xs_set_memalloc(xprt);
1958 } else if (xprt->swapper) {
1959 xprt->swapper--;
1960 sk_clear_memalloc(transport->inet);
1961 }
1962
1963 return err;
1964}
1965EXPORT_SYMBOL_GPL(xs_swapper);
1966#else
1967static void xs_set_memalloc(struct rpc_xprt *xprt)
1968{
1969}
1970#endif
1971
1933static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1972static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1934{ 1973{
1935 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1974 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1954,6 +1993,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1954 transport->sock = sock; 1993 transport->sock = sock;
1955 transport->inet = sk; 1994 transport->inet = sk;
1956 1995
1996 xs_set_memalloc(xprt);
1997
1957 write_unlock_bh(&sk->sk_callback_lock); 1998 write_unlock_bh(&sk->sk_callback_lock);
1958 } 1999 }
1959 xs_udp_do_set_buffer_size(xprt); 2000 xs_udp_do_set_buffer_size(xprt);
@@ -1970,6 +2011,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
1970 if (xprt->shutdown) 2011 if (xprt->shutdown)
1971 goto out; 2012 goto out;
1972 2013
2014 current->flags |= PF_FSTRANS;
2015
1973 /* Start by resetting any existing state */ 2016 /* Start by resetting any existing state */
1974 xs_reset_transport(transport); 2017 xs_reset_transport(transport);
1975 sock = xs_create_sock(xprt, transport, 2018 sock = xs_create_sock(xprt, transport,
@@ -1988,6 +2031,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
1988out: 2031out:
1989 xprt_clear_connecting(xprt); 2032 xprt_clear_connecting(xprt);
1990 xprt_wake_pending_tasks(xprt, status); 2033 xprt_wake_pending_tasks(xprt, status);
2034 current->flags &= ~PF_FSTRANS;
1991} 2035}
1992 2036
1993/* 2037/*
@@ -2078,6 +2122,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2078 if (!xprt_bound(xprt)) 2122 if (!xprt_bound(xprt))
2079 goto out; 2123 goto out;
2080 2124
2125 xs_set_memalloc(xprt);
2126
2081 /* Tell the socket layer to start connecting... */ 2127 /* Tell the socket layer to start connecting... */
2082 xprt->stat.connect_count++; 2128 xprt->stat.connect_count++;
2083 xprt->stat.connect_start = jiffies; 2129 xprt->stat.connect_start = jiffies;
@@ -2113,6 +2159,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2113 if (xprt->shutdown) 2159 if (xprt->shutdown)
2114 goto out; 2160 goto out;
2115 2161
2162 current->flags |= PF_FSTRANS;
2163
2116 if (!sock) { 2164 if (!sock) {
2117 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 2165 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
2118 sock = xs_create_sock(xprt, transport, 2166 sock = xs_create_sock(xprt, transport,
@@ -2162,6 +2210,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2162 case -EINPROGRESS: 2210 case -EINPROGRESS:
2163 case -EALREADY: 2211 case -EALREADY:
2164 xprt_clear_connecting(xprt); 2212 xprt_clear_connecting(xprt);
2213 current->flags &= ~PF_FSTRANS;
2165 return; 2214 return;
2166 case -EINVAL: 2215 case -EINVAL:
2167 /* Happens, for instance, if the user specified a link 2216 /* Happens, for instance, if the user specified a link
@@ -2174,6 +2223,7 @@ out_eagain:
2174out: 2223out:
2175 xprt_clear_connecting(xprt); 2224 xprt_clear_connecting(xprt);
2176 xprt_wake_pending_tasks(xprt, status); 2225 xprt_wake_pending_tasks(xprt, status);
2226 current->flags &= ~PF_FSTRANS;
2177} 2227}
2178 2228
2179/** 2229/**
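
Every transport connect worker above now brackets its work with PF_FSTRANS, which tells memory reclaim not to recurse into filesystem (and hence network) I/O while the worker itself may be allocating. The shape shared by those hunks, in isolation (kernel context; the unconditional clear assumes the worker did not inherit the flag):

	#include <linux/sched.h>
	#include <linux/workqueue.h>

	static void xprt_connect_worker_sketch(struct work_struct *work)
	{
		current->flags |= PF_FSTRANS;	/* no fs recursion in reclaim */

		/* ... reset transport, create socket, start connect ... */

		current->flags &= ~PF_FSTRANS;
	}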
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 2c5954b85933..585460180ffb 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -41,29 +41,4 @@ config TIPC_PORTS
41 Setting this to a smaller value saves some memory, 41 Setting this to a smaller value saves some memory,
42 setting it to higher allows for more ports. 42 setting it to higher allows for more ports.
43 43
44config TIPC_LOG
45 int "Size of log buffer"
46 depends on TIPC_ADVANCED
47 range 0 32768
48 default "0"
49 help
50 Size (in bytes) of TIPC's internal log buffer, which records the
51 occurrence of significant events. Can range from 0 to 32768 bytes;
52 default is 0.
53
54 There is no need to enable the log buffer unless the node will be
55 managed remotely via TIPC.
56
57config TIPC_DEBUG
58 bool "Enable debugging support"
59 default n
60 help
61 Saying Y here enables TIPC debugging capabilities used by developers.
62 Most users do not need to bother; if unsure, just say N.
63
64 Enabling debugging support causes TIPC to display data about its
65 internal state when certain abnormal conditions occur. It also
66 makes it easy for developers to capture additional information of
67 interest using the dbg() or msg_dbg() macros.
68
69endif # TIPC 44endif # TIPC
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2625f5ebe3e8..e4e6d8cd47e6 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -162,7 +162,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
162} 162}
163 163
164 164
165/* 165/**
166 * tipc_bclink_retransmit_to - get most recent node to request retransmission 166 * tipc_bclink_retransmit_to - get most recent node to request retransmission
167 * 167 *
168 * Called with bc_lock locked 168 * Called with bc_lock locked
@@ -270,7 +270,7 @@ exit:
270 spin_unlock_bh(&bc_lock); 270 spin_unlock_bh(&bc_lock);
271} 271}
272 272
273/* 273/**
274 * tipc_bclink_update_link_state - update broadcast link state 274 * tipc_bclink_update_link_state - update broadcast link state
275 * 275 *
276 * tipc_net_lock and node lock set 276 * tipc_net_lock and node lock set
@@ -330,7 +330,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
330 } 330 }
331} 331}
332 332
333/* 333/**
334 * bclink_peek_nack - monitor retransmission requests sent by other nodes 334 * bclink_peek_nack - monitor retransmission requests sent by other nodes
335 * 335 *
336 * Delay any upcoming NACK by this node if another node has already 336 * Delay any upcoming NACK by this node if another node has already
@@ -381,7 +381,7 @@ exit:
381 return res; 381 return res;
382} 382}
383 383
384/* 384/**
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet 385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 * 386 *
387 * Called with both sending node's lock and bc_lock taken. 387 * Called with both sending node's lock and bc_lock taken.
@@ -406,7 +406,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
406 } 406 }
407} 407}
408 408
409/* 409/**
410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards 410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
411 * 411 *
412 * tipc_net_lock is read_locked, no other locks set 412 * tipc_net_lock is read_locked, no other locks set
@@ -701,48 +701,43 @@ void tipc_bcbearer_sort(void)
701 701
702int tipc_bclink_stats(char *buf, const u32 buf_size) 702int tipc_bclink_stats(char *buf, const u32 buf_size)
703{ 703{
704 struct print_buf pb; 704 int ret;
705 struct tipc_stats *s;
705 706
706 if (!bcl) 707 if (!bcl)
707 return 0; 708 return 0;
708 709
709 tipc_printbuf_init(&pb, buf, buf_size);
710
711 spin_lock_bh(&bc_lock); 710 spin_lock_bh(&bc_lock);
712 711
713 tipc_printf(&pb, "Link <%s>\n" 712 s = &bcl->stats;
714 " Window:%u packets\n", 713
715 bcl->name, bcl->queue_limit[0]); 714 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
716 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 715 " Window:%u packets\n",
717 bcl->stats.recv_info, 716 bcl->name, bcl->queue_limit[0]);
718 bcl->stats.recv_fragments, 717 ret += tipc_snprintf(buf + ret, buf_size - ret,
719 bcl->stats.recv_fragmented, 718 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
720 bcl->stats.recv_bundles, 719 s->recv_info, s->recv_fragments,
721 bcl->stats.recv_bundled); 720 s->recv_fragmented, s->recv_bundles,
722 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 721 s->recv_bundled);
723 bcl->stats.sent_info, 722 ret += tipc_snprintf(buf + ret, buf_size - ret,
724 bcl->stats.sent_fragments, 723 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
725 bcl->stats.sent_fragmented, 724 s->sent_info, s->sent_fragments,
726 bcl->stats.sent_bundles, 725 s->sent_fragmented, s->sent_bundles,
727 bcl->stats.sent_bundled); 726 s->sent_bundled);
728 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n", 727 ret += tipc_snprintf(buf + ret, buf_size - ret,
729 bcl->stats.recv_nacks, 728 " RX naks:%u defs:%u dups:%u\n",
730 bcl->stats.deferred_recv, 729 s->recv_nacks, s->deferred_recv, s->duplicates);
731 bcl->stats.duplicates); 730 ret += tipc_snprintf(buf + ret, buf_size - ret,
732 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n", 731 " TX naks:%u acks:%u dups:%u\n",
733 bcl->stats.sent_nacks, 732 s->sent_nacks, s->sent_acks, s->retransmitted);
734 bcl->stats.sent_acks, 733 ret += tipc_snprintf(buf + ret, buf_size - ret,
735 bcl->stats.retransmitted); 734 " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
736 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 735 s->bearer_congs, s->link_congs, s->max_queue_sz,
737 bcl->stats.bearer_congs, 736 s->queue_sz_counts ?
738 bcl->stats.link_congs, 737 (s->accu_queue_sz / s->queue_sz_counts) : 0);
739 bcl->stats.max_queue_sz,
740 bcl->stats.queue_sz_counts
741 ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
742 : 0);
743 738
744 spin_unlock_bh(&bc_lock); 739 spin_unlock_bh(&bc_lock);
745 return tipc_printbuf_validate(&pb); 740 return ret;
746} 741}
747 742
748int tipc_bclink_reset_stats(void) 743int tipc_bclink_reset_stats(void)
@@ -880,7 +875,7 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
880 if (!item->next) { 875 if (!item->next) {
881 item->next = kmalloc(sizeof(*item), GFP_ATOMIC); 876 item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
882 if (!item->next) { 877 if (!item->next) {
883 warn("Incomplete multicast delivery, no memory\n"); 878 pr_warn("Incomplete multicast delivery, no memory\n");
884 return; 879 return;
885 } 880 }
886 item->next->next = NULL; 881 item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a297e3a2e3e7..09e71241265d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -123,28 +123,30 @@ int tipc_register_media(struct tipc_media *m_ptr)
123exit: 123exit:
124 write_unlock_bh(&tipc_net_lock); 124 write_unlock_bh(&tipc_net_lock);
125 if (res) 125 if (res)
126 warn("Media <%s> registration error\n", m_ptr->name); 126 pr_warn("Media <%s> registration error\n", m_ptr->name);
127 return res; 127 return res;
128} 128}
129 129
130/** 130/**
131 * tipc_media_addr_printf - record media address in print buffer 131 * tipc_media_addr_printf - record media address in print buffer
132 */ 132 */
133void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) 133void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
134{ 134{
135 char addr_str[MAX_ADDR_STR]; 135 char addr_str[MAX_ADDR_STR];
136 struct tipc_media *m_ptr; 136 struct tipc_media *m_ptr;
137 int ret;
137 138
138 m_ptr = media_find_id(a->media_id); 139 m_ptr = media_find_id(a->media_id);
139 140
140 if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) 141 if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
141 tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str); 142 ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
142 else { 143 else {
143 u32 i; 144 u32 i;
144 145
145 tipc_printf(pb, "UNKNOWN(%u)", a->media_id); 146 ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id);
146 for (i = 0; i < sizeof(a->value); i++) 147 for (i = 0; i < sizeof(a->value); i++)
 147 tipc_printf(pb, "-%02x", a->value[i]); 148 ret += tipc_snprintf(buf + ret, len - ret,
 149 "-%02x", a->value[i]);
148 } 150 }
149} 151}
150 152
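
The print_buf to tipc_snprintf() conversion repeats one accumulation idiom: each call appends at buf + ret with len - ret bytes of space remaining. A standalone userspace model; plain snprintf() returns the would-be length on truncation, so the loop guard keeps the pointer arithmetic in bounds, while the kernel helper presumably has scnprintf-like semantics and never over-reports.

	#include <stdio.h>

	static int media_addr_print(char *buf, int len, unsigned int media_id,
				    const unsigned char *val, int n)
	{
		int i, ret;

		ret = snprintf(buf, len, "UNKNOWN(%u)", media_id);
		for (i = 0; i < n && ret < len; i++)
			ret += snprintf(buf + ret, len - ret,
					"-%02x", val[i]);
		return ret;
	}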
@@ -172,8 +174,8 @@ struct sk_buff *tipc_media_get_names(void)
172 174
173/** 175/**
174 * bearer_name_validate - validate & (optionally) deconstruct bearer name 176 * bearer_name_validate - validate & (optionally) deconstruct bearer name
175 * @name - ptr to bearer name string 177 * @name: ptr to bearer name string
176 * @name_parts - ptr to area for bearer name components (or NULL if not needed) 178 * @name_parts: ptr to area for bearer name components (or NULL if not needed)
177 * 179 *
178 * Returns 1 if bearer name is valid, otherwise 0. 180 * Returns 1 if bearer name is valid, otherwise 0.
179 */ 181 */
@@ -418,12 +420,12 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
418 int res = -EINVAL; 420 int res = -EINVAL;
419 421
420 if (!tipc_own_addr) { 422 if (!tipc_own_addr) {
421 warn("Bearer <%s> rejected, not supported in standalone mode\n", 423 pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
422 name); 424 name);
423 return -ENOPROTOOPT; 425 return -ENOPROTOOPT;
424 } 426 }
425 if (!bearer_name_validate(name, &b_names)) { 427 if (!bearer_name_validate(name, &b_names)) {
426 warn("Bearer <%s> rejected, illegal name\n", name); 428 pr_warn("Bearer <%s> rejected, illegal name\n", name);
427 return -EINVAL; 429 return -EINVAL;
428 } 430 }
429 if (tipc_addr_domain_valid(disc_domain) && 431 if (tipc_addr_domain_valid(disc_domain) &&
@@ -435,12 +437,13 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
435 res = 0; /* accept specified node in own cluster */ 437 res = 0; /* accept specified node in own cluster */
436 } 438 }
437 if (res) { 439 if (res) {
438 warn("Bearer <%s> rejected, illegal discovery domain\n", name); 440 pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
441 name);
439 return -EINVAL; 442 return -EINVAL;
440 } 443 }
441 if ((priority > TIPC_MAX_LINK_PRI) && 444 if ((priority > TIPC_MAX_LINK_PRI) &&
442 (priority != TIPC_MEDIA_LINK_PRI)) { 445 (priority != TIPC_MEDIA_LINK_PRI)) {
443 warn("Bearer <%s> rejected, illegal priority\n", name); 446 pr_warn("Bearer <%s> rejected, illegal priority\n", name);
444 return -EINVAL; 447 return -EINVAL;
445 } 448 }
446 449
@@ -448,8 +451,8 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
448 451
449 m_ptr = tipc_media_find(b_names.media_name); 452 m_ptr = tipc_media_find(b_names.media_name);
450 if (!m_ptr) { 453 if (!m_ptr) {
451 warn("Bearer <%s> rejected, media <%s> not registered\n", name, 454 pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
452 b_names.media_name); 455 name, b_names.media_name);
453 goto exit; 456 goto exit;
454 } 457 }
455 458
@@ -465,24 +468,25 @@ restart:
465 continue; 468 continue;
466 } 469 }
467 if (!strcmp(name, tipc_bearers[i].name)) { 470 if (!strcmp(name, tipc_bearers[i].name)) {
468 warn("Bearer <%s> rejected, already enabled\n", name); 471 pr_warn("Bearer <%s> rejected, already enabled\n",
472 name);
469 goto exit; 473 goto exit;
470 } 474 }
471 if ((tipc_bearers[i].priority == priority) && 475 if ((tipc_bearers[i].priority == priority) &&
472 (++with_this_prio > 2)) { 476 (++with_this_prio > 2)) {
473 if (priority-- == 0) { 477 if (priority-- == 0) {
474 warn("Bearer <%s> rejected, duplicate priority\n", 478 pr_warn("Bearer <%s> rejected, duplicate priority\n",
475 name); 479 name);
476 goto exit; 480 goto exit;
477 } 481 }
478 warn("Bearer <%s> priority adjustment required %u->%u\n", 482 pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
479 name, priority + 1, priority); 483 name, priority + 1, priority);
480 goto restart; 484 goto restart;
481 } 485 }
482 } 486 }
483 if (bearer_id >= MAX_BEARERS) { 487 if (bearer_id >= MAX_BEARERS) {
484 warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 488 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
485 name, MAX_BEARERS); 489 name, MAX_BEARERS);
486 goto exit; 490 goto exit;
487 } 491 }
488 492
@@ -490,7 +494,8 @@ restart:
490 strcpy(b_ptr->name, name); 494 strcpy(b_ptr->name, name);
491 res = m_ptr->enable_bearer(b_ptr); 495 res = m_ptr->enable_bearer(b_ptr);
492 if (res) { 496 if (res) {
493 warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res); 497 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
498 name, -res);
494 goto exit; 499 goto exit;
495 } 500 }
496 501
@@ -508,20 +513,20 @@ restart:
508 res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain); 513 res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
509 if (res) { 514 if (res) {
510 bearer_disable(b_ptr); 515 bearer_disable(b_ptr);
511 warn("Bearer <%s> rejected, discovery object creation failed\n", 516 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
512 name); 517 name);
513 goto exit; 518 goto exit;
514 } 519 }
515 info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 520 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
516 name, tipc_addr_string_fill(addr_string, disc_domain), priority); 521 name,
522 tipc_addr_string_fill(addr_string, disc_domain), priority);
517exit: 523exit:
518 write_unlock_bh(&tipc_net_lock); 524 write_unlock_bh(&tipc_net_lock);
519 return res; 525 return res;
520} 526}
521 527
522/** 528/**
523 * tipc_block_bearer(): Block the bearer with the given name, 529 * tipc_block_bearer - Block the bearer with the given name, and reset all its links
524 * and reset all its links
525 */ 530 */
526int tipc_block_bearer(const char *name) 531int tipc_block_bearer(const char *name)
527{ 532{
@@ -532,12 +537,12 @@ int tipc_block_bearer(const char *name)
532 read_lock_bh(&tipc_net_lock); 537 read_lock_bh(&tipc_net_lock);
533 b_ptr = tipc_bearer_find(name); 538 b_ptr = tipc_bearer_find(name);
534 if (!b_ptr) { 539 if (!b_ptr) {
535 warn("Attempt to block unknown bearer <%s>\n", name); 540 pr_warn("Attempt to block unknown bearer <%s>\n", name);
536 read_unlock_bh(&tipc_net_lock); 541 read_unlock_bh(&tipc_net_lock);
537 return -EINVAL; 542 return -EINVAL;
538 } 543 }
539 544
540 info("Blocking bearer <%s>\n", name); 545 pr_info("Blocking bearer <%s>\n", name);
541 spin_lock_bh(&b_ptr->lock); 546 spin_lock_bh(&b_ptr->lock);
542 b_ptr->blocked = 1; 547 b_ptr->blocked = 1;
543 list_splice_init(&b_ptr->cong_links, &b_ptr->links); 548 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
@@ -563,7 +568,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
563 struct tipc_link *l_ptr; 568 struct tipc_link *l_ptr;
564 struct tipc_link *temp_l_ptr; 569 struct tipc_link *temp_l_ptr;
565 570
566 info("Disabling bearer <%s>\n", b_ptr->name); 571 pr_info("Disabling bearer <%s>\n", b_ptr->name);
567 spin_lock_bh(&b_ptr->lock); 572 spin_lock_bh(&b_ptr->lock);
568 b_ptr->blocked = 1; 573 b_ptr->blocked = 1;
569 b_ptr->media->disable_bearer(b_ptr); 574 b_ptr->media->disable_bearer(b_ptr);
@@ -585,7 +590,7 @@ int tipc_disable_bearer(const char *name)
585 write_lock_bh(&tipc_net_lock); 590 write_lock_bh(&tipc_net_lock);
586 b_ptr = tipc_bearer_find(name); 591 b_ptr = tipc_bearer_find(name);
587 if (b_ptr == NULL) { 592 if (b_ptr == NULL) {
588 warn("Attempt to disable unknown bearer <%s>\n", name); 593 pr_warn("Attempt to disable unknown bearer <%s>\n", name);
589 res = -EINVAL; 594 res = -EINVAL;
590 } else { 595 } else {
591 bearer_disable(b_ptr); 596 bearer_disable(b_ptr);
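
Note on the conversion pattern above: TIPC's private warn()/info()/err()
macros hard-coded a "TIPC: " prefix into every format string, whereas the
generic pr_warn()/pr_info()/pr_err() helpers get the same prefix from a
single pr_fmt() definition (added to core.h later in this patch), so the
call sites carry only the message text. A minimal userspace sketch of that
mechanism, not kernel code, with the prefix simplified from the kernel's
KBUILD_MODNAME form:

    #include <stdio.h>

    #define pr_fmt(fmt) "tipc: " fmt
    /* illustrative stand-in for the kernel's pr_warn() */
    #define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            /* prints "tipc: Bearer <eth0> rejected, ..." */
            pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
                    "eth0", 2u);
            return 0;
    }

The include-order subtlety this creates is sketched after the core.c hunks
below.
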
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e3b2be37fb31..dd4c2abf08e7 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -57,7 +57,7 @@
57 */ 57 */
58#define TIPC_MEDIA_TYPE_ETH 1 58#define TIPC_MEDIA_TYPE_ETH 1
59 59
60/* 60/**
61 * struct tipc_media_addr - destination address used by TIPC bearers 61 * struct tipc_media_addr - destination address used by TIPC bearers
62 * @value: address info (format defined by media) 62 * @value: address info (format defined by media)
63 * @media_id: TIPC media type identifier 63 * @media_id: TIPC media type identifier
@@ -179,7 +179,7 @@ void tipc_eth_media_stop(void);
179 179
180int tipc_media_set_priority(const char *name, u32 new_value); 180int tipc_media_set_priority(const char *name, u32 new_value);
181int tipc_media_set_window(const char *name, u32 new_value); 181int tipc_media_set_window(const char *name, u32 new_value);
182void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 182void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
183struct sk_buff *tipc_media_get_names(void); 183struct sk_buff *tipc_media_get_names(void);
184 184
185struct sk_buff *tipc_bearer_get_names(void); 185struct sk_buff *tipc_bearer_get_names(void);
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c5712a343810..a056a3852f71 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -39,6 +39,8 @@
39#include "name_table.h" 39#include "name_table.h"
40#include "config.h" 40#include "config.h"
41 41
42#define REPLY_TRUNCATED "<truncated>\n"
43
42static u32 config_port_ref; 44static u32 config_port_ref;
43 45
44static DEFINE_SPINLOCK(config_lock); 46static DEFINE_SPINLOCK(config_lock);
@@ -104,13 +106,12 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
104 return buf; 106 return buf;
105} 107}
106 108
107#define MAX_STATS_INFO 2000
108
109static struct sk_buff *tipc_show_stats(void) 109static struct sk_buff *tipc_show_stats(void)
110{ 110{
111 struct sk_buff *buf; 111 struct sk_buff *buf;
112 struct tlv_desc *rep_tlv; 112 struct tlv_desc *rep_tlv;
113 struct print_buf pb; 113 char *pb;
114 int pb_len;
114 int str_len; 115 int str_len;
115 u32 value; 116 u32 value;
116 117
@@ -121,17 +122,16 @@ static struct sk_buff *tipc_show_stats(void)
121 if (value != 0) 122 if (value != 0)
122 return tipc_cfg_reply_error_string("unsupported argument"); 123 return tipc_cfg_reply_error_string("unsupported argument");
123 124
124 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO)); 125 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
125 if (buf == NULL) 126 if (buf == NULL)
126 return NULL; 127 return NULL;
127 128
128 rep_tlv = (struct tlv_desc *)buf->data; 129 rep_tlv = (struct tlv_desc *)buf->data;
129 tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO); 130 pb = TLV_DATA(rep_tlv);
130 131 pb_len = ULTRA_STRING_MAX_LEN;
131 tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
132 132
133 /* Use additional tipc_printf()'s to return more info ... */ 133 str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
134 str_len = tipc_printbuf_validate(&pb); 134 str_len += 1; /* for "\0" */
135 skb_put(buf, TLV_SPACE(str_len)); 135 skb_put(buf, TLV_SPACE(str_len));
136 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 136 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
137 137
@@ -334,12 +334,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
334 case TIPC_CMD_SHOW_PORTS: 334 case TIPC_CMD_SHOW_PORTS:
335 rep_tlv_buf = tipc_port_get_ports(); 335 rep_tlv_buf = tipc_port_get_ports();
336 break; 336 break;
337 case TIPC_CMD_SET_LOG_SIZE:
338 rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
339 break;
340 case TIPC_CMD_DUMP_LOG:
341 rep_tlv_buf = tipc_log_dump();
342 break;
343 case TIPC_CMD_SHOW_STATS: 337 case TIPC_CMD_SHOW_STATS:
344 rep_tlv_buf = tipc_show_stats(); 338 rep_tlv_buf = tipc_show_stats();
345 break; 339 break;
@@ -399,6 +393,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
399 case TIPC_CMD_GET_MAX_CLUSTERS: 393 case TIPC_CMD_GET_MAX_CLUSTERS:
400 case TIPC_CMD_SET_MAX_NODES: 394 case TIPC_CMD_SET_MAX_NODES:
401 case TIPC_CMD_GET_MAX_NODES: 395 case TIPC_CMD_GET_MAX_NODES:
396 case TIPC_CMD_SET_LOG_SIZE:
397 case TIPC_CMD_DUMP_LOG:
402 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 398 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
403 " (obsolete command)"); 399 " (obsolete command)");
404 break; 400 break;
@@ -408,6 +404,15 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
408 break; 404 break;
409 } 405 }
410 406
407 WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
408
409 /* Append an error message if we cannot return all requested data */
410 if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
411 if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
412 sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
413 sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
414 }
415
411 /* Return reply buffer */ 416 /* Return reply buffer */
412exit: 417exit:
413 spin_unlock_bh(&config_lock); 418 spin_unlock_bh(&config_lock);
@@ -432,7 +437,7 @@ static void cfg_named_msg_event(void *userdata,
432 if ((size < sizeof(*req_hdr)) || 437 if ((size < sizeof(*req_hdr)) ||
433 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || 438 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
434 (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) { 439 (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
435 warn("Invalid configuration message discarded\n"); 440 pr_warn("Invalid configuration message discarded\n");
436 return; 441 return;
437 } 442 }
438 443
@@ -478,7 +483,7 @@ int tipc_cfg_init(void)
478 return 0; 483 return 0;
479 484
480failed: 485failed:
481 err("Unable to create configuration service\n"); 486 pr_err("Unable to create configuration service\n");
482 return res; 487 return res;
483} 488}
484 489
@@ -494,7 +499,7 @@ void tipc_cfg_reinit(void)
494 seq.lower = seq.upper = tipc_own_addr; 499 seq.lower = seq.upper = tipc_own_addr;
495 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq); 500 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
496 if (res) 501 if (res)
497 err("Unable to reinitialize configuration service\n"); 502 pr_err("Unable to reinitialize configuration service\n");
498} 503}
499 504
500void tipc_cfg_stop(void) 505void tipc_cfg_stop(void)
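
The config.c changes above do two things: replies are now formatted into a
TLV sized at ULTRA_STRING_MAX_LEN via tipc_snprintf(), and
tipc_cfg_do_cmd() stamps the REPLY_TRUNCATED marker over the tail of any
reply that filled its TLV without a terminating NUL, so userspace can tell
the output was cut short. A userspace sketch of that tail-stamping logic,
with the buffer size and offsets simplified relative to the real TLV
layout:

    #include <stdio.h>
    #include <string.h>

    #define MAX_LEN 32                 /* stand-in for ULTRA_STRING_MAX_LEN */
    static const char trunc_msg[] = "<truncated>\n";

    int main(void)
    {
            char reply[MAX_LEN];

            /* simulate a reply that used every byte of the buffer */
            memset(reply, 'x', MAX_LEN);

            /* a full buffer whose last byte is not NUL means the text was
             * cut off; overwrite the tail with the marker (sizeof includes
             * the terminating NUL) */
            if (reply[MAX_LEN - 1] != '\0')
                    memcpy(reply + MAX_LEN - sizeof(trunc_msg), trunc_msg,
                           sizeof(trunc_msg));

            printf("%s", reply);       /* "xxx...xxx<truncated>" */
            return 0;
    }
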
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f7b95239ebda..6586eac6a50e 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,22 +34,18 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <linux/module.h>
38
39#include "core.h" 37#include "core.h"
40#include "ref.h" 38#include "ref.h"
41#include "name_table.h" 39#include "name_table.h"
42#include "subscr.h" 40#include "subscr.h"
43#include "config.h" 41#include "config.h"
44 42
43#include <linux/module.h>
45 44
46#ifndef CONFIG_TIPC_PORTS 45#ifndef CONFIG_TIPC_PORTS
47#define CONFIG_TIPC_PORTS 8191 46#define CONFIG_TIPC_PORTS 8191
48#endif 47#endif
49 48
50#ifndef CONFIG_TIPC_LOG
51#define CONFIG_TIPC_LOG 0
52#endif
53 49
54/* global variables used by multiple sub-systems within TIPC */ 50/* global variables used by multiple sub-systems within TIPC */
55int tipc_random; 51int tipc_random;
@@ -125,7 +121,6 @@ static void tipc_core_stop(void)
125 tipc_nametbl_stop(); 121 tipc_nametbl_stop();
126 tipc_ref_table_stop(); 122 tipc_ref_table_stop();
127 tipc_socket_stop(); 123 tipc_socket_stop();
128 tipc_log_resize(0);
129} 124}
130 125
131/** 126/**
@@ -161,10 +156,7 @@ static int __init tipc_init(void)
161{ 156{
162 int res; 157 int res;
163 158
164 if (tipc_log_resize(CONFIG_TIPC_LOG) != 0) 159 pr_info("Activated (version " TIPC_MOD_VER ")\n");
165 warn("Unable to create log buffer\n");
166
167 info("Activated (version " TIPC_MOD_VER ")\n");
168 160
169 tipc_own_addr = 0; 161 tipc_own_addr = 0;
170 tipc_remote_management = 1; 162 tipc_remote_management = 1;
@@ -175,9 +167,9 @@ static int __init tipc_init(void)
175 167
176 res = tipc_core_start(); 168 res = tipc_core_start();
177 if (res) 169 if (res)
178 err("Unable to start in single node mode\n"); 170 pr_err("Unable to start in single node mode\n");
179 else 171 else
180 info("Started in single node mode\n"); 172 pr_info("Started in single node mode\n");
181 return res; 173 return res;
182} 174}
183 175
@@ -185,7 +177,7 @@ static void __exit tipc_exit(void)
185{ 177{
186 tipc_core_stop_net(); 178 tipc_core_stop_net();
187 tipc_core_stop(); 179 tipc_core_stop();
188 info("Deactivated\n"); 180 pr_info("Deactivated\n");
189} 181}
190 182
191module_init(tipc_init); 183module_init(tipc_init);
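
The include reorder in core.c above is not cosmetic: <linux/printk.h>
(pulled in indirectly via <linux/module.h>) defines a no-op pr_fmt()
fallback unless one already exists, so core.h's pr_fmt() must be seen
first for the "tipc: " prefix to take effect. A userspace sketch of that
guard pattern, not the actual kernel headers:

    #include <stdio.h>

    #define pr_fmt(fmt) "tipc: " fmt   /* must come first, as in core.h */

    /* illustrative stand-in for the printk.h fallback */
    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt            /* skipped: pr_fmt already defined */
    #endif
    #define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_info("Activated (version %s)\n", "2.0.0"); /* "tipc: ..." */
            return 0;
    }
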
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 2a9bb99537b3..fd42e106c185 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,6 +37,8 @@
37#ifndef _TIPC_CORE_H 37#ifndef _TIPC_CORE_H
38#define _TIPC_CORE_H 38#define _TIPC_CORE_H
39 39
40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41
40#include <linux/tipc.h> 42#include <linux/tipc.h>
41#include <linux/tipc_config.h> 43#include <linux/tipc_config.h>
42#include <linux/types.h> 44#include <linux/types.h>
@@ -58,68 +60,11 @@
58 60
59#define TIPC_MOD_VER "2.0.0" 61#define TIPC_MOD_VER "2.0.0"
60 62
61struct tipc_msg; /* msg.h */ 63#define ULTRA_STRING_MAX_LEN 32768
62struct print_buf; /* log.h */
63
64/*
65 * TIPC system monitoring code
66 */
67
68/*
69 * TIPC's print buffer subsystem supports the following print buffers:
70 *
71 * TIPC_NULL : null buffer (i.e. print nowhere)
72 * TIPC_CONS : system console
73 * TIPC_LOG : TIPC log buffer
74 * &buf : user-defined buffer (struct print_buf *)
75 *
76 * Note: TIPC_LOG is configured to echo its output to the system console;
77 * user-defined buffers can be configured to do the same thing.
78 */
79extern struct print_buf *const TIPC_NULL;
80extern struct print_buf *const TIPC_CONS;
81extern struct print_buf *const TIPC_LOG;
82
83void tipc_printf(struct print_buf *, const char *fmt, ...);
84
85/*
86 * TIPC_OUTPUT is the destination print buffer for system messages.
87 */
88#ifndef TIPC_OUTPUT
89#define TIPC_OUTPUT TIPC_LOG
90#endif
91 64
92#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \ 65struct tipc_msg; /* msg.h */
93 KERN_ERR "TIPC: " fmt, ## arg)
94#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
95 KERN_WARNING "TIPC: " fmt, ## arg)
96#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
97 KERN_NOTICE "TIPC: " fmt, ## arg)
98
99#ifdef CONFIG_TIPC_DEBUG
100
101/*
102 * DBG_OUTPUT is the destination print buffer for debug messages.
103 */
104#ifndef DBG_OUTPUT
105#define DBG_OUTPUT TIPC_LOG
106#endif
107
108#define dbg(fmt, arg...) tipc_printf(DBG_OUTPUT, KERN_DEBUG fmt, ## arg);
109
110#define msg_dbg(msg, txt) tipc_msg_dbg(DBG_OUTPUT, msg, txt);
111
112void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
113
114#else
115
116#define dbg(fmt, arg...) do {} while (0)
117#define msg_dbg(msg, txt) do {} while (0)
118
119#define tipc_msg_dbg(buf, msg, txt) do {} while (0)
120
121#endif
122 66
67int tipc_snprintf(char *buf, int len, const char *fmt, ...);
123 68
124/* 69/*
125 * TIPC-specific error codes 70 * TIPC-specific error codes
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ae054cfe179f..50eaa403eb6e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -100,14 +100,12 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
100{ 100{
101 char node_addr_str[16]; 101 char node_addr_str[16];
102 char media_addr_str[64]; 102 char media_addr_str[64];
103 struct print_buf pb;
104 103
105 tipc_addr_string_fill(node_addr_str, node_addr); 104 tipc_addr_string_fill(node_addr_str, node_addr);
106 tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str)); 105 tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
107 tipc_media_addr_printf(&pb, media_addr); 106 media_addr);
108 tipc_printbuf_validate(&pb); 107 pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
109 warn("Duplicate %s using %s seen on <%s>\n", 108 media_addr_str, b_ptr->name);
110 node_addr_str, media_addr_str, b_ptr->name);
111} 109}
112 110
113/** 111/**
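
With struct print_buf gone, disc_dupl_alert() above shows the replacement
pattern: helpers fill caller-supplied character arrays directly, and a
single pr_warn() call consumes them. A self-contained userspace sketch of
the same shape; addr2str() is an invented stand-in for
tipc_addr_string_fill(), and the address split mirrors TIPC's
zone.cluster.node layout:

    #include <stdio.h>

    static void addr2str(char *buf, int len, unsigned int addr)
    {
            snprintf(buf, len, "<%u.%u.%u>", (addr >> 24) & 0xff,
                     (addr >> 12) & 0xfff, addr & 0xfff);
    }

    int main(void)
    {
            char node_addr_str[16];

            addr2str(node_addr_str, sizeof(node_addr_str),
                     (1u << 24) | (1u << 12) | 5);    /* node <1.1.5> */
            fprintf(stderr, "Duplicate %s seen on <%s>\n",
                    node_addr_str, "eth0");
            return 0;
    }
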
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 9c6f22ff1c6d..7a52d3922f3c 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -57,14 +57,14 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
57 struct queue_item *item; 57 struct queue_item *item;
58 58
59 if (!handler_enabled) { 59 if (!handler_enabled) {
60 err("Signal request ignored by handler\n"); 60 pr_err("Signal request ignored by handler\n");
61 return -ENOPROTOOPT; 61 return -ENOPROTOOPT;
62 } 62 }
63 63
64 spin_lock_bh(&qitem_lock); 64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); 65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) { 66 if (!item) {
67 err("Signal queue out of memory\n"); 67 pr_err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock); 68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM; 69 return -ENOMEM;
70 } 70 }
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7a614f43549d..1c1e6151875e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -41,6 +41,12 @@
41#include "discover.h" 41#include "discover.h"
42#include "config.h" 42#include "config.h"
43 43
44/*
45 * Error message prefixes
46 */
47static const char *link_co_err = "Link changeover error, ";
48static const char *link_rst_msg = "Resetting link ";
49static const char *link_unk_evt = "Unknown link event ";
44 50
45/* 51/*
46 * Out-of-range value for link session numbers 52 * Out-of-range value for link session numbers
@@ -153,8 +159,8 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
153 159
154/** 160/**
155 * link_name_validate - validate & (optionally) deconstruct tipc_link name 161 * link_name_validate - validate & (optionally) deconstruct tipc_link name
156 * @name - ptr to link name string 162 * @name: ptr to link name string
157 * @name_parts - ptr to area for link name components (or NULL if not needed) 163 * @name_parts: ptr to area for link name components (or NULL if not needed)
158 * 164 *
159 * Returns 1 if link name is valid, otherwise 0. 165 * Returns 1 if link name is valid, otherwise 0.
160 */ 166 */
@@ -300,20 +306,20 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
300 306
301 if (n_ptr->link_cnt >= 2) { 307 if (n_ptr->link_cnt >= 2) {
302 tipc_addr_string_fill(addr_string, n_ptr->addr); 308 tipc_addr_string_fill(addr_string, n_ptr->addr);
303 err("Attempt to establish third link to %s\n", addr_string); 309 pr_err("Attempt to establish third link to %s\n", addr_string);
304 return NULL; 310 return NULL;
305 } 311 }
306 312
307 if (n_ptr->links[b_ptr->identity]) { 313 if (n_ptr->links[b_ptr->identity]) {
308 tipc_addr_string_fill(addr_string, n_ptr->addr); 314 tipc_addr_string_fill(addr_string, n_ptr->addr);
309 err("Attempt to establish second link on <%s> to %s\n", 315 pr_err("Attempt to establish second link on <%s> to %s\n",
310 b_ptr->name, addr_string); 316 b_ptr->name, addr_string);
311 return NULL; 317 return NULL;
312 } 318 }
313 319
314 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC); 320 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
315 if (!l_ptr) { 321 if (!l_ptr) {
316 warn("Link creation failed, no memory\n"); 322 pr_warn("Link creation failed, no memory\n");
317 return NULL; 323 return NULL;
318 } 324 }
319 325
@@ -371,7 +377,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
371void tipc_link_delete(struct tipc_link *l_ptr) 377void tipc_link_delete(struct tipc_link *l_ptr)
372{ 378{
373 if (!l_ptr) { 379 if (!l_ptr) {
374 err("Attempt to delete non-existent link\n"); 380 pr_err("Attempt to delete non-existent link\n");
375 return; 381 return;
376 } 382 }
377 383
@@ -632,8 +638,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
632 link_set_timer(l_ptr, cont_intv / 4); 638 link_set_timer(l_ptr, cont_intv / 4);
633 break; 639 break;
634 case RESET_MSG: 640 case RESET_MSG:
635 info("Resetting link <%s>, requested by peer\n", 641 pr_info("%s<%s>, requested by peer\n", link_rst_msg,
636 l_ptr->name); 642 l_ptr->name);
637 tipc_link_reset(l_ptr); 643 tipc_link_reset(l_ptr);
638 l_ptr->state = RESET_RESET; 644 l_ptr->state = RESET_RESET;
639 l_ptr->fsm_msg_cnt = 0; 645 l_ptr->fsm_msg_cnt = 0;
@@ -642,7 +648,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
642 link_set_timer(l_ptr, cont_intv); 648 link_set_timer(l_ptr, cont_intv);
643 break; 649 break;
644 default: 650 default:
645 err("Unknown link event %u in WW state\n", event); 651 pr_err("%s%u in WW state\n", link_unk_evt, event);
646 } 652 }
647 break; 653 break;
648 case WORKING_UNKNOWN: 654 case WORKING_UNKNOWN:
@@ -654,8 +660,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
654 link_set_timer(l_ptr, cont_intv); 660 link_set_timer(l_ptr, cont_intv);
655 break; 661 break;
656 case RESET_MSG: 662 case RESET_MSG:
657 info("Resetting link <%s>, requested by peer " 663 pr_info("%s<%s>, requested by peer while probing\n",
658 "while probing\n", l_ptr->name); 664 link_rst_msg, l_ptr->name);
659 tipc_link_reset(l_ptr); 665 tipc_link_reset(l_ptr);
660 l_ptr->state = RESET_RESET; 666 l_ptr->state = RESET_RESET;
661 l_ptr->fsm_msg_cnt = 0; 667 l_ptr->fsm_msg_cnt = 0;
@@ -680,8 +686,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
680 l_ptr->fsm_msg_cnt++; 686 l_ptr->fsm_msg_cnt++;
681 link_set_timer(l_ptr, cont_intv / 4); 687 link_set_timer(l_ptr, cont_intv / 4);
682 } else { /* Link has failed */ 688 } else { /* Link has failed */
683 warn("Resetting link <%s>, peer not responding\n", 689 pr_warn("%s<%s>, peer not responding\n",
684 l_ptr->name); 690 link_rst_msg, l_ptr->name);
685 tipc_link_reset(l_ptr); 691 tipc_link_reset(l_ptr);
686 l_ptr->state = RESET_UNKNOWN; 692 l_ptr->state = RESET_UNKNOWN;
687 l_ptr->fsm_msg_cnt = 0; 693 l_ptr->fsm_msg_cnt = 0;
@@ -692,7 +698,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
692 } 698 }
693 break; 699 break;
694 default: 700 default:
695 err("Unknown link event %u in WU state\n", event); 701 pr_err("%s%u in WU state\n", link_unk_evt, event);
696 } 702 }
697 break; 703 break;
698 case RESET_UNKNOWN: 704 case RESET_UNKNOWN:
@@ -726,7 +732,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
726 link_set_timer(l_ptr, cont_intv); 732 link_set_timer(l_ptr, cont_intv);
727 break; 733 break;
728 default: 734 default:
729 err("Unknown link event %u in RU state\n", event); 735 pr_err("%s%u in RU state\n", link_unk_evt, event);
730 } 736 }
731 break; 737 break;
732 case RESET_RESET: 738 case RESET_RESET:
@@ -751,11 +757,11 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
751 link_set_timer(l_ptr, cont_intv); 757 link_set_timer(l_ptr, cont_intv);
752 break; 758 break;
753 default: 759 default:
754 err("Unknown link event %u in RR state\n", event); 760 pr_err("%s%u in RR state\n", link_unk_evt, event);
755 } 761 }
756 break; 762 break;
757 default: 763 default:
758 err("Unknown link state %u/%u\n", l_ptr->state, event); 764 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
759 } 765 }
760} 766}
761 767
@@ -856,7 +862,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
856 } 862 }
857 kfree_skb(buf); 863 kfree_skb(buf);
858 if (imp > CONN_MANAGER) { 864 if (imp > CONN_MANAGER) {
859 warn("Resetting link <%s>, send queue full", l_ptr->name); 865 pr_warn("%s<%s>, send queue full", link_rst_msg,
866 l_ptr->name);
860 tipc_link_reset(l_ptr); 867 tipc_link_reset(l_ptr);
861 } 868 }
862 return dsz; 869 return dsz;
@@ -944,7 +951,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
944 return res; 951 return res;
945} 952}
946 953
947/* 954/**
948 * tipc_link_send_names - send name table entries to new neighbor 955 * tipc_link_send_names - send name table entries to new neighbor
949 * 956 *
950 * Send routine for bulk delivery of name table messages when contact 957 * Send routine for bulk delivery of name table messages when contact
@@ -1409,8 +1416,8 @@ static void link_reset_all(unsigned long addr)
1409 1416
1410 tipc_node_lock(n_ptr); 1417 tipc_node_lock(n_ptr);
1411 1418
1412 warn("Resetting all links to %s\n", 1419 pr_warn("Resetting all links to %s\n",
1413 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1420 tipc_addr_string_fill(addr_string, n_ptr->addr));
1414 1421
1415 for (i = 0; i < MAX_BEARERS; i++) { 1422 for (i = 0; i < MAX_BEARERS; i++) {
1416 if (n_ptr->links[i]) { 1423 if (n_ptr->links[i]) {
@@ -1428,7 +1435,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1428{ 1435{
1429 struct tipc_msg *msg = buf_msg(buf); 1436 struct tipc_msg *msg = buf_msg(buf);
1430 1437
1431 warn("Retransmission failure on link <%s>\n", l_ptr->name); 1438 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
1432 1439
1433 if (l_ptr->addr) { 1440 if (l_ptr->addr) {
1434 /* Handle failure on standard link */ 1441 /* Handle failure on standard link */
@@ -1440,21 +1447,23 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1440 struct tipc_node *n_ptr; 1447 struct tipc_node *n_ptr;
1441 char addr_string[16]; 1448 char addr_string[16];
1442 1449
1443 info("Msg seq number: %u, ", msg_seqno(msg)); 1450 pr_info("Msg seq number: %u, ", msg_seqno(msg));
1444 info("Outstanding acks: %lu\n", 1451 pr_cont("Outstanding acks: %lu\n",
1445 (unsigned long) TIPC_SKB_CB(buf)->handle); 1452 (unsigned long) TIPC_SKB_CB(buf)->handle);
1446 1453
1447 n_ptr = tipc_bclink_retransmit_to(); 1454 n_ptr = tipc_bclink_retransmit_to();
1448 tipc_node_lock(n_ptr); 1455 tipc_node_lock(n_ptr);
1449 1456
1450 tipc_addr_string_fill(addr_string, n_ptr->addr); 1457 tipc_addr_string_fill(addr_string, n_ptr->addr);
1451 info("Broadcast link info for %s\n", addr_string); 1458 pr_info("Broadcast link info for %s\n", addr_string);
1452 info("Supportable: %d, ", n_ptr->bclink.supportable); 1459 pr_info("Supportable: %d, Supported: %d, Acked: %u\n",
1453 info("Supported: %d, ", n_ptr->bclink.supported); 1460 n_ptr->bclink.supportable,
1454 info("Acked: %u\n", n_ptr->bclink.acked); 1461 n_ptr->bclink.supported,
1455 info("Last in: %u, ", n_ptr->bclink.last_in); 1462 n_ptr->bclink.acked);
1456 info("Oos state: %u, ", n_ptr->bclink.oos_state); 1463 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
1457 info("Last sent: %u\n", n_ptr->bclink.last_sent); 1464 n_ptr->bclink.last_in,
1465 n_ptr->bclink.oos_state,
1466 n_ptr->bclink.last_sent);
1458 1467
1459 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr); 1468 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1460 1469
@@ -1479,8 +1488,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1479 l_ptr->retransm_queue_head = msg_seqno(msg); 1488 l_ptr->retransm_queue_head = msg_seqno(msg);
1480 l_ptr->retransm_queue_size = retransmits; 1489 l_ptr->retransm_queue_size = retransmits;
1481 } else { 1490 } else {
1482 err("Unexpected retransmit on link %s (qsize=%d)\n", 1491 pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
1483 l_ptr->name, l_ptr->retransm_queue_size); 1492 l_ptr->name, l_ptr->retransm_queue_size);
1484 } 1493 }
1485 return; 1494 return;
1486 } else { 1495 } else {
@@ -1787,7 +1796,7 @@ cont:
1787 read_unlock_bh(&tipc_net_lock); 1796 read_unlock_bh(&tipc_net_lock);
1788} 1797}
1789 1798
1790/* 1799/**
1791 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue 1800 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1792 * 1801 *
1793 * Returns increase in queue length (i.e. 0 or 1) 1802 * Returns increase in queue length (i.e. 0 or 1)
@@ -2074,8 +2083,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2074 2083
2075 if (msg_linkprio(msg) && 2084 if (msg_linkprio(msg) &&
2076 (msg_linkprio(msg) != l_ptr->priority)) { 2085 (msg_linkprio(msg) != l_ptr->priority)) {
2077 warn("Resetting link <%s>, priority change %u->%u\n", 2086 pr_warn("%s<%s>, priority change %u->%u\n",
2078 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2087 link_rst_msg, l_ptr->name, l_ptr->priority,
2088 msg_linkprio(msg));
2079 l_ptr->priority = msg_linkprio(msg); 2089 l_ptr->priority = msg_linkprio(msg);
2080 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 2090 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2081 break; 2091 break;
@@ -2139,15 +2149,13 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
2139 2149
2140 tunnel = l_ptr->owner->active_links[selector & 1]; 2150 tunnel = l_ptr->owner->active_links[selector & 1];
2141 if (!tipc_link_is_up(tunnel)) { 2151 if (!tipc_link_is_up(tunnel)) {
2142 warn("Link changeover error, " 2152 pr_warn("%stunnel link no longer available\n", link_co_err);
2143 "tunnel link no longer available\n");
2144 return; 2153 return;
2145 } 2154 }
2146 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2155 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2147 buf = tipc_buf_acquire(length + INT_H_SIZE); 2156 buf = tipc_buf_acquire(length + INT_H_SIZE);
2148 if (!buf) { 2157 if (!buf) {
2149 warn("Link changeover error, " 2158 pr_warn("%sunable to send tunnel msg\n", link_co_err);
2150 "unable to send tunnel msg\n");
2151 return; 2159 return;
2152 } 2160 }
2153 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); 2161 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
@@ -2173,8 +2181,7 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
2173 return; 2181 return;
2174 2182
2175 if (!l_ptr->owner->permit_changeover) { 2183 if (!l_ptr->owner->permit_changeover) {
2176 warn("Link changeover error, " 2184 pr_warn("%speer did not permit changeover\n", link_co_err);
2177 "peer did not permit changeover\n");
2178 return; 2185 return;
2179 } 2186 }
2180 2187
@@ -2192,8 +2199,8 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
2192 msg_set_size(&tunnel_hdr, INT_H_SIZE); 2199 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2193 tipc_link_send_buf(tunnel, buf); 2200 tipc_link_send_buf(tunnel, buf);
2194 } else { 2201 } else {
2195 warn("Link changeover error, " 2202 pr_warn("%sunable to send changeover msg\n",
2196 "unable to send changeover msg\n"); 2203 link_co_err);
2197 } 2204 }
2198 return; 2205 return;
2199 } 2206 }
@@ -2246,8 +2253,8 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2246 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2253 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2247 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 2254 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2248 if (outbuf == NULL) { 2255 if (outbuf == NULL) {
2249 warn("Link changeover error, " 2256 pr_warn("%sunable to send duplicate msg\n",
2250 "unable to send duplicate msg\n"); 2257 link_co_err);
2251 return; 2258 return;
2252 } 2259 }
2253 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 2260 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
@@ -2298,8 +2305,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2298 if (!dest_link) 2305 if (!dest_link)
2299 goto exit; 2306 goto exit;
2300 if (dest_link == *l_ptr) { 2307 if (dest_link == *l_ptr) {
2301 err("Unexpected changeover message on link <%s>\n", 2308 pr_err("Unexpected changeover message on link <%s>\n",
2302 (*l_ptr)->name); 2309 (*l_ptr)->name);
2303 goto exit; 2310 goto exit;
2304 } 2311 }
2305 *l_ptr = dest_link; 2312 *l_ptr = dest_link;
@@ -2310,7 +2317,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2310 goto exit; 2317 goto exit;
2311 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2318 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2312 if (*buf == NULL) { 2319 if (*buf == NULL) {
2313 warn("Link changeover error, duplicate msg dropped\n"); 2320 pr_warn("%sduplicate msg dropped\n", link_co_err);
2314 goto exit; 2321 goto exit;
2315 } 2322 }
2316 kfree_skb(tunnel_buf); 2323 kfree_skb(tunnel_buf);
@@ -2319,8 +2326,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2319 2326
2320 /* First original message ?: */ 2327 /* First original message ?: */
2321 if (tipc_link_is_up(dest_link)) { 2328 if (tipc_link_is_up(dest_link)) {
2322 info("Resetting link <%s>, changeover initiated by peer\n", 2329 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
2323 dest_link->name); 2330 dest_link->name);
2324 tipc_link_reset(dest_link); 2331 tipc_link_reset(dest_link);
2325 dest_link->exp_msg_count = msg_count; 2332 dest_link->exp_msg_count = msg_count;
2326 if (!msg_count) 2333 if (!msg_count)
@@ -2333,8 +2340,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2333 2340
2334 /* Receive original message */ 2341 /* Receive original message */
2335 if (dest_link->exp_msg_count == 0) { 2342 if (dest_link->exp_msg_count == 0) {
2336 warn("Link switchover error, " 2343 pr_warn("%sgot too many tunnelled messages\n", link_co_err);
2337 "got too many tunnelled messages\n");
2338 goto exit; 2344 goto exit;
2339 } 2345 }
2340 dest_link->exp_msg_count--; 2346 dest_link->exp_msg_count--;
@@ -2346,7 +2352,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2346 kfree_skb(tunnel_buf); 2352 kfree_skb(tunnel_buf);
2347 return 1; 2353 return 1;
2348 } else { 2354 } else {
2349 warn("Link changeover error, original msg dropped\n"); 2355 pr_warn("%soriginal msg dropped\n", link_co_err);
2350 } 2356 }
2351 } 2357 }
2352exit: 2358exit:
@@ -2367,7 +2373,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2367 while (msgcount--) { 2373 while (msgcount--) {
2368 obuf = buf_extract(buf, pos); 2374 obuf = buf_extract(buf, pos);
2369 if (obuf == NULL) { 2375 if (obuf == NULL) {
2370 warn("Link unable to unbundle message(s)\n"); 2376 pr_warn("Link unable to unbundle message(s)\n");
2371 break; 2377 break;
2372 } 2378 }
2373 pos += align(msg_size(buf_msg(obuf))); 2379 pos += align(msg_size(buf_msg(obuf)));
@@ -2538,7 +2544,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2538 set_fragm_size(pbuf, fragm_sz); 2544 set_fragm_size(pbuf, fragm_sz);
2539 set_expected_frags(pbuf, exp_fragm_cnt - 1); 2545 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2540 } else { 2546 } else {
2541 dbg("Link unable to reassemble fragmented message\n"); 2547 pr_debug("Link unable to reassemble fragmented message\n");
2542 kfree_skb(fbuf); 2548 kfree_skb(fbuf);
2543 return -1; 2549 return -1;
2544 } 2550 }
@@ -2635,8 +2641,8 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2635 2641
2636/** 2642/**
2637 * link_find_link - locate link by name 2643 * link_find_link - locate link by name
2638 * @name - ptr to link name string 2644 * @name: ptr to link name string
2639 * @node - ptr to area to be filled with ptr to associated node 2645 * @node: ptr to area to be filled with ptr to associated node
2640 * 2646 *
2641 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2647 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2642 * this also prevents link deletion. 2648 * this also prevents link deletion.
@@ -2671,8 +2677,8 @@ static struct tipc_link *link_find_link(const char *name,
2671/** 2677/**
2672 * link_value_is_valid -- validate proposed link tolerance/priority/window 2678 * link_value_is_valid -- validate proposed link tolerance/priority/window
2673 * 2679 *
2674 * @cmd - value type (TIPC_CMD_SET_LINK_*) 2680 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2675 * @new_value - the new value 2681 * @new_value: the new value
2676 * 2682 *
2677 * Returns 1 if value is within range, 0 if not. 2683 * Returns 1 if value is within range, 0 if not.
2678 */ 2684 */
@@ -2693,9 +2699,9 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2693 2699
2694/** 2700/**
2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2701 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2696 * @name - ptr to link, bearer, or media name 2702 * @name: ptr to link, bearer, or media name
2697 * @new_value - new value of link, bearer, or media setting 2703 * @new_value: new value of link, bearer, or media setting
2698 * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2704 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2699 * 2705 *
2700 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2706 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2701 * 2707 *
@@ -2860,112 +2866,114 @@ static u32 percent(u32 count, u32 total)
2860 */ 2866 */
2861static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2867static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2862{ 2868{
2863 struct print_buf pb; 2869 struct tipc_link *l;
2864 struct tipc_link *l_ptr; 2870 struct tipc_stats *s;
2865 struct tipc_node *node; 2871 struct tipc_node *node;
2866 char *status; 2872 char *status;
2867 u32 profile_total = 0; 2873 u32 profile_total = 0;
2874 int ret;
2868 2875
2869 if (!strcmp(name, tipc_bclink_name)) 2876 if (!strcmp(name, tipc_bclink_name))
2870 return tipc_bclink_stats(buf, buf_size); 2877 return tipc_bclink_stats(buf, buf_size);
2871 2878
2872 tipc_printbuf_init(&pb, buf, buf_size);
2873
2874 read_lock_bh(&tipc_net_lock); 2879 read_lock_bh(&tipc_net_lock);
2875 l_ptr = link_find_link(name, &node); 2880 l = link_find_link(name, &node);
2876 if (!l_ptr) { 2881 if (!l) {
2877 read_unlock_bh(&tipc_net_lock); 2882 read_unlock_bh(&tipc_net_lock);
2878 return 0; 2883 return 0;
2879 } 2884 }
2880 tipc_node_lock(node); 2885 tipc_node_lock(node);
2886 s = &l->stats;
2881 2887
2882 if (tipc_link_is_active(l_ptr)) 2888 if (tipc_link_is_active(l))
2883 status = "ACTIVE"; 2889 status = "ACTIVE";
2884 else if (tipc_link_is_up(l_ptr)) 2890 else if (tipc_link_is_up(l))
2885 status = "STANDBY"; 2891 status = "STANDBY";
2886 else 2892 else
2887 status = "DEFUNCT"; 2893 status = "DEFUNCT";
2888 tipc_printf(&pb, "Link <%s>\n" 2894
2889 " %s MTU:%u Priority:%u Tolerance:%u ms" 2895 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
2890 " Window:%u packets\n", 2896 " %s MTU:%u Priority:%u Tolerance:%u ms"
2891 l_ptr->name, status, l_ptr->max_pkt, 2897 " Window:%u packets\n",
2892 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); 2898 l->name, status, l->max_pkt, l->priority,
2893 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2899 l->tolerance, l->queue_limit[0]);
2894 l_ptr->next_in_no - l_ptr->stats.recv_info, 2900
2895 l_ptr->stats.recv_fragments, 2901 ret += tipc_snprintf(buf + ret, buf_size - ret,
2896 l_ptr->stats.recv_fragmented, 2902 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2897 l_ptr->stats.recv_bundles, 2903 l->next_in_no - s->recv_info, s->recv_fragments,
2898 l_ptr->stats.recv_bundled); 2904 s->recv_fragmented, s->recv_bundles,
2899 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2905 s->recv_bundled);
2900 l_ptr->next_out_no - l_ptr->stats.sent_info, 2906
2901 l_ptr->stats.sent_fragments, 2907 ret += tipc_snprintf(buf + ret, buf_size - ret,
2902 l_ptr->stats.sent_fragmented, 2908 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2903 l_ptr->stats.sent_bundles, 2909 l->next_out_no - s->sent_info, s->sent_fragments,
2904 l_ptr->stats.sent_bundled); 2910 s->sent_fragmented, s->sent_bundles,
2905 profile_total = l_ptr->stats.msg_length_counts; 2911 s->sent_bundled);
2912
2913 profile_total = s->msg_length_counts;
2906 if (!profile_total) 2914 if (!profile_total)
2907 profile_total = 1; 2915 profile_total = 1;
2908 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n" 2916
2909 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2917 ret += tipc_snprintf(buf + ret, buf_size - ret,
2910 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2918 " TX profile sample:%u packets average:%u octets\n"
2911 l_ptr->stats.msg_length_counts, 2919 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2912 l_ptr->stats.msg_lengths_total / profile_total, 2920 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2913 percent(l_ptr->stats.msg_length_profile[0], profile_total), 2921 s->msg_length_counts,
2914 percent(l_ptr->stats.msg_length_profile[1], profile_total), 2922 s->msg_lengths_total / profile_total,
2915 percent(l_ptr->stats.msg_length_profile[2], profile_total), 2923 percent(s->msg_length_profile[0], profile_total),
2916 percent(l_ptr->stats.msg_length_profile[3], profile_total), 2924 percent(s->msg_length_profile[1], profile_total),
2917 percent(l_ptr->stats.msg_length_profile[4], profile_total), 2925 percent(s->msg_length_profile[2], profile_total),
2918 percent(l_ptr->stats.msg_length_profile[5], profile_total), 2926 percent(s->msg_length_profile[3], profile_total),
2919 percent(l_ptr->stats.msg_length_profile[6], profile_total)); 2927 percent(s->msg_length_profile[4], profile_total),
2920 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 2928 percent(s->msg_length_profile[5], profile_total),
2921 l_ptr->stats.recv_states, 2929 percent(s->msg_length_profile[6], profile_total));
2922 l_ptr->stats.recv_probes, 2930
2923 l_ptr->stats.recv_nacks, 2931 ret += tipc_snprintf(buf + ret, buf_size - ret,
2924 l_ptr->stats.deferred_recv, 2932 " RX states:%u probes:%u naks:%u defs:%u"
2925 l_ptr->stats.duplicates); 2933 " dups:%u\n", s->recv_states, s->recv_probes,
2926 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 2934 s->recv_nacks, s->deferred_recv, s->duplicates);
2927 l_ptr->stats.sent_states, 2935
2928 l_ptr->stats.sent_probes, 2936 ret += tipc_snprintf(buf + ret, buf_size - ret,
2929 l_ptr->stats.sent_nacks, 2937 " TX states:%u probes:%u naks:%u acks:%u"
2930 l_ptr->stats.sent_acks, 2938 " dups:%u\n", s->sent_states, s->sent_probes,
2931 l_ptr->stats.retransmitted); 2939 s->sent_nacks, s->sent_acks, s->retransmitted);
2932 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 2940
2933 l_ptr->stats.bearer_congs, 2941 ret += tipc_snprintf(buf + ret, buf_size - ret,
2934 l_ptr->stats.link_congs, 2942 " Congestion bearer:%u link:%u Send queue"
2935 l_ptr->stats.max_queue_sz, 2943 " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
2936 l_ptr->stats.queue_sz_counts 2944 s->max_queue_sz, s->queue_sz_counts ?
2937 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts) 2945 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2938 : 0);
2939 2946
2940 tipc_node_unlock(node); 2947 tipc_node_unlock(node);
2941 read_unlock_bh(&tipc_net_lock); 2948 read_unlock_bh(&tipc_net_lock);
2942 return tipc_printbuf_validate(&pb); 2949 return ret;
2943} 2950}
2944 2951
2945#define MAX_LINK_STATS_INFO 2000
2946
2947struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2952struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2948{ 2953{
2949 struct sk_buff *buf; 2954 struct sk_buff *buf;
2950 struct tlv_desc *rep_tlv; 2955 struct tlv_desc *rep_tlv;
2951 int str_len; 2956 int str_len;
2957 int pb_len;
2958 char *pb;
2952 2959
2953 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2960 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2954 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2961 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2955 2962
2956 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO)); 2963 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2957 if (!buf) 2964 if (!buf)
2958 return NULL; 2965 return NULL;
2959 2966
2960 rep_tlv = (struct tlv_desc *)buf->data; 2967 rep_tlv = (struct tlv_desc *)buf->data;
2961 2968 pb = TLV_DATA(rep_tlv);
2969 pb_len = ULTRA_STRING_MAX_LEN;
2962 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2970 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2963 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 2971 pb, pb_len);
2964 if (!str_len) { 2972 if (!str_len) {
2965 kfree_skb(buf); 2973 kfree_skb(buf);
2966 return tipc_cfg_reply_error_string("link not found"); 2974 return tipc_cfg_reply_error_string("link not found");
2967 } 2975 }
2968 2976 str_len += 1; /* for "\0" */
2969 skb_put(buf, TLV_SPACE(str_len)); 2977 skb_put(buf, TLV_SPACE(str_len));
2970 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2978 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2971 2979
@@ -3003,62 +3011,16 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3003 3011
3004static void link_print(struct tipc_link *l_ptr, const char *str) 3012static void link_print(struct tipc_link *l_ptr, const char *str)
3005{ 3013{
3006 char print_area[256]; 3014 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
3007 struct print_buf pb;
3008 struct print_buf *buf = &pb;
3009
3010 tipc_printbuf_init(buf, print_area, sizeof(print_area));
3011
3012 tipc_printf(buf, str);
3013 tipc_printf(buf, "Link %x<%s>:",
3014 l_ptr->addr, l_ptr->b_ptr->name);
3015
3016#ifdef CONFIG_TIPC_DEBUG
3017 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3018 goto print_state;
3019
3020 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3021 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3022 tipc_printf(buf, "SQUE");
3023 if (l_ptr->first_out) {
3024 tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
3025 if (l_ptr->next_out)
3026 tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
3027 tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
3028 if ((mod(buf_seqno(l_ptr->last_out) -
3029 buf_seqno(l_ptr->first_out))
3030 != (l_ptr->out_queue_size - 1)) ||
3031 (l_ptr->last_out->next != NULL)) {
3032 tipc_printf(buf, "\nSend queue inconsistency\n");
3033 tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3034 tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3035 tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3036 }
3037 } else
3038 tipc_printf(buf, "[]");
3039 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3040 if (l_ptr->oldest_deferred_in) {
3041 u32 o = buf_seqno(l_ptr->oldest_deferred_in);
3042 u32 n = buf_seqno(l_ptr->newest_deferred_in);
3043 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3044 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3045 tipc_printf(buf, ":RQSIZ(%u)",
3046 l_ptr->deferred_inqueue_sz);
3047 }
3048 }
3049print_state:
3050#endif
3051 3015
3052 if (link_working_unknown(l_ptr)) 3016 if (link_working_unknown(l_ptr))
3053 tipc_printf(buf, ":WU"); 3017 pr_cont(":WU\n");
3054 else if (link_reset_reset(l_ptr)) 3018 else if (link_reset_reset(l_ptr))
3055 tipc_printf(buf, ":RR"); 3019 pr_cont(":RR\n");
3056 else if (link_reset_unknown(l_ptr)) 3020 else if (link_reset_unknown(l_ptr))
3057 tipc_printf(buf, ":RU"); 3021 pr_cont(":RU\n");
3058 else if (link_working_working(l_ptr)) 3022 else if (link_working_working(l_ptr))
3059 tipc_printf(buf, ":WW"); 3023 pr_cont(":WW\n");
3060 tipc_printf(buf, "\n"); 3024 else
3061 3025 pr_cont("\n");
3062 tipc_printbuf_validate(buf);
3063 info("%s", print_area);
3064} 3026}
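
The tipc_link_stats() rewrite above relies on one property of
tipc_snprintf() (defined in the log.c hunks further down): it returns the
number of characters actually stored, never the would-be length, so
"ret += tipc_snprintf(buf + ret, buf_size - ret, ...)" always yields a
valid offset even after truncation. A compilable userspace sketch of that
chaining idiom; my_snprintf() is an illustrative stand-in:

    #include <stdarg.h>
    #include <stdio.h>

    static int my_snprintf(char *buf, int len, const char *fmt, ...)
    {
            va_list args;
            int i;

            va_start(args, fmt);
            i = vsnprintf(buf, len, fmt, args);
            va_end(args);
            if (i >= len)      /* clamp on truncation, as vscnprintf() does */
                    i = len > 0 ? len - 1 : 0;
            return i;
    }

    int main(void)
    {
            char buf[128];
            int ret = 0;

            ret += my_snprintf(buf + ret, sizeof(buf) - ret,
                               "Link <%s>\n", "1.1.1:eth0-1.1.2:eth0");
            ret += my_snprintf(buf + ret, sizeof(buf) - ret,
                               "  RX packets:%u fragments:%u/%u\n",
                               42u, 3u, 1u);
            ret += my_snprintf(buf + ret, sizeof(buf) - ret,
                               "  TX packets:%u\n", 40u);
            fputs(buf, stdout);
            return 0;
    }
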
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d6a60a963ce6..6e921121be06 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -37,7 +37,6 @@
37#ifndef _TIPC_LINK_H 37#ifndef _TIPC_LINK_H
38#define _TIPC_LINK_H 38#define _TIPC_LINK_H
39 39
40#include "log.h"
41#include "msg.h" 40#include "msg.h"
42#include "node.h" 41#include "node.h"
43 42
@@ -63,6 +62,37 @@
63 */ 62 */
64#define MAX_PKT_DEFAULT 1500 63#define MAX_PKT_DEFAULT 1500
65 64
65struct tipc_stats {
66 u32 sent_info; /* used in counting # sent packets */
67 u32 recv_info; /* used in counting # recv'd packets */
68 u32 sent_states;
69 u32 recv_states;
70 u32 sent_probes;
71 u32 recv_probes;
72 u32 sent_nacks;
73 u32 recv_nacks;
74 u32 sent_acks;
75 u32 sent_bundled;
76 u32 sent_bundles;
77 u32 recv_bundled;
78 u32 recv_bundles;
79 u32 retransmitted;
80 u32 sent_fragmented;
81 u32 sent_fragments;
82 u32 recv_fragmented;
83 u32 recv_fragments;
84 u32 link_congs; /* # port sends blocked by congestion */
85 u32 bearer_congs;
86 u32 deferred_recv;
87 u32 duplicates;
88 u32 max_queue_sz; /* send queue size high water mark */
89 u32 accu_queue_sz; /* used for send queue size profiling */
90 u32 queue_sz_counts; /* used for send queue size profiling */
91 u32 msg_length_counts; /* used for message length profiling */
92 u32 msg_lengths_total; /* used for message length profiling */
93 u32 msg_length_profile[7]; /* used for msg. length profiling */
94};
95
66/** 96/**
67 * struct tipc_link - TIPC link data structure 97 * struct tipc_link - TIPC link data structure
68 * @addr: network address of link's peer node 98 * @addr: network address of link's peer node
@@ -175,36 +205,7 @@ struct tipc_link {
175 struct sk_buff *defragm_buf; 205 struct sk_buff *defragm_buf;
176 206
177 /* Statistics */ 207 /* Statistics */
178 struct { 208 struct tipc_stats stats;
179 u32 sent_info; /* used in counting # sent packets */
180 u32 recv_info; /* used in counting # recv'd packets */
181 u32 sent_states;
182 u32 recv_states;
183 u32 sent_probes;
184 u32 recv_probes;
185 u32 sent_nacks;
186 u32 recv_nacks;
187 u32 sent_acks;
188 u32 sent_bundled;
189 u32 sent_bundles;
190 u32 recv_bundled;
191 u32 recv_bundles;
192 u32 retransmitted;
193 u32 sent_fragmented;
194 u32 sent_fragments;
195 u32 recv_fragmented;
196 u32 recv_fragments;
197 u32 link_congs; /* # port sends blocked by congestion */
198 u32 bearer_congs;
199 u32 deferred_recv;
200 u32 duplicates;
201 u32 max_queue_sz; /* send queue size high water mark */
202 u32 accu_queue_sz; /* used for send queue size profiling */
203 u32 queue_sz_counts; /* used for send queue size profiling */
204 u32 msg_length_counts; /* used for message length profiling */
205 u32 msg_lengths_total; /* used for message length profiling */
206 u32 msg_length_profile[7]; /* used for msg. length profiling */
207 } stats;
208}; 209};
209 210
210struct tipc_port; 211struct tipc_port;
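
Hoisting the anonymous statistics block out to a named struct tipc_stats
is what lets the rewritten tipc_link_stats() grab "s = &l->stats" once
instead of spelling out l_ptr->stats.<field> for every counter. A trimmed
userspace sketch of that refactor (field set abbreviated):

    #include <stdio.h>

    struct stats_sketch {              /* stand-in for struct tipc_stats */
            unsigned int recv_info;
            unsigned int recv_fragments;
    };

    struct link_sketch {               /* stand-in for struct tipc_link */
            struct stats_sketch stats;
    };

    static void print_rx(const struct stats_sketch *s)
    {
            printf("RX packets:%u fragments:%u\n",
                   s->recv_info, s->recv_fragments);
    }

    int main(void)
    {
            struct link_sketch l = { .stats = { 42, 3 } };

            print_rx(&l.stats);        /* mirrors s = &l->stats above */
            return 0;
    }
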
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 026733f24919..abef644f27d8 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -36,302 +36,20 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "log.h"
40
41/*
42 * TIPC pre-defines the following print buffers:
43 *
44 * TIPC_NULL : null buffer (i.e. print nowhere)
45 * TIPC_CONS : system console
46 * TIPC_LOG : TIPC log buffer
47 *
48 * Additional user-defined print buffers are also permitted.
49 */
50static struct print_buf null_buf = { NULL, 0, NULL, 0 };
51struct print_buf *const TIPC_NULL = &null_buf;
52
53static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
54struct print_buf *const TIPC_CONS = &cons_buf;
55
56static struct print_buf log_buf = { NULL, 0, NULL, 1 };
57struct print_buf *const TIPC_LOG = &log_buf;
58
59/*
60 * Locking policy when using print buffers.
61 *
62 * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
63 * 'print_string' when writing to a print buffer. This also protects against
64 * concurrent writes to the print buffer being written to.
65 *
66 * 2) tipc_log_XXX() leverages the aforementioned use of 'print_lock' to
67 * protect against all types of concurrent operations on their associated
68 * print buffer (not just write operations).
69 *
70 * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
71 * on the caller to prevent simultaneous use of the print buffer(s) being
72 * manipulated.
73 */
74static char print_string[TIPC_PB_MAX_STR];
75static DEFINE_SPINLOCK(print_lock);
76
77static void tipc_printbuf_move(struct print_buf *pb_to,
78 struct print_buf *pb_from);
79
80#define FORMAT(PTR, LEN, FMT) \
81{\
82 va_list args;\
83 va_start(args, FMT);\
84 LEN = vsprintf(PTR, FMT, args);\
85 va_end(args);\
86 *(PTR + LEN) = '\0';\
87}
88
89/**
90 * tipc_printbuf_init - initialize print buffer to empty
91 * @pb: pointer to print buffer structure
92 * @raw: pointer to character array used by print buffer
93 * @size: size of character array
94 *
95 * Note: If the character array is too small (or absent), the print buffer
96 * becomes a null device that discards anything written to it.
97 */
98void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
99{
100 pb->buf = raw;
101 pb->crs = raw;
102 pb->size = size;
103 pb->echo = 0;
104
105 if (size < TIPC_PB_MIN_SIZE) {
106 pb->buf = NULL;
107 } else if (raw) {
108 pb->buf[0] = 0;
109 pb->buf[size - 1] = ~0;
110 }
111}
112
113/**
114 * tipc_printbuf_reset - reinitialize print buffer to empty state
115 * @pb: pointer to print buffer structure
116 */
117static void tipc_printbuf_reset(struct print_buf *pb)
118{
119 if (pb->buf) {
120 pb->crs = pb->buf;
121 pb->buf[0] = 0;
122 pb->buf[pb->size - 1] = ~0;
123 }
124}
125
126/**
127 * tipc_printbuf_empty - test if print buffer is in empty state
128 * @pb: pointer to print buffer structure
129 *
130 * Returns non-zero if print buffer is empty.
131 */
132static int tipc_printbuf_empty(struct print_buf *pb)
133{
134 return !pb->buf || (pb->crs == pb->buf);
135}
136
137/**
138 * tipc_printbuf_validate - check for print buffer overflow
139 * @pb: pointer to print buffer structure
140 *
141 * Verifies that a print buffer has captured all data written to it.
142 * If data has been lost, linearize buffer and prepend an error message
143 *
144 * Returns length of print buffer data string (including trailing NUL)
145 */
146int tipc_printbuf_validate(struct print_buf *pb)
147{
148 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
149 char *cp_buf;
150 struct print_buf cb;
151
152 if (!pb->buf)
153 return 0;
154
155 if (pb->buf[pb->size - 1] == 0) {
156 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
157 if (cp_buf) {
158 tipc_printbuf_init(&cb, cp_buf, pb->size);
159 tipc_printbuf_move(&cb, pb);
160 tipc_printbuf_move(pb, &cb);
161 kfree(cp_buf);
162 memcpy(pb->buf, err, strlen(err));
163 } else {
164 tipc_printbuf_reset(pb);
165 tipc_printf(pb, err);
166 }
167 }
168 return pb->crs - pb->buf + 1;
169}
170
171/**
172 * tipc_printbuf_move - move print buffer contents to another print buffer
173 * @pb_to: pointer to destination print buffer structure
174 * @pb_from: pointer to source print buffer structure
175 *
176 * Current contents of destination print buffer (if any) are discarded.
177 * Source print buffer becomes empty if a successful move occurs.
178 */
179static void tipc_printbuf_move(struct print_buf *pb_to,
180 struct print_buf *pb_from)
181{
182 int len;
183
184 /* Handle the cases where contents can't be moved */
185 if (!pb_to->buf)
186 return;
187
188 if (!pb_from->buf) {
189 tipc_printbuf_reset(pb_to);
190 return;
191 }
192
193 if (pb_to->size < pb_from->size) {
194 strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
195 pb_to->buf[pb_to->size - 1] = ~0;
196 pb_to->crs = strchr(pb_to->buf, 0);
197 return;
198 }
199
200 /* Copy data from char after cursor to end (if used) */
201 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
202 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
203 strcpy(pb_to->buf, pb_from->crs + 1);
204 pb_to->crs = pb_to->buf + len;
205 } else
206 pb_to->crs = pb_to->buf;
207
208 /* Copy data from start to cursor (always) */
209 len = pb_from->crs - pb_from->buf;
210 strcpy(pb_to->crs, pb_from->buf);
211 pb_to->crs += len;
212
213 tipc_printbuf_reset(pb_from);
214}
215 39
216/** 40/**
217 * tipc_printf - append formatted output to print buffer 41 * tipc_snprintf - append formatted output to print buffer
218 * @pb: pointer to print buffer 42 * @buf: pointer to print buffer
43 * @len: buffer length
219 * @fmt: formatted info to be printed 44 * @fmt: formatted info to be printed
220 */ 45 */
221void tipc_printf(struct print_buf *pb, const char *fmt, ...) 46int tipc_snprintf(char *buf, int len, const char *fmt, ...)
222{
223 int chars_to_add;
224 int chars_left;
225 char save_char;
226
227 spin_lock_bh(&print_lock);
228
229 FORMAT(print_string, chars_to_add, fmt);
230 if (chars_to_add >= TIPC_PB_MAX_STR)
231 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
232
233 if (pb->buf) {
234 chars_left = pb->buf + pb->size - pb->crs - 1;
235 if (chars_to_add <= chars_left) {
236 strcpy(pb->crs, print_string);
237 pb->crs += chars_to_add;
238 } else if (chars_to_add >= (pb->size - 1)) {
239 strcpy(pb->buf, print_string + chars_to_add + 1
240 - pb->size);
241 pb->crs = pb->buf + pb->size - 1;
242 } else {
243 strcpy(pb->buf, print_string + chars_left);
244 save_char = print_string[chars_left];
245 print_string[chars_left] = 0;
246 strcpy(pb->crs, print_string);
247 print_string[chars_left] = save_char;
248 pb->crs = pb->buf + chars_to_add - chars_left;
249 }
250 }
251
252 if (pb->echo)
253 printk("%s", print_string);
254
255 spin_unlock_bh(&print_lock);
256}
257
258/**
259 * tipc_log_resize - change the size of the TIPC log buffer
260 * @log_size: print buffer size to use
261 */
262int tipc_log_resize(int log_size)
263{
264 int res = 0;
265
266 spin_lock_bh(&print_lock);
267 kfree(TIPC_LOG->buf);
268 TIPC_LOG->buf = NULL;
269 if (log_size) {
270 if (log_size < TIPC_PB_MIN_SIZE)
271 log_size = TIPC_PB_MIN_SIZE;
272 res = TIPC_LOG->echo;
273 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
274 log_size);
275 TIPC_LOG->echo = res;
276 res = !TIPC_LOG->buf;
277 }
278 spin_unlock_bh(&print_lock);
279
280 return res;
281}
282
283/**
284 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
285 */
286struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
287{
288 u32 value;
289
290 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
291 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
292
293 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
294 if (value > 32768)
295 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
296 " (log size must be 0-32768)");
297 if (tipc_log_resize(value))
298 return tipc_cfg_reply_error_string(
299 "unable to create specified log (log size is now 0)");
300 return tipc_cfg_reply_none();
301}
302
303/**
304 * tipc_log_dump - capture TIPC log buffer contents in configuration message
305 */
306struct sk_buff *tipc_log_dump(void)
307{ 47{
308 struct sk_buff *reply; 48 int i;
309 49 va_list args;
310 spin_lock_bh(&print_lock);
311 if (!TIPC_LOG->buf) {
312 spin_unlock_bh(&print_lock);
313 reply = tipc_cfg_reply_ultra_string("log not activated\n");
314 } else if (tipc_printbuf_empty(TIPC_LOG)) {
315 spin_unlock_bh(&print_lock);
316 reply = tipc_cfg_reply_ultra_string("log is empty\n");
317 } else {
318 struct tlv_desc *rep_tlv;
319 struct print_buf pb;
320 int str_len;
321 50
322 str_len = min(TIPC_LOG->size, 32768u); 51 va_start(args, fmt);
323 spin_unlock_bh(&print_lock); 52 i = vscnprintf(buf, len, fmt, args);
324 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len)); 53 va_end(args);
325 if (reply) { 54 return i;
326 rep_tlv = (struct tlv_desc *)reply->data;
327 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
328 spin_lock_bh(&print_lock);
329 tipc_printbuf_move(&pb, TIPC_LOG);
330 spin_unlock_bh(&print_lock);
331 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
332 skb_put(reply, TLV_SPACE(str_len));
333 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
334 }
335 }
336 return reply;
337} 55}
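
The new tipc_snprintf() above is a thin wrapper around vscnprintf() rather
than vsnprintf(), and the distinction matters: on truncation,
vsnprintf()-style functions return the length the output would have had,
which is unsafe to feed back into buffer offsets. A short userspace
demonstration of the difference, using standard snprintf():

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char small[8];
            int would_be = snprintf(small, sizeof(small), "0123456789");

            /* prints: returned 10, stored "0123456" (7 chars) */
            printf("returned %d, stored \"%s\" (%zu chars)\n",
                   would_be, small, strlen(small));
            return 0;
    }

vscnprintf() instead clamps the return value to the characters actually
written (at most len - 1), which is why the tipc_link_stats() chaining
shown earlier stays in bounds.
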
diff --git a/net/tipc/log.h b/net/tipc/log.h
deleted file mode 100644
index d1f5eb967fd8..000000000000
--- a/net/tipc/log.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * net/tipc/log.h: Include file for TIPC print buffer routines
3 *
4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_LOG_H
38#define _TIPC_LOG_H
39
40/**
41 * struct print_buf - TIPC print buffer structure
42 * @buf: pointer to character array containing print buffer contents
43 * @size: size of character array
44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @echo: echo output to system console if non-zero
46 */
47struct print_buf {
48 char *buf;
49 u32 size;
50 char *crs;
51 int echo;
52};
53
54#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
55#define TIPC_PB_MAX_STR 512 /* max printable string (with trailing NUL) */
56
57void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
58int tipc_printbuf_validate(struct print_buf *pb);
59
60int tipc_log_resize(int log_size);
61
62struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
63 int req_tlv_space);
64struct sk_buff *tipc_log_dump(void);
65
66#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index deea0d232dca..f2db8a87d9c5 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -109,245 +109,3 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
109 *buf = NULL; 109 *buf = NULL;
110 return -EFAULT; 110 return -EFAULT;
111} 111}
112
113#ifdef CONFIG_TIPC_DEBUG
114void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
115{
116 u32 usr = msg_user(msg);
117 tipc_printf(buf, KERN_DEBUG);
118 tipc_printf(buf, str);
119
120 switch (usr) {
121 case MSG_BUNDLER:
122 tipc_printf(buf, "BNDL::");
123 tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
124 break;
125 case BCAST_PROTOCOL:
126 tipc_printf(buf, "BCASTP::");
127 break;
128 case MSG_FRAGMENTER:
129 tipc_printf(buf, "FRAGM::");
130 switch (msg_type(msg)) {
131 case FIRST_FRAGMENT:
132 tipc_printf(buf, "FIRST:");
133 break;
134 case FRAGMENT:
135 tipc_printf(buf, "BODY:");
136 break;
137 case LAST_FRAGMENT:
138 tipc_printf(buf, "LAST:");
139 break;
140 default:
141 tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
142
143 }
144 tipc_printf(buf, "NO(%u/%u):", msg_long_msgno(msg),
145 msg_fragm_no(msg));
146 break;
147 case TIPC_LOW_IMPORTANCE:
148 case TIPC_MEDIUM_IMPORTANCE:
149 case TIPC_HIGH_IMPORTANCE:
150 case TIPC_CRITICAL_IMPORTANCE:
151 tipc_printf(buf, "DAT%u:", msg_user(msg));
152 if (msg_short(msg)) {
153 tipc_printf(buf, "CON:");
154 break;
155 }
156 switch (msg_type(msg)) {
157 case TIPC_CONN_MSG:
158 tipc_printf(buf, "CON:");
159 break;
160 case TIPC_MCAST_MSG:
161 tipc_printf(buf, "MCST:");
162 break;
163 case TIPC_NAMED_MSG:
164 tipc_printf(buf, "NAM:");
165 break;
166 case TIPC_DIRECT_MSG:
167 tipc_printf(buf, "DIR:");
168 break;
169 default:
170 tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg));
171 }
172 if (msg_reroute_cnt(msg))
173 tipc_printf(buf, "REROUTED(%u):",
174 msg_reroute_cnt(msg));
175 break;
176 case NAME_DISTRIBUTOR:
177 tipc_printf(buf, "NMD::");
178 switch (msg_type(msg)) {
179 case PUBLICATION:
180 tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
181 break;
182 case WITHDRAWAL:
183 tipc_printf(buf, "WDRW:");
184 break;
185 default:
186 tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
187 }
188 if (msg_reroute_cnt(msg))
189 tipc_printf(buf, "REROUTED(%u):",
190 msg_reroute_cnt(msg));
191 break;
192 case CONN_MANAGER:
193 tipc_printf(buf, "CONN_MNG:");
194 switch (msg_type(msg)) {
195 case CONN_PROBE:
196 tipc_printf(buf, "PROBE:");
197 break;
198 case CONN_PROBE_REPLY:
199 tipc_printf(buf, "PROBE_REPLY:");
200 break;
201 case CONN_ACK:
202 tipc_printf(buf, "CONN_ACK:");
203 tipc_printf(buf, "ACK(%u):", msg_msgcnt(msg));
204 break;
205 default:
206 tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
207 }
208 if (msg_reroute_cnt(msg))
209 tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg));
210 break;
211 case LINK_PROTOCOL:
212 switch (msg_type(msg)) {
213 case STATE_MSG:
214 tipc_printf(buf, "STATE:");
215 tipc_printf(buf, "%s:", msg_probe(msg) ? "PRB" : "");
216 tipc_printf(buf, "NXS(%u):", msg_next_sent(msg));
217 tipc_printf(buf, "GAP(%u):", msg_seq_gap(msg));
218 tipc_printf(buf, "LSTBC(%u):", msg_last_bcast(msg));
219 break;
220 case RESET_MSG:
221 tipc_printf(buf, "RESET:");
222 if (msg_size(msg) != msg_hdr_sz(msg))
223 tipc_printf(buf, "BEAR:%s:", msg_data(msg));
224 break;
225 case ACTIVATE_MSG:
226 tipc_printf(buf, "ACTIVATE:");
227 break;
228 default:
229 tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
230 }
231 tipc_printf(buf, "PLANE(%c):", msg_net_plane(msg));
232 tipc_printf(buf, "SESS(%u):", msg_session(msg));
233 break;
234 case CHANGEOVER_PROTOCOL:
235 tipc_printf(buf, "TUNL:");
236 switch (msg_type(msg)) {
237 case DUPLICATE_MSG:
238 tipc_printf(buf, "DUPL:");
239 break;
240 case ORIGINAL_MSG:
241 tipc_printf(buf, "ORIG:");
242 tipc_printf(buf, "EXP(%u)", msg_msgcnt(msg));
243 break;
244 default:
245 tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
246 }
247 break;
248 case LINK_CONFIG:
249 tipc_printf(buf, "CFG:");
250 switch (msg_type(msg)) {
251 case DSC_REQ_MSG:
252 tipc_printf(buf, "DSC_REQ:");
253 break;
254 case DSC_RESP_MSG:
255 tipc_printf(buf, "DSC_RESP:");
256 break;
257 default:
258 tipc_printf(buf, "UNKNOWN TYPE:%x:", msg_type(msg));
259 break;
260 }
261 break;
262 default:
263 tipc_printf(buf, "UNKNOWN USER:");
264 }
265
266 switch (usr) {
267 case CONN_MANAGER:
268 case TIPC_LOW_IMPORTANCE:
269 case TIPC_MEDIUM_IMPORTANCE:
270 case TIPC_HIGH_IMPORTANCE:
271 case TIPC_CRITICAL_IMPORTANCE:
272 switch (msg_errcode(msg)) {
273 case TIPC_OK:
274 break;
275 case TIPC_ERR_NO_NAME:
276 tipc_printf(buf, "NO_NAME:");
277 break;
278 case TIPC_ERR_NO_PORT:
279 tipc_printf(buf, "NO_PORT:");
280 break;
281 case TIPC_ERR_NO_NODE:
282 tipc_printf(buf, "NO_PROC:");
283 break;
284 case TIPC_ERR_OVERLOAD:
285 tipc_printf(buf, "OVERLOAD:");
286 break;
287 case TIPC_CONN_SHUTDOWN:
288 tipc_printf(buf, "SHUTDOWN:");
289 break;
290 default:
291 tipc_printf(buf, "UNKNOWN ERROR(%x):",
292 msg_errcode(msg));
293 }
294 default:
295 break;
296 }
297
298 tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
299 tipc_printf(buf, "SZ(%u):", msg_size(msg));
300 tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
301
302 if (msg_non_seq(msg))
303 tipc_printf(buf, "NOSEQ:");
304 else
305 tipc_printf(buf, "ACK(%u):", msg_ack(msg));
306 tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
307 tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
308
309 if (msg_isdata(msg)) {
310 if (msg_named(msg)) {
311 tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
312 tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
313 }
314 }
315
316 if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
317 (usr != MSG_BUNDLER)) {
318 if (!msg_short(msg)) {
319 tipc_printf(buf, ":ORIG(%x:%u):",
320 msg_orignode(msg), msg_origport(msg));
321 tipc_printf(buf, ":DEST(%x:%u):",
322 msg_destnode(msg), msg_destport(msg));
323 } else {
324 tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
325 tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
326 }
327 }
328 if (msg_user(msg) == NAME_DISTRIBUTOR) {
329 tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
330 tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
331 }
332
333 if (msg_user(msg) == LINK_CONFIG) {
334 struct tipc_media_addr orig;
335
336 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
337 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
338 memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
339 orig.media_id = 0;
340 orig.broadcast = 0;
341 tipc_media_addr_printf(buf, &orig);
342 }
343 if (msg_user(msg) == BCAST_PROTOCOL) {
344 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
345 tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
346 }
347 tipc_printf(buf, "\n");
348 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg)))
349 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
350 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
351 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
352}
353#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 158318e67b08..55d3928dfd67 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -161,7 +161,7 @@ void tipc_named_publish(struct publication *publ)
161 161
162 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 162 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
163 if (!buf) { 163 if (!buf) {
164 warn("Publication distribution failure\n"); 164 pr_warn("Publication distribution failure\n");
165 return; 165 return;
166 } 166 }
167 167
@@ -186,7 +186,7 @@ void tipc_named_withdraw(struct publication *publ)
186 186
187 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 187 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
188 if (!buf) { 188 if (!buf) {
189 warn("Withdrawal distribution failure\n"); 189 pr_warn("Withdrawal distribution failure\n");
190 return; 190 return;
191 } 191 }
192 192
@@ -213,7 +213,7 @@ static void named_distribute(struct list_head *message_list, u32 node,
213 rest -= left; 213 rest -= left;
214 buf = named_prepare_buf(PUBLICATION, left, node); 214 buf = named_prepare_buf(PUBLICATION, left, node);
215 if (!buf) { 215 if (!buf) {
216 warn("Bulk publication failure\n"); 216 pr_warn("Bulk publication failure\n");
217 return; 217 return;
218 } 218 }
219 item = (struct distr_item *)msg_data(buf_msg(buf)); 219 item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -283,9 +283,10 @@ static void named_purge_publ(struct publication *publ)
283 write_unlock_bh(&tipc_nametbl_lock); 283 write_unlock_bh(&tipc_nametbl_lock);
284 284
285 if (p != publ) { 285 if (p != publ) {
286 err("Unable to remove publication from failed node\n" 286 pr_err("Unable to remove publication from failed node\n"
287 "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n", 287 " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
288 publ->type, publ->lower, publ->node, publ->ref, publ->key); 288 publ->type, publ->lower, publ->node, publ->ref,
289 publ->key);
289 } 290 }
290 291
291 kfree(p); 292 kfree(p);
@@ -329,14 +330,14 @@ void tipc_named_recv(struct sk_buff *buf)
329 tipc_nodesub_unsubscribe(&publ->subscr); 330 tipc_nodesub_unsubscribe(&publ->subscr);
330 kfree(publ); 331 kfree(publ);
331 } else { 332 } else {
332 err("Unable to remove publication by node 0x%x\n" 333 pr_err("Unable to remove publication by node 0x%x\n"
333 "(type=%u, lower=%u, ref=%u, key=%u)\n", 334 " (type=%u, lower=%u, ref=%u, key=%u)\n",
334 msg_orignode(msg), 335 msg_orignode(msg), ntohl(item->type),
335 ntohl(item->type), ntohl(item->lower), 336 ntohl(item->lower), ntohl(item->ref),
336 ntohl(item->ref), ntohl(item->key)); 337 ntohl(item->key));
337 } 338 }
338 } else { 339 } else {
339 warn("Unrecognized name table message received\n"); 340 pr_warn("Unrecognized name table message received\n");
340 } 341 }
341 item++; 342 item++;
342 } 343 }
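
The warn()/err()/info() calls converted above were TIPC-local wrappers; the standard pr_warn()/pr_err() family instead picks up a per-subsystem prefix from pr_fmt(). A small userspace model of that plumbing, where the "tipc: " prefix is an assumption standing in for whatever TIPC's core header derives from KBUILD_MODNAME:

    #include <stdio.h>

    /* Stand-in for <linux/printk.h>: in the kernel these expand to
     * printk(KERN_WARNING pr_fmt(fmt), ...) and so on. */
    #define pr_fmt(fmt) "tipc: " fmt
    #define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_warn("Publication distribution failure\n");
            pr_err("Unable to remove publication from failed node\n");
            return 0;
    }
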
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 010f24a59da2..360c478b0b53 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -126,7 +126,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
126{ 126{
127 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); 127 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
128 if (publ == NULL) { 128 if (publ == NULL) {
129 warn("Publication creation failure, no memory\n"); 129 pr_warn("Publication creation failure, no memory\n");
130 return NULL; 130 return NULL;
131 } 131 }
132 132
@@ -163,7 +163,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
163 struct sub_seq *sseq = tipc_subseq_alloc(1); 163 struct sub_seq *sseq = tipc_subseq_alloc(1);
164 164
165 if (!nseq || !sseq) { 165 if (!nseq || !sseq) {
166 warn("Name sequence creation failed, no memory\n"); 166 pr_warn("Name sequence creation failed, no memory\n");
167 kfree(nseq); 167 kfree(nseq);
168 kfree(sseq); 168 kfree(sseq);
169 return NULL; 169 return NULL;
@@ -191,7 +191,7 @@ static void nameseq_delete_empty(struct name_seq *seq)
191 } 191 }
192} 192}
193 193
194/* 194/**
195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
196 * 196 *
197 * Very time-critical, so binary searches through sub-sequence array. 197 * Very time-critical, so binary searches through sub-sequence array.
@@ -263,8 +263,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
263 263
264 /* Lower end overlaps existing entry => need an exact match */ 264 /* Lower end overlaps existing entry => need an exact match */
265 if ((sseq->lower != lower) || (sseq->upper != upper)) { 265 if ((sseq->lower != lower) || (sseq->upper != upper)) {
266 warn("Cannot publish {%u,%u,%u}, overlap error\n", 266 pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
267 type, lower, upper); 267 type, lower, upper);
268 return NULL; 268 return NULL;
269 } 269 }
270 270
@@ -286,8 +286,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
286 /* Fail if upper end overlaps into an existing entry */ 286 /* Fail if upper end overlaps into an existing entry */
287 if ((inspos < nseq->first_free) && 287 if ((inspos < nseq->first_free) &&
288 (upper >= nseq->sseqs[inspos].lower)) { 288 (upper >= nseq->sseqs[inspos].lower)) {
289 warn("Cannot publish {%u,%u,%u}, overlap error\n", 289 pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
290 type, lower, upper); 290 type, lower, upper);
291 return NULL; 291 return NULL;
292 } 292 }
293 293
@@ -296,8 +296,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
296 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); 296 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
297 297
298 if (!sseqs) { 298 if (!sseqs) {
299 warn("Cannot publish {%u,%u,%u}, no memory\n", 299 pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
300 type, lower, upper); 300 type, lower, upper);
301 return NULL; 301 return NULL;
302 } 302 }
303 memcpy(sseqs, nseq->sseqs, 303 memcpy(sseqs, nseq->sseqs,
@@ -309,8 +309,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
309 309
310 info = kzalloc(sizeof(*info), GFP_ATOMIC); 310 info = kzalloc(sizeof(*info), GFP_ATOMIC);
311 if (!info) { 311 if (!info) {
312 warn("Cannot publish {%u,%u,%u}, no memory\n", 312 pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
313 type, lower, upper); 313 type, lower, upper);
314 return NULL; 314 return NULL;
315 } 315 }
316 316
@@ -435,7 +435,7 @@ found:
435} 435}
436 436
437/** 437/**
438 * tipc_nameseq_subscribe: attach a subscription, and issue 438 * tipc_nameseq_subscribe - attach a subscription, and issue
439 * the prescribed number of events if there is any sub- 439 * the prescribed number of events if there is any sub-
440 * sequence overlapping with the requested sequence 440 * sequence overlapping with the requested sequence
441 */ 441 */
@@ -492,8 +492,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
492 492
493 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || 493 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
494 (lower > upper)) { 494 (lower > upper)) {
495 dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n", 495 pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
496 type, lower, upper, scope); 496 type, lower, upper, scope);
497 return NULL; 497 return NULL;
498 } 498 }
499 499
@@ -520,7 +520,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
520 return publ; 520 return publ;
521} 521}
522 522
523/* 523/**
524 * tipc_nametbl_translate - perform name translation 524 * tipc_nametbl_translate - perform name translation
525 * 525 *
526 * On entry, 'destnode' is the search domain used during translation. 526 * On entry, 'destnode' is the search domain used during translation.
@@ -668,8 +668,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
668 struct publication *publ; 668 struct publication *publ;
669 669
670 if (table.local_publ_count >= tipc_max_publications) { 670 if (table.local_publ_count >= tipc_max_publications) {
671 warn("Publication failed, local publication limit reached (%u)\n", 671 pr_warn("Publication failed, local publication limit reached (%u)\n",
672 tipc_max_publications); 672 tipc_max_publications);
673 return NULL; 673 return NULL;
674 } 674 }
675 675
@@ -702,9 +702,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
702 return 1; 702 return 1;
703 } 703 }
704 write_unlock_bh(&tipc_nametbl_lock); 704 write_unlock_bh(&tipc_nametbl_lock);
705 err("Unable to remove local publication\n" 705 pr_err("Unable to remove local publication\n"
706 "(type=%u, lower=%u, ref=%u, key=%u)\n", 706 "(type=%u, lower=%u, ref=%u, key=%u)\n",
707 type, lower, ref, key); 707 type, lower, ref, key);
708 return 0; 708 return 0;
709} 709}
710 710
@@ -725,8 +725,8 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
725 tipc_nameseq_subscribe(seq, s); 725 tipc_nameseq_subscribe(seq, s);
726 spin_unlock_bh(&seq->lock); 726 spin_unlock_bh(&seq->lock);
727 } else { 727 } else {
728 warn("Failed to create subscription for {%u,%u,%u}\n", 728 pr_warn("Failed to create subscription for {%u,%u,%u}\n",
729 s->seq.type, s->seq.lower, s->seq.upper); 729 s->seq.type, s->seq.lower, s->seq.upper);
730 } 730 }
731 write_unlock_bh(&tipc_nametbl_lock); 731 write_unlock_bh(&tipc_nametbl_lock);
732} 732}
@@ -751,21 +751,22 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
751 751
752 752
753/** 753/**
754 * subseq_list: print specified sub-sequence contents into the given buffer 754 * subseq_list - print specified sub-sequence contents into the given buffer
755 */ 755 */
756static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, 756static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
757 u32 index) 757 u32 index)
758{ 758{
759 char portIdStr[27]; 759 char portIdStr[27];
760 const char *scope_str[] = {"", " zone", " cluster", " node"}; 760 const char *scope_str[] = {"", " zone", " cluster", " node"};
761 struct publication *publ; 761 struct publication *publ;
762 struct name_info *info; 762 struct name_info *info;
763 int ret;
763 764
764 tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper); 765 ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
765 766
766 if (depth == 2) { 767 if (depth == 2) {
767 		tipc_printf(buf, "\n"); 768 		ret += tipc_snprintf(buf + ret, len - ret, "\n");
768 return; 769 return ret;
769 } 770 }
770 771
771 info = sseq->info; 772 info = sseq->info;
@@ -774,52 +775,58 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
774 sprintf(portIdStr, "<%u.%u.%u:%u>", 775 sprintf(portIdStr, "<%u.%u.%u:%u>",
775 tipc_zone(publ->node), tipc_cluster(publ->node), 776 tipc_zone(publ->node), tipc_cluster(publ->node),
776 tipc_node(publ->node), publ->ref); 777 tipc_node(publ->node), publ->ref);
777 tipc_printf(buf, "%-26s ", portIdStr); 778 ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
778 if (depth > 3) { 779 if (depth > 3) {
779 tipc_printf(buf, "%-10u %s", publ->key, 780 ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
780 scope_str[publ->scope]); 781 publ->key, scope_str[publ->scope]);
781 } 782 }
782 if (!list_is_last(&publ->zone_list, &info->zone_list)) 783 if (!list_is_last(&publ->zone_list, &info->zone_list))
783 tipc_printf(buf, "\n%33s", " "); 784 ret += tipc_snprintf(buf + ret, len - ret,
785 "\n%33s", " ");
784 }; 786 };
785 787
786 tipc_printf(buf, "\n"); 788 ret += tipc_snprintf(buf + ret, len - ret, "\n");
789 return ret;
787} 790}
788 791
789/** 792/**
790 * nameseq_list: print specified name sequence contents into the given buffer 793 * nameseq_list - print specified name sequence contents into the given buffer
791 */ 794 */
792static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, 795static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
793 u32 type, u32 lowbound, u32 upbound, u32 index) 796 u32 type, u32 lowbound, u32 upbound, u32 index)
794{ 797{
795 struct sub_seq *sseq; 798 struct sub_seq *sseq;
796 char typearea[11]; 799 char typearea[11];
800 int ret = 0;
797 801
798 if (seq->first_free == 0) 802 if (seq->first_free == 0)
799 return; 803 return 0;
800 804
801 sprintf(typearea, "%-10u", seq->type); 805 sprintf(typearea, "%-10u", seq->type);
802 806
803 if (depth == 1) { 807 if (depth == 1) {
804 tipc_printf(buf, "%s\n", typearea); 808 ret += tipc_snprintf(buf, len, "%s\n", typearea);
805 return; 809 return ret;
806 } 810 }
807 811
808 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { 812 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
809 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { 813 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
810 tipc_printf(buf, "%s ", typearea); 814 ret += tipc_snprintf(buf + ret, len - ret, "%s ",
815 typearea);
811 spin_lock_bh(&seq->lock); 816 spin_lock_bh(&seq->lock);
812 subseq_list(sseq, buf, depth, index); 817 ret += subseq_list(sseq, buf + ret, len - ret,
818 depth, index);
813 spin_unlock_bh(&seq->lock); 819 spin_unlock_bh(&seq->lock);
814 sprintf(typearea, "%10s", " "); 820 sprintf(typearea, "%10s", " ");
815 } 821 }
816 } 822 }
823 return ret;
817} 824}
818 825
819/** 826/**
820 * nametbl_header - print name table header into the given buffer 827 * nametbl_header - print name table header into the given buffer
821 */ 828 */
822static void nametbl_header(struct print_buf *buf, u32 depth) 829static int nametbl_header(char *buf, int len, u32 depth)
823{ 830{
824 const char *header[] = { 831 const char *header[] = {
825 "Type ", 832 "Type ",
@@ -829,24 +836,27 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
829 }; 836 };
830 837
831 int i; 838 int i;
839 int ret = 0;
832 840
833 if (depth > 4) 841 if (depth > 4)
834 depth = 4; 842 depth = 4;
835 for (i = 0; i < depth; i++) 843 for (i = 0; i < depth; i++)
836 tipc_printf(buf, header[i]); 844 ret += tipc_snprintf(buf + ret, len - ret, header[i]);
837 tipc_printf(buf, "\n"); 845 ret += tipc_snprintf(buf + ret, len - ret, "\n");
846 return ret;
838} 847}
839 848
840/** 849/**
841 * nametbl_list - print specified name table contents into the given buffer 850 * nametbl_list - print specified name table contents into the given buffer
842 */ 851 */
843static void nametbl_list(struct print_buf *buf, u32 depth_info, 852static int nametbl_list(char *buf, int len, u32 depth_info,
844 u32 type, u32 lowbound, u32 upbound) 853 u32 type, u32 lowbound, u32 upbound)
845{ 854{
846 struct hlist_head *seq_head; 855 struct hlist_head *seq_head;
847 struct hlist_node *seq_node; 856 struct hlist_node *seq_node;
848 struct name_seq *seq; 857 struct name_seq *seq;
849 int all_types; 858 int all_types;
859 int ret = 0;
850 u32 depth; 860 u32 depth;
851 u32 i; 861 u32 i;
852 862
@@ -854,65 +864,69 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
854 depth = (depth_info & ~TIPC_NTQ_ALLTYPES); 864 depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
855 865
856 if (depth == 0) 866 if (depth == 0)
857 return; 867 return 0;
858 868
859 if (all_types) { 869 if (all_types) {
860 /* display all entries in name table to specified depth */ 870 /* display all entries in name table to specified depth */
861 nametbl_header(buf, depth); 871 ret += nametbl_header(buf, len, depth);
862 lowbound = 0; 872 lowbound = 0;
863 upbound = ~0; 873 upbound = ~0;
864 for (i = 0; i < tipc_nametbl_size; i++) { 874 for (i = 0; i < tipc_nametbl_size; i++) {
865 seq_head = &table.types[i]; 875 seq_head = &table.types[i];
866 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 876 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
867 nameseq_list(seq, buf, depth, seq->type, 877 ret += nameseq_list(seq, buf + ret, len - ret,
868 lowbound, upbound, i); 878 depth, seq->type,
879 lowbound, upbound, i);
869 } 880 }
870 } 881 }
871 } else { 882 } else {
872 /* display only the sequence that matches the specified type */ 883 /* display only the sequence that matches the specified type */
873 if (upbound < lowbound) { 884 if (upbound < lowbound) {
874 tipc_printf(buf, "invalid name sequence specified\n"); 885 ret += tipc_snprintf(buf + ret, len - ret,
875 return; 886 "invalid name sequence specified\n");
887 return ret;
876 } 888 }
877 nametbl_header(buf, depth); 889 ret += nametbl_header(buf + ret, len - ret, depth);
878 i = hash(type); 890 i = hash(type);
879 seq_head = &table.types[i]; 891 seq_head = &table.types[i];
880 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 892 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
881 if (seq->type == type) { 893 if (seq->type == type) {
882 nameseq_list(seq, buf, depth, type, 894 ret += nameseq_list(seq, buf + ret, len - ret,
883 lowbound, upbound, i); 895 depth, type,
896 lowbound, upbound, i);
884 break; 897 break;
885 } 898 }
886 } 899 }
887 } 900 }
901 return ret;
888} 902}
889 903
890#define MAX_NAME_TBL_QUERY 32768
891
892struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) 904struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
893{ 905{
894 struct sk_buff *buf; 906 struct sk_buff *buf;
895 struct tipc_name_table_query *argv; 907 struct tipc_name_table_query *argv;
896 struct tlv_desc *rep_tlv; 908 struct tlv_desc *rep_tlv;
897 struct print_buf b; 909 char *pb;
910 int pb_len;
898 int str_len; 911 int str_len;
899 912
900 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) 913 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
901 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 914 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
902 915
903 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY)); 916 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
904 if (!buf) 917 if (!buf)
905 return NULL; 918 return NULL;
906 919
907 rep_tlv = (struct tlv_desc *)buf->data; 920 rep_tlv = (struct tlv_desc *)buf->data;
908 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); 921 pb = TLV_DATA(rep_tlv);
922 pb_len = ULTRA_STRING_MAX_LEN;
909 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); 923 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
910 read_lock_bh(&tipc_nametbl_lock); 924 read_lock_bh(&tipc_nametbl_lock);
911 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 925 str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
912 ntohl(argv->lowbound), ntohl(argv->upbound)); 926 ntohl(argv->type),
927 ntohl(argv->lowbound), ntohl(argv->upbound));
913 read_unlock_bh(&tipc_nametbl_lock); 928 read_unlock_bh(&tipc_nametbl_lock);
914 str_len = tipc_printbuf_validate(&b); 929 str_len += 1; /* for "\0" */
915
916 skb_put(buf, TLV_SPACE(str_len)); 930 skb_put(buf, TLV_SPACE(str_len));
917 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 931 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
918 932
@@ -940,8 +954,10 @@ void tipc_nametbl_stop(void)
940 /* Verify name table is empty, then release it */ 954 /* Verify name table is empty, then release it */
941 write_lock_bh(&tipc_nametbl_lock); 955 write_lock_bh(&tipc_nametbl_lock);
942 for (i = 0; i < tipc_nametbl_size; i++) { 956 for (i = 0; i < tipc_nametbl_size; i++) {
943 if (!hlist_empty(&table.types[i])) 957 if (hlist_empty(&table.types[i]))
944 err("tipc_nametbl_stop(): hash chain %u is non-null\n", i); 958 continue;
959 pr_err("nametbl_stop(): orphaned hash chain detected\n");
960 break;
945 } 961 }
946 kfree(table.types); 962 kfree(table.types);
947 table.types = NULL; 963 table.types = NULL;
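
Every printer converted in this file follows one discipline: each call appends at buf + ret with only the space that remains, and because the helper reports what it stored rather than the would-be length, ret can never walk past len even when output truncates. A compilable sketch of the pattern; scnprintf() is re-implemented locally, since userspace snprintf() returns the would-be length instead of clamping:

    #include <stdarg.h>
    #include <stdio.h>

    /* Local scnprintf(): return the number of characters stored. */
    static int scnprintf(char *buf, int len, const char *fmt, ...)
    {
            va_list args;
            int i;

            va_start(args, fmt);
            i = vsnprintf(buf, len, fmt, args);
            va_end(args);
            if (i < 0)
                    return 0;
            return i < len ? i : (len > 0 ? len - 1 : 0);
    }

    /* The offset/length bookkeeping used by subseq_list()/nameseq_list(). */
    static int demo_subseq_line(char *buf, int len)
    {
            int ret = 0;

            ret += scnprintf(buf + ret, len - ret, "%-10u %-10u ", 1000u, 1999u);
            ret += scnprintf(buf + ret, len - ret, "%-26s ", "<1.1.1:42>");
            ret += scnprintf(buf + ret, len - ret, "\n");
            return ret;
    }

    int main(void)
    {
            char line[64];

            demo_subseq_line(line, sizeof(line));
            printf("%s", line);
            return 0;
    }
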
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7c236c89cf5e..5b5cea259caf 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -184,9 +184,9 @@ int tipc_net_start(u32 addr)
184 184
185 tipc_cfg_reinit(); 185 tipc_cfg_reinit();
186 186
187 info("Started in network mode\n"); 187 pr_info("Started in network mode\n");
188 info("Own node address %s, network identity %u\n", 188 pr_info("Own node address %s, network identity %u\n",
189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
190 return 0; 190 return 0;
191} 191}
192 192
@@ -202,5 +202,5 @@ void tipc_net_stop(void)
202 list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 202 list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
203 tipc_node_delete(node); 203 tipc_node_delete(node);
204 write_unlock_bh(&tipc_net_lock); 204 write_unlock_bh(&tipc_net_lock);
205 info("Left network mode\n"); 205 pr_info("Left network mode\n");
206} 206}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7bda8e3d1398..47a839df27dc 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -90,7 +90,7 @@ int tipc_netlink_start(void)
90 res = genl_register_family_with_ops(&tipc_genl_family, 90 res = genl_register_family_with_ops(&tipc_genl_family,
91 &tipc_genl_ops, 1); 91 &tipc_genl_ops, 1);
92 if (res) { 92 if (res) {
93 err("Failed to register netlink interface\n"); 93 pr_err("Failed to register netlink interface\n");
94 return res; 94 return res;
95 } 95 }
96 96
diff --git a/net/tipc/node.c b/net/tipc/node.c
index d4fd341e6e0d..d21db204e25a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -105,7 +105,7 @@ struct tipc_node *tipc_node_create(u32 addr)
105 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 105 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
106 if (!n_ptr) { 106 if (!n_ptr) {
107 spin_unlock_bh(&node_create_lock); 107 spin_unlock_bh(&node_create_lock);
108 warn("Node creation failed, no memory\n"); 108 pr_warn("Node creation failed, no memory\n");
109 return NULL; 109 return NULL;
110 } 110 }
111 111
@@ -151,8 +151,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
151 151
152 n_ptr->working_links++; 152 n_ptr->working_links++;
153 153
154 info("Established link <%s> on network plane %c\n", 154 pr_info("Established link <%s> on network plane %c\n",
155 l_ptr->name, l_ptr->b_ptr->net_plane); 155 l_ptr->name, l_ptr->b_ptr->net_plane);
156 156
157 if (!active[0]) { 157 if (!active[0]) {
158 active[0] = active[1] = l_ptr; 158 active[0] = active[1] = l_ptr;
@@ -160,7 +160,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
160 return; 160 return;
161 } 161 }
162 if (l_ptr->priority < active[0]->priority) { 162 if (l_ptr->priority < active[0]->priority) {
163 info("New link <%s> becomes standby\n", l_ptr->name); 163 pr_info("New link <%s> becomes standby\n", l_ptr->name);
164 return; 164 return;
165 } 165 }
166 tipc_link_send_duplicate(active[0], l_ptr); 166 tipc_link_send_duplicate(active[0], l_ptr);
@@ -168,9 +168,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
168 active[0] = l_ptr; 168 active[0] = l_ptr;
169 return; 169 return;
170 } 170 }
171 info("Old link <%s> becomes standby\n", active[0]->name); 171 pr_info("Old link <%s> becomes standby\n", active[0]->name);
172 if (active[1] != active[0]) 172 if (active[1] != active[0])
173 info("Old link <%s> becomes standby\n", active[1]->name); 173 pr_info("Old link <%s> becomes standby\n", active[1]->name);
174 active[0] = active[1] = l_ptr; 174 active[0] = active[1] = l_ptr;
175} 175}
176 176
@@ -211,11 +211,11 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
211 n_ptr->working_links--; 211 n_ptr->working_links--;
212 212
213 if (!tipc_link_is_active(l_ptr)) { 213 if (!tipc_link_is_active(l_ptr)) {
214 info("Lost standby link <%s> on network plane %c\n", 214 pr_info("Lost standby link <%s> on network plane %c\n",
215 l_ptr->name, l_ptr->b_ptr->net_plane); 215 l_ptr->name, l_ptr->b_ptr->net_plane);
216 return; 216 return;
217 } 217 }
218 info("Lost link <%s> on network plane %c\n", 218 pr_info("Lost link <%s> on network plane %c\n",
219 l_ptr->name, l_ptr->b_ptr->net_plane); 219 l_ptr->name, l_ptr->b_ptr->net_plane);
220 220
221 active = &n_ptr->active_links[0]; 221 active = &n_ptr->active_links[0];
@@ -290,8 +290,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
290 char addr_string[16]; 290 char addr_string[16];
291 u32 i; 291 u32 i;
292 292
293 info("Lost contact with %s\n", 293 pr_info("Lost contact with %s\n",
294 tipc_addr_string_fill(addr_string, n_ptr->addr)); 294 tipc_addr_string_fill(addr_string, n_ptr->addr));
295 295
296 /* Flush broadcast link info associated with lost node */ 296 /* Flush broadcast link info associated with lost node */
297 if (n_ptr->bclink.supported) { 297 if (n_ptr->bclink.supported) {
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 7a27344108fe..5e34b015da45 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -51,7 +51,8 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
51 51
52 node_sub->node = tipc_node_find(addr); 52 node_sub->node = tipc_node_find(addr);
53 if (!node_sub->node) { 53 if (!node_sub->node) {
54 warn("Node subscription rejected, unknown node 0x%x\n", addr); 54 pr_warn("Node subscription rejected, unknown node 0x%x\n",
55 addr);
55 return; 56 return;
56 } 57 }
57 node_sub->handle_node_down = handle_down; 58 node_sub->handle_node_down = handle_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2ad37a4db376..07c42fba672b 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,7 +69,7 @@ static u32 port_peerport(struct tipc_port *p_ptr)
69 return msg_destport(&p_ptr->phdr); 69 return msg_destport(&p_ptr->phdr);
70} 70}
71 71
72/* 72/**
73 * tipc_port_peer_msg - verify message was sent by connected port's peer 73 * tipc_port_peer_msg - verify message was sent by connected port's peer
74 * 74 *
75 * Handles cases where the node's network address has changed from 75 * Handles cases where the node's network address has changed from
@@ -191,7 +191,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
191 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC); 191 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
192 192
193 if (b == NULL) { 193 if (b == NULL) {
194 warn("Unable to deliver multicast message(s)\n"); 194 pr_warn("Unable to deliver multicast message(s)\n");
195 goto exit; 195 goto exit;
196 } 196 }
197 if ((index == 0) && (cnt != 0)) 197 if ((index == 0) && (cnt != 0))
@@ -221,12 +221,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
221 221
222 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); 222 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
223 if (!p_ptr) { 223 if (!p_ptr) {
224 warn("Port creation failed, no memory\n"); 224 pr_warn("Port creation failed, no memory\n");
225 return NULL; 225 return NULL;
226 } 226 }
227 ref = tipc_ref_acquire(p_ptr, &p_ptr->lock); 227 ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
228 if (!ref) { 228 if (!ref) {
229 warn("Port creation failed, reference table exhausted\n"); 229 pr_warn("Port creation failed, ref. table exhausted\n");
230 kfree(p_ptr); 230 kfree(p_ptr);
231 return NULL; 231 return NULL;
232 } 232 }
@@ -581,67 +581,73 @@ exit:
581 kfree_skb(buf); 581 kfree_skb(buf);
582} 582}
583 583
584static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id) 584static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
585{ 585{
586 struct publication *publ; 586 struct publication *publ;
587 int ret;
587 588
588 if (full_id) 589 if (full_id)
589 tipc_printf(buf, "<%u.%u.%u:%u>:", 590 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
590 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 591 tipc_zone(tipc_own_addr),
591 tipc_node(tipc_own_addr), p_ptr->ref); 592 tipc_cluster(tipc_own_addr),
593 tipc_node(tipc_own_addr), p_ptr->ref);
592 else 594 else
593 tipc_printf(buf, "%-10u:", p_ptr->ref); 595 ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
594 596
595 if (p_ptr->connected) { 597 if (p_ptr->connected) {
596 u32 dport = port_peerport(p_ptr); 598 u32 dport = port_peerport(p_ptr);
597 u32 destnode = port_peernode(p_ptr); 599 u32 destnode = port_peernode(p_ptr);
598 600
599 tipc_printf(buf, " connected to <%u.%u.%u:%u>", 601 ret += tipc_snprintf(buf + ret, len - ret,
600 tipc_zone(destnode), tipc_cluster(destnode), 602 " connected to <%u.%u.%u:%u>",
601 tipc_node(destnode), dport); 603 tipc_zone(destnode),
604 tipc_cluster(destnode),
605 tipc_node(destnode), dport);
602 if (p_ptr->conn_type != 0) 606 if (p_ptr->conn_type != 0)
603 tipc_printf(buf, " via {%u,%u}", 607 ret += tipc_snprintf(buf + ret, len - ret,
604 p_ptr->conn_type, 608 " via {%u,%u}", p_ptr->conn_type,
605 p_ptr->conn_instance); 609 p_ptr->conn_instance);
606 } else if (p_ptr->published) { 610 } else if (p_ptr->published) {
607 tipc_printf(buf, " bound to"); 611 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
608 list_for_each_entry(publ, &p_ptr->publications, pport_list) { 612 list_for_each_entry(publ, &p_ptr->publications, pport_list) {
609 if (publ->lower == publ->upper) 613 if (publ->lower == publ->upper)
610 tipc_printf(buf, " {%u,%u}", publ->type, 614 ret += tipc_snprintf(buf + ret, len - ret,
611 publ->lower); 615 " {%u,%u}", publ->type,
616 publ->lower);
612 else 617 else
613 tipc_printf(buf, " {%u,%u,%u}", publ->type, 618 ret += tipc_snprintf(buf + ret, len - ret,
614 publ->lower, publ->upper); 619 " {%u,%u,%u}", publ->type,
620 publ->lower, publ->upper);
615 } 621 }
616 } 622 }
617 tipc_printf(buf, "\n"); 623 ret += tipc_snprintf(buf + ret, len - ret, "\n");
624 return ret;
618} 625}
619 626
620#define MAX_PORT_QUERY 32768
621
622struct sk_buff *tipc_port_get_ports(void) 627struct sk_buff *tipc_port_get_ports(void)
623{ 628{
624 struct sk_buff *buf; 629 struct sk_buff *buf;
625 struct tlv_desc *rep_tlv; 630 struct tlv_desc *rep_tlv;
626 struct print_buf pb; 631 char *pb;
632 int pb_len;
627 struct tipc_port *p_ptr; 633 struct tipc_port *p_ptr;
628 int str_len; 634 int str_len = 0;
629 635
630 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY)); 636 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
631 if (!buf) 637 if (!buf)
632 return NULL; 638 return NULL;
633 rep_tlv = (struct tlv_desc *)buf->data; 639 rep_tlv = (struct tlv_desc *)buf->data;
640 pb = TLV_DATA(rep_tlv);
641 pb_len = ULTRA_STRING_MAX_LEN;
634 642
635 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
636 spin_lock_bh(&tipc_port_list_lock); 643 spin_lock_bh(&tipc_port_list_lock);
637 list_for_each_entry(p_ptr, &ports, port_list) { 644 list_for_each_entry(p_ptr, &ports, port_list) {
638 spin_lock_bh(p_ptr->lock); 645 spin_lock_bh(p_ptr->lock);
639 port_print(p_ptr, &pb, 0); 646 str_len += port_print(p_ptr, pb, pb_len, 0);
640 spin_unlock_bh(p_ptr->lock); 647 spin_unlock_bh(p_ptr->lock);
641 } 648 }
642 spin_unlock_bh(&tipc_port_list_lock); 649 spin_unlock_bh(&tipc_port_list_lock);
643 str_len = tipc_printbuf_validate(&pb); 650 str_len += 1; /* for "\0" */
644
645 skb_put(buf, TLV_SPACE(str_len)); 651 skb_put(buf, TLV_SPACE(str_len));
646 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 652 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
647 653
@@ -906,11 +912,11 @@ int tipc_createport(void *usr_handle,
906 912
907 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 913 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
908 if (!up_ptr) { 914 if (!up_ptr) {
909 warn("Port creation failed, no memory\n"); 915 pr_warn("Port creation failed, no memory\n");
910 return -ENOMEM; 916 return -ENOMEM;
911 } 917 }
912 p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher, 918 p_ptr = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
913 port_wakeup, importance); 919 importance);
914 if (!p_ptr) { 920 if (!p_ptr) {
915 kfree(up_ptr); 921 kfree(up_ptr);
916 return -ENOMEM; 922 return -ENOMEM;
@@ -1078,8 +1084,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
1078 if (tp_ptr->connected) { 1084 if (tp_ptr->connected) {
1079 tp_ptr->connected = 0; 1085 tp_ptr->connected = 0;
1080 /* let timer expire on it's own to avoid deadlock! */ 1086 /* let timer expire on it's own to avoid deadlock! */
1081 tipc_nodesub_unsubscribe( 1087 tipc_nodesub_unsubscribe(&tp_ptr->subscription);
1082 &((struct tipc_port *)tp_ptr)->subscription);
1083 res = 0; 1088 res = 0;
1084 } else { 1089 } else {
1085 res = -ENOTCONN; 1090 res = -ENOTCONN;
@@ -1099,7 +1104,7 @@ int tipc_disconnect(u32 ref)
1099 p_ptr = tipc_port_lock(ref); 1104 p_ptr = tipc_port_lock(ref);
1100 if (!p_ptr) 1105 if (!p_ptr)
1101 return -EINVAL; 1106 return -EINVAL;
1102 res = tipc_disconnect_port((struct tipc_port *)p_ptr); 1107 res = tipc_disconnect_port(p_ptr);
1103 tipc_port_unlock(p_ptr); 1108 tipc_port_unlock(p_ptr);
1104 return res; 1109 return res;
1105} 1110}
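
The two query handlers converted in this series, tipc_port_get_ports() here and tipc_nametbl_get() in name_table.c, now share one shape: reserve the largest permitted reply, print straight into the TLV payload, then trim the skb to what was used. The sequence below is condensed from the hunks above, with locking and the real print loop elided, so it is a sketch of the flow rather than a drop-in function:

    static struct sk_buff *query_reply_sketch(void)
    {
            struct sk_buff *buf;
            struct tlv_desc *rep_tlv;
            char *pb;
            int str_len = 0;

            buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
            if (!buf)
                    return NULL;
            rep_tlv = (struct tlv_desc *)buf->data;
            pb = TLV_DATA(rep_tlv);         /* print area is the TLV payload */

            /* ... accumulate output via tipc_snprintf(pb + str_len, ...) ... */
            str_len += tipc_snprintf(pb + str_len,
                                     ULTRA_STRING_MAX_LEN - str_len, "...\n");
            str_len += 1;                   /* count the terminating NUL */

            skb_put(buf, TLV_SPACE(str_len));       /* trim to actual use */
            TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
            return buf;
    }
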
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 98cbec9c4532..4660e3065790 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -79,6 +79,7 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
79 * struct user_port - TIPC user port (used with native API) 79 * struct user_port - TIPC user port (used with native API)
80 * @usr_handle: user-specified field 80 * @usr_handle: user-specified field
81 * @ref: object reference to associated TIPC port 81 * @ref: object reference to associated TIPC port
82 *
82 * <various callback routines> 83 * <various callback routines>
83 */ 84 */
84struct user_port { 85struct user_port {
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 5cada0e38e03..2a2a938dc22c 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
153 struct reference *entry = NULL; 153 struct reference *entry = NULL;
154 154
155 if (!object) { 155 if (!object) {
156 err("Attempt to acquire reference to non-existent object\n"); 156 pr_err("Attempt to acquire ref. to non-existent obj\n");
157 return 0; 157 return 0;
158 } 158 }
159 if (!tipc_ref_table.entries) { 159 if (!tipc_ref_table.entries) {
160 err("Reference table not found during acquisition attempt\n"); 160 pr_err("Ref. table not found in acquisition attempt\n");
161 return 0; 161 return 0;
162 } 162 }
163 163
@@ -211,7 +211,7 @@ void tipc_ref_discard(u32 ref)
211 u32 index_mask; 211 u32 index_mask;
212 212
213 if (!tipc_ref_table.entries) { 213 if (!tipc_ref_table.entries) {
214 err("Reference table not found during discard attempt\n"); 214 pr_err("Ref. table not found during discard attempt\n");
215 return; 215 return;
216 } 216 }
217 217
@@ -222,11 +222,11 @@ void tipc_ref_discard(u32 ref)
222 write_lock_bh(&ref_table_lock); 222 write_lock_bh(&ref_table_lock);
223 223
224 if (!entry->object) { 224 if (!entry->object) {
225 err("Attempt to discard reference to non-existent object\n"); 225 pr_err("Attempt to discard ref. to non-existent obj\n");
226 goto exit; 226 goto exit;
227 } 227 }
228 if (entry->ref != ref) { 228 if (entry->ref != ref) {
229 err("Attempt to discard non-existent reference\n"); 229 pr_err("Attempt to discard non-existent reference\n");
230 goto exit; 230 goto exit;
231 } 231 }
232 232
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5577a447f531..09dc5b97e079 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,12 +34,12 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <linux/export.h>
38#include <net/sock.h>
39
40#include "core.h" 37#include "core.h"
41#include "port.h" 38#include "port.h"
42 39
40#include <linux/export.h>
41#include <net/sock.h>
42
43#define SS_LISTENING -1 /* socket is listening */ 43#define SS_LISTENING -1 /* socket is listening */
44#define SS_READY -2 /* socket is connectionless */ 44#define SS_READY -2 /* socket is connectionless */
45 45
@@ -54,7 +54,7 @@ struct tipc_sock {
54}; 54};
55 55
56#define tipc_sk(sk) ((struct tipc_sock *)(sk)) 56#define tipc_sk(sk) ((struct tipc_sock *)(sk))
57#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p)) 57#define tipc_sk_port(sk) (tipc_sk(sk)->p)
58 58
59#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \ 59#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
60 (sock->state == SS_DISCONNECTING)) 60 (sock->state == SS_DISCONNECTING))
@@ -1699,9 +1699,8 @@ static int getsockopt(struct socket *sock,
1699 return put_user(sizeof(value), ol); 1699 return put_user(sizeof(value), ol);
1700} 1700}
1701 1701
1702/** 1702/* Protocol switches for the various types of TIPC sockets */
1703 * Protocol switches for the various types of TIPC sockets 1703
1704 */
1705static const struct proto_ops msg_ops = { 1704static const struct proto_ops msg_ops = {
1706 .owner = THIS_MODULE, 1705 .owner = THIS_MODULE,
1707 .family = AF_TIPC, 1706 .family = AF_TIPC,
@@ -1788,13 +1787,13 @@ int tipc_socket_init(void)
1788 1787
1789 res = proto_register(&tipc_proto, 1); 1788 res = proto_register(&tipc_proto, 1);
1790 if (res) { 1789 if (res) {
1791 err("Failed to register TIPC protocol type\n"); 1790 pr_err("Failed to register TIPC protocol type\n");
1792 goto out; 1791 goto out;
1793 } 1792 }
1794 1793
1795 res = sock_register(&tipc_family_ops); 1794 res = sock_register(&tipc_family_ops);
1796 if (res) { 1795 if (res) {
1797 err("Failed to register TIPC socket type\n"); 1796 pr_err("Failed to register TIPC socket type\n");
1798 proto_unregister(&tipc_proto); 1797 proto_unregister(&tipc_proto);
1799 goto out; 1798 goto out;
1800 } 1799 }
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index f976e9cd6a72..5ed5965eb0be 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -305,8 +305,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
305 305
306 /* Refuse subscription if global limit exceeded */ 306 /* Refuse subscription if global limit exceeded */
307 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 307 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
308 warn("Subscription rejected, subscription limit reached (%u)\n", 308 pr_warn("Subscription rejected, limit reached (%u)\n",
309 tipc_max_subscriptions); 309 tipc_max_subscriptions);
310 subscr_terminate(subscriber); 310 subscr_terminate(subscriber);
311 return NULL; 311 return NULL;
312 } 312 }
@@ -314,7 +314,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
314 /* Allocate subscription object */ 314 /* Allocate subscription object */
315 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 315 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
316 if (!sub) { 316 if (!sub) {
317 warn("Subscription rejected, no memory\n"); 317 pr_warn("Subscription rejected, no memory\n");
318 subscr_terminate(subscriber); 318 subscr_terminate(subscriber);
319 return NULL; 319 return NULL;
320 } 320 }
@@ -328,7 +328,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
328 if ((!(sub->filter & TIPC_SUB_PORTS) == 328 if ((!(sub->filter & TIPC_SUB_PORTS) ==
329 !(sub->filter & TIPC_SUB_SERVICE)) || 329 !(sub->filter & TIPC_SUB_SERVICE)) ||
330 (sub->seq.lower > sub->seq.upper)) { 330 (sub->seq.lower > sub->seq.upper)) {
331 warn("Subscription rejected, illegal request\n"); 331 pr_warn("Subscription rejected, illegal request\n");
332 kfree(sub); 332 kfree(sub);
333 subscr_terminate(subscriber); 333 subscr_terminate(subscriber);
334 return NULL; 334 return NULL;
@@ -440,7 +440,7 @@ static void subscr_named_msg_event(void *usr_handle,
440 /* Create subscriber object */ 440 /* Create subscriber object */
441 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); 441 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
442 if (subscriber == NULL) { 442 if (subscriber == NULL) {
443 warn("Subscriber rejected, no memory\n"); 443 pr_warn("Subscriber rejected, no memory\n");
444 return; 444 return;
445 } 445 }
446 INIT_LIST_HEAD(&subscriber->subscription_list); 446 INIT_LIST_HEAD(&subscriber->subscription_list);
@@ -458,7 +458,7 @@ static void subscr_named_msg_event(void *usr_handle,
458 NULL, 458 NULL,
459 &subscriber->port_ref); 459 &subscriber->port_ref);
460 if (subscriber->port_ref == 0) { 460 if (subscriber->port_ref == 0) {
461 warn("Subscriber rejected, unable to create port\n"); 461 pr_warn("Subscriber rejected, unable to create port\n");
462 kfree(subscriber); 462 kfree(subscriber);
463 return; 463 return;
464 } 464 }
@@ -517,7 +517,7 @@ int tipc_subscr_start(void)
517 return 0; 517 return 0;
518 518
519failed: 519failed:
520 err("Failed to create subscription service\n"); 520 pr_err("Failed to create subscription service\n");
521 return res; 521 return res;
522} 522}
523 523
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 641f2e47f165..e4768c180da2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,15 +115,24 @@
115#include <net/checksum.h> 115#include <net/checksum.h>
116#include <linux/security.h> 116#include <linux/security.h>
117 117
118struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; 118struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
119EXPORT_SYMBOL_GPL(unix_socket_table); 119EXPORT_SYMBOL_GPL(unix_socket_table);
120DEFINE_SPINLOCK(unix_table_lock); 120DEFINE_SPINLOCK(unix_table_lock);
121EXPORT_SYMBOL_GPL(unix_table_lock); 121EXPORT_SYMBOL_GPL(unix_table_lock);
122static atomic_long_t unix_nr_socks; 122static atomic_long_t unix_nr_socks;
123 123
124#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
125 124
126#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 125static struct hlist_head *unix_sockets_unbound(void *addr)
126{
127 unsigned long hash = (unsigned long)addr;
128
129 hash ^= hash >> 16;
130 hash ^= hash >> 8;
131 hash %= UNIX_HASH_SIZE;
132 return &unix_socket_table[UNIX_HASH_SIZE + hash];
133}
134
135#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
127 136
128#ifdef CONFIG_SECURITY_NETWORK 137#ifdef CONFIG_SECURITY_NETWORK
129static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 138static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
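
Before this hunk every unbound AF_UNIX socket sat on the single overflow chain at index UNIX_HASH_SIZE; the doubled table spreads them across the upper half instead, keyed by the address of the socket itself. A self-contained model of the placement, where the value of UNIX_HASH_SIZE is an assumption made for the demo:

    #include <stdio.h>

    #define UNIX_HASH_SIZE 256      /* demo value; the kernel's is in af_unix.h */

    /* Buckets [0, UNIX_HASH_SIZE) stay reserved for bound sockets; unbound
     * ones land in [UNIX_HASH_SIZE, 2 * UNIX_HASH_SIZE), mixed by their own
     * struct sock address so they no longer pile up on one chain. */
    static unsigned long unbound_bucket(void *addr)
    {
            unsigned long hash = (unsigned long)addr;

            hash ^= hash >> 16;
            hash ^= hash >> 8;
            hash %= UNIX_HASH_SIZE;
            return UNIX_HASH_SIZE + hash;
    }

    int main(void)
    {
            int a, b;

            printf("%lu %lu\n", unbound_bucket(&a), unbound_bucket(&b));
            return 0;
    }
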
@@ -645,7 +654,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
645 INIT_LIST_HEAD(&u->link); 654 INIT_LIST_HEAD(&u->link);
646 mutex_init(&u->readlock); /* single task reading lock */ 655 mutex_init(&u->readlock); /* single task reading lock */
647 init_waitqueue_head(&u->peer_wait); 656 init_waitqueue_head(&u->peer_wait);
648 unix_insert_socket(unix_sockets_unbound, sk); 657 unix_insert_socket(unix_sockets_unbound(sk), sk);
649out: 658out:
650 if (sk == NULL) 659 if (sk == NULL)
651 atomic_long_dec(&unix_nr_socks); 660 atomic_long_dec(&unix_nr_socks);
@@ -814,6 +823,34 @@ fail:
814 return NULL; 823 return NULL;
815} 824}
816 825
826static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
827{
828 struct dentry *dentry;
829 struct path path;
830 int err = 0;
831 /*
832 * Get the parent directory, calculate the hash for last
833 * component.
834 */
835 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
836 err = PTR_ERR(dentry);
837 if (IS_ERR(dentry))
838 return err;
839
840 /*
841 * All right, let's create it.
842 */
843 err = security_path_mknod(&path, dentry, mode, 0);
844 if (!err) {
845 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
846 if (!err) {
847 res->mnt = mntget(path.mnt);
848 res->dentry = dget(dentry);
849 }
850 }
851 done_path_create(&path, dentry);
852 return err;
853}
817 854
818static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 855static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
819{ 856{
@@ -822,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
822 struct unix_sock *u = unix_sk(sk); 859 struct unix_sock *u = unix_sk(sk);
823 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 860 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
824 char *sun_path = sunaddr->sun_path; 861 char *sun_path = sunaddr->sun_path;
825 struct dentry *dentry = NULL;
826 struct path path;
827 int err; 862 int err;
828 unsigned int hash; 863 unsigned int hash;
829 struct unix_address *addr; 864 struct unix_address *addr;
@@ -860,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
860 atomic_set(&addr->refcnt, 1); 895 atomic_set(&addr->refcnt, 1);
861 896
862 if (sun_path[0]) { 897 if (sun_path[0]) {
863 umode_t mode; 898 struct path path;
864 err = 0; 899 umode_t mode = S_IFSOCK |
865 /*
866 * Get the parent directory, calculate the hash for last
867 * component.
868 */
869 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
870 err = PTR_ERR(dentry);
871 if (IS_ERR(dentry))
872 goto out_mknod_parent;
873
874 /*
875 * All right, let's create it.
876 */
877 mode = S_IFSOCK |
878 (SOCK_INODE(sock)->i_mode & ~current_umask()); 900 (SOCK_INODE(sock)->i_mode & ~current_umask());
879 err = mnt_want_write(path.mnt); 901 err = unix_mknod(sun_path, mode, &path);
880 if (err) 902 if (err) {
881 goto out_mknod_dput; 903 if (err == -EEXIST)
882 err = security_path_mknod(&path, dentry, mode, 0); 904 err = -EADDRINUSE;
883 if (err) 905 unix_release_addr(addr);
884 goto out_mknod_drop_write; 906 goto out_up;
885 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); 907 }
886out_mknod_drop_write:
887 mnt_drop_write(path.mnt);
888 if (err)
889 goto out_mknod_dput;
890 mutex_unlock(&path.dentry->d_inode->i_mutex);
891 dput(path.dentry);
892 path.dentry = dentry;
893
894 addr->hash = UNIX_HASH_SIZE; 908 addr->hash = UNIX_HASH_SIZE;
895 } 909 hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
896 910 spin_lock(&unix_table_lock);
897 spin_lock(&unix_table_lock); 911 u->path = path;
898 912 list = &unix_socket_table[hash];
899 if (!sun_path[0]) { 913 } else {
914 spin_lock(&unix_table_lock);
900 err = -EADDRINUSE; 915 err = -EADDRINUSE;
901 if (__unix_find_socket_byname(net, sunaddr, addr_len, 916 if (__unix_find_socket_byname(net, sunaddr, addr_len,
902 sk->sk_type, hash)) { 917 sk->sk_type, hash)) {
@@ -905,9 +920,6 @@ out_mknod_drop_write:
905 } 920 }
906 921
907 list = &unix_socket_table[addr->hash]; 922 list = &unix_socket_table[addr->hash];
908 } else {
909 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
910 u->path = path;
911 } 923 }
912 924
913 err = 0; 925 err = 0;
@@ -921,16 +933,6 @@ out_up:
921 mutex_unlock(&u->readlock); 933 mutex_unlock(&u->readlock);
922out: 934out:
923 return err; 935 return err;
924
925out_mknod_dput:
926 dput(dentry);
927 mutex_unlock(&path.dentry->d_inode->i_mutex);
928 path_put(&path);
929out_mknod_parent:
930 if (err == -EEXIST)
931 err = -EADDRINUSE;
932 unix_release_addr(addr);
933 goto out_up;
934} 936}
935 937
936static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) 938static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
@@ -2239,47 +2241,54 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 }
 
 #ifdef CONFIG_PROC_FS
-static struct sock *first_unix_socket(int *i)
+
+#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
-		if (!hlist_empty(&unix_socket_table[*i]))
-			return __sk_head(&unix_socket_table[*i]);
+	unsigned long offset = get_offset(*pos);
+	unsigned long bucket = get_bucket(*pos);
+	struct sock *sk;
+	unsigned long count = 0;
+
+	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+		if (sock_net(sk) != seq_file_net(seq))
+			continue;
+		if (++count == offset)
+			break;
 	}
-	return NULL;
+
+	return sk;
 }
 
-static struct sock *next_unix_socket(int *i, struct sock *s)
+static struct sock *unix_next_socket(struct seq_file *seq,
+				     struct sock *sk,
+				     loff_t *pos)
 {
-	struct sock *next = sk_next(s);
-	/* More in this chain? */
-	if (next)
-		return next;
-	/* Look for next non-empty chain. */
-	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
-		if (!hlist_empty(&unix_socket_table[*i]))
-			return __sk_head(&unix_socket_table[*i]);
-	}
-	return NULL;
-}
+	unsigned long bucket;
+
+	while (sk > (struct sock *)SEQ_START_TOKEN) {
+		sk = sk_next(sk);
+		if (!sk)
+			goto next_bucket;
+		if (sock_net(sk) == seq_file_net(seq))
+			return sk;
+	}
 
-struct unix_iter_state {
-	struct seq_net_private p;
-	int i;
-};
+	do {
+		sk = unix_from_bucket(seq, pos);
+		if (sk)
+			return sk;
 
-static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
-{
-	struct unix_iter_state *iter = seq->private;
-	loff_t off = 0;
-	struct sock *s;
+next_bucket:
+		bucket = get_bucket(*pos) + 1;
+		*pos = set_bucket_offset(bucket, 1);
+	} while (bucket < ARRAY_SIZE(unix_socket_table));
 
-	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
-		if (sock_net(s) != seq_file_net(seq))
-			continue;
-		if (off == pos)
-			return s;
-		++off;
-	}
 	return NULL;
 }
 
@@ -2287,22 +2296,20 @@ static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(unix_table_lock)
 {
 	spin_lock(&unix_table_lock);
-	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
+		return NULL;
+
+	return unix_next_socket(seq, NULL, pos);
 }
 
 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct unix_iter_state *iter = seq->private;
-	struct sock *sk = v;
 	++*pos;
-
-	if (v == SEQ_START_TOKEN)
-		sk = first_unix_socket(&iter->i);
-	else
-		sk = next_unix_socket(&iter->i, sk);
-	while (sk && (sock_net(sk) != seq_file_net(seq)))
-		sk = next_unix_socket(&iter->i, sk);
-	return sk;
+	return unix_next_socket(seq, v, pos);
 }
 
 static void unix_seq_stop(struct seq_file *seq, void *v)
@@ -2365,7 +2372,7 @@ static const struct seq_operations unix_seq_ops = {
 static int unix_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &unix_seq_ops,
-			    sizeof(struct unix_iter_state));
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations unix_seq_fops = {
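The point of the new BUCKET_SPACE macros is that the whole iterator position fits in the seq_file loff_t: the high bits name the hash bucket, the low bits count matching sockets within it, so no private iterator struct is needed across read() restarts. A standalone userspace demo of the packing (UNIX_HASH_BITS and an LP64 long are assumed values here, not taken from this diff):

    #include <stdio.h>

    #define UNIX_HASH_BITS  8   /* assumed; matches UNIX_HASH_SIZE == 256 */
    #define BITS_PER_LONG   64  /* assume an LP64 build for this demo */

    #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

    #define get_bucket(x) ((x) >> BUCKET_SPACE)
    #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
    #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

    int main(void)
    {
        /* "bucket 5, 3rd matching socket", packed into one position */
        long pos = set_bucket_offset(5L, 3L);

        printf("bucket=%ld offset=%ld\n", get_bucket(pos), get_offset(pos));
        return 0;
    }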
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f5..750b13408449 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
 #include <net/af_unix.h>
 #include <net/tcp_states.h>
 
-#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
-	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct unix_address *addr = unix_sk(sk)->addr;
-	char *s;
-
-	if (addr) {
-		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
-		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
-	}
 
-	return 0;
+	if (!addr)
+		return 0;
 
-rtattr_failure:
-	return -EMSGSIZE;
+	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+		       addr->name->sun_path);
 }
 
 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct dentry *dentry = unix_sk(sk)->path.dentry;
-	struct unix_diag_vfs *uv;
 
 	if (dentry) {
-		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
-		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
-		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+		struct unix_diag_vfs uv = {
+			.udiag_vfs_ino = dentry->d_inode->i_ino,
+			.udiag_vfs_dev = dentry->d_sb->s_dev,
+		};
+
+		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
 	}
 
 	return 0;
-
-rtattr_failure:
-	return -EMSGSIZE;
 }
 
 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
 		unix_state_unlock(peer);
 		sock_put(peer);
 
-		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
 	}
 
 	return 0;
-rtattr_failure:
-	return -EMSGSIZE;
 }
 
 static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct sk_buff *skb;
+	struct nlattr *attr;
 	u32 *buf;
 	int i;
 
 	if (sk->sk_state == TCP_LISTEN) {
 		spin_lock(&sk->sk_receive_queue.lock);
-		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
-				sk->sk_receive_queue.qlen * sizeof(u32));
+
+		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+				   sk->sk_receive_queue.qlen * sizeof(u32));
+		if (!attr)
+			goto errout;
+
+		buf = nla_data(attr);
 		i = 0;
 		skb_queue_walk(&sk->sk_receive_queue, skb) {
 			struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 
 	return 0;
 
-rtattr_failure:
+errout:
 	spin_unlock(&sk->sk_receive_queue.lock);
 	return -EMSGSIZE;
 }
 
 static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_diag_rqlen *rql;
-
-	rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+	struct unix_diag_rqlen rql;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		rql->udiag_rqueue = sk->sk_receive_queue.qlen;
-		rql->udiag_wqueue = sk->sk_max_ack_backlog;
+		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+		rql.udiag_wqueue = sk->sk_max_ack_backlog;
 	} else {
-		rql->udiag_rqueue = (__u32)unix_inq_len(sk);
-		rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+		rql.udiag_rqueue = (u32) unix_inq_len(sk);
+		rql.udiag_wqueue = (u32) unix_outq_len(sk);
 	}
 
-	return 0;
-
-rtattr_failure:
-	return -EMSGSIZE;
+	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
 		u32 pid, u32 seq, u32 flags, int sk_ino)
 {
-	unsigned char *b = skb_tail_pointer(skb);
 	struct nlmsghdr *nlh;
 	struct unix_diag_msg *rep;
 
-	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
-	nlh->nlmsg_flags = flags;
-
-	rep = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+			flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	rep = nlmsg_data(nlh);
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
 	rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 
 	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
 	    sk_diag_dump_name(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
 	    sk_diag_dump_vfs(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
 	    sk_diag_dump_peer(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
 	    sk_diag_dump_icons(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
 	    sk_diag_show_rqlen(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;
 
-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-	return skb->len;
+	return nlmsg_end(skb, nlh);
 
-nlmsg_failure:
-	nlmsg_trim(skb, b);
+out_nlmsg_trim:
+	nlmsg_cancel(skb, nlh);
 	return -EMSGSIZE;
 }
 
@@ -188,19 +177,24 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct unix_diag_req *req;
 	int num, s_num, slot, s_slot;
+	struct net *net = sock_net(skb->sk);
 
-	req = NLMSG_DATA(cb->nlh);
+	req = nlmsg_data(cb->nlh);
 
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
 
 	spin_lock(&unix_table_lock);
-	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+	for (slot = s_slot;
+	     slot < ARRAY_SIZE(unix_socket_table);
+	     s_num = 0, slot++) {
 		struct sock *sk;
 		struct hlist_node *node;
 
 		num = 0;
 		sk_for_each(sk, node, &unix_socket_table[slot]) {
+			if (!net_eq(sock_net(sk), net))
+				continue;
 			if (num < s_num)
 				goto next;
 			if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -228,7 +222,7 @@ static struct sock *unix_lookup_by_ino(int ino)
 	struct sock *sk;
 
 	spin_lock(&unix_table_lock);
-	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
 		struct hlist_node *node;
 
 		sk_for_each(sk, node, &unix_socket_table[i])
@@ -252,6 +246,7 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
 	struct sock *sk;
 	struct sk_buff *rep;
 	unsigned int extra_len;
+	struct net *net = sock_net(in_skb->sk);
 
 	if (req->udiag_ino == 0)
 		goto out_nosk;
@@ -268,22 +263,21 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
 	extra_len = 256;
 again:
 	err = -ENOMEM;
-	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
-			GFP_KERNEL);
+	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
 	if (!rep)
 		goto out;
 
 	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
 			   nlh->nlmsg_seq, 0, req->udiag_ino);
 	if (err < 0) {
-		kfree_skb(rep);
+		nlmsg_free(rep);
 		extra_len += 256;
 		if (extra_len >= PAGE_SIZE)
 			goto out;
 
 		goto again;
 	}
-	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
 			      MSG_DONTWAIT);
 	if (err > 0)
 		err = 0;
@@ -297,6 +291,7 @@ out_nosk:
 static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
 	int hdrlen = sizeof(struct unix_diag_req);
+	struct net *net = sock_net(skb->sk);
 
 	if (nlmsg_len(h) < hdrlen)
 		return -EINVAL;
@@ -305,9 +300,9 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 		struct netlink_dump_control c = {
 			.dump = unix_diag_dump,
 		};
-		return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 	} else
-		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+		return unix_diag_get_exact(skb, h, nlmsg_data(h));
 }
 
 static const struct sock_diag_handler unix_diag_handler = {
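The diag.c conversion above swaps the open-coded rtattr macros (which hid a goto on overflow inside UNIX_DIAG_PUT) for the nlattr helpers, whose failure is an explicit return value. A schematic sketch of the two shapes the patch uses; the demo function names are illustrative, not from the patch:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <net/netlink.h>

    /* Fixed-size attribute: the helper itself reports -EMSGSIZE on overflow. */
    static int demo_put_u32(struct sk_buff *skb, int attrtype, u32 value)
    {
        return nla_put_u32(skb, attrtype, value);
    }

    /* Variable-length attribute filled in piecemeal: reserve, then write. */
    static int demo_put_array(struct sk_buff *skb, int attrtype,
                              const u32 *vals, int n)
    {
        struct nlattr *attr;

        attr = nla_reserve(skb, attrtype, n * sizeof(u32));
        if (!attr)
            return -EMSGSIZE;

        memcpy(nla_data(attr), vals, n * sizeof(u32));
        return 0;
    }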
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 788a12c1eb5d..2ab785064b7e 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
 	 * successfully, add it to the interface list.
 	 */
 
-	if (dev->name == NULL) {
-		err = -EINVAL;
-	} else {
+#ifdef WANDEBUG
+	printk(KERN_INFO "%s: registering interface %s...\n",
+	       wanrouter_modname, dev->name);
+#endif
 
-		#ifdef WANDEBUG
-		printk(KERN_INFO "%s: registering interface %s...\n",
-		       wanrouter_modname, dev->name);
-		#endif
-
-		err = register_netdev(dev);
-		if (!err) {
-			struct net_device *slave = NULL;
-			unsigned long smp_flags=0;
-
-			lock_adapter_irq(&wandev->lock, &smp_flags);
-
-			if (wandev->dev == NULL) {
-				wandev->dev = dev;
-			} else {
-				for (slave=wandev->dev;
-				     DEV_TO_SLAVE(slave);
-				     slave = DEV_TO_SLAVE(slave))
-					DEV_TO_SLAVE(slave) = dev;
-			}
-			++wandev->ndev;
-
-			unlock_adapter_irq(&wandev->lock, &smp_flags);
-			err = 0;	/* done !!! */
-			goto out;
+	err = register_netdev(dev);
+	if (!err) {
+		struct net_device *slave = NULL;
+		unsigned long smp_flags=0;
+
+		lock_adapter_irq(&wandev->lock, &smp_flags);
+
+		if (wandev->dev == NULL) {
+			wandev->dev = dev;
+		} else {
+			for (slave=wandev->dev;
+			     DEV_TO_SLAVE(slave);
+			     slave = DEV_TO_SLAVE(slave))
+				DEV_TO_SLAVE(slave) = dev;
 		}
+		++wandev->ndev;
+
+		unlock_adapter_irq(&wandev->lock, &smp_flags);
+		err = 0;	/* done !!! */
+		goto out;
 	}
 	if (wandev->del_if)
 		wandev->del_if(wandev, dev);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 2e4444fedbe0..fe4adb12b3ef 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -74,6 +74,27 @@ config CFG80211_REG_DEBUG
 
 	  If unsure, say N.
 
+config CFG80211_CERTIFICATION_ONUS
+	bool "cfg80211 certification onus"
+	depends on CFG80211 && EXPERT
+	default n
+	---help---
+	  You should disable this option unless you are both capable
+	  and willing to ensure your system will remain regulatory
+	  compliant with the features available under this option.
+	  Some options may still be under heavy development and
+	  for whatever reason regulatory compliance has not or
+	  cannot yet be verified. Regulatory verification may at
+	  times only be possible until you have the final system
+	  in place.
+
+	  This option should only be enabled by system integrators
+	  or distributions that have done work necessary to ensure
+	  regulatory certification on the system with the enabled
+	  features. Alternatively you can enable this option if
+	  you are a wireless researcher and are working in a controlled
+	  and approved environment by your local regulatory agency.
+
 config CFG80211_DEFAULT_PS
 	bool "enable powersave by default"
 	depends on CFG80211
@@ -114,24 +135,10 @@ config CFG80211_WEXT
 	bool "cfg80211 wireless extensions compatibility"
 	depends on CFG80211
 	select WEXT_CORE
-	default y
 	help
 	  Enable this option if you need old userspace for wireless
 	  extensions with cfg80211-based drivers.
 
-config WIRELESS_EXT_SYSFS
-	bool "Wireless extensions sysfs files"
-	depends on WEXT_CORE && SYSFS
-	help
-	  This option enables the deprecated wireless statistics
-	  files in /sys/class/net/*/wireless/. The same information
-	  is available via the ioctls as well.
-
-	  Say N. If you know you have ancient tools requiring it,
-	  like very old versions of hal (prior to 0.5.12 release),
-	  say Y and update the tools as soon as possible as this
-	  option will be removed soon.
-
 config LIB80211
 	tristate "Common routines for IEEE802.11 drivers"
 	default n
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 55a28ab21db9..0f7e0d621ab0 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
 obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
 
 cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o
 cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
 cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
 cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
new file mode 100644
index 000000000000..fcc60d8dbefa
--- /dev/null
+++ b/net/wireless/ap.c
@@ -0,0 +1,46 @@
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/cfg80211.h>
+#include "nl80211.h"
+#include "core.h"
+
+
+static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+			      struct net_device *dev)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int err;
+
+	ASSERT_WDEV_LOCK(wdev);
+
+	if (!rdev->ops->stop_ap)
+		return -EOPNOTSUPP;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+		return -EOPNOTSUPP;
+
+	if (!wdev->beacon_interval)
+		return -ENOENT;
+
+	err = rdev->ops->stop_ap(&rdev->wiphy, dev);
+	if (!err) {
+		wdev->beacon_interval = 0;
+		wdev->channel = NULL;
+	}
+
+	return err;
+}
+
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+		     struct net_device *dev)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int err;
+
+	wdev_lock(wdev);
+	err = __cfg80211_stop_ap(rdev, dev);
+	wdev_unlock(wdev);
+
+	return err;
+}
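The new ap.c follows the usual cfg80211 locking convention: a double-underscore variant that asserts the wdev lock is already held, plus a public wrapper that takes and drops it. A generic sketch of the convention (example_op() is a made-up name, not code from the patch):

    #include <net/cfg80211.h>
    #include "core.h"

    /* __example_op() must be entered with the wdev lock held; lockdep
     * verifies that via ASSERT_WDEV_LOCK when enabled. */
    static int __example_op(struct wireless_dev *wdev)
    {
        ASSERT_WDEV_LOCK(wdev);

        /* ... per-interface state can be inspected/modified safely ... */
        return 0;
    }

    /* example_op() is the convenience wrapper for callers without the lock. */
    static int example_op(struct wireless_dev *wdev)
    {
        int err;

        wdev_lock(wdev);
        err = __example_op(wdev);
        wdev_unlock(wdev);

        return err;
    }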
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 884801ac4dd0..d355f67d0cdd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -60,7 +60,7 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
 		diff = -20;
 		break;
 	default:
-		return false;
+		return true;
 	}
 
 	sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
@@ -78,60 +78,75 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
 
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
-		      struct wireless_dev *wdev, int freq,
-		      enum nl80211_channel_type channel_type)
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+				 int freq, enum nl80211_channel_type chantype)
 {
 	struct ieee80211_channel *chan;
-	int result;
 
-	if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
-		wdev = NULL;
-
-	if (wdev) {
-		ASSERT_WDEV_LOCK(wdev);
-
-		if (!netif_running(wdev->netdev))
-			return -ENETDOWN;
-	}
-
-	if (!rdev->ops->set_channel)
+	if (!rdev->ops->set_monitor_channel)
 		return -EOPNOTSUPP;
+	if (!cfg80211_has_monitors_only(rdev))
+		return -EBUSY;
 
-	chan = rdev_freq_to_chan(rdev, freq, channel_type);
+	chan = rdev_freq_to_chan(rdev, freq, chantype);
 	if (!chan)
 		return -EINVAL;
 
-	/* Both channels should be able to initiate communication */
-	if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
-		     wdev->iftype == NL80211_IFTYPE_AP ||
-		     wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
-		     wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
-		     wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
-		switch (channel_type) {
-		case NL80211_CHAN_HT40PLUS:
-		case NL80211_CHAN_HT40MINUS:
-			if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
-							  channel_type)) {
-				printk(KERN_DEBUG
-				       "cfg80211: Secondary channel not "
-				       "allowed to initiate communication\n");
-				return -EINVAL;
-			}
-			break;
-		default:
-			break;
+	return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+			struct ieee80211_channel **chan,
+			enum cfg80211_chan_mode *chanmode)
+{
+	*chan = NULL;
+	*chanmode = CHAN_MODE_UNDEFINED;
+
+	ASSERT_WDEV_LOCK(wdev);
+
+	if (!netif_running(wdev->netdev))
+		return;
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_ADHOC:
+		if (wdev->current_bss) {
+			*chan = wdev->current_bss->pub.channel;
+			*chanmode = wdev->ibss_fixed
+				  ? CHAN_MODE_SHARED
+				  : CHAN_MODE_EXCLUSIVE;
+			return;
+		}
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		if (wdev->current_bss) {
+			*chan = wdev->current_bss->pub.channel;
+			*chanmode = CHAN_MODE_SHARED;
+			return;
+		}
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		if (wdev->beacon_interval) {
+			*chan = wdev->channel;
+			*chanmode = CHAN_MODE_SHARED;
 		}
+		return;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (wdev->mesh_id_len) {
+			*chan = wdev->channel;
+			*chanmode = CHAN_MODE_SHARED;
+		}
+		return;
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+		/* these interface types don't really have a channel */
+		return;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case NUM_NL80211_IFTYPES:
+		WARN_ON(1);
 	}
 
-	result = rdev->ops->set_channel(&rdev->wiphy,
-					wdev ? wdev->netdev : NULL,
-					chan, channel_type);
-	if (result)
-		return result;
-
-	if (wdev)
-		wdev->channel = chan;
-
-	return 0;
+	return;
 }
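cfg80211_get_chan_state() gives the channel-accounting code one answer per interface: which channel it occupies and whether it can share it. A hypothetical caller sketch (demo_chan_is_claimed() is not a function in this patch; a real walk would hold rdev->devlist_mtx or RCU for the list, as core.c does):

    #include <net/cfg80211.h>
    #include "core.h"

    /* Report whether any running interface on this wiphy already owns
     * @target exclusively, e.g. a fixed-channel IBSS. */
    static bool demo_chan_is_claimed(struct cfg80211_registered_device *rdev,
                                     struct ieee80211_channel *target)
    {
        struct wireless_dev *wdev;

        list_for_each_entry(wdev, &rdev->wdev_list, list) {
            struct ieee80211_channel *chan;
            enum cfg80211_chan_mode chanmode;

            wdev_lock(wdev);
            cfg80211_get_chan_state(wdev, &chan, &chanmode);
            wdev_unlock(wdev);

            if (chanmode == CHAN_MODE_EXCLUSIVE && chan == target)
                return true;
        }

        return false;
    }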
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a87d43552974..31b40cc4a9c3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -96,69 +96,6 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
 	return &rdev->wiphy;
 }
 
-/* requires cfg80211_mutex to be held! */
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info)
-{
-	int ifindex;
-	struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL;
-	struct net_device *dev;
-	int err = -EINVAL;
-
-	assert_cfg80211_lock();
-
-	if (info->attrs[NL80211_ATTR_WIPHY]) {
-		bywiphyidx = cfg80211_rdev_by_wiphy_idx(
-				nla_get_u32(info->attrs[NL80211_ATTR_WIPHY]));
-		err = -ENODEV;
-	}
-
-	if (info->attrs[NL80211_ATTR_IFINDEX]) {
-		ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
-		dev = dev_get_by_index(genl_info_net(info), ifindex);
-		if (dev) {
-			if (dev->ieee80211_ptr)
-				byifidx =
-					wiphy_to_dev(dev->ieee80211_ptr->wiphy);
-			dev_put(dev);
-		}
-		err = -ENODEV;
-	}
-
-	if (bywiphyidx && byifidx) {
-		if (bywiphyidx != byifidx)
-			return ERR_PTR(-EINVAL);
-		else
-			return bywiphyidx; /* == byifidx */
-	}
-	if (bywiphyidx)
-		return bywiphyidx;
-
-	if (byifidx)
-		return byifidx;
-
-	return ERR_PTR(err);
-}
-
-struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info)
-{
-	struct cfg80211_registered_device *rdev;
-
-	mutex_lock(&cfg80211_mutex);
-	rdev = __cfg80211_rdev_from_info(info);
-
-	/* if it is not an error we grab the lock on
-	 * it to assure it won't be going away while
-	 * we operate on it */
-	if (!IS_ERR(rdev))
-		mutex_lock(&rdev->mtx);
-
-	mutex_unlock(&cfg80211_mutex);
-
-	return rdev;
-}
-
 struct cfg80211_registered_device *
 cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
 {
@@ -239,7 +176,9 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
 		return -EOPNOTSUPP;
 
-	list_for_each_entry(wdev, &rdev->netdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		if (!wdev->netdev)
+			continue;
 		wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
 		err = dev_change_net_namespace(wdev->netdev, net, "wlan%d");
 		if (err)
@@ -251,8 +190,10 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
 	/* failed -- clean up to old netns */
 	net = wiphy_net(&rdev->wiphy);
 
-	list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list,
+	list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
 					     list) {
+		if (!wdev->netdev)
+			continue;
 		wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
 		err = dev_change_net_namespace(wdev->netdev, net,
 					       "wlan%d");
@@ -289,8 +230,9 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
 	rtnl_lock();
 	mutex_lock(&rdev->devlist_mtx);
 
-	list_for_each_entry(wdev, &rdev->netdev_list, list)
-		dev_close(wdev->netdev);
+	list_for_each_entry(wdev, &rdev->wdev_list, list)
+		if (wdev->netdev)
+			dev_close(wdev->netdev);
 
 	mutex_unlock(&rdev->devlist_mtx);
 	rtnl_unlock();
@@ -367,7 +309,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 	mutex_init(&rdev->mtx);
 	mutex_init(&rdev->devlist_mtx);
 	mutex_init(&rdev->sched_scan_mtx);
-	INIT_LIST_HEAD(&rdev->netdev_list);
+	INIT_LIST_HEAD(&rdev->wdev_list);
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);
 	INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
@@ -436,6 +378,14 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
 		if (WARN_ON(!c->num_different_channels))
 			return -EINVAL;
 
+		/*
+		 * Put a sane limit on maximum number of different
+		 * channels to simplify channel accounting code.
+		 */
+		if (WARN_ON(c->num_different_channels >
+				CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
+			return -EINVAL;
+
 		if (WARN_ON(!c->n_limits))
 			return -EINVAL;
 
@@ -484,9 +434,11 @@ int wiphy_register(struct wiphy *wiphy)
 	int i;
 	u16 ifmodes = wiphy->interface_modes;
 
+#ifdef CONFIG_PM
 	if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
 		    !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
 		return -EINVAL;
+#endif
 
 	if (WARN_ON(wiphy->ap_sme_capa &&
 		    !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
@@ -521,8 +473,14 @@ int wiphy_register(struct wiphy *wiphy)
 			continue;
 
 		sband->band = band;
-
-		if (WARN_ON(!sband->n_channels || !sband->n_bitrates))
+		if (WARN_ON(!sband->n_channels))
+			return -EINVAL;
+		/*
+		 * on 60gHz band, there are no legacy rates, so
+		 * n_bitrates is 0
+		 */
+		if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
+			    !sband->n_bitrates))
 			return -EINVAL;
 
 		/*
@@ -563,12 +521,14 @@ int wiphy_register(struct wiphy *wiphy)
 			return -EINVAL;
 	}
 
+#ifdef CONFIG_PM
 	if (rdev->wiphy.wowlan.n_patterns) {
 		if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
 			    rdev->wiphy.wowlan.pattern_min_len >
 			    rdev->wiphy.wowlan.pattern_max_len))
 			return -EINVAL;
 	}
+#endif
 
 	/* check and set up bitrates */
 	ieee80211_set_bitrate_flags(wiphy);
@@ -582,7 +542,7 @@ int wiphy_register(struct wiphy *wiphy)
 	}
 
 	/* set up regulatory info */
-	regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
+	wiphy_regulatory_register(wiphy);
 
 	list_add_rcu(&rdev->list, &cfg80211_rdev_list);
 	cfg80211_rdev_list_generation++;
@@ -667,7 +627,7 @@ void wiphy_unregister(struct wiphy *wiphy)
 		   __count == 0; }));
 
 	mutex_lock(&rdev->devlist_mtx);
-	BUG_ON(!list_empty(&rdev->netdev_list));
+	BUG_ON(!list_empty(&rdev->wdev_list));
 	mutex_unlock(&rdev->devlist_mtx);
 
 	/*
@@ -692,9 +652,11 @@ void wiphy_unregister(struct wiphy *wiphy)
 		/* nothing */
 	cfg80211_unlock_rdev(rdev);
 
-	/* If this device got a regulatory hint tell core its
-	 * free to listen now to a new shiny device regulatory hint */
-	reg_device_remove(wiphy);
+	/*
+	 * If this device got a regulatory hint tell core its
+	 * free to listen now to a new shiny device regulatory hint
+	 */
+	wiphy_regulatory_deregister(wiphy);
 
 	cfg80211_rdev_list_generation++;
 	device_del(&rdev->wiphy.dev);
@@ -748,7 +710,7 @@ static void wdev_cleanup_work(struct work_struct *work)
 
 	cfg80211_lock_rdev(rdev);
 
-	if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) {
+	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
 		rdev->scan_req->aborted = true;
 		___cfg80211_scan_done(rdev, true);
 	}
@@ -776,6 +738,16 @@ static struct device_type wiphy_type = {
 	.name = "wlan",
 };
 
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+			       enum nl80211_iftype iftype, int num)
+{
+	ASSERT_RTNL();
+
+	rdev->num_running_ifaces += num;
+	if (iftype == NL80211_IFTYPE_MONITOR)
+		rdev->num_running_monitor_ifaces += num;
+}
+
 static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 					 unsigned long state,
 					 void *ndev)
@@ -810,7 +782,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		mutex_lock(&rdev->devlist_mtx);
-		list_add_rcu(&wdev->list, &rdev->netdev_list);
+		wdev->identifier = ++rdev->wdev_id;
+		list_add_rcu(&wdev->list, &rdev->wdev_list);
 		rdev->devlist_generation++;
 		/* can only change netns with wiphy */
 		dev->features |= NETIF_F_NETNS_LOCAL;
@@ -869,12 +842,16 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		case NL80211_IFTYPE_MESH_POINT:
 			cfg80211_leave_mesh(rdev, dev);
 			break;
+		case NL80211_IFTYPE_AP:
+			cfg80211_stop_ap(rdev, dev);
+			break;
 		default:
 			break;
 		}
 		wdev->beacon_interval = 0;
 		break;
 	case NETDEV_DOWN:
+		cfg80211_update_iface_num(rdev, wdev->iftype, -1);
 		dev_hold(dev);
 		queue_work(cfg80211_wq, &wdev->cleanup_work);
 		break;
@@ -891,6 +868,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 			mutex_unlock(&rdev->devlist_mtx);
 			dev_put(dev);
 		}
+		cfg80211_update_iface_num(rdev, wdev->iftype, 1);
 		cfg80211_lock_rdev(rdev);
 		mutex_lock(&rdev->devlist_mtx);
 		wdev_lock(wdev);
@@ -980,7 +958,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 			return notifier_from_errno(-EOPNOTSUPP);
 		if (rfkill_blocked(rdev->rfkill))
 			return notifier_from_errno(-ERFKILL);
+		mutex_lock(&rdev->devlist_mtx);
 		ret = cfg80211_can_add_interface(rdev, wdev->iftype);
+		mutex_unlock(&rdev->devlist_mtx);
 		if (ret)
 			return notifier_from_errno(ret);
 		break;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8523f3878677..5206c6844fd7 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/rfkill.h>
 #include <linux/workqueue.h>
+#include <linux/rtnetlink.h>
 #include <net/genetlink.h>
 #include <net/cfg80211.h>
 #include "reg.h"
@@ -46,16 +47,20 @@ struct cfg80211_registered_device {
 	/* wiphy index, internal only */
 	int wiphy_idx;
 
-	/* associate netdev list */
+	/* associated wireless interfaces */
 	struct mutex devlist_mtx;
 	/* protected by devlist_mtx or RCU */
-	struct list_head netdev_list;
-	int devlist_generation;
+	struct list_head wdev_list;
+	int devlist_generation, wdev_id;
 	int opencount; /* also protected by devlist_mtx */
 	wait_queue_head_t dev_wait;
 
 	u32 ap_beacons_nlpid;
 
+	/* protected by RTNL only */
+	int num_running_ifaces;
+	int num_running_monitor_ifaces;
+
 	/* BSSes/scanning */
 	spinlock_t bss_lock;
 	struct list_head bss_list;
@@ -159,32 +164,6 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
 int get_wiphy_idx(struct wiphy *wiphy);
 
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info);
-
-/*
- * This function returns a pointer to the driver
- * that the genl_info item that is passed refers to.
- * If successful, it returns non-NULL and also locks
- * the driver's mutex!
- *
- * This means that you need to call cfg80211_unlock_rdev()
- * before being allowed to acquire &cfg80211_mutex!
- *
- * This is necessary because we need to lock the global
- * mutex to get an item off the list safely, and then
- * we lock the rdev mutex so it doesn't go away under us.
- *
- * We don't want to keep cfg80211_mutex locked
- * for all the time in order to allow requests on
- * other interfaces to go through at the same time.
- *
- * The result of this can be a PTR_ERR and hence must
- * be checked with IS_ERR() for errors.
- */
-extern struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info);
-
 /* requires cfg80211_rdev_mutex to be held! */
 struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
 
@@ -223,6 +202,14 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
 #define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
 #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
 
+static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
+{
+	ASSERT_RTNL();
+
+	return rdev->num_running_ifaces == rdev->num_running_monitor_ifaces &&
+	       rdev->num_running_ifaces > 0;
+}
+
 enum cfg80211_event_type {
 	EVENT_CONNECT_RESULT,
 	EVENT_ROAMED,
@@ -267,6 +254,12 @@ struct cfg80211_cached_keys {
 	int def, defmgmt;
 };
 
+enum cfg80211_chan_mode {
+	CHAN_MODE_UNDEFINED,
+	CHAN_MODE_SHARED,
+	CHAN_MODE_EXCLUSIVE,
+};
+
 
 /* free object */
 extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
@@ -303,14 +296,21 @@ extern const struct mesh_config default_mesh_config;
 extern const struct mesh_setup default_mesh_setup;
 int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
			 struct net_device *dev,
-			 const struct mesh_setup *setup,
+			 struct mesh_setup *setup,
			 const struct mesh_config *conf);
 int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
		       struct net_device *dev,
-		       const struct mesh_setup *setup,
+		       struct mesh_setup *setup,
		       const struct mesh_config *conf);
 int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
			struct net_device *dev);
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+			   struct wireless_dev *wdev, int freq,
+			   enum nl80211_channel_type channel_type);
+
+/* AP */
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+		     struct net_device *dev);
 
 /* MLME */
 int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
@@ -369,7 +369,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
-			  struct net_device *dev,
+			  struct wireless_dev *wdev,
			  struct ieee80211_channel *chan, bool offchan,
			  enum nl80211_channel_type channel_type,
			  bool channel_type_valid, unsigned int wait,
@@ -427,9 +427,20 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
			  u32 *flags, struct vif_params *params);
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
 
-int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
-				  struct wireless_dev *wdev,
-				  enum nl80211_iftype iftype);
+int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
+				 struct wireless_dev *wdev,
+				 enum nl80211_iftype iftype,
+				 struct ieee80211_channel *chan,
+				 enum cfg80211_chan_mode chanmode);
+
+static inline int
+cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+			      struct wireless_dev *wdev,
+			      enum nl80211_iftype iftype)
+{
+	return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
+					    CHAN_MODE_UNDEFINED);
+}
 
 static inline int
 cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
@@ -438,12 +449,26 @@ cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
 	return cfg80211_can_change_interface(rdev, NULL, iftype);
 }
 
+static inline int
+cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
+		      struct wireless_dev *wdev,
+		      struct ieee80211_channel *chan,
+		      enum cfg80211_chan_mode chanmode)
+{
+	return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+					    chan, chanmode);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+			struct ieee80211_channel **chan,
+			enum cfg80211_chan_mode *chanmode);
+
 struct ieee80211_channel *
 rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
		  int freq, enum nl80211_channel_type channel_type);
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
-		      struct wireless_dev *wdev, int freq,
-		      enum nl80211_channel_type channel_type);
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+				 int freq, enum nl80211_channel_type chantype);
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
			   const u8 *rates, unsigned int n_rates,
@@ -452,6 +477,11 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
				 u32 beacon_int);
 
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+			       enum nl80211_iftype iftype, int num);
+
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
 #define CFG80211_DEV_WARN_ON(cond)	WARN_ON(cond)
 #else
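The two RTNL-protected counters added to core.h exist so cfg80211_has_monitors_only() can decide cheaply whether setting a monitor channel is safe. An illustrative walk through the counter states (demo_iface_counting() is not part of the patch):

    #include <linux/rtnetlink.h>
    #include "core.h"

    static void demo_iface_counting(struct cfg80211_registered_device *rdev)
    {
        ASSERT_RTNL(); /* both helpers are RTNL-only by design */

        cfg80211_update_iface_num(rdev, NL80211_IFTYPE_MONITOR, 1);
        /* one running interface, and it is a monitor */
        WARN_ON(!cfg80211_has_monitors_only(rdev));

        cfg80211_update_iface_num(rdev, NL80211_IFTYPE_STATION, 1);
        /* a non-monitor is running too, so the check now fails */
        WARN_ON(cfg80211_has_monitors_only(rdev));

        cfg80211_update_iface_num(rdev, NL80211_IFTYPE_STATION, -1);
        cfg80211_update_iface_num(rdev, NL80211_IFTYPE_MONITOR, -1);
    }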
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 89baa3328411..ca5672f6ee2f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -113,10 +113,21 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 	kfree(wdev->connect_keys);
 	wdev->connect_keys = connkeys;
 
+	wdev->ibss_fixed = params->channel_fixed;
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.channel = params->channel;
 #endif
 	wdev->sme_state = CFG80211_SME_CONNECTING;
+
+	err = cfg80211_can_use_chan(rdev, wdev, params->channel,
+				    params->channel_fixed
+				    ? CHAN_MODE_SHARED
+				    : CHAN_MODE_EXCLUSIVE);
+	if (err) {
+		wdev->connect_keys = NULL;
+		return err;
+	}
+
 	err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
 	if (err) {
 		wdev->connect_keys = NULL;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 2749cb86b462..c384e77ff77a 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -14,6 +14,9 @@
 
 #define MESH_PATH_TIMEOUT	5000
 #define MESH_RANN_INTERVAL	5000
+#define MESH_PATH_TO_ROOT_TIMEOUT	6000
+#define MESH_ROOT_INTERVAL	5000
+#define MESH_ROOT_CONFIRMATION_INTERVAL	2000
 
 /*
  * Minimum interval between two consecutive PREQs originated by the same
@@ -62,9 +65,15 @@ const struct mesh_config default_mesh_config = {
 	.dot11MeshForwarding = true,
 	.rssi_threshold = MESH_RSSI_THRESHOLD,
 	.ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
+	.dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT,
+	.dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL,
+	.dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
 };
 
 const struct mesh_setup default_mesh_setup = {
+	/* cfg80211_join_mesh() will pick a channel if needed */
+	.channel = NULL,
+	.channel_type = NL80211_CHAN_NO_HT,
 	.sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
 	.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
 	.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -75,7 +84,7 @@ const struct mesh_setup default_mesh_setup = {
 
 int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
			 struct net_device *dev,
-			 const struct mesh_setup *setup,
+			 struct mesh_setup *setup,
			 const struct mesh_config *conf)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -101,10 +110,61 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
 	if (!rdev->ops->join_mesh)
 		return -EOPNOTSUPP;
 
+	if (!setup->channel) {
+		/* if no channel explicitly given, use preset channel */
+		setup->channel = wdev->preset_chan;
+		setup->channel_type = wdev->preset_chantype;
+	}
+
+	if (!setup->channel) {
+		/* if we don't have that either, use the first usable channel */
+		enum ieee80211_band band;
+
+		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+			struct ieee80211_supported_band *sband;
+			struct ieee80211_channel *chan;
+			int i;
+
+			sband = rdev->wiphy.bands[band];
+			if (!sband)
+				continue;
+
+			for (i = 0; i < sband->n_channels; i++) {
+				chan = &sband->channels[i];
+				if (chan->flags & (IEEE80211_CHAN_NO_IBSS |
+						   IEEE80211_CHAN_PASSIVE_SCAN |
+						   IEEE80211_CHAN_DISABLED |
+						   IEEE80211_CHAN_RADAR))
+					continue;
+				setup->channel = chan;
+				break;
+			}
+
+			if (setup->channel)
+				break;
+		}
+
+		/* no usable channel ... */
+		if (!setup->channel)
+			return -EINVAL;
+
+		setup->channel_type = NL80211_CHAN_NO_HT;
+	}
+
+	if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel,
+					  setup->channel_type))
+		return -EINVAL;
+
+	err = cfg80211_can_use_chan(rdev, wdev, setup->channel,
+				    CHAN_MODE_SHARED);
+	if (err)
+		return err;
+
 	err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
 	if (!err) {
 		memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
 		wdev->mesh_id_len = setup->mesh_id_len;
+		wdev->channel = setup->channel;
 	}
 
 	return err;
@@ -112,19 +172,71 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
 
 int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
		       struct net_device *dev,
-		       const struct mesh_setup *setup,
+		       struct mesh_setup *setup,
		       const struct mesh_config *conf)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
+	mutex_lock(&rdev->devlist_mtx);
 	wdev_lock(wdev);
 	err = __cfg80211_join_mesh(rdev, dev, setup, conf);
 	wdev_unlock(wdev);
+	mutex_unlock(&rdev->devlist_mtx);
 
 	return err;
 }
 
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+			   struct wireless_dev *wdev, int freq,
+			   enum nl80211_channel_type channel_type)
+{
+	struct ieee80211_channel *channel;
+	int err;
+
+	channel = rdev_freq_to_chan(rdev, freq, channel_type);
+	if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+						      channel,
+						      channel_type)) {
+		return -EINVAL;
+	}
+
+	/*
+	 * Workaround for libertas (only!), it puts the interface
+	 * into mesh mode but doesn't implement join_mesh. Instead,
+	 * it is configured via sysfs and then joins the mesh when
+	 * you set the channel. Note that the libertas mesh isn't
+	 * compatible with 802.11 mesh.
+	 */
+	if (rdev->ops->libertas_set_mesh_channel) {
+		if (channel_type != NL80211_CHAN_NO_HT)
+			return -EINVAL;
+
+		if (!netif_running(wdev->netdev))
+			return -ENETDOWN;
+
+		err = cfg80211_can_use_chan(rdev, wdev, channel,
+					    CHAN_MODE_SHARED);
+		if (err)
+			return err;
+
+		err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy,
+							   wdev->netdev,
+							   channel);
+		if (!err)
+			wdev->channel = channel;
+
+		return err;
+	}
+
+	if (wdev->mesh_id_len)
+		return -EBUSY;
+
+	wdev->preset_chan = channel;
+	wdev->preset_chantype = channel_type;
+	return 0;
+}
+
 void cfg80211_notify_new_peer_candidate(struct net_device *dev,
		const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
 {
@@ -156,8 +268,11 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
		return -ENOTCONN;
 
 	err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
-	if (!err)
+	if (!err) {
 		wdev->mesh_id_len = 0;
+		wdev->channel = NULL;
+	}
+
 	return err;
 }
 
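With this rework, a mesh channel can be preset while the interface is down and is consumed by the next join, since __cfg80211_join_mesh() falls back to wdev->preset_chan when setup->channel is NULL. A hypothetical bring-up sequence (demo_mesh_bringup() is illustrative, not in the tree):

    #include <net/cfg80211.h>
    #include "core.h"

    static int demo_mesh_bringup(struct cfg80211_registered_device *rdev,
                                 struct net_device *dev,
                                 struct mesh_setup *setup,
                                 const struct mesh_config *conf)
    {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;

        /* 2412 MHz is 2.4 GHz channel 1; NO_HT avoids the secondary-
         * channel beaconing check failing on HT40 */
        err = cfg80211_set_mesh_freq(rdev, wdev, 2412, NL80211_CHAN_NO_HT);
        if (err)
            return err;

        /* leave setup->channel NULL so the preset channel is picked up */
        setup->channel = NULL;
        return cfg80211_join_mesh(rdev, dev, setup, conf);
    }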
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index eb90988bbd36..1cdb1d5e6b0f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -302,8 +302,14 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
302 if (!req.bss) 302 if (!req.bss)
303 return -ENOENT; 303 return -ENOENT;
304 304
305 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
306 CHAN_MODE_SHARED);
307 if (err)
308 goto out;
309
305 err = rdev->ops->auth(&rdev->wiphy, dev, &req); 310 err = rdev->ops->auth(&rdev->wiphy, dev, &req);
306 311
312out:
307 cfg80211_put_bss(req.bss); 313 cfg80211_put_bss(req.bss);
308 return err; 314 return err;
309} 315}
@@ -317,11 +323,13 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
317{ 323{
318 int err; 324 int err;
319 325
326 mutex_lock(&rdev->devlist_mtx);
320 wdev_lock(dev->ieee80211_ptr); 327 wdev_lock(dev->ieee80211_ptr);
321 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 328 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
322 ssid, ssid_len, ie, ie_len, 329 ssid, ssid_len, ie, ie_len,
323 key, key_len, key_idx); 330 key, key_len, key_idx);
324 wdev_unlock(dev->ieee80211_ptr); 331 wdev_unlock(dev->ieee80211_ptr);
332 mutex_unlock(&rdev->devlist_mtx);
325 333
326 return err; 334 return err;
327} 335}
@@ -397,8 +405,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
397 return -ENOENT; 405 return -ENOENT;
398 } 406 }
399 407
408 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
409 CHAN_MODE_SHARED);
410 if (err)
411 goto out;
412
400 err = rdev->ops->assoc(&rdev->wiphy, dev, &req); 413 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
401 414
415out:
402 if (err) { 416 if (err) {
403 if (was_connected) 417 if (was_connected)
404 wdev->sme_state = CFG80211_SME_CONNECTED; 418 wdev->sme_state = CFG80211_SME_CONNECTED;
@@ -421,11 +435,13 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
421 struct wireless_dev *wdev = dev->ieee80211_ptr; 435 struct wireless_dev *wdev = dev->ieee80211_ptr;
422 int err; 436 int err;
423 437
438 mutex_lock(&rdev->devlist_mtx);
424 wdev_lock(wdev); 439 wdev_lock(wdev);
425 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 440 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
426 ssid, ssid_len, ie, ie_len, use_mfp, crypt, 441 ssid, ssid_len, ie, ie_len, use_mfp, crypt,
427 assoc_flags, ht_capa, ht_capa_mask); 442 assoc_flags, ht_capa, ht_capa_mask);
428 wdev_unlock(wdev); 443 wdev_unlock(wdev);
444 mutex_unlock(&rdev->devlist_mtx);
429 445
430 return err; 446 return err;
431} 447}
@@ -551,29 +567,28 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 	}
 }
 
-void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
 			       struct ieee80211_channel *chan,
 			       enum nl80211_channel_type channel_type,
 			       unsigned int duration, gfp_t gfp)
 {
-	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
+	nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type,
 				       duration, gfp);
 }
 EXPORT_SYMBOL(cfg80211_ready_on_channel);
 
-void cfg80211_remain_on_channel_expired(struct net_device *dev,
-					u64 cookie,
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
 					struct ieee80211_channel *chan,
 					enum nl80211_channel_type channel_type,
 					gfp_t gfp)
 {
-	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
+	nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan,
 					      channel_type, gfp);
 }
 EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
@@ -662,8 +677,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 	list_add(&nreg->list, &wdev->mgmt_registrations);
 
 	if (rdev->ops->mgmt_frame_register)
-		rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
-					       frame_type, true);
+		rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true);
 
  out:
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -686,7 +700,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
 		if (rdev->ops->mgmt_frame_register) {
 			u16 frame_type = le16_to_cpu(reg->frame_type);
 
-			rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
+			rdev->ops->mgmt_frame_register(wiphy, wdev,
 						       frame_type, false);
 		}
 
@@ -715,14 +729,14 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 }
 
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
-			  struct net_device *dev,
+			  struct wireless_dev *wdev,
 			  struct ieee80211_channel *chan, bool offchan,
 			  enum nl80211_channel_type channel_type,
 			  bool channel_type_valid, unsigned int wait,
 			  const u8 *buf, size_t len, bool no_cck,
 			  bool dont_wait_for_ack, u64 *cookie)
 {
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct net_device *dev = wdev->netdev;
 	const struct ieee80211_mgmt *mgmt;
 	u16 stype;
 
@@ -809,16 +823,15 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 		return -EINVAL;
 
 	/* Transmit the Action frame as requested by user space */
-	return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
+	return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
 				  channel_type, channel_type_valid,
 				  wait, buf, len, no_cck, dont_wait_for_ack,
 				  cookie);
 }
 
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 		      const u8 *buf, size_t len, gfp_t gfp)
 {
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 	struct cfg80211_mgmt_registration *reg;
@@ -855,7 +868,7 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
 		/* found match! */
 
 		/* Indicate the received Action frame to user space */
-		if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
+		if (nl80211_send_mgmt(rdev, wdev, reg->nlpid,
 				      freq, sig_mbm,
 				      buf, len, gfp))
 			continue;
@@ -870,15 +883,14 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
 }
 EXPORT_SYMBOL(cfg80211_rx_mgmt);
 
-void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 			     const u8 *buf, size_t len, bool ack, gfp_t gfp)
 {
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
 	/* Indicate TX status of the Action frame to user space */
-	nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+	nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
 }
 EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
@@ -907,6 +919,19 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
 }
 EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
 
+void cfg80211_cqm_txe_notify(struct net_device *dev,
+			     const u8 *peer, u32 num_packets,
+			     u32 rate, u32 intvl, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	nl80211_send_cqm_txe_notify(rdev, dev, peer, num_packets,
+				    rate, intvl, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
+
 void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
 			       const u8 *replay_ctr, gfp_t gfp)
 {
@@ -948,7 +973,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
 		goto out;
 
 	wdev->channel = chan;
-
 	nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
 out:
 	wdev_unlock(wdev);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 206465dc0cab..97026f3b215a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -46,28 +46,175 @@ static struct genl_family nl80211_fam = {
 	.post_doit = nl80211_post_doit,
 };
 
-/* internal helper: get rdev and dev */
-static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
-				   struct cfg80211_registered_device **rdev,
-				   struct net_device **dev)
+/* returns ERR_PTR values */
+static struct wireless_dev *
+__cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
 {
-	int ifindex;
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *result = NULL;
+	bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
+	bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
+	u64 wdev_id;
+	int wiphy_idx = -1;
+	int ifidx = -1;
 
-	if (!attrs[NL80211_ATTR_IFINDEX])
-		return -EINVAL;
+	assert_cfg80211_lock();
 
-	ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
-	*dev = dev_get_by_index(netns, ifindex);
-	if (!*dev)
-		return -ENODEV;
+	if (!have_ifidx && !have_wdev_id)
+		return ERR_PTR(-EINVAL);
 
-	*rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
-	if (IS_ERR(*rdev)) {
-		dev_put(*dev);
-		return PTR_ERR(*rdev);
+	if (have_ifidx)
+		ifidx = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+	if (have_wdev_id) {
+		wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+		wiphy_idx = wdev_id >> 32;
 	}
 
-	return 0;
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		struct wireless_dev *wdev;
+
+		if (wiphy_net(&rdev->wiphy) != netns)
+			continue;
+
+		if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
+			continue;
+
+		mutex_lock(&rdev->devlist_mtx);
+		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+			if (have_ifidx && wdev->netdev &&
+			    wdev->netdev->ifindex == ifidx) {
+				result = wdev;
+				break;
+			}
+			if (have_wdev_id && wdev->identifier == (u32)wdev_id) {
+				result = wdev;
+				break;
+			}
+		}
+		mutex_unlock(&rdev->devlist_mtx);
+
+		if (result)
+			break;
+	}
+
+	if (result)
+		return result;
+	return ERR_PTR(-ENODEV);
+}
+
+static struct cfg80211_registered_device *
+__cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
+{
+	struct cfg80211_registered_device *rdev = NULL, *tmp;
+	struct net_device *netdev;
+
+	assert_cfg80211_lock();
+
+	if (!attrs[NL80211_ATTR_WIPHY] &&
+	    !attrs[NL80211_ATTR_IFINDEX] &&
+	    !attrs[NL80211_ATTR_WDEV])
+		return ERR_PTR(-EINVAL);
+
+	if (attrs[NL80211_ATTR_WIPHY])
+		rdev = cfg80211_rdev_by_wiphy_idx(
+				nla_get_u32(attrs[NL80211_ATTR_WIPHY]));
+
+	if (attrs[NL80211_ATTR_WDEV]) {
+		u64 wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+		struct wireless_dev *wdev;
+		bool found = false;
+
+		tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
+		if (tmp) {
+			/* make sure wdev exists */
+			mutex_lock(&tmp->devlist_mtx);
+			list_for_each_entry(wdev, &tmp->wdev_list, list) {
+				if (wdev->identifier != (u32)wdev_id)
+					continue;
+				found = true;
+				break;
+			}
+			mutex_unlock(&tmp->devlist_mtx);
+
+			if (!found)
+				tmp = NULL;
+
+			if (rdev && tmp != rdev)
+				return ERR_PTR(-EINVAL);
+			rdev = tmp;
+		}
+	}
+
+	if (attrs[NL80211_ATTR_IFINDEX]) {
+		int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+		netdev = dev_get_by_index(netns, ifindex);
+		if (netdev) {
+			if (netdev->ieee80211_ptr)
+				tmp = wiphy_to_dev(
+						netdev->ieee80211_ptr->wiphy);
+			else
+				tmp = NULL;
+
+			dev_put(netdev);
+
+			/* not wireless device -- return error */
+			if (!tmp)
+				return ERR_PTR(-EINVAL);
+
+			/* mismatch -- return error */
+			if (rdev && tmp != rdev)
+				return ERR_PTR(-EINVAL);
+
+			rdev = tmp;
+		}
+	}
+
+	if (!rdev)
+		return ERR_PTR(-ENODEV);
+
+	if (netns != wiphy_net(&rdev->wiphy))
+		return ERR_PTR(-ENODEV);
+
+	return rdev;
+}
+
+/*
+ * This function returns a pointer to the driver
+ * that the genl_info item that is passed refers to.
+ * If successful, it returns non-NULL and also locks
+ * the driver's mutex!
+ *
+ * This means that you need to call cfg80211_unlock_rdev()
+ * before being allowed to acquire &cfg80211_mutex!
+ *
+ * This is necessary because we need to lock the global
+ * mutex to get an item off the list safely, and then
+ * we lock the rdev mutex so it doesn't go away under us.
+ *
+ * We don't want to keep cfg80211_mutex locked
+ * for all the time in order to allow requests on
+ * other interfaces to go through at the same time.
+ *
+ * The result of this can be a PTR_ERR and hence must
+ * be checked with IS_ERR() for errors.
+ */
+static struct cfg80211_registered_device *
+cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+
+	mutex_lock(&cfg80211_mutex);
+	rdev = __cfg80211_rdev_from_attrs(netns, info->attrs);
+
+	/* if it is not an error we grab the lock on
+	 * it to assure it won't be going away while
+	 * we operate on it */
+	if (!IS_ERR(rdev))
+		mutex_lock(&rdev->mtx);
+
+	mutex_unlock(&cfg80211_mutex);
+
+	return rdev;
 }
 
 /* policy for the attributes */
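
An aside for readers, not part of the patch: the new u64 NL80211_ATTR_WDEV value combines the wiphy index (upper 32 bits) with the per-wiphy wdev identifier (lower 32 bits), which is exactly how __cfg80211_wdev_from_attrs() above splits it with "wdev_id >> 32" and "(u32)wdev_id". A minimal stand-alone C sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wiphy_idx = 3, identifier = 7;	/* hypothetical values */
	uint64_t wdev_id = ((uint64_t)wiphy_idx << 32) | identifier;

	/* decompose the same way the lookup code in the hunk above does */
	printf("wiphy_idx=%u identifier=%u\n",
	       (unsigned)(wdev_id >> 32), (unsigned)wdev_id);
	return 0;
}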
@@ -115,7 +262,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
 	[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
 	[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
-				.len = IEEE80211_MAX_MESH_ID_LEN },
+				   .len = IEEE80211_MAX_MESH_ID_LEN },
 	[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
 
 	[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
@@ -206,6 +353,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
 	[NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
 	[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
+	[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
+	[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -250,8 +399,9 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
 
 static const struct nla_policy
 nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
-	[NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
-						 .len = IEEE80211_MAX_SSID_LEN },
+	[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
+						 .len = IEEE80211_MAX_SSID_LEN },
+	[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
 };
 
 /* ifidx get helper */
@@ -832,6 +982,15 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 				dev->wiphy.bands[band]->ht_cap.ampdu_density)))
 			goto nla_put_failure;
 
+		/* add VHT info */
+		if (dev->wiphy.bands[band]->vht_cap.vht_supported &&
+		    (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
+			     sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs),
+			     &dev->wiphy.bands[band]->vht_cap.vht_mcs) ||
+		     nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
+				 dev->wiphy.bands[band]->vht_cap.cap)))
+			goto nla_put_failure;
+
 		/* add frequencies */
 		nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
 		if (!nl_freqs)
@@ -921,7 +1080,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
 			goto nla_put_failure;
 	}
-	CMD(set_channel, SET_CHANNEL);
+	if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
+	    dev->ops->join_mesh) {
+		i++;
+		if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
+			goto nla_put_failure;
+	}
 	CMD(set_wds_peer, SET_WDS_PEER);
 	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
 		CMD(tdls_mgmt, TDLS_MGMT);
@@ -1018,6 +1182,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		nla_nest_end(msg, nl_ifs);
 	}
 
+#ifdef CONFIG_PM
 	if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) {
 		struct nlattr *nl_wowlan;
 
@@ -1058,6 +1223,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
 		nla_nest_end(msg, nl_wowlan);
 	}
+#endif
 
 	if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
 				dev->wiphy.software_iftypes))
@@ -1162,18 +1328,22 @@ static int parse_txq_params(struct nlattr *tb[],
 static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
 {
 	/*
-	 * You can only set the channel explicitly for AP, mesh
-	 * and WDS type interfaces; all others have their channel
-	 * managed via their respective "establish a connection"
-	 * command (connect, join, ...)
+	 * You can only set the channel explicitly for WDS interfaces,
+	 * all others have their channel managed via their respective
+	 * "establish a connection" command (connect, join, ...)
+	 *
+	 * For AP/GO and mesh mode, the channel can be set with the
+	 * channel userspace API, but is only stored and passed to the
+	 * low-level driver when the AP starts or the mesh is joined.
+	 * This is for backward compatibility, userspace can also give
+	 * the channel in the start-ap or join-mesh commands instead.
 	 *
 	 * Monitors are special as they are normally slaved to
-	 * whatever else is going on, so they behave as though
-	 * you tried setting the wiphy channel itself.
+	 * whatever else is going on, so they have their own special
+	 * operation to set the monitor channel if possible.
 	 */
 	return !wdev ||
 		wdev->iftype == NL80211_IFTYPE_AP ||
-		wdev->iftype == NL80211_IFTYPE_WDS ||
 		wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
 		wdev->iftype == NL80211_IFTYPE_MONITOR ||
 		wdev->iftype == NL80211_IFTYPE_P2P_GO;
@@ -1204,9 +1374,14 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev,
 				 struct genl_info *info)
 {
+	struct ieee80211_channel *channel;
 	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
 	u32 freq;
 	int result;
+	enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+
+	if (wdev)
+		iftype = wdev->iftype;
 
 	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
 		return -EINVAL;
@@ -1221,12 +1396,32 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
 	mutex_lock(&rdev->devlist_mtx);
-	if (wdev) {
-		wdev_lock(wdev);
-		result = cfg80211_set_freq(rdev, wdev, freq, channel_type);
-		wdev_unlock(wdev);
-	} else {
-		result = cfg80211_set_freq(rdev, NULL, freq, channel_type);
+	switch (iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		if (wdev->beacon_interval) {
+			result = -EBUSY;
+			break;
+		}
+		channel = rdev_freq_to_chan(rdev, freq, channel_type);
+		if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+							      channel,
+							      channel_type)) {
+			result = -EINVAL;
+			break;
+		}
+		wdev->preset_chan = channel;
+		wdev->preset_chantype = channel_type;
+		result = 0;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type);
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		result = cfg80211_set_monitor_channel(rdev, freq, channel_type);
+		break;
+	default:
+		result = -EINVAL;
 	}
 	mutex_unlock(&rdev->devlist_mtx);
 
@@ -1300,7 +1495,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (!netdev) {
-		rdev = __cfg80211_rdev_from_info(info);
+		rdev = __cfg80211_rdev_from_attrs(genl_info_net(info),
+						  info->attrs);
 		if (IS_ERR(rdev)) {
 			mutex_unlock(&cfg80211_mutex);
 			return PTR_ERR(rdev);
@@ -1310,8 +1506,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 		result = 0;
 
 		mutex_lock(&rdev->mtx);
-	} else if (netif_running(netdev) &&
-		   nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+	} else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
 		wdev = netdev->ieee80211_ptr;
 	else
 		wdev = NULL;
@@ -1534,22 +1729,32 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 	return result;
 }
 
+static inline u64 wdev_id(struct wireless_dev *wdev)
+{
+	return (u64)wdev->identifier |
+	       ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+}
 
 static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 			      struct cfg80211_registered_device *rdev,
-			      struct net_device *dev)
+			      struct wireless_dev *wdev)
 {
+	struct net_device *dev = wdev->netdev;
 	void *hdr;
 
 	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE);
 	if (!hdr)
 		return -1;
 
-	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
-	    nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-	    nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
-	    nla_put_u32(msg, NL80211_ATTR_IFTYPE,
-			dev->ieee80211_ptr->iftype) ||
+	if (dev &&
+	    (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	     nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+	     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
+	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
 	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
 			rdev->devlist_generation ^
 			(cfg80211_rdev_list_generation << 2)))
@@ -1559,12 +1764,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		struct ieee80211_channel *chan;
 		enum nl80211_channel_type channel_type;
 
-		chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
+		chan = rdev->ops->get_channel(&rdev->wiphy, wdev,
+					      &channel_type);
 		if (chan &&
 		    (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
 				 chan->center_freq) ||
 		     nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
 				 channel_type)))
 			goto nla_put_failure;
 	}
 
@@ -1595,14 +1801,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
 		if_idx = 0;
 
 		mutex_lock(&rdev->devlist_mtx);
-		list_for_each_entry(wdev, &rdev->netdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wdev_list, list) {
 			if (if_idx < if_start) {
 				if_idx++;
 				continue;
 			}
 			if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
 					       cb->nlh->nlmsg_seq, NLM_F_MULTI,
-					       rdev, wdev->netdev) < 0) {
+					       rdev, wdev) < 0) {
 				mutex_unlock(&rdev->devlist_mtx);
 				goto out;
 			}
@@ -1625,14 +1831,14 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
 {
 	struct sk_buff *msg;
 	struct cfg80211_registered_device *dev = info->user_ptr[0];
-	struct net_device *netdev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
 
 	if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
-			       dev, netdev) < 0) {
+			       dev, wdev) < 0) {
 		nlmsg_free(msg);
 		return -ENOBUFS;
 	}
@@ -1772,7 +1978,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct vif_params params;
-	struct net_device *dev;
+	struct wireless_dev *wdev;
+	struct sk_buff *msg;
 	int err;
 	enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
 	u32 flags;
@@ -1799,19 +2006,23 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 		return err;
 	}
 
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
 	err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
 				  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
 				  &flags);
-	dev = rdev->ops->add_virtual_intf(&rdev->wiphy,
+	wdev = rdev->ops->add_virtual_intf(&rdev->wiphy,
 		nla_data(info->attrs[NL80211_ATTR_IFNAME]),
 		type, err ? NULL : &flags, &params);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
+	if (IS_ERR(wdev)) {
+		nlmsg_free(msg);
+		return PTR_ERR(wdev);
+	}
 
 	if (type == NL80211_IFTYPE_MESH_POINT &&
 	    info->attrs[NL80211_ATTR_MESH_ID]) {
-		struct wireless_dev *wdev = dev->ieee80211_ptr;
-
 		wdev_lock(wdev);
 		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
 			     IEEE80211_MAX_MESH_ID_LEN);
@@ -1822,18 +2033,34 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 		wdev_unlock(wdev);
 	}
 
-	return 0;
+	if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+			       rdev, wdev) < 0) {
+		nlmsg_free(msg);
+		return -ENOBUFS;
+	}
+
+	return genlmsg_reply(msg, info);
 }
 
 static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 
 	if (!rdev->ops->del_virtual_intf)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
+	/*
+	 * If we remove a wireless device without a netdev then clear
+	 * user_ptr[1] so that nl80211_post_doit won't dereference it
+	 * to check if it needs to do dev_put(). Otherwise it crashes
+	 * since the wdev has been freed, unlike with a netdev where
+	 * we need the dev_put() for the netdev to really be freed.
+	 */
+	if (!wdev->netdev)
+		info->user_ptr[1] = NULL;
+
+	return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
 }
 
 static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -2213,6 +2440,33 @@ static int nl80211_parse_beacon(struct genl_info *info,
 	return 0;
 }
 
+static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
+				   struct cfg80211_ap_settings *params)
+{
+	struct wireless_dev *wdev;
+	bool ret = false;
+
+	mutex_lock(&rdev->devlist_mtx);
+
+	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		if (wdev->iftype != NL80211_IFTYPE_AP &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO)
+			continue;
+
+		if (!wdev->preset_chan)
+			continue;
+
+		params->channel = wdev->preset_chan;
+		params->channel_type = wdev->preset_chantype;
+		ret = true;
+		break;
+	}
+
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return ret;
+}
+
 static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2299,9 +2553,44 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 			info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+		enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+
+		if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+		    !nl80211_valid_channel_type(info, &channel_type))
+			return -EINVAL;
+
+		params.channel = rdev_freq_to_chan(rdev,
+			nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+			channel_type);
+		if (!params.channel)
+			return -EINVAL;
+		params.channel_type = channel_type;
+	} else if (wdev->preset_chan) {
+		params.channel = wdev->preset_chan;
+		params.channel_type = wdev->preset_chantype;
+	} else if (!nl80211_get_ap_channel(rdev, &params))
+		return -EINVAL;
+
+	if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel,
+					  params.channel_type))
+		return -EINVAL;
+
+	mutex_lock(&rdev->devlist_mtx);
+	err = cfg80211_can_use_chan(rdev, wdev, params.channel,
+				    CHAN_MODE_SHARED);
+	mutex_unlock(&rdev->devlist_mtx);
+
+	if (err)
+		return err;
+
 	err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
-	if (!err)
+	if (!err) {
+		wdev->preset_chan = params.channel;
+		wdev->preset_chantype = params.channel_type;
 		wdev->beacon_interval = params.beacon_interval;
+		wdev->channel = params.channel;
+	}
 	return err;
 }
 
@@ -2334,23 +2623,8 @@ static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
 
-	if (!rdev->ops->stop_ap)
-		return -EOPNOTSUPP;
-
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
-		return -EOPNOTSUPP;
-
-	if (!wdev->beacon_interval)
-		return -ENOENT;
-
-	err = rdev->ops->stop_ap(&rdev->wiphy, dev);
-	if (!err)
-		wdev->beacon_interval = 0;
-	return err;
+	return cfg80211_stop_ap(rdev, dev);
 }
 
 static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -2442,7 +2716,8 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
 				int attr)
 {
 	struct nlattr *rate;
-	u16 bitrate;
+	u32 bitrate;
+	u16 bitrate_compat;
 
 	rate = nla_nest_start(msg, attr);
 	if (!rate)
@@ -2450,8 +2725,12 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
 
 	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
 	bitrate = cfg80211_calculate_bitrate(info);
+	/* report 16-bit bitrate only if we can */
+	bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
 	if ((bitrate > 0 &&
-	     nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+	     nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) ||
+	    (bitrate_compat > 0 &&
+	     nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) ||
 	    ((info->flags & RATE_INFO_FLAGS_MCS) &&
 	     nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
 	    ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
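
An aside, not part of the patch: the legacy NL80211_RATE_INFO_BITRATE attribute is a u16 counting 100 kbit/s units, so it cannot represent anything at or above 6553.6 Mbit/s; the hunk above therefore always reports the new u32 NL80211_RATE_INFO_BITRATE32 and fills the u16 only when the value still fits. A stand-alone C sketch of that fallback (the sample rates are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 100 kbit/s units: 54 Mbit/s, 600 Mbit/s, ~6.9 Gbit/s */
	uint32_t rates[] = { 540, 6000, 69333 };

	for (int i = 0; i < 3; i++) {
		/* mirrors: bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0 */
		uint16_t compat = rates[i] < (1UL << 16) ? rates[i] : 0;
		printf("bitrate32=%u compat16=%u\n",
		       (unsigned)rates[i], (unsigned)compat);
	}
	return 0;
}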
@@ -3304,6 +3583,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
 	int r;
 	char *data = NULL;
+	enum nl80211_user_reg_hint_type user_reg_hint_type;
 
 	/*
 	 * You should only get this when cfg80211 hasn't yet initialized
@@ -3323,7 +3603,21 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 
 	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
 
-	r = regulatory_hint_user(data);
+	if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
+		user_reg_hint_type =
+		  nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
+	else
+		user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+	switch (user_reg_hint_type) {
+	case NL80211_USER_REG_HINT_USER:
+	case NL80211_USER_REG_HINT_CELL_BASE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	r = regulatory_hint_user(data, user_reg_hint_type);
 
 	return r;
 }
@@ -3413,7 +3707,13 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
 	    nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
 			cur_params.rssi_threshold) ||
 	    nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
-			cur_params.ht_opmode))
+			cur_params.ht_opmode) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+			cur_params.dot11MeshHWMPactivePathToRootTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+			cur_params.dot11MeshHWMProotInterval) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+			cur_params.dot11MeshHWMPconfirmationInterval))
 		goto nla_put_failure;
 	nla_nest_end(msg, pinfoattr);
 	genlmsg_end(msg, hdr);
@@ -3436,7 +3736,6 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
 	[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
-
 	[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
 	[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
@@ -3448,8 +3747,11 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
 	[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
-	[NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
-	[NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16},
+	[NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+	[NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
+	[NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
 };
 
 static const struct nla_policy
@@ -3459,7 +3761,7 @@ static const struct nla_policy
 	[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
 	[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
 	[NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
-				.len = IEEE80211_MAX_DATA_LEN },
+				    .len = IEEE80211_MAX_DATA_LEN },
 	[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
 };
 
@@ -3492,63 +3794,82 @@ do {\
 
 	/* Fill in the params struct */
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
-				mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
+				  mask, NL80211_MESHCONF_RETRY_TIMEOUT,
+				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
-				mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16);
+				  mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
-				mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16);
+				  mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
+				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
-				mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16);
+				  mask, NL80211_MESHCONF_MAX_PEER_LINKS,
+				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
-				mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
+				  mask, NL80211_MESHCONF_MAX_RETRIES,
+				  nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
 				  mask, NL80211_MESHCONF_TTL, nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
-				mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
+				  mask, NL80211_MESHCONF_ELEMENT_TTL,
+				  nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
-				mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
-				mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
-				nla_get_u32);
+				  mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask,
+				  NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+				  nla_get_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
 				  mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
 				  nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
-				mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32);
+				  mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
+				  nla_get_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
 				  mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
 				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
-				mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask,
+				  NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
 				  nla_get_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
 				  mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
 				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
 				  mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
 				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				dot11MeshHWMPnetDiameterTraversalTime,
-				mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+				  dot11MeshHWMPnetDiameterTraversalTime, mask,
+				  NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
 				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
+				  NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
+				  NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+				  nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				dot11MeshHWMPRootMode, mask,
-				NL80211_MESHCONF_HWMP_ROOTMODE,
-				nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				dot11MeshHWMPRannInterval, mask,
-				NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-				nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				dot11MeshGateAnnouncementProtocol, mask,
-				NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-				nla_get_u8);
+				  dot11MeshGateAnnouncementProtocol, mask,
+				  NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+				  nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
-				mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
+				  mask, NL80211_MESHCONF_FORWARDING,
+				  nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
-				mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
+				  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
+				  nla_get_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
-				mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
+				  mask, NL80211_MESHCONF_HT_OPMODE,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
+				  mask,
+				  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval,
+				  mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshHWMPconfirmationInterval, mask,
+				  NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+				  nla_get_u16);
 	if (mask_out)
 		*mask_out = mask;
 
@@ -3666,6 +3987,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 			cfg80211_regdomain->dfs_region)))
 		goto nla_put_failure;
 
+	if (reg_last_request_cell_base() &&
+	    nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
+			NL80211_USER_REG_HINT_CELL_BASE))
+		goto nla_put_failure;
+
 	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
 	if (!nl_reg_rules)
 		goto nla_put_failure;
@@ -3831,7 +4157,7 @@ static int validate_scan_freqs(struct nlattr *freqs)
 static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 	struct cfg80211_scan_request *request;
 	struct nlattr *attr;
 	struct wiphy *wiphy;
@@ -3991,15 +4317,16 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 	request->no_cck =
 		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
-	request->dev = dev;
+	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
 
 	rdev->scan_req = request;
-	err = rdev->ops->scan(&rdev->wiphy, dev, request);
+	err = rdev->ops->scan(&rdev->wiphy, request);
 
 	if (!err) {
-		nl80211_send_scan_start(rdev, dev);
-		dev_hold(dev);
+		nl80211_send_scan_start(rdev, wdev);
+		if (wdev->netdev)
+			dev_hold(wdev->netdev);
 	} else {
  out_free:
 		rdev->scan_req = NULL;
@@ -4185,12 +4512,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 		nla_for_each_nested(attr,
 				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
 				    tmp) {
-			struct nlattr *ssid;
+			struct nlattr *ssid, *rssi;
 
 			nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
 				  nla_data(attr), nla_len(attr),
 				  nl80211_match_policy);
-			ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+			ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
 			if (ssid) {
 				if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
 					err = -EINVAL;
@@ -4201,6 +4528,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 				request->match_sets[i].ssid.ssid_len =
 					nla_len(ssid);
 			}
+			rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+			if (rssi)
+				request->rssi_thold = nla_get_u32(rssi);
+			else
+				request->rssi_thold =
+						NL80211_SCAN_RSSI_THOLD_OFF;
 			i++;
 		}
 	}
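
An aside, not part of the patch: NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is declared NLA_U32 in the policy above, yet it carries a signed dBm threshold, so userspace sends the two's-complement bit pattern and the kernel reinterprets it as signed (assuming rssi_thold is a signed field, which the NL80211_SCAN_RSSI_THOLD_OFF default suggests). A stand-alone C sketch of that round trip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t thold = -65;			/* dBm, as userspace sends it */
	uint32_t attr = (uint32_t)thold;	/* what nla_get_u32() yields */

	printf("on the wire: 0x%08x, interpreted: %d dBm\n",
	       (unsigned)attr, (int)(int32_t)attr);
	return 0;
}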
@@ -5058,21 +5391,18 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
 				  nl80211_policy);
 	if (err)
 		return err;
-	if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
-		phy_idx = nla_get_u32(
-			nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
-	} else {
-		struct net_device *netdev;
 
-		err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
-					      nl80211_fam.attrbuf,
-					      &rdev, &netdev);
-		if (err)
-			return err;
-		dev_put(netdev);
-		phy_idx = rdev->wiphy_idx;
-		cfg80211_unlock_rdev(rdev);
+	mutex_lock(&cfg80211_mutex);
+	rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+					  nl80211_fam.attrbuf);
+	if (IS_ERR(rdev)) {
+		mutex_unlock(&cfg80211_mutex);
+		return PTR_ERR(rdev);
 	}
+	phy_idx = rdev->wiphy_idx;
+	rdev = NULL;
+	mutex_unlock(&cfg80211_mutex);
+
 	if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
 		cb->args[1] =
 			(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -5474,7 +5804,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 					    struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 	struct ieee80211_channel *chan;
 	struct sk_buff *msg;
 	void *hdr;
@@ -5489,18 +5819,18 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 
 	duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
 
+	if (!rdev->ops->remain_on_channel ||
+	    !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
+		return -EOPNOTSUPP;
+
 	/*
-	 * We should be on that channel for at least one jiffie,
-	 * and more than 5 seconds seems excessive.
+	 * We should be on that channel for at least a minimum amount of
+	 * time (10ms) but no longer than the driver supports.
 	 */
-	if (!duration || !msecs_to_jiffies(duration) ||
+	if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
 	    duration > rdev->wiphy.max_remain_on_channel_duration)
 		return -EINVAL;
 
-	if (!rdev->ops->remain_on_channel ||
-	    !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
-		return -EOPNOTSUPP;
-
 	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
 	    !nl80211_valid_channel_type(info, &channel_type))
 		return -EINVAL;
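
An aside, not part of the patch: the rewritten check above replaces the jiffies-based test with fixed bounds, a 10 ms floor (NL80211_MIN_REMAIN_ON_CHANNEL_TIME, per the comment) and the driver's advertised ceiling. The same predicate in isolation, with an invented 5000 ms driver maximum:

#include <stdbool.h>
#include <stdio.h>

#define MIN_ROC_TIME_MS 10	/* mirrors NL80211_MIN_REMAIN_ON_CHANNEL_TIME */

static bool roc_duration_ok(unsigned int duration_ms, unsigned int max_ms)
{
	return duration_ms >= MIN_ROC_TIME_MS && duration_ms <= max_ms;
}

int main(void)
{
	printf("%d %d %d\n",
	       roc_duration_ok(5, 5000),	/* too short -> 0 */
	       roc_duration_ok(200, 5000),	/* fine      -> 1 */
	       roc_duration_ok(9000, 5000));	/* too long  -> 0 */
	return 0;
}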
@@ -5522,7 +5852,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 		goto free_msg;
 	}
 
-	err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+	err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
 					   channel_type, duration, &cookie);
 
 	if (err)
@@ -5546,7 +5876,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
 						  struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 	u64 cookie;
 
 	if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5557,7 +5887,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
 
 	cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
 
-	return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+	return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
 }
 
 static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
@@ -5706,7 +6036,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = info->user_ptr[1];
 	u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
 
 	if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
@@ -5715,21 +6045,24 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
5715 if (info->attrs[NL80211_ATTR_FRAME_TYPE]) 6045 if (info->attrs[NL80211_ATTR_FRAME_TYPE])
5716 frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]); 6046 frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
5717 6047
5718 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && 6048 switch (wdev->iftype) {
5719 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && 6049 case NL80211_IFTYPE_STATION:
5720 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && 6050 case NL80211_IFTYPE_ADHOC:
5721 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 6051 case NL80211_IFTYPE_P2P_CLIENT:
5722 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 6052 case NL80211_IFTYPE_AP:
5723 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && 6053 case NL80211_IFTYPE_AP_VLAN:
5724 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 6054 case NL80211_IFTYPE_MESH_POINT:
6055 case NL80211_IFTYPE_P2P_GO:
6056 break;
6057 default:
5725 return -EOPNOTSUPP; 6058 return -EOPNOTSUPP;
6059 }
5726 6060
5727 /* not much point in registering if we can't reply */ 6061 /* not much point in registering if we can't reply */
5728 if (!rdev->ops->mgmt_tx) 6062 if (!rdev->ops->mgmt_tx)
5729 return -EOPNOTSUPP; 6063 return -EOPNOTSUPP;
5730 6064
5731 return cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid, 6065 return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type,
5732 frame_type,
5733 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), 6066 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
5734 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); 6067 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
5735} 6068}
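
The hunk above replaces a seven-way chain of != tests on dev->ieee80211_ptr->iftype with a switch on wdev->iftype: allow-listed interface types fall through to a single break, and anything else, including future NL80211_IFTYPE_* additions, lands in the default arm and gets -EOPNOTSUPP. A minimal userspace sketch of the same allow-list pattern; the enum here is a hypothetical stand-in, not the kernel's nl80211_iftype:

#include <stdio.h>

/* hypothetical stand-in for a subset of enum nl80211_iftype */
enum iftype { IFTYPE_STATION, IFTYPE_ADHOC, IFTYPE_AP, IFTYPE_MONITOR };

static int mgmt_registration_allowed(enum iftype t)
{
	switch (t) {
	case IFTYPE_STATION:
	case IFTYPE_ADHOC:
	case IFTYPE_AP:
		return 1;	/* explicitly allow-listed */
	default:
		return 0;	/* new types stay rejected until audited */
	}
}

int main(void)
{
	printf("station: %d, monitor: %d\n",
	       mgmt_registration_allowed(IFTYPE_STATION),
	       mgmt_registration_allowed(IFTYPE_MONITOR));
	return 0;
}
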
@@ -5737,7 +6070,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
5737static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) 6070static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5738{ 6071{
5739 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6072 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5740 struct net_device *dev = info->user_ptr[1]; 6073 struct wireless_dev *wdev = info->user_ptr[1];
5741 struct ieee80211_channel *chan; 6074 struct ieee80211_channel *chan;
5742 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 6075 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
5743 bool channel_type_valid = false; 6076 bool channel_type_valid = false;
@@ -5758,19 +6091,32 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5758 if (!rdev->ops->mgmt_tx) 6091 if (!rdev->ops->mgmt_tx)
5759 return -EOPNOTSUPP; 6092 return -EOPNOTSUPP;
5760 6093
5761 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && 6094 switch (wdev->iftype) {
5762 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && 6095 case NL80211_IFTYPE_STATION:
5763 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && 6096 case NL80211_IFTYPE_ADHOC:
5764 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 6097 case NL80211_IFTYPE_P2P_CLIENT:
5765 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 6098 case NL80211_IFTYPE_AP:
5766 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && 6099 case NL80211_IFTYPE_AP_VLAN:
5767 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 6100 case NL80211_IFTYPE_MESH_POINT:
6101 case NL80211_IFTYPE_P2P_GO:
6102 break;
6103 default:
5768 return -EOPNOTSUPP; 6104 return -EOPNOTSUPP;
6105 }
5769 6106
5770 if (info->attrs[NL80211_ATTR_DURATION]) { 6107 if (info->attrs[NL80211_ATTR_DURATION]) {
5771 if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) 6108 if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
5772 return -EINVAL; 6109 return -EINVAL;
5773 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); 6110 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
6111
6112 /*
6113 * We should wait on the channel for at least a minimum amount
6114 * of time (10ms) but no longer than the driver supports.
6115 */
6116 if (wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
6117 wait > rdev->wiphy.max_remain_on_channel_duration)
6118 return -EINVAL;
6119
5774 } 6120 }
5775 6121
5776 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 6122 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5805,7 +6151,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5805 } 6151 }
5806 } 6152 }
5807 6153
5808 err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type, 6154 err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type,
5809 channel_type_valid, wait, 6155 channel_type_valid, wait,
5810 nla_data(info->attrs[NL80211_ATTR_FRAME]), 6156 nla_data(info->attrs[NL80211_ATTR_FRAME]),
5811 nla_len(info->attrs[NL80211_ATTR_FRAME]), 6157 nla_len(info->attrs[NL80211_ATTR_FRAME]),
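
The duration check added in this hunk bounds the requested off-channel wait between NL80211_MIN_REMAIN_ON_CHANNEL_TIME (10 ms, per the comment) and the driver-advertised wiphy.max_remain_on_channel_duration. A compilable sketch of the same reject-out-of-range logic; the struct and constant value are illustrative stand-ins:

#include <stdio.h>

#define MIN_ROC_TIME_MS 10	/* mirrors NL80211_MIN_REMAIN_ON_CHANNEL_TIME */

struct fake_wiphy { unsigned int max_remain_on_channel_duration; };

/* 0 on success, -1 where the kernel code returns -EINVAL */
static int validate_wait(const struct fake_wiphy *w, unsigned int wait_ms)
{
	if (wait_ms < MIN_ROC_TIME_MS ||
	    wait_ms > w->max_remain_on_channel_duration)
		return -1;
	return 0;
}

int main(void)
{
	struct fake_wiphy w = { .max_remain_on_channel_duration = 5000 };
	printf("%d %d %d\n",
	       validate_wait(&w, 5),	/* too short */
	       validate_wait(&w, 200),	/* fine */
	       validate_wait(&w, 9000));	/* beyond the driver's max */
	return 0;
}
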
@@ -5833,7 +6179,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5833static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info) 6179static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
5834{ 6180{
5835 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6181 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5836 struct net_device *dev = info->user_ptr[1]; 6182 struct wireless_dev *wdev = info->user_ptr[1];
5837 u64 cookie; 6183 u64 cookie;
5838 6184
5839 if (!info->attrs[NL80211_ATTR_COOKIE]) 6185 if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5842,17 +6188,21 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
5842 if (!rdev->ops->mgmt_tx_cancel_wait) 6188 if (!rdev->ops->mgmt_tx_cancel_wait)
5843 return -EOPNOTSUPP; 6189 return -EOPNOTSUPP;
5844 6190
5845 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && 6191 switch (wdev->iftype) {
5846 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && 6192 case NL80211_IFTYPE_STATION:
5847 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && 6193 case NL80211_IFTYPE_ADHOC:
5848 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 6194 case NL80211_IFTYPE_P2P_CLIENT:
5849 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 6195 case NL80211_IFTYPE_AP:
5850 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 6196 case NL80211_IFTYPE_AP_VLAN:
6197 case NL80211_IFTYPE_P2P_GO:
6198 break;
6199 default:
5851 return -EOPNOTSUPP; 6200 return -EOPNOTSUPP;
6201 }
5852 6202
5853 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); 6203 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
5854 6204
5855 return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, dev, cookie); 6205 return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
5856} 6206}
5857 6207
5858static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) 6208static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
@@ -5938,8 +6288,35 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
5938 [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 }, 6288 [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
5939 [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, 6289 [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
5940 [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, 6290 [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
6291 [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 },
6292 [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 },
6293 [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 },
5941}; 6294};
5942 6295
6296static int nl80211_set_cqm_txe(struct genl_info *info,
6297 u32 rate, u32 pkts, u32 intvl)
6298{
6299 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6300 struct wireless_dev *wdev;
6301 struct net_device *dev = info->user_ptr[1];
6302
6303 if ((rate < 0 || rate > 100) ||
6304 (intvl < 0 || intvl > NL80211_CQM_TXE_MAX_INTVL))
6305 return -EINVAL;
6306
6307 wdev = dev->ieee80211_ptr;
6308
6309 if (!rdev->ops->set_cqm_txe_config)
6310 return -EOPNOTSUPP;
6311
6312 if (wdev->iftype != NL80211_IFTYPE_STATION &&
6313 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
6314 return -EOPNOTSUPP;
6315
6316 return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev,
6317 rate, pkts, intvl);
6318}
6319
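
nl80211_set_cqm_txe() takes the TX-error threshold as a percentage (rate, validated to 0..100), a minimum packet count per window (pkts), and a measurement interval capped at NL80211_CQM_TXE_MAX_INTVL; a zero rate plausibly serves as "disabled", though the hunk itself only enforces the ranges. A hedged sketch of how a consumer of these three values might evaluate a window; none of these names are cfg80211 API:

/* Illustrative only: evaluate one measurement window against the
 * configured TXE thresholds. */
struct txe_cfg { unsigned int rate_pct, min_pkts, intvl; };

static int txe_threshold_crossed(const struct txe_cfg *cfg,
				 unsigned int failed, unsigned int sent)
{
	if (!cfg->rate_pct || sent < cfg->min_pkts)
		return 0;	/* disabled, or too little traffic to judge */
	/* integer percentage of failed transmissions in this window */
	return failed * 100 / sent > cfg->rate_pct;
}
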
5943static int nl80211_set_cqm_rssi(struct genl_info *info, 6320static int nl80211_set_cqm_rssi(struct genl_info *info,
5944 s32 threshold, u32 hysteresis) 6321 s32 threshold, u32 hysteresis)
5945{ 6322{
@@ -5987,6 +6364,14 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
5987 threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); 6364 threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
5988 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); 6365 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
5989 err = nl80211_set_cqm_rssi(info, threshold, hysteresis); 6366 err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
6367 } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
6368 attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
6369 attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
6370 u32 rate, pkts, intvl;
6371 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
6372 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
6373 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
6374 err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
5990 } else 6375 } else
5991 err = -EINVAL; 6376 err = -EINVAL;
5992 6377
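
From userspace, the new TXE knobs ride in the same nested NL80211_ATTR_CQM container the RSSI settings use, and NL80211_CMD_SET_CQM picks the path by whichever complete attribute group is present. A hedged libnl-style sketch of building such a request (error handling elided; this mirrors how tools like iw construct CQM messages, but is not copied from any of them):

/* msg is a struct nl_msg * already carrying the NL80211_CMD_SET_CQM
 * genl header and NL80211_ATTR_IFINDEX. */
struct nlattr *cqm = nla_nest_start(msg, NL80211_ATTR_CQM);
nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, 30);	/* failure rate, percent */
nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, 10);	/* min packets per window */
nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, 5);	/* measurement window */
nla_nest_end(msg, cqm);
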
@@ -6032,6 +6417,24 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
6032 return err; 6417 return err;
6033 } 6418 }
6034 6419
6420 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
6421 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
6422
6423 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
6424 !nl80211_valid_channel_type(info, &channel_type))
6425 return -EINVAL;
6426
6427 setup.channel = rdev_freq_to_chan(rdev,
6428 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
6429 channel_type);
6430 if (!setup.channel)
6431 return -EINVAL;
6432 setup.channel_type = channel_type;
6433 } else {
6434 /* cfg80211_join_mesh() will sort it out */
6435 setup.channel = NULL;
6436 }
6437
6035 return cfg80211_join_mesh(rdev, dev, &setup, &cfg); 6438 return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
6036} 6439}
6037 6440
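
Mesh join now honors an optional NL80211_ATTR_WIPHY_FREQ: when userspace names a frequency (plus an optional channel type) it is resolved via rdev_freq_to_chan() and pinned in setup.channel, otherwise the channel stays NULL and cfg80211_join_mesh() picks one. A small sketch of that optional-parameter shape; resolve() and the structs are stand-ins:

#include <stddef.h>

struct chan;				/* stand-in for struct ieee80211_channel */
struct mesh_setup_sketch { struct chan *channel; };

extern struct chan *resolve(unsigned int freq_mhz);	/* like rdev_freq_to_chan() */

static int fill_channel(struct mesh_setup_sketch *s, const unsigned int *freq_attr)
{
	if (freq_attr) {		/* attribute was supplied */
		s->channel = resolve(*freq_attr);
		if (!s->channel)
			return -1;	/* -EINVAL in the kernel code */
	} else {
		s->channel = NULL;	/* join code will sort it out */
	}
	return 0;
}
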
@@ -6043,6 +6446,7 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
6043 return cfg80211_leave_mesh(rdev, dev); 6446 return cfg80211_leave_mesh(rdev, dev);
6044} 6447}
6045 6448
6449#ifdef CONFIG_PM
6046static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) 6450static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
6047{ 6451{
6048 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6452 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6124,8 +6528,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6124{ 6528{
6125 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6529 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6126 struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; 6530 struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
6127 struct cfg80211_wowlan no_triggers = {};
6128 struct cfg80211_wowlan new_triggers = {}; 6531 struct cfg80211_wowlan new_triggers = {};
6532 struct cfg80211_wowlan *ntrig;
6129 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; 6533 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
6130 int err, i; 6534 int err, i;
6131 bool prev_enabled = rdev->wowlan; 6535 bool prev_enabled = rdev->wowlan;
@@ -6133,8 +6537,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6133 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) 6537 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
6134 return -EOPNOTSUPP; 6538 return -EOPNOTSUPP;
6135 6539
6136 if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) 6540 if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
6137 goto no_triggers; 6541 cfg80211_rdev_free_wowlan(rdev);
6542 rdev->wowlan = NULL;
6543 goto set_wakeup;
6544 }
6138 6545
6139 err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG, 6546 err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
6140 nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]), 6547 nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
@@ -6245,22 +6652,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6245 } 6652 }
6246 } 6653 }
6247 6654
6248 if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) { 6655 ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
6249 struct cfg80211_wowlan *ntrig; 6656 if (!ntrig) {
6250 ntrig = kmemdup(&new_triggers, sizeof(new_triggers), 6657 err = -ENOMEM;
6251 GFP_KERNEL); 6658 goto error;
6252 if (!ntrig) {
6253 err = -ENOMEM;
6254 goto error;
6255 }
6256 cfg80211_rdev_free_wowlan(rdev);
6257 rdev->wowlan = ntrig;
6258 } else {
6259 no_triggers:
6260 cfg80211_rdev_free_wowlan(rdev);
6261 rdev->wowlan = NULL;
6262 } 6659 }
6660 cfg80211_rdev_free_wowlan(rdev);
6661 rdev->wowlan = ntrig;
6263 6662
6663 set_wakeup:
6264 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) 6664 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
6265 rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan); 6665 rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
6266 6666
@@ -6271,6 +6671,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6271 kfree(new_triggers.patterns); 6671 kfree(new_triggers.patterns);
6272 return err; 6672 return err;
6273} 6673}
6674#endif
6274 6675
6275static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) 6676static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
6276{ 6677{
@@ -6415,44 +6816,75 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
6415#define NL80211_FLAG_CHECK_NETDEV_UP 0x08 6816#define NL80211_FLAG_CHECK_NETDEV_UP 0x08
6416#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\ 6817#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\
6417 NL80211_FLAG_CHECK_NETDEV_UP) 6818 NL80211_FLAG_CHECK_NETDEV_UP)
6819#define NL80211_FLAG_NEED_WDEV 0x10
6820/* If a netdev is associated, it must be UP */
6821#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
6822 NL80211_FLAG_CHECK_NETDEV_UP)
6418 6823
6419static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, 6824static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
6420 struct genl_info *info) 6825 struct genl_info *info)
6421{ 6826{
6422 struct cfg80211_registered_device *rdev; 6827 struct cfg80211_registered_device *rdev;
6828 struct wireless_dev *wdev;
6423 struct net_device *dev; 6829 struct net_device *dev;
6424 int err;
6425 bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL; 6830 bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL;
6426 6831
6427 if (rtnl) 6832 if (rtnl)
6428 rtnl_lock(); 6833 rtnl_lock();
6429 6834
6430 if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) { 6835 if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) {
6431 rdev = cfg80211_get_dev_from_info(info); 6836 rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
6432 if (IS_ERR(rdev)) { 6837 if (IS_ERR(rdev)) {
6433 if (rtnl) 6838 if (rtnl)
6434 rtnl_unlock(); 6839 rtnl_unlock();
6435 return PTR_ERR(rdev); 6840 return PTR_ERR(rdev);
6436 } 6841 }
6437 info->user_ptr[0] = rdev; 6842 info->user_ptr[0] = rdev;
6438 } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { 6843 } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV ||
6439 err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs, 6844 ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
6440 &rdev, &dev); 6845 mutex_lock(&cfg80211_mutex);
6441 if (err) { 6846 wdev = __cfg80211_wdev_from_attrs(genl_info_net(info),
6847 info->attrs);
6848 if (IS_ERR(wdev)) {
6849 mutex_unlock(&cfg80211_mutex);
6442 if (rtnl) 6850 if (rtnl)
6443 rtnl_unlock(); 6851 rtnl_unlock();
6444 return err; 6852 return PTR_ERR(wdev);
6445 } 6853 }
6446 if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && 6854
6447 !netif_running(dev)) { 6855 dev = wdev->netdev;
6448 cfg80211_unlock_rdev(rdev); 6856 rdev = wiphy_to_dev(wdev->wiphy);
6449 dev_put(dev); 6857
6450 if (rtnl) 6858 if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
6451 rtnl_unlock(); 6859 if (!dev) {
6452 return -ENETDOWN; 6860 mutex_unlock(&cfg80211_mutex);
6861 if (rtnl)
6862 rtnl_unlock();
6863 return -EINVAL;
6864 }
6865
6866 info->user_ptr[1] = dev;
6867 } else {
6868 info->user_ptr[1] = wdev;
6453 } 6869 }
6870
6871 if (dev) {
6872 if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
6873 !netif_running(dev)) {
6874 mutex_unlock(&cfg80211_mutex);
6875 if (rtnl)
6876 rtnl_unlock();
6877 return -ENETDOWN;
6878 }
6879
6880 dev_hold(dev);
6881 }
6882
6883 cfg80211_lock_rdev(rdev);
6884
6885 mutex_unlock(&cfg80211_mutex);
6886
6454 info->user_ptr[0] = rdev; 6887 info->user_ptr[0] = rdev;
6455 info->user_ptr[1] = dev;
6456 } 6888 }
6457 6889
6458 return 0; 6890 return 0;
@@ -6463,8 +6895,16 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
6463{ 6895{
6464 if (info->user_ptr[0]) 6896 if (info->user_ptr[0])
6465 cfg80211_unlock_rdev(info->user_ptr[0]); 6897 cfg80211_unlock_rdev(info->user_ptr[0]);
6466 if (info->user_ptr[1]) 6898 if (info->user_ptr[1]) {
6467 dev_put(info->user_ptr[1]); 6899 if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
6900 struct wireless_dev *wdev = info->user_ptr[1];
6901
6902 if (wdev->netdev)
6903 dev_put(wdev->netdev);
6904 } else {
6905 dev_put(info->user_ptr[1]);
6906 }
6907 }
6468 if (ops->internal_flags & NL80211_FLAG_NEED_RTNL) 6908 if (ops->internal_flags & NL80211_FLAG_NEED_RTNL)
6469 rtnl_unlock(); 6909 rtnl_unlock();
6470} 6910}
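
The two new internal flags let an op resolve a bare struct wireless_dev, which is what netdev-less interfaces such as P2P Device need: NEED_WDEV stores the wdev in user_ptr[1], NEED_NETDEV keeps storing the net_device, and the _UP variants fail with -ENETDOWN only when a netdev exists but is not running. pre_doit takes dev_hold() only when a netdev is attached, and post_doit above releases it through the matching branch. A sketch of a handler written against the new contract; the two do_*() helpers are hypothetical:

/* Hypothetical doit handler registered with NL80211_FLAG_NEED_WDEV:
 * user_ptr[1] is a wireless_dev that may have no netdev attached. */
static int nl80211_example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev = info->user_ptr[1];

	if (!wdev->netdev)		/* e.g. a P2P Device interface */
		return do_wdev_only_thing(rdev, wdev);

	return do_netdev_thing(rdev, wdev->netdev);
}
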
@@ -6491,7 +6931,7 @@ static struct genl_ops nl80211_ops[] = {
6491 .dumpit = nl80211_dump_interface, 6931 .dumpit = nl80211_dump_interface,
6492 .policy = nl80211_policy, 6932 .policy = nl80211_policy,
6493 /* can be retrieved by unprivileged users */ 6933 /* can be retrieved by unprivileged users */
6494 .internal_flags = NL80211_FLAG_NEED_NETDEV, 6934 .internal_flags = NL80211_FLAG_NEED_WDEV,
6495 }, 6935 },
6496 { 6936 {
6497 .cmd = NL80211_CMD_SET_INTERFACE, 6937 .cmd = NL80211_CMD_SET_INTERFACE,
@@ -6514,7 +6954,7 @@ static struct genl_ops nl80211_ops[] = {
6514 .doit = nl80211_del_interface, 6954 .doit = nl80211_del_interface,
6515 .policy = nl80211_policy, 6955 .policy = nl80211_policy,
6516 .flags = GENL_ADMIN_PERM, 6956 .flags = GENL_ADMIN_PERM,
6517 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6957 .internal_flags = NL80211_FLAG_NEED_WDEV |
6518 NL80211_FLAG_NEED_RTNL, 6958 NL80211_FLAG_NEED_RTNL,
6519 }, 6959 },
6520 { 6960 {
@@ -6685,7 +7125,7 @@ static struct genl_ops nl80211_ops[] = {
6685 .doit = nl80211_trigger_scan, 7125 .doit = nl80211_trigger_scan,
6686 .policy = nl80211_policy, 7126 .policy = nl80211_policy,
6687 .flags = GENL_ADMIN_PERM, 7127 .flags = GENL_ADMIN_PERM,
6688 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7128 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
6689 NL80211_FLAG_NEED_RTNL, 7129 NL80211_FLAG_NEED_RTNL,
6690 }, 7130 },
6691 { 7131 {
@@ -6826,7 +7266,7 @@ static struct genl_ops nl80211_ops[] = {
6826 .doit = nl80211_remain_on_channel, 7266 .doit = nl80211_remain_on_channel,
6827 .policy = nl80211_policy, 7267 .policy = nl80211_policy,
6828 .flags = GENL_ADMIN_PERM, 7268 .flags = GENL_ADMIN_PERM,
6829 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7269 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
6830 NL80211_FLAG_NEED_RTNL, 7270 NL80211_FLAG_NEED_RTNL,
6831 }, 7271 },
6832 { 7272 {
@@ -6834,7 +7274,7 @@ static struct genl_ops nl80211_ops[] = {
6834 .doit = nl80211_cancel_remain_on_channel, 7274 .doit = nl80211_cancel_remain_on_channel,
6835 .policy = nl80211_policy, 7275 .policy = nl80211_policy,
6836 .flags = GENL_ADMIN_PERM, 7276 .flags = GENL_ADMIN_PERM,
6837 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7277 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
6838 NL80211_FLAG_NEED_RTNL, 7278 NL80211_FLAG_NEED_RTNL,
6839 }, 7279 },
6840 { 7280 {
@@ -6850,7 +7290,7 @@ static struct genl_ops nl80211_ops[] = {
6850 .doit = nl80211_register_mgmt, 7290 .doit = nl80211_register_mgmt,
6851 .policy = nl80211_policy, 7291 .policy = nl80211_policy,
6852 .flags = GENL_ADMIN_PERM, 7292 .flags = GENL_ADMIN_PERM,
6853 .internal_flags = NL80211_FLAG_NEED_NETDEV | 7293 .internal_flags = NL80211_FLAG_NEED_WDEV |
6854 NL80211_FLAG_NEED_RTNL, 7294 NL80211_FLAG_NEED_RTNL,
6855 }, 7295 },
6856 { 7296 {
@@ -6858,7 +7298,7 @@ static struct genl_ops nl80211_ops[] = {
6858 .doit = nl80211_tx_mgmt, 7298 .doit = nl80211_tx_mgmt,
6859 .policy = nl80211_policy, 7299 .policy = nl80211_policy,
6860 .flags = GENL_ADMIN_PERM, 7300 .flags = GENL_ADMIN_PERM,
6861 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7301 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
6862 NL80211_FLAG_NEED_RTNL, 7302 NL80211_FLAG_NEED_RTNL,
6863 }, 7303 },
6864 { 7304 {
@@ -6866,7 +7306,7 @@ static struct genl_ops nl80211_ops[] = {
6866 .doit = nl80211_tx_mgmt_cancel_wait, 7306 .doit = nl80211_tx_mgmt_cancel_wait,
6867 .policy = nl80211_policy, 7307 .policy = nl80211_policy,
6868 .flags = GENL_ADMIN_PERM, 7308 .flags = GENL_ADMIN_PERM,
6869 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7309 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
6870 NL80211_FLAG_NEED_RTNL, 7310 NL80211_FLAG_NEED_RTNL,
6871 }, 7311 },
6872 { 7312 {
@@ -6925,6 +7365,7 @@ static struct genl_ops nl80211_ops[] = {
6925 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 7365 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6926 NL80211_FLAG_NEED_RTNL, 7366 NL80211_FLAG_NEED_RTNL,
6927 }, 7367 },
7368#ifdef CONFIG_PM
6928 { 7369 {
6929 .cmd = NL80211_CMD_GET_WOWLAN, 7370 .cmd = NL80211_CMD_GET_WOWLAN,
6930 .doit = nl80211_get_wowlan, 7371 .doit = nl80211_get_wowlan,
@@ -6941,6 +7382,7 @@ static struct genl_ops nl80211_ops[] = {
6941 .internal_flags = NL80211_FLAG_NEED_WIPHY | 7382 .internal_flags = NL80211_FLAG_NEED_WIPHY |
6942 NL80211_FLAG_NEED_RTNL, 7383 NL80211_FLAG_NEED_RTNL,
6943 }, 7384 },
7385#endif
6944 { 7386 {
6945 .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, 7387 .cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
6946 .doit = nl80211_set_rekey_data, 7388 .doit = nl80211_set_rekey_data,
@@ -7075,7 +7517,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
7075 7517
7076static int nl80211_send_scan_msg(struct sk_buff *msg, 7518static int nl80211_send_scan_msg(struct sk_buff *msg,
7077 struct cfg80211_registered_device *rdev, 7519 struct cfg80211_registered_device *rdev,
7078 struct net_device *netdev, 7520 struct wireless_dev *wdev,
7079 u32 pid, u32 seq, int flags, 7521 u32 pid, u32 seq, int flags,
7080 u32 cmd) 7522 u32 cmd)
7081{ 7523{
@@ -7086,7 +7528,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
7086 return -1; 7528 return -1;
7087 7529
7088 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 7530 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7089 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) 7531 (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
7532 wdev->netdev->ifindex)) ||
7533 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
7090 goto nla_put_failure; 7534 goto nla_put_failure;
7091 7535
7092 /* ignore errors and send incomplete event anyway */ 7536 /* ignore errors and send incomplete event anyway */
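
Scan events are now keyed by the 64-bit NL80211_ATTR_WDEV identifier and carry NL80211_ATTR_IFINDEX only when the wdev has a netdev. The conditional put keeps the chain's "any non-zero result aborts" convention: a missing netdev short-circuits the && to false, while a real nla_put_u32() failure still trips the ||. The idiom from the hunk above, isolated:

if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
    (wdev->netdev &&
     nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) ||
    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
	goto nla_put_failure;
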
@@ -7123,15 +7567,15 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
7123} 7567}
7124 7568
7125void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 7569void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
7126 struct net_device *netdev) 7570 struct wireless_dev *wdev)
7127{ 7571{
7128 struct sk_buff *msg; 7572 struct sk_buff *msg;
7129 7573
7130 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7574 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7131 if (!msg) 7575 if (!msg)
7132 return; 7576 return;
7133 7577
7134 if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, 7578 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
7135 NL80211_CMD_TRIGGER_SCAN) < 0) { 7579 NL80211_CMD_TRIGGER_SCAN) < 0) {
7136 nlmsg_free(msg); 7580 nlmsg_free(msg);
7137 return; 7581 return;
@@ -7142,7 +7586,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
7142} 7586}
7143 7587
7144void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 7588void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
7145 struct net_device *netdev) 7589 struct wireless_dev *wdev)
7146{ 7590{
7147 struct sk_buff *msg; 7591 struct sk_buff *msg;
7148 7592
@@ -7150,7 +7594,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
7150 if (!msg) 7594 if (!msg)
7151 return; 7595 return;
7152 7596
7153 if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, 7597 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
7154 NL80211_CMD_NEW_SCAN_RESULTS) < 0) { 7598 NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
7155 nlmsg_free(msg); 7599 nlmsg_free(msg);
7156 return; 7600 return;
@@ -7161,7 +7605,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
7161} 7605}
7162 7606
7163void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 7607void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
7164 struct net_device *netdev) 7608 struct wireless_dev *wdev)
7165{ 7609{
7166 struct sk_buff *msg; 7610 struct sk_buff *msg;
7167 7611
@@ -7169,7 +7613,7 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
7169 if (!msg) 7613 if (!msg)
7170 return; 7614 return;
7171 7615
7172 if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, 7616 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
7173 NL80211_CMD_SCAN_ABORTED) < 0) { 7617 NL80211_CMD_SCAN_ABORTED) < 0) {
7174 nlmsg_free(msg); 7618 nlmsg_free(msg);
7175 return; 7619 return;
@@ -7203,7 +7647,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
7203{ 7647{
7204 struct sk_buff *msg; 7648 struct sk_buff *msg;
7205 7649
7206 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7650 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7207 if (!msg) 7651 if (!msg)
7208 return; 7652 return;
7209 7653
@@ -7419,7 +7863,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
7419 struct sk_buff *msg; 7863 struct sk_buff *msg;
7420 void *hdr; 7864 void *hdr;
7421 7865
7422 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 7866 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7423 if (!msg) 7867 if (!msg)
7424 return; 7868 return;
7425 7869
@@ -7459,7 +7903,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
7459 struct sk_buff *msg; 7903 struct sk_buff *msg;
7460 void *hdr; 7904 void *hdr;
7461 7905
7462 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 7906 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7463 if (!msg) 7907 if (!msg)
7464 return; 7908 return;
7465 7909
@@ -7497,7 +7941,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
7497 struct sk_buff *msg; 7941 struct sk_buff *msg;
7498 void *hdr; 7942 void *hdr;
7499 7943
7500 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7944 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7501 if (!msg) 7945 if (!msg)
7502 return; 7946 return;
7503 7947
@@ -7692,7 +8136,7 @@ nla_put_failure:
7692 8136
7693static void nl80211_send_remain_on_chan_event( 8137static void nl80211_send_remain_on_chan_event(
7694 int cmd, struct cfg80211_registered_device *rdev, 8138 int cmd, struct cfg80211_registered_device *rdev,
7695 struct net_device *netdev, u64 cookie, 8139 struct wireless_dev *wdev, u64 cookie,
7696 struct ieee80211_channel *chan, 8140 struct ieee80211_channel *chan,
7697 enum nl80211_channel_type channel_type, 8141 enum nl80211_channel_type channel_type,
7698 unsigned int duration, gfp_t gfp) 8142 unsigned int duration, gfp_t gfp)
@@ -7711,7 +8155,9 @@ static void nl80211_send_remain_on_chan_event(
7711 } 8155 }
7712 8156
7713 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 8157 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7714 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 8158 (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
8159 wdev->netdev->ifindex)) ||
8160 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
7715 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || 8161 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
7716 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) || 8162 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
7717 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) 8163 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
@@ -7733,23 +8179,24 @@ static void nl80211_send_remain_on_chan_event(
7733} 8179}
7734 8180
7735void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 8181void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
7736 struct net_device *netdev, u64 cookie, 8182 struct wireless_dev *wdev, u64 cookie,
7737 struct ieee80211_channel *chan, 8183 struct ieee80211_channel *chan,
7738 enum nl80211_channel_type channel_type, 8184 enum nl80211_channel_type channel_type,
7739 unsigned int duration, gfp_t gfp) 8185 unsigned int duration, gfp_t gfp)
7740{ 8186{
7741 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 8187 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
7742 rdev, netdev, cookie, chan, 8188 rdev, wdev, cookie, chan,
7743 channel_type, duration, gfp); 8189 channel_type, duration, gfp);
7744} 8190}
7745 8191
7746void nl80211_send_remain_on_channel_cancel( 8192void nl80211_send_remain_on_channel_cancel(
7747 struct cfg80211_registered_device *rdev, struct net_device *netdev, 8193 struct cfg80211_registered_device *rdev,
8194 struct wireless_dev *wdev,
7748 u64 cookie, struct ieee80211_channel *chan, 8195 u64 cookie, struct ieee80211_channel *chan,
7749 enum nl80211_channel_type channel_type, gfp_t gfp) 8196 enum nl80211_channel_type channel_type, gfp_t gfp)
7750{ 8197{
7751 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 8198 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
7752 rdev, netdev, cookie, chan, 8199 rdev, wdev, cookie, chan,
7753 channel_type, 0, gfp); 8200 channel_type, 0, gfp);
7754} 8201}
7755 8202
@@ -7759,7 +8206,7 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
7759{ 8206{
7760 struct sk_buff *msg; 8207 struct sk_buff *msg;
7761 8208
7762 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8209 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7763 if (!msg) 8210 if (!msg)
7764 return; 8211 return;
7765 8212
@@ -7780,7 +8227,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
7780 struct sk_buff *msg; 8227 struct sk_buff *msg;
7781 void *hdr; 8228 void *hdr;
7782 8229
7783 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8230 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7784 if (!msg) 8231 if (!msg)
7785 return; 8232 return;
7786 8233
@@ -7863,10 +8310,11 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
7863} 8310}
7864 8311
7865int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 8312int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7866 struct net_device *netdev, u32 nlpid, 8313 struct wireless_dev *wdev, u32 nlpid,
7867 int freq, int sig_dbm, 8314 int freq, int sig_dbm,
7868 const u8 *buf, size_t len, gfp_t gfp) 8315 const u8 *buf, size_t len, gfp_t gfp)
7869{ 8316{
8317 struct net_device *netdev = wdev->netdev;
7870 struct sk_buff *msg; 8318 struct sk_buff *msg;
7871 void *hdr; 8319 void *hdr;
7872 8320
@@ -7881,7 +8329,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7881 } 8329 }
7882 8330
7883 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 8331 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7884 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 8332 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
8333 netdev->ifindex)) ||
7885 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || 8334 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
7886 (sig_dbm && 8335 (sig_dbm &&
7887 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || 8336 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -7899,10 +8348,11 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7899} 8348}
7900 8349
7901void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 8350void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
7902 struct net_device *netdev, u64 cookie, 8351 struct wireless_dev *wdev, u64 cookie,
7903 const u8 *buf, size_t len, bool ack, 8352 const u8 *buf, size_t len, bool ack,
7904 gfp_t gfp) 8353 gfp_t gfp)
7905{ 8354{
8355 struct net_device *netdev = wdev->netdev;
7906 struct sk_buff *msg; 8356 struct sk_buff *msg;
7907 void *hdr; 8357 void *hdr;
7908 8358
@@ -7917,7 +8367,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
7917 } 8367 }
7918 8368
7919 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 8369 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7920 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 8370 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
8371 netdev->ifindex)) ||
7921 nla_put(msg, NL80211_ATTR_FRAME, len, buf) || 8372 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
7922 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || 8373 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
7923 (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) 8374 (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
@@ -7943,7 +8394,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
7943 struct nlattr *pinfoattr; 8394 struct nlattr *pinfoattr;
7944 void *hdr; 8395 void *hdr;
7945 8396
7946 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8397 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7947 if (!msg) 8398 if (!msg)
7948 return; 8399 return;
7949 8400
@@ -7986,7 +8437,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
7986 struct nlattr *rekey_attr; 8437 struct nlattr *rekey_attr;
7987 void *hdr; 8438 void *hdr;
7988 8439
7989 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8440 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7990 if (!msg) 8441 if (!msg)
7991 return; 8442 return;
7992 8443
@@ -8030,7 +8481,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
8030 struct nlattr *attr; 8481 struct nlattr *attr;
8031 void *hdr; 8482 void *hdr;
8032 8483
8033 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8484 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8034 if (!msg) 8485 if (!msg)
8035 return; 8486 return;
8036 8487
@@ -8074,7 +8525,7 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8074 struct sk_buff *msg; 8525 struct sk_buff *msg;
8075 void *hdr; 8526 void *hdr;
8076 8527
8077 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8528 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8078 if (!msg) 8529 if (!msg)
8079 return; 8530 return;
8080 8531
@@ -8101,6 +8552,56 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8101} 8552}
8102 8553
8103void 8554void
8555nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
8556 struct net_device *netdev, const u8 *peer,
8557 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp)
8558{
8559 struct sk_buff *msg;
8560 struct nlattr *pinfoattr;
8561 void *hdr;
8562
8563 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
8564 if (!msg)
8565 return;
8566
8567 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
8568 if (!hdr) {
8569 nlmsg_free(msg);
8570 return;
8571 }
8572
8573 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
8574 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
8575 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
8576 goto nla_put_failure;
8577
8578 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
8579 if (!pinfoattr)
8580 goto nla_put_failure;
8581
8582 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets))
8583 goto nla_put_failure;
8584
8585 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate))
8586 goto nla_put_failure;
8587
8588 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl))
8589 goto nla_put_failure;
8590
8591 nla_nest_end(msg, pinfoattr);
8592
8593 genlmsg_end(msg, hdr);
8594
8595 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
8596 nl80211_mlme_mcgrp.id, gfp);
8597 return;
8598
8599 nla_put_failure:
8600 genlmsg_cancel(msg, hdr);
8601 nlmsg_free(msg);
8602}
8603
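
On the receive side, the TXE event is a NL80211_CMD_NOTIFY_CQM with the three values nested under NL80211_ATTR_CQM, multicast on the mlme group. A hedged userspace sketch (libnl) of pulling them back out; tb[] is assumed to hold the already-parsed top-level attributes:

struct nlattr *cqm[NL80211_ATTR_CQM_MAX + 1];

if (tb[NL80211_ATTR_CQM] &&
    !nla_parse_nested(cqm, NL80211_ATTR_CQM_MAX, tb[NL80211_ATTR_CQM], NULL) &&
    cqm[NL80211_ATTR_CQM_TXE_RATE])
	printf("TXE: rate %u, pkts %u, intvl %u\n",
	       nla_get_u32(cqm[NL80211_ATTR_CQM_TXE_RATE]),
	       nla_get_u32(cqm[NL80211_ATTR_CQM_TXE_PKTS]),
	       nla_get_u32(cqm[NL80211_ATTR_CQM_TXE_INTVL]));
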
8604void
8104nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 8605nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
8105 struct net_device *netdev, const u8 *peer, 8606 struct net_device *netdev, const u8 *peer,
8106 u32 num_packets, gfp_t gfp) 8607 u32 num_packets, gfp_t gfp)
@@ -8109,7 +8610,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
8109 struct nlattr *pinfoattr; 8610 struct nlattr *pinfoattr;
8110 void *hdr; 8611 void *hdr;
8111 8612
8112 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8613 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8113 if (!msg) 8614 if (!msg)
8114 return; 8615 return;
8115 8616
@@ -8153,7 +8654,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
8153 void *hdr; 8654 void *hdr;
8154 int err; 8655 int err;
8155 8656
8156 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8657 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8157 if (!msg) 8658 if (!msg)
8158 return; 8659 return;
8159 8660
@@ -8241,7 +8742,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
8241 rcu_read_lock(); 8742 rcu_read_lock();
8242 8743
8243 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 8744 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
8244 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) 8745 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
8245 cfg80211_mlme_unregister_socket(wdev, notify->pid); 8746 cfg80211_mlme_unregister_socket(wdev, notify->pid);
8246 if (rdev->ap_beacons_nlpid == notify->pid) 8747 if (rdev->ap_beacons_nlpid == notify->pid)
8247 rdev->ap_beacons_nlpid = 0; 8748 rdev->ap_beacons_nlpid = 0;
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 01a1122c3b33..9f2616fffb40 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -7,11 +7,11 @@ int nl80211_init(void);
7void nl80211_exit(void); 7void nl80211_exit(void);
8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10 struct net_device *netdev); 10 struct wireless_dev *wdev);
11void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 11void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
12 struct net_device *netdev); 12 struct wireless_dev *wdev);
13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
14 struct net_device *netdev); 14 struct wireless_dev *wdev);
15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, 15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
16 struct net_device *netdev, u32 cmd); 16 struct net_device *netdev, u32 cmd);
17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, 17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
@@ -74,13 +74,13 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
74 gfp_t gfp); 74 gfp_t gfp);
75 75
76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
77 struct net_device *netdev, 77 struct wireless_dev *wdev, u64 cookie,
78 u64 cookie,
79 struct ieee80211_channel *chan, 78 struct ieee80211_channel *chan,
80 enum nl80211_channel_type channel_type, 79 enum nl80211_channel_type channel_type,
81 unsigned int duration, gfp_t gfp); 80 unsigned int duration, gfp_t gfp);
82void nl80211_send_remain_on_channel_cancel( 81void nl80211_send_remain_on_channel_cancel(
83 struct cfg80211_registered_device *rdev, struct net_device *netdev, 82 struct cfg80211_registered_device *rdev,
83 struct wireless_dev *wdev,
84 u64 cookie, struct ieee80211_channel *chan, 84 u64 cookie, struct ieee80211_channel *chan,
85 enum nl80211_channel_type channel_type, gfp_t gfp); 85 enum nl80211_channel_type channel_type, gfp_t gfp);
86 86
@@ -92,11 +92,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
92 gfp_t gfp); 92 gfp_t gfp);
93 93
94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
95 struct net_device *netdev, u32 nlpid, 95 struct wireless_dev *wdev, u32 nlpid,
96 int freq, int sig_dbm, 96 int freq, int sig_dbm,
97 const u8 *buf, size_t len, gfp_t gfp); 97 const u8 *buf, size_t len, gfp_t gfp);
98void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 98void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
99 struct net_device *netdev, u64 cookie, 99 struct wireless_dev *wdev, u64 cookie,
100 const u8 *buf, size_t len, bool ack, 100 const u8 *buf, size_t len, bool ack,
101 gfp_t gfp); 101 gfp_t gfp);
102 102
@@ -110,6 +110,11 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
110 struct net_device *netdev, const u8 *peer, 110 struct net_device *netdev, const u8 *peer,
111 u32 num_packets, gfp_t gfp); 111 u32 num_packets, gfp_t gfp);
112 112
113void
114nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
115 struct net_device *netdev, const u8 *peer,
116 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
117
113void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, 118void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
114 struct net_device *netdev, const u8 *bssid, 119 struct net_device *netdev, const u8 *bssid,
115 const u8 *replay_ctr, gfp_t gfp); 120 const u8 *replay_ctr, gfp_t gfp);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 15f347477a99..2303ee73b50a 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -97,9 +97,16 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
97 * - cfg80211_world_regdom 97 * - cfg80211_world_regdom
98 * - cfg80211_regdom 98 * - cfg80211_regdom
99 * - last_request 99 * - last_request
100 * - reg_num_devs_support_basehint
100 */ 101 */
101static DEFINE_MUTEX(reg_mutex); 102static DEFINE_MUTEX(reg_mutex);
102 103
104/*
105 * Number of devices that registered to the core
106 * that support cellular base station regulatory hints
107 */
108static int reg_num_devs_support_basehint;
109
103static inline void assert_reg_lock(void) 110static inline void assert_reg_lock(void)
104{ 111{
105 lockdep_assert_held(&reg_mutex); 112 lockdep_assert_held(&reg_mutex);
@@ -129,7 +136,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
129 136
130/* We keep a static world regulatory domain in case of the absence of CRDA */ 137/* We keep a static world regulatory domain in case of the absence of CRDA */
131static const struct ieee80211_regdomain world_regdom = { 138static const struct ieee80211_regdomain world_regdom = {
132 .n_reg_rules = 5, 139 .n_reg_rules = 6,
133 .alpha2 = "00", 140 .alpha2 = "00",
134 .reg_rules = { 141 .reg_rules = {
135 /* IEEE 802.11b/g, channels 1..11 */ 142 /* IEEE 802.11b/g, channels 1..11 */
@@ -156,6 +163,9 @@ static const struct ieee80211_regdomain world_regdom = {
156 REG_RULE(5745-10, 5825+10, 40, 6, 20, 163 REG_RULE(5745-10, 5825+10, 40, 6, 20,
157 NL80211_RRF_PASSIVE_SCAN | 164 NL80211_RRF_PASSIVE_SCAN |
158 NL80211_RRF_NO_IBSS), 165 NL80211_RRF_NO_IBSS),
166
 167  /* IEEE 802.11ad (60GHz), channels 1..3 */
168 REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
159 } 169 }
160}; 170};
161 171
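
The new rule's bounds come straight from 802.11ad channelization: channel n is centered at 56160 + n * 2160 MHz with a 2160 MHz width, so covering channels 1..3 means spanning 57240 to 63720 MHz, which is exactly what REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0) encodes. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* 802.11ad: center of channel n is 56160 + n * 2160 MHz */
	for (int ch = 1; ch <= 3; ch++) {
		int center = 56160 + ch * 2160;
		printf("ch %d: center %d MHz, spans %d-%d MHz\n",
		       ch, center, center - 1080, center + 1080);
	}
	printf("rule: %d-%d MHz\n",
	       56160 + 2160 * 1 - 1080, 56160 + 2160 * 3 + 1080);
	return 0;
}
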
@@ -908,6 +918,61 @@ static void handle_band(struct wiphy *wiphy,
908 handle_channel(wiphy, initiator, band, i); 918 handle_channel(wiphy, initiator, band, i);
909} 919}
910 920
921static bool reg_request_cell_base(struct regulatory_request *request)
922{
923 if (request->initiator != NL80211_REGDOM_SET_BY_USER)
924 return false;
925 if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
926 return false;
927 return true;
928}
929
930bool reg_last_request_cell_base(void)
931{
932 bool val;
933 assert_cfg80211_lock();
934
935 mutex_lock(&reg_mutex);
936 val = reg_request_cell_base(last_request);
937 mutex_unlock(&reg_mutex);
938 return val;
939}
940
941#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
942
943/* Core specific check */
944static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
945{
946 if (!reg_num_devs_support_basehint)
947 return -EOPNOTSUPP;
948
949 if (reg_request_cell_base(last_request)) {
950 if (!regdom_changes(pending_request->alpha2))
951 return -EALREADY;
952 return 0;
953 }
954 return 0;
955}
956
957/* Device specific check */
958static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
959{
960 if (!(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS))
961 return true;
962 return false;
963}
964#else
965static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
966{
967 return -EOPNOTSUPP;
968}
969static int reg_dev_ignore_cell_hint(struct wiphy *wiphy)
970{
971 return true;
972}
973#endif
974
975
911static bool ignore_reg_update(struct wiphy *wiphy, 976static bool ignore_reg_update(struct wiphy *wiphy,
912 enum nl80211_reg_initiator initiator) 977 enum nl80211_reg_initiator initiator)
913{ 978{
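
Cell-base hints are gated twice: a core check (honored only while at least one registered device advertises NL80211_FEATURE_CELL_BASE_REG_HINTS, with a repeat of the current alpha2 returning -EALREADY) and a per-wiphy check that makes non-supporting devices ignore the resulting regdomain; both collapse to "ignore everything" when CONFIG_CFG80211_CERTIFICATION_ONUS is off. A worked sequence under the hunks above and the ignore_request() changes further down, assuming the config option is on and one device supports base hints:

/* cell hint "US"   -> accepted    (reg_num_devs_support_basehint > 0)
 * cell hint "US"   -> -EALREADY   (same alpha2 as the current domain)
 * user hint "DE"   -> -EOPNOTSUPP (plain user hints lose to a cell base)
 * country IE "JP"  -> -EOPNOTSUPP (the cell base also beats the AP's IE)
 */
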
@@ -941,6 +1006,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
941 return true; 1006 return true;
942 } 1007 }
943 1008
1009 if (reg_request_cell_base(last_request))
1010 return reg_dev_ignore_cell_hint(wiphy);
1011
944 return false; 1012 return false;
945} 1013}
946 1014
@@ -1166,14 +1234,6 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
1166 wiphy->reg_notifier(wiphy, last_request); 1234 wiphy->reg_notifier(wiphy, last_request);
1167} 1235}
1168 1236
1169void regulatory_update(struct wiphy *wiphy,
1170 enum nl80211_reg_initiator setby)
1171{
1172 mutex_lock(&reg_mutex);
1173 wiphy_update_regulatory(wiphy, setby);
1174 mutex_unlock(&reg_mutex);
1175}
1176
1177static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) 1237static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
1178{ 1238{
1179 struct cfg80211_registered_device *rdev; 1239 struct cfg80211_registered_device *rdev;
@@ -1304,6 +1364,13 @@ static int ignore_request(struct wiphy *wiphy,
1304 return 0; 1364 return 0;
1305 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 1365 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1306 1366
1367 if (reg_request_cell_base(last_request)) {
1368 /* Trust a Cell base station over the AP's country IE */
1369 if (regdom_changes(pending_request->alpha2))
1370 return -EOPNOTSUPP;
1371 return -EALREADY;
1372 }
1373
1307 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1374 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
1308 1375
1309 if (unlikely(!is_an_alpha2(pending_request->alpha2))) 1376 if (unlikely(!is_an_alpha2(pending_request->alpha2)))
@@ -1348,6 +1415,12 @@ static int ignore_request(struct wiphy *wiphy,
1348 1415
1349 return REG_INTERSECT; 1416 return REG_INTERSECT;
1350 case NL80211_REGDOM_SET_BY_USER: 1417 case NL80211_REGDOM_SET_BY_USER:
1418 if (reg_request_cell_base(pending_request))
1419 return reg_ignore_cell_hint(pending_request);
1420
1421 if (reg_request_cell_base(last_request))
1422 return -EOPNOTSUPP;
1423
1351 if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) 1424 if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
1352 return REG_INTERSECT; 1425 return REG_INTERSECT;
1353 /* 1426 /*
@@ -1389,7 +1462,7 @@ static void reg_set_request_processed(void)
1389 spin_unlock(&reg_requests_lock); 1462 spin_unlock(&reg_requests_lock);
1390 1463
1391 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) 1464 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
1392 cancel_delayed_work_sync(&reg_timeout); 1465 cancel_delayed_work(&reg_timeout);
1393 1466
1394 if (need_more_processing) 1467 if (need_more_processing)
1395 schedule_work(&reg_work); 1468 schedule_work(&reg_work);
@@ -1637,7 +1710,8 @@ static int regulatory_hint_core(const char *alpha2)
1637} 1710}
1638 1711
1639/* User hints */ 1712/* User hints */
1640int regulatory_hint_user(const char *alpha2) 1713int regulatory_hint_user(const char *alpha2,
1714 enum nl80211_user_reg_hint_type user_reg_hint_type)
1641{ 1715{
1642 struct regulatory_request *request; 1716 struct regulatory_request *request;
1643 1717
@@ -1651,6 +1725,7 @@ int regulatory_hint_user(const char *alpha2)
1651 request->alpha2[0] = alpha2[0]; 1725 request->alpha2[0] = alpha2[0];
1652 request->alpha2[1] = alpha2[1]; 1726 request->alpha2[1] = alpha2[1];
1653 request->initiator = NL80211_REGDOM_SET_BY_USER; 1727 request->initiator = NL80211_REGDOM_SET_BY_USER;
1728 request->user_reg_hint_type = user_reg_hint_type;
1654 1729
1655 queue_regulatory_request(request); 1730 queue_regulatory_request(request);
1656 1731
@@ -1903,7 +1978,7 @@ static void restore_regulatory_settings(bool reset_user)
1903 * settings, user regulatory settings takes precedence. 1978 * settings, user regulatory settings takes precedence.
1904 */ 1979 */
1905 if (is_an_alpha2(alpha2)) 1980 if (is_an_alpha2(alpha2))
1906 regulatory_hint_user(user_alpha2); 1981 regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
1907 1982
1908 if (list_empty(&tmp_reg_req_list)) 1983 if (list_empty(&tmp_reg_req_list))
1909 return; 1984 return;
@@ -2078,9 +2153,16 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
2078 else { 2153 else {
2079 if (is_unknown_alpha2(rd->alpha2)) 2154 if (is_unknown_alpha2(rd->alpha2))
2080 pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n"); 2155 pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
2081 else 2156 else {
2082 pr_info("Regulatory domain changed to country: %c%c\n", 2157 if (reg_request_cell_base(last_request))
2083 rd->alpha2[0], rd->alpha2[1]); 2158 pr_info("Regulatory domain changed "
2159 "to country: %c%c by Cell Station\n",
2160 rd->alpha2[0], rd->alpha2[1]);
2161 else
2162 pr_info("Regulatory domain changed "
2163 "to country: %c%c\n",
2164 rd->alpha2[0], rd->alpha2[1]);
2165 }
2084 } 2166 }
2085 print_dfs_region(rd->dfs_region); 2167 print_dfs_region(rd->dfs_region);
2086 print_rd_rules(rd); 2168 print_rd_rules(rd);
@@ -2125,7 +2207,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2125 * checking if the alpha2 changes if CRDA was already called 2207 * checking if the alpha2 changes if CRDA was already called
2126 */ 2208 */
2127 if (!regdom_changes(rd->alpha2)) 2209 if (!regdom_changes(rd->alpha2))
2128 return -EINVAL; 2210 return -EALREADY;
2129 } 2211 }
2130 2212
2131 /* 2213 /*
@@ -2245,6 +2327,9 @@ int set_regdom(const struct ieee80211_regdomain *rd)
2245 /* Note that this doesn't update the wiphys, this is done below */ 2327 /* Note that this doesn't update the wiphys, this is done below */
2246 r = __set_regdom(rd); 2328 r = __set_regdom(rd);
2247 if (r) { 2329 if (r) {
2330 if (r == -EALREADY)
2331 reg_set_request_processed();
2332
2248 kfree(rd); 2333 kfree(rd);
2249 mutex_unlock(&reg_mutex); 2334 mutex_unlock(&reg_mutex);
2250 return r; 2335 return r;
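
__set_regdom() now reports an unchanged alpha2 as -EALREADY rather than -EINVAL, and the caller treats that as a handled request: reg_set_request_processed() still runs so the pending-hint queue keeps draining, and only then does the error unwind. (The related switch from cancel_delayed_work_sync() to cancel_delayed_work() in reg_set_request_processed() presumably accommodates being reached with reg_mutex held, as it is here.) The error-path shape, isolated:

r = __set_regdom(rd);
if (r) {
	if (r == -EALREADY)	/* no-op domain, not a queue failure */
		reg_set_request_processed();
	kfree(rd);
	mutex_unlock(&reg_mutex);
	return r;
}
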
@@ -2287,8 +2372,22 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2287} 2372}
2288#endif /* CONFIG_HOTPLUG */ 2373#endif /* CONFIG_HOTPLUG */
2289 2374
2375void wiphy_regulatory_register(struct wiphy *wiphy)
2376{
2377 assert_cfg80211_lock();
2378
2379 mutex_lock(&reg_mutex);
2380
2381 if (!reg_dev_ignore_cell_hint(wiphy))
2382 reg_num_devs_support_basehint++;
2383
2384 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
2385
2386 mutex_unlock(&reg_mutex);
2387}
2388
2290/* Caller must hold cfg80211_mutex */ 2389/* Caller must hold cfg80211_mutex */
2291void reg_device_remove(struct wiphy *wiphy) 2390void wiphy_regulatory_deregister(struct wiphy *wiphy)
2292{ 2391{
2293 struct wiphy *request_wiphy = NULL; 2392 struct wiphy *request_wiphy = NULL;
2294 2393
@@ -2296,6 +2395,9 @@ void reg_device_remove(struct wiphy *wiphy)
2296 2395
2297 mutex_lock(&reg_mutex); 2396 mutex_lock(&reg_mutex);
2298 2397
2398 if (!reg_dev_ignore_cell_hint(wiphy))
2399 reg_num_devs_support_basehint--;
2400
2299 kfree(wiphy->regd); 2401 kfree(wiphy->regd);
2300 2402
2301 if (last_request) 2403 if (last_request)
@@ -2361,7 +2463,8 @@ int __init regulatory_init(void)
2361 * as a user hint. 2463 * as a user hint.
2362 */ 2464 */
2363 if (!is_world_regdom(ieee80211_regdom)) 2465 if (!is_world_regdom(ieee80211_regdom))
2364 regulatory_hint_user(ieee80211_regdom); 2466 regulatory_hint_user(ieee80211_regdom,
2467 NL80211_USER_REG_HINT_USER);
2365 2468
2366 return 0; 2469 return 0;
2367} 2470}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index e2aaaf525a22..f023c8a31c60 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -22,17 +22,19 @@ bool is_world_regdom(const char *alpha2);
22bool reg_is_valid_request(const char *alpha2); 22bool reg_is_valid_request(const char *alpha2);
23bool reg_supported_dfs_region(u8 dfs_region); 23bool reg_supported_dfs_region(u8 dfs_region);
24 24
25int regulatory_hint_user(const char *alpha2); 25int regulatory_hint_user(const char *alpha2,
26 enum nl80211_user_reg_hint_type user_reg_hint_type);
26 27
27int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env); 28int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
28void reg_device_remove(struct wiphy *wiphy); 29void wiphy_regulatory_register(struct wiphy *wiphy);
30void wiphy_regulatory_deregister(struct wiphy *wiphy);
29 31
30int __init regulatory_init(void); 32int __init regulatory_init(void);
31void regulatory_exit(void); 33void regulatory_exit(void);
32 34
33int set_regdom(const struct ieee80211_regdomain *rd); 35int set_regdom(const struct ieee80211_regdomain *rd);
34 36
35void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby); 37bool reg_last_request_cell_base(void);
36 38
37/** 39/**
38 * regulatory_hint_found_beacon - hints a beacon was found on a channel 40 * regulatory_hint_found_beacon - hints a beacon was found on a channel
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index af2b1caa37fa..848523a2b22f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -23,7 +23,7 @@
23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
24{ 24{
25 struct cfg80211_scan_request *request; 25 struct cfg80211_scan_request *request;
26 struct net_device *dev; 26 struct wireless_dev *wdev;
27#ifdef CONFIG_CFG80211_WEXT 27#ifdef CONFIG_CFG80211_WEXT
28 union iwreq_data wrqu; 28 union iwreq_data wrqu;
29#endif 29#endif
@@ -35,29 +35,31 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
35 if (!request) 35 if (!request)
36 return; 36 return;
37 37
38 dev = request->dev; 38 wdev = request->wdev;
39 39
40 /* 40 /*
41 * This must be before sending the other events! 41 * This must be before sending the other events!
42 * Otherwise, wpa_supplicant gets completely confused with 42 * Otherwise, wpa_supplicant gets completely confused with
43 * wext events. 43 * wext events.
44 */ 44 */
45 cfg80211_sme_scan_done(dev); 45 if (wdev->netdev)
46 cfg80211_sme_scan_done(wdev->netdev);
46 47
47 if (request->aborted) 48 if (request->aborted)
48 nl80211_send_scan_aborted(rdev, dev); 49 nl80211_send_scan_aborted(rdev, wdev);
49 else 50 else
50 nl80211_send_scan_done(rdev, dev); 51 nl80211_send_scan_done(rdev, wdev);
51 52
52#ifdef CONFIG_CFG80211_WEXT 53#ifdef CONFIG_CFG80211_WEXT
53 if (!request->aborted) { 54 if (wdev->netdev && !request->aborted) {
54 memset(&wrqu, 0, sizeof(wrqu)); 55 memset(&wrqu, 0, sizeof(wrqu));
55 56
56 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); 57 wireless_send_event(wdev->netdev, SIOCGIWSCAN, &wrqu, NULL);
57 } 58 }
58#endif 59#endif
59 60
60 dev_put(dev); 61 if (wdev->netdev)
62 dev_put(wdev->netdev);
61 63
62 rdev->scan_req = NULL; 64 rdev->scan_req = NULL;
63 65
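
___cfg80211_scan_done() now works from request->wdev, so every netdev-only side effect, the SME notification, the wext event, and the dev_put(), is guarded for wdevs that have no netdev. The matching driver-visible change is that struct cfg80211_scan_request carries the wireless_dev and the scan op loses its net_device argument, as the sme.c hunk below shows. A sketch of request setup under the new layout (the surrounding code is assumed, not shown):

request->wdev = wdev;			/* was: request->dev = wdev->netdev */
request->wiphy = &rdev->wiphy;
rdev->scan_req = request;
err = rdev->ops->scan(wdev->wiphy, request);	/* netdev argument dropped */
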
@@ -955,7 +957,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
955 } 957 }
956 958
957 creq->wiphy = wiphy; 959 creq->wiphy = wiphy;
958 creq->dev = dev; 960 creq->wdev = dev->ieee80211_ptr;
959 /* SSIDs come after channels */ 961 /* SSIDs come after channels */
960 creq->ssids = (void *)&creq->channels[n_channels]; 962 creq->ssids = (void *)&creq->channels[n_channels];
961 creq->n_channels = n_channels; 963 creq->n_channels = n_channels;
@@ -1024,12 +1026,12 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1024 creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; 1026 creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
1025 1027
1026 rdev->scan_req = creq; 1028 rdev->scan_req = creq;
1027 err = rdev->ops->scan(wiphy, dev, creq); 1029 err = rdev->ops->scan(wiphy, creq);
1028 if (err) { 1030 if (err) {
1029 rdev->scan_req = NULL; 1031 rdev->scan_req = NULL;
1030 /* creq will be freed below */ 1032 /* creq will be freed below */
1031 } else { 1033 } else {
1032 nl80211_send_scan_start(rdev, dev); 1034 nl80211_send_scan_start(rdev, dev->ieee80211_ptr);
1033 /* creq now owned by driver */ 1035 /* creq now owned by driver */
1034 creq = NULL; 1036 creq = NULL;
1035 dev_hold(dev); 1037 dev_hold(dev);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f7e937ff8978..6f39cb808302 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -51,7 +51,7 @@ static bool cfg80211_is_all_idle(void)
51 */ 51 */
52 list_for_each_entry(rdev, &cfg80211_rdev_list, list) { 52 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
53 cfg80211_lock_rdev(rdev); 53 cfg80211_lock_rdev(rdev);
54 list_for_each_entry(wdev, &rdev->netdev_list, list) { 54 list_for_each_entry(wdev, &rdev->wdev_list, list) {
55 wdev_lock(wdev); 55 wdev_lock(wdev);
56 if (wdev->sme_state != CFG80211_SME_IDLE) 56 if (wdev->sme_state != CFG80211_SME_IDLE)
57 is_all_idle = false; 57 is_all_idle = false;
@@ -136,15 +136,15 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
136 wdev->conn->params.ssid_len); 136 wdev->conn->params.ssid_len);
137 request->ssids[0].ssid_len = wdev->conn->params.ssid_len; 137 request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
138 138
139 request->dev = wdev->netdev; 139 request->wdev = wdev;
140 request->wiphy = &rdev->wiphy; 140 request->wiphy = &rdev->wiphy;
141 141
142 rdev->scan_req = request; 142 rdev->scan_req = request;
143 143
144 err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request); 144 err = rdev->ops->scan(wdev->wiphy, request);
145 if (!err) { 145 if (!err) {
146 wdev->conn->state = CFG80211_CONN_SCANNING; 146 wdev->conn->state = CFG80211_CONN_SCANNING;
147 nl80211_send_scan_start(rdev, wdev->netdev); 147 nl80211_send_scan_start(rdev, wdev);
148 dev_hold(wdev->netdev); 148 dev_hold(wdev->netdev);
149 } else { 149 } else {
150 rdev->scan_req = NULL; 150 rdev->scan_req = NULL;
@@ -221,7 +221,7 @@ void cfg80211_conn_work(struct work_struct *work)
221 cfg80211_lock_rdev(rdev); 221 cfg80211_lock_rdev(rdev);
222 mutex_lock(&rdev->devlist_mtx); 222 mutex_lock(&rdev->devlist_mtx);
223 223
224 list_for_each_entry(wdev, &rdev->netdev_list, list) { 224 list_for_each_entry(wdev, &rdev->wdev_list, list) {
225 wdev_lock(wdev); 225 wdev_lock(wdev);
226 if (!netif_running(wdev->netdev)) { 226 if (!netif_running(wdev->netdev)) {
227 wdev_unlock(wdev); 227 wdev_unlock(wdev);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8f2d68fc3a44..26f8cd30f712 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -35,19 +35,29 @@ int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
35{ 35{
36 /* see 802.11 17.3.8.3.2 and Annex J 36 /* see 802.11 17.3.8.3.2 and Annex J
37 * there are overlapping channel numbers in 5GHz and 2GHz bands */ 37 * there are overlapping channel numbers in 5GHz and 2GHz bands */
38 if (band == IEEE80211_BAND_5GHZ) { 38 if (chan <= 0)
39 if (chan >= 182 && chan <= 196) 39 return 0; /* not supported */
40 return 4000 + chan * 5; 40 switch (band) {
41 else 41 case IEEE80211_BAND_2GHZ:
42 return 5000 + chan * 5;
43 } else { /* IEEE80211_BAND_2GHZ */
44 if (chan == 14) 42 if (chan == 14)
45 return 2484; 43 return 2484;
46 else if (chan < 14) 44 else if (chan < 14)
47 return 2407 + chan * 5; 45 return 2407 + chan * 5;
46 break;
47 case IEEE80211_BAND_5GHZ:
48 if (chan >= 182 && chan <= 196)
49 return 4000 + chan * 5;
48 else 50 else
49 return 0; /* not supported */ 51 return 5000 + chan * 5;
52 break;
53 case IEEE80211_BAND_60GHZ:
54 if (chan < 5)
55 return 56160 + chan * 2160;
56 break;
57 default:
58 ;
50 } 59 }
60 return 0; /* not supported */
51} 61}
52EXPORT_SYMBOL(ieee80211_channel_to_frequency); 62EXPORT_SYMBOL(ieee80211_channel_to_frequency);
53 63
@@ -60,8 +70,12 @@ int ieee80211_frequency_to_channel(int freq)
60 return (freq - 2407) / 5; 70 return (freq - 2407) / 5;
61 else if (freq >= 4910 && freq <= 4980) 71 else if (freq >= 4910 && freq <= 4980)
62 return (freq - 4000) / 5; 72 return (freq - 4000) / 5;
63 else 73 else if (freq <= 45000) /* DMG band lower limit */
64 return (freq - 5000) / 5; 74 return (freq - 5000) / 5;
75 else if (freq >= 58320 && freq <= 64800)
76 return (freq - 56160) / 2160;
77 else
78 return 0;
65} 79}
66EXPORT_SYMBOL(ieee80211_frequency_to_channel); 80EXPORT_SYMBOL(ieee80211_frequency_to_channel);
67 81
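
[Editor's note] The two util.c hunks above extend the channel/frequency mapping to the 60 GHz (DMG) band, where channels sit 2160 MHz apart above a 56160 MHz base, and restructure the 2/5 GHz special cases (channel 14 at 2484 MHz, channels 182..196 mapping into the 4.9 GHz range) into a switch. The arithmetic is simple enough to check in a standalone sketch; the band constants here are local stand-ins:

#include <assert.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_60GHZ };

/* Mirrors the post-patch ieee80211_channel_to_frequency() logic. */
static int chan_to_freq(int chan, enum band band)
{
        if (chan <= 0)
                return 0;                       /* not supported */
        switch (band) {
        case BAND_2GHZ:
                if (chan == 14)
                        return 2484;            /* the one irregular 2.4 GHz channel */
                if (chan < 14)
                        return 2407 + chan * 5;
                break;
        case BAND_5GHZ:
                if (chan >= 182 && chan <= 196)
                        return 4000 + chan * 5; /* 4910..4980 MHz range */
                return 5000 + chan * 5;
        case BAND_60GHZ:
                if (chan < 5)
                        return 56160 + chan * 2160;
                break;
        }
        return 0;                               /* not supported */
}

/* Mirrors the post-patch ieee80211_frequency_to_channel() logic. */
static int freq_to_chan(int freq)
{
        if (freq == 2484)
                return 14;
        if (freq < 2484)
                return (freq - 2407) / 5;
        if (freq >= 4910 && freq <= 4980)
                return (freq - 4000) / 5;
        if (freq <= 45000)                      /* below the DMG band lower limit */
                return (freq - 5000) / 5;
        if (freq >= 58320 && freq <= 64800)
                return (freq - 56160) / 2160;
        return 0;
}

int main(void)
{
        assert(chan_to_freq(1, BAND_2GHZ) == 2412);
        assert(chan_to_freq(36, BAND_5GHZ) == 5180);
        assert(chan_to_freq(2, BAND_60GHZ) == 60480);
        assert(freq_to_chan(60480) == 2);       /* round-trips through the DMG band */
        return 0;
}
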
@@ -137,6 +151,11 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
137 } 151 }
138 WARN_ON(want != 0 && want != 3 && want != 6); 152 WARN_ON(want != 0 && want != 3 && want != 6);
139 break; 153 break;
154 case IEEE80211_BAND_60GHZ:
155 /* check for mandatory HT MCS 1..4 */
156 WARN_ON(!sband->ht_cap.ht_supported);
157 WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
158 break;
140 case IEEE80211_NUM_BANDS: 159 case IEEE80211_NUM_BANDS:
141 WARN_ON(1); 160 WARN_ON(1);
142 break; 161 break;
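
[Editor's note] The new 60 GHz case in set_mandatory_flags_band() asserts that DMG MCS 1..4 are all supported by masking the first rx_mask byte with 0x1e. The mask arithmetic, spelled out as a sketch (only the bit math, not the cfg80211 structures):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* bits 1..4 of rx_mask[0] correspond to MCS 1..4 */
        uint8_t mandatory = (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        assert(mandatory == 0x1e);

        uint8_t rx_mask0 = 0x3f;                /* MCS 0..5 supported */
        assert((rx_mask0 & 0x1e) == 0x1e);      /* so the WARN_ON does not fire */
        return 0;
}
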
@@ -774,7 +793,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
774 793
775 mutex_lock(&rdev->devlist_mtx); 794 mutex_lock(&rdev->devlist_mtx);
776 795
777 list_for_each_entry(wdev, &rdev->netdev_list, list) 796 list_for_each_entry(wdev, &rdev->wdev_list, list)
778 cfg80211_process_wdev_events(wdev); 797 cfg80211_process_wdev_events(wdev);
779 798
780 mutex_unlock(&rdev->devlist_mtx); 799 mutex_unlock(&rdev->devlist_mtx);
@@ -804,9 +823,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
804 ntype == NL80211_IFTYPE_P2P_CLIENT)) 823 ntype == NL80211_IFTYPE_P2P_CLIENT))
805 return -EBUSY; 824 return -EBUSY;
806 825
807 if (ntype != otype) { 826 if (ntype != otype && netif_running(dev)) {
827 mutex_lock(&rdev->devlist_mtx);
808 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, 828 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
809 ntype); 829 ntype);
830 mutex_unlock(&rdev->devlist_mtx);
810 if (err) 831 if (err)
811 return err; 832 return err;
812 833
@@ -814,6 +835,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
814 dev->ieee80211_ptr->mesh_id_up_len = 0; 835 dev->ieee80211_ptr->mesh_id_up_len = 0;
815 836
816 switch (otype) { 837 switch (otype) {
838 case NL80211_IFTYPE_AP:
839 cfg80211_stop_ap(rdev, dev);
840 break;
817 case NL80211_IFTYPE_ADHOC: 841 case NL80211_IFTYPE_ADHOC:
818 cfg80211_leave_ibss(rdev, dev, false); 842 cfg80211_leave_ibss(rdev, dev, false);
819 break; 843 break;
@@ -868,15 +892,69 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
868 } 892 }
869 } 893 }
870 894
895 if (!err && ntype != otype && netif_running(dev)) {
896 cfg80211_update_iface_num(rdev, ntype, 1);
897 cfg80211_update_iface_num(rdev, otype, -1);
898 }
899
871 return err; 900 return err;
872} 901}
873 902
874u16 cfg80211_calculate_bitrate(struct rate_info *rate) 903static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
904{
905 static const u32 __mcs2bitrate[] = {
906 /* control PHY */
907 [0] = 275,
908 /* SC PHY */
909 [1] = 3850,
910 [2] = 7700,
911 [3] = 9625,
912 [4] = 11550,
913 [5] = 12512, /* 1251.25 mbps */
914 [6] = 15400,
915 [7] = 19250,
916 [8] = 23100,
917 [9] = 25025,
918 [10] = 30800,
919 [11] = 38500,
920 [12] = 46200,
921 /* OFDM PHY */
922 [13] = 6930,
923 [14] = 8662, /* 866.25 mbps */
924 [15] = 13860,
925 [16] = 17325,
926 [17] = 20790,
927 [18] = 27720,
928 [19] = 34650,
929 [20] = 41580,
930 [21] = 45045,
931 [22] = 51975,
932 [23] = 62370,
933 [24] = 67568, /* 6756.75 mbps */
934 /* LP-SC PHY */
935 [25] = 6260,
936 [26] = 8340,
937 [27] = 11120,
938 [28] = 12510,
939 [29] = 16680,
940 [30] = 22240,
941 [31] = 25030,
942 };
943
944 if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
945 return 0;
946
947 return __mcs2bitrate[rate->mcs];
948}
949
950u32 cfg80211_calculate_bitrate(struct rate_info *rate)
875{ 951{
876 int modulation, streams, bitrate; 952 int modulation, streams, bitrate;
877 953
878 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) 954 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
879 return rate->legacy; 955 return rate->legacy;
956 if (rate->flags & RATE_INFO_FLAGS_60G)
957 return cfg80211_calculate_bitrate_60g(rate);
880 958
881 /* the formula below does only work for MCS values smaller than 32 */ 959 /* the formula below does only work for MCS values smaller than 32 */
882 if (WARN_ON_ONCE(rate->mcs >= 32)) 960 if (WARN_ON_ONCE(rate->mcs >= 32))
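
[Editor's note] cfg80211_calculate_bitrate() grows a 60 GHz path and its return type widens from u16 to u32: the top DMG rate in the table above is 67568 units of 100 kbps (6756.75 Mbps at MCS 24), which no longer fits in 16 bits. A sketch of the bounds-checked table lookup, using a truncated copy of the table:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Rates in units of 100 kbps, indexed by DMG MCS (truncated excerpt). */
static const uint32_t mcs2bitrate[] = {
        [0] = 275,      /* control PHY: 27.5 Mbps */
        [1] = 3850,     /* SC PHY MCS 1: 385 Mbps */
        [2] = 7700,
        [3] = 9625,
        [4] = 11550,
};

static uint32_t bitrate_60g(unsigned int mcs)
{
        if (mcs >= ARRAY_SIZE(mcs2bitrate))
                return 0;                       /* the kernel would WARN_ON_ONCE here */
        return mcs2bitrate[mcs];
}

int main(void)
{
        /* units of 100 kbps -> Mbps */
        printf("MCS 4 = %.1f Mbps\n", bitrate_60g(4) / 10.0);     /* 1155.0 */
        printf("MCS 9 = %u\n", (unsigned)bitrate_60g(9));         /* out of range: 0 */
        return 0;
}
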
@@ -916,7 +994,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
916 994
917 mutex_lock(&rdev->devlist_mtx); 995 mutex_lock(&rdev->devlist_mtx);
918 996
919 list_for_each_entry(wdev, &rdev->netdev_list, list) { 997 list_for_each_entry(wdev, &rdev->wdev_list, list) {
920 if (!wdev->beacon_interval) 998 if (!wdev->beacon_interval)
921 continue; 999 continue;
922 if (wdev->beacon_interval != beacon_int) { 1000 if (wdev->beacon_interval != beacon_int) {
@@ -930,28 +1008,49 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
930 return res; 1008 return res;
931} 1009}
932 1010
933int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, 1011int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
934 struct wireless_dev *wdev, 1012 struct wireless_dev *wdev,
935 enum nl80211_iftype iftype) 1013 enum nl80211_iftype iftype,
1014 struct ieee80211_channel *chan,
1015 enum cfg80211_chan_mode chanmode)
936{ 1016{
937 struct wireless_dev *wdev_iter; 1017 struct wireless_dev *wdev_iter;
938 u32 used_iftypes = BIT(iftype); 1018 u32 used_iftypes = BIT(iftype);
939 int num[NUM_NL80211_IFTYPES]; 1019 int num[NUM_NL80211_IFTYPES];
1020 struct ieee80211_channel
1021 *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
1022 struct ieee80211_channel *ch;
1023 enum cfg80211_chan_mode chmode;
1024 int num_different_channels = 0;
940 int total = 1; 1025 int total = 1;
941 int i, j; 1026 int i, j;
942 1027
943 ASSERT_RTNL(); 1028 ASSERT_RTNL();
1029 lockdep_assert_held(&rdev->devlist_mtx);
944 1030
945 /* Always allow software iftypes */ 1031 /* Always allow software iftypes */
946 if (rdev->wiphy.software_iftypes & BIT(iftype)) 1032 if (rdev->wiphy.software_iftypes & BIT(iftype))
947 return 0; 1033 return 0;
948 1034
949 memset(num, 0, sizeof(num)); 1035 memset(num, 0, sizeof(num));
1036 memset(used_channels, 0, sizeof(used_channels));
950 1037
951 num[iftype] = 1; 1038 num[iftype] = 1;
952 1039
953 mutex_lock(&rdev->devlist_mtx); 1040 switch (chanmode) {
954 list_for_each_entry(wdev_iter, &rdev->netdev_list, list) { 1041 case CHAN_MODE_UNDEFINED:
1042 break;
1043 case CHAN_MODE_SHARED:
1044 WARN_ON(!chan);
1045 used_channels[0] = chan;
1046 num_different_channels++;
1047 break;
1048 case CHAN_MODE_EXCLUSIVE:
1049 num_different_channels++;
1050 break;
1051 }
1052
1053 list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
955 if (wdev_iter == wdev) 1054 if (wdev_iter == wdev)
956 continue; 1055 continue;
957 if (!netif_running(wdev_iter->netdev)) 1056 if (!netif_running(wdev_iter->netdev))
@@ -960,11 +1059,42 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
960 if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype)) 1059 if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
961 continue; 1060 continue;
962 1061
1062 /*
1063 * We may be holding the "wdev" mutex, but now need to lock
1064 * wdev_iter. This is OK because once we get here wdev_iter
1065 * is not wdev (tested above), but we need to use the nested
1066 * locking for lockdep.
1067 */
1068 mutex_lock_nested(&wdev_iter->mtx, 1);
1069 __acquire(wdev_iter->mtx);
1070 cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
1071 wdev_unlock(wdev_iter);
1072
1073 switch (chmode) {
1074 case CHAN_MODE_UNDEFINED:
1075 break;
1076 case CHAN_MODE_SHARED:
1077 for (i = 0; i < CFG80211_MAX_NUM_DIFFERENT_CHANNELS; i++)
1078 if (!used_channels[i] || used_channels[i] == ch)
1079 break;
1080
1081 if (i == CFG80211_MAX_NUM_DIFFERENT_CHANNELS)
1082 return -EBUSY;
1083
1084 if (used_channels[i] == NULL) {
1085 used_channels[i] = ch;
1086 num_different_channels++;
1087 }
1088 break;
1089 case CHAN_MODE_EXCLUSIVE:
1090 num_different_channels++;
1091 break;
1092 }
1093
963 num[wdev_iter->iftype]++; 1094 num[wdev_iter->iftype]++;
964 total++; 1095 total++;
965 used_iftypes |= BIT(wdev_iter->iftype); 1096 used_iftypes |= BIT(wdev_iter->iftype);
966 } 1097 }
967 mutex_unlock(&rdev->devlist_mtx);
968 1098
969 if (total == 1) 1099 if (total == 1)
970 return 0; 1100 return 0;
@@ -976,12 +1106,15 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
976 1106
977 c = &rdev->wiphy.iface_combinations[i]; 1107 c = &rdev->wiphy.iface_combinations[i];
978 1108
1109 if (total > c->max_interfaces)
1110 continue;
1111 if (num_different_channels > c->num_different_channels)
1112 continue;
1113
979 limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits, 1114 limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
980 GFP_KERNEL); 1115 GFP_KERNEL);
981 if (!limits) 1116 if (!limits)
982 return -ENOMEM; 1117 return -ENOMEM;
983 if (total > c->max_interfaces)
984 goto cont;
985 1118
986 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 1119 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
987 if (rdev->wiphy.software_iftypes & BIT(iftype)) 1120 if (rdev->wiphy.software_iftypes & BIT(iftype))
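
[Editor's note] The interface-combination check above is extended from counting interface types to also counting distinct channels: each running wdev reports its channel and mode, shared channels are de-duplicated through a small fixed-size array, and exclusive users always count as a new channel. The core bookkeeping, extracted into a standalone sketch (the constant and types are stand-ins):

#include <assert.h>
#include <errno.h>
#include <string.h>

#define MAX_CHANNELS 2   /* stands in for CFG80211_MAX_NUM_DIFFERENT_CHANNELS */

struct channel { int freq; };

/*
 * Record one more shared-channel user. Users of an already-seen channel
 * are counted once; returns -EBUSY when the array of distinct channels
 * is full, mirroring the kernel loop above.
 */
static int account_channel(const struct channel **used, int *ndiff,
                           const struct channel *ch)
{
        int i;

        for (i = 0; i < MAX_CHANNELS; i++)
                if (!used[i] || used[i] == ch)
                        break;
        if (i == MAX_CHANNELS)
                return -EBUSY;
        if (!used[i]) {
                used[i] = ch;
                (*ndiff)++;
        }
        return 0;
}

int main(void)
{
        const struct channel *used[MAX_CHANNELS];
        struct channel a = { 2412 }, b = { 5180 }, c = { 5200 };
        int ndiff = 0;

        memset(used, 0, sizeof(used));
        assert(account_channel(used, &ndiff, &a) == 0);
        assert(account_channel(used, &ndiff, &a) == 0);   /* shared, no new slot */
        assert(account_channel(used, &ndiff, &b) == 0);
        assert(ndiff == 2);
        assert(account_channel(used, &ndiff, &c) == -EBUSY); /* third distinct channel */
        return 0;
}
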
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6a6181a673ca..494379eb464f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -796,7 +796,15 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
796 case NL80211_IFTYPE_ADHOC: 796 case NL80211_IFTYPE_ADHOC:
797 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); 797 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
798 case NL80211_IFTYPE_MONITOR: 798 case NL80211_IFTYPE_MONITOR:
799 case NL80211_IFTYPE_WDS: 799 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
800 if (freq < 0)
801 return freq;
802 if (freq == 0)
803 return -EINVAL;
804 mutex_lock(&rdev->devlist_mtx);
805 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
806 mutex_unlock(&rdev->devlist_mtx);
807 return err;
800 case NL80211_IFTYPE_MESH_POINT: 808 case NL80211_IFTYPE_MESH_POINT:
801 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 809 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
802 if (freq < 0) 810 if (freq < 0)
@@ -804,9 +812,8 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
804 if (freq == 0) 812 if (freq == 0)
805 return -EINVAL; 813 return -EINVAL;
806 mutex_lock(&rdev->devlist_mtx); 814 mutex_lock(&rdev->devlist_mtx);
807 wdev_lock(wdev); 815 err = cfg80211_set_mesh_freq(rdev, wdev, freq,
808 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 816 NL80211_CHAN_NO_HT);
809 wdev_unlock(wdev);
810 mutex_unlock(&rdev->devlist_mtx); 817 mutex_unlock(&rdev->devlist_mtx);
811 return err; 818 return err;
812 default: 819 default:
@@ -832,18 +839,14 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
832 if (!rdev->ops->get_channel) 839 if (!rdev->ops->get_channel)
833 return -EINVAL; 840 return -EINVAL;
834 841
835 chan = rdev->ops->get_channel(wdev->wiphy, &channel_type); 842 chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type);
836 if (!chan) 843 if (!chan)
837 return -EINVAL; 844 return -EINVAL;
838 freq->m = chan->center_freq; 845 freq->m = chan->center_freq;
839 freq->e = 6; 846 freq->e = 6;
840 return 0; 847 return 0;
841 default: 848 default:
842 if (!wdev->channel) 849 return -EINVAL;
843 return -EINVAL;
844 freq->m = wdev->channel->center_freq;
845 freq->e = 6;
846 return 0;
847 } 850 }
848} 851}
849 852
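
[Editor's note] In the siwfreq hunk above, the monitor (and former WDS) case now resolves the wext frequency itself: a negative result from cfg80211_wext_freq() is an error to propagate, zero means "auto" and is rejected, and anything else is handed to cfg80211_set_monitor_channel() under devlist_mtx. The validation ladder, sketched with stand-in helpers:

#include <assert.h>
#include <errno.h>

/* Stand-in for cfg80211_wext_freq(): <0 error, 0 auto, >0 MHz. */
static int wext_freq(int m) { return m < 0 ? -EINVAL : m; }

/* Stand-in for cfg80211_set_monitor_channel(). */
static int set_monitor(int freq) { return freq == 5180 ? 0 : -EBUSY; }

static int siwfreq_monitor(int m)
{
        int freq = wext_freq(m);

        if (freq < 0)
                return freq;        /* propagate the conversion error */
        if (freq == 0)
                return -EINVAL;     /* "auto" is meaningless for monitor mode */
        return set_monitor(freq);   /* the kernel takes devlist_mtx around this */
}

int main(void)
{
        assert(siwfreq_monitor(-1) == -EINVAL);
        assert(siwfreq_monitor(0) == -EINVAL);
        assert(siwfreq_monitor(5180) == 0);
        return 0;
}
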
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7decbd357d51..1f773f668d1a 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -111,9 +111,15 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
111 111
112 wdev->wext.connect.channel = chan; 112 wdev->wext.connect.channel = chan;
113 113
114 /* SSID is not set, we just want to switch channel */ 114 /*
115 * SSID is not set, we just want to switch monitor channel,
116 * this is really just backward compatibility, if the SSID
117 * is set then we use the channel to select the BSS to use
118 * to connect to instead. If we were connected on another
119 * channel we disconnected above and reconnect below.
120 */
115 if (chan && !wdev->wext.connect.ssid_len) { 121 if (chan && !wdev->wext.connect.ssid_len) {
116 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 122 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
117 goto out; 123 goto out;
118 } 124 }
119 125
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index cf6366270054..277c8d2448d6 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -66,7 +66,7 @@ out:
66 66
67/** 67/**
68 * __x25_remove_route - remove route from x25_route_list 68 * __x25_remove_route - remove route from x25_route_list
69 * @rt - route to remove 69 * @rt: route to remove
70 * 70 *
 71 * Remove route from x25_route_list, if it was there. 71 * Remove route from x25_route_list, if it was there.
72 * Caller must hold x25_route_list_lock. 72 * Caller must hold x25_route_list_lock.
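
[Editor's note] The x25_route.c hunk is a pure kernel-doc fix: parameter descriptions use the "@name:" form, not "@name -". For reference, the well-formed block for this function looks like the following (wording lightly smoothed):

/**
 * __x25_remove_route - remove route from x25_route_list
 * @rt: route to remove
 *
 * Remove the route from x25_route_list, if it was there.
 * Caller must hold x25_route_list_lock.
 */
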
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ccfbd328a69d..c5a5165a5927 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1350,11 +1350,12 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1350 default: 1350 default:
1351 BUG(); 1351 BUG();
1352 } 1352 }
1353 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1353 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1354 1354
1355 if (likely(xdst)) { 1355 if (likely(xdst)) {
1356 memset(&xdst->u.rt6.rt6i_table, 0, 1356 struct dst_entry *dst = &xdst->u.dst;
1357 sizeof(*xdst) - sizeof(struct dst_entry)); 1357
1358 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1358 xdst->flo.ops = &xfrm_bundle_fc_ops; 1359 xdst->flo.ops = &xfrm_bundle_fc_ops;
1359 } else 1360 } else
1360 xdst = ERR_PTR(-ENOBUFS); 1361 xdst = ERR_PTR(-ENOBUFS);
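
[Editor's note] The xfrm_alloc_dst() hunk replaces a memset anchored at an arbitrary union member (rt6i_table) with one anchored just past the embedded struct dst_entry: memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)) zeroes exactly the bytes of the containing struct that follow the header, independent of which union member happens to come first. The pointer arithmetic, in a standalone sketch with stand-in types:

#include <assert.h>
#include <string.h>

struct header { int refcnt; void *ops; };

struct bundle {
        struct header hdr;     /* must stay intact */
        int table;
        char payload[32];
};

int main(void)
{
        struct bundle b;

        memset(&b, 0xff, sizeof(b));
        /* zero everything after the embedded header, nothing before it */
        memset(&b.hdr + 1, 0, sizeof(b) - sizeof(b.hdr));

        assert(b.hdr.refcnt == -1);   /* header bytes untouched (still 0xff) */
        assert(b.table == 0);
        assert(b.payload[31] == 0);
        return 0;
}
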
@@ -1476,7 +1477,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1476 dst1->xfrm = xfrm[i]; 1477 dst1->xfrm = xfrm[i];
1477 xdst->xfrm_genid = xfrm[i]->genid; 1478 xdst->xfrm_genid = xfrm[i]->genid;
1478 1479
1479 dst1->obsolete = -1; 1480 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1480 dst1->flags |= DST_HOST; 1481 dst1->flags |= DST_HOST;
1481 dst1->lastuse = now; 1482 dst1->lastuse = now;
1482 1483
@@ -1500,9 +1501,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1500 if (!dev) 1501 if (!dev)
1501 goto free_dst; 1502 goto free_dst;
1502 1503
1503 /* Copy neighbour for reachability confirmation */
1504 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
1505
1506 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1504 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1507 xfrm_init_pmtu(dst_prev); 1505 xfrm_init_pmtu(dst_prev);
1508 1506
@@ -2221,12 +2219,13 @@ EXPORT_SYMBOL(__xfrm_route_forward);
2221static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 2219static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2222{ 2220{
2223 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 2221 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2224 * to "-1" to force all XFRM destinations to get validated by 2222 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2225 * dst_ops->check on every use. We do this because when a 2223 * get validated by dst_ops->check on every use. We do this
2226 * normal route referenced by an XFRM dst is obsoleted we do 2224 * because when a normal route referenced by an XFRM dst is
2227 * not go looking around for all parent referencing XFRM dsts 2225 * obsoleted we do not go looking around for all parent
2228 * so that we can invalidate them. It is just too much work. 2226 * referencing XFRM dsts so that we can invalidate them. It
2229 * Instead we make the checks here on every use. For example: 2227 * is just too much work. Instead we make the checks here on
2228 * every use. For example:
2230 * 2229 *
2231 * XFRM dst A --> IPv4 dst X 2230 * XFRM dst A --> IPv4 dst X
2232 * 2231 *
@@ -2236,9 +2235,9 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2236 * stale_bundle() check. 2235 * stale_bundle() check.
2237 * 2236 *
2238 * When a policy's bundle is pruned, we dst_free() the XFRM 2237 * When a policy's bundle is pruned, we dst_free() the XFRM
 2239 * dst which causes its ->obsolete field to be set to a 2238 * dst which causes its ->obsolete field to be set to
2240 * positive non-zero integer. If an XFRM dst has been pruned 2239 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
2241 * like this, we want to force a new route lookup. 2240 * this, we want to force a new route lookup.
2242 */ 2241 */
2243 if (dst->obsolete < 0 && !stale_bundle(dst)) 2242 if (dst->obsolete < 0 && !stale_bundle(dst))
2244 return dst; 2243 return dst;
@@ -2404,9 +2403,11 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst)
2404 return mtu ? : dst_mtu(dst->path); 2403 return mtu ? : dst_mtu(dst->path);
2405} 2404}
2406 2405
2407static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) 2406static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2407 struct sk_buff *skb,
2408 const void *daddr)
2408{ 2409{
2409 return dst_neigh_lookup(dst->path, daddr); 2410 return dst->path->ops->neigh_lookup(dst, skb, daddr);
2410} 2411}
2411 2412
2412int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2413int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 44293b3fd6a1..e75d8e47f35c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -754,58 +754,67 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
754 struct xfrm_usersa_info *p, 754 struct xfrm_usersa_info *p,
755 struct sk_buff *skb) 755 struct sk_buff *skb)
756{ 756{
757 copy_to_user_state(x, p); 757 int ret = 0;
758
759 if (x->coaddr &&
760 nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
761 goto nla_put_failure;
762
763 if (x->lastused &&
764 nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
765 goto nla_put_failure;
766
767 if (x->aead &&
768 nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
769 goto nla_put_failure;
770
771 if (x->aalg &&
772 (copy_to_user_auth(x->aalg, skb) ||
773 nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
774 xfrm_alg_auth_len(x->aalg), x->aalg)))
775 goto nla_put_failure;
776
777 if (x->ealg &&
778 nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
779 goto nla_put_failure;
780
781 if (x->calg &&
782 nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
783 goto nla_put_failure;
784
785 if (x->encap &&
786 nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
787 goto nla_put_failure;
788 758
789 if (x->tfcpad && 759 copy_to_user_state(x, p);
790 nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
791 goto nla_put_failure;
792
793 if (xfrm_mark_put(skb, &x->mark))
794 goto nla_put_failure;
795
796 if (x->replay_esn &&
797 nla_put(skb, XFRMA_REPLAY_ESN_VAL,
798 xfrm_replay_state_esn_len(x->replay_esn),
799 x->replay_esn))
800 goto nla_put_failure;
801
802 if (x->security && copy_sec_ctx(x->security, skb))
803 goto nla_put_failure;
804
805 return 0;
806 760
807nla_put_failure: 761 if (x->coaddr) {
808 return -EMSGSIZE; 762 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
763 if (ret)
764 goto out;
765 }
766 if (x->lastused) {
767 ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
768 if (ret)
769 goto out;
770 }
771 if (x->aead) {
772 ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
773 if (ret)
774 goto out;
775 }
776 if (x->aalg) {
777 ret = copy_to_user_auth(x->aalg, skb);
778 if (!ret)
779 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
780 xfrm_alg_auth_len(x->aalg), x->aalg);
781 if (ret)
782 goto out;
783 }
784 if (x->ealg) {
785 ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
786 if (ret)
787 goto out;
788 }
789 if (x->calg) {
790 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
791 if (ret)
792 goto out;
793 }
794 if (x->encap) {
795 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
796 if (ret)
797 goto out;
798 }
799 if (x->tfcpad) {
800 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
801 if (ret)
802 goto out;
803 }
804 ret = xfrm_mark_put(skb, &x->mark);
805 if (ret)
806 goto out;
807 if (x->replay_esn) {
808 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
809 xfrm_replay_state_esn_len(x->replay_esn),
810 x->replay_esn);
811 if (ret)
812 goto out;
813 }
814 if (x->security)
815 ret = copy_sec_ctx(x->security, skb);
816out:
817 return ret;
809} 818}
810 819
811static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 820static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
@@ -825,15 +834,12 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
825 p = nlmsg_data(nlh); 834 p = nlmsg_data(nlh);
826 835
827 err = copy_to_user_state_extra(x, p, skb); 836 err = copy_to_user_state_extra(x, p, skb);
828 if (err) 837 if (err) {
829 goto nla_put_failure; 838 nlmsg_cancel(skb, nlh);
830 839 return err;
840 }
831 nlmsg_end(skb, nlh); 841 nlmsg_end(skb, nlh);
832 return 0; 842 return 0;
833
834nla_put_failure:
835 nlmsg_cancel(skb, nlh);
836 return err;
837} 843}
838 844
839static int xfrm_dump_sa_done(struct netlink_callback *cb) 845static int xfrm_dump_sa_done(struct netlink_callback *cb)
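
[Editor's note] The bulk of the xfrm_user.c changes converts "goto nla_put_failure" chains, which collapsed every failure into a blanket -EMSGSIZE, into explicit err propagation: each nla_put-style helper's return value is kept, the calls are chained with "if (!err)", and the message is cancelled once at the end with the real error returned. The control-flow shape, modelled in a standalone sketch where the helpers stand in for nla_put() and friends:

#include <assert.h>
#include <errno.h>

static int put_a(void) { return 0; }
static int put_b(void) { return -EMSGSIZE; }   /* simulate a full buffer */
static int put_c(void) { return 0; }

static int cancelled;
static void msg_cancel(void) { cancelled = 1; }

static int build_message(void)
{
        int err;

        /* chain the attribute writers; the first failure wins */
        err = put_a();
        if (!err)
                err = put_b();
        if (!err)
                err = put_c();
        if (err) {
                msg_cancel();      /* nlmsg_cancel() in the kernel */
                return err;        /* the real error, not a blanket -EMSGSIZE */
        }
        return 0;                  /* nlmsg_end() in the kernel */
}

int main(void)
{
        assert(build_message() == -EMSGSIZE);
        assert(cancelled == 1);
        return 0;
}
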
@@ -904,6 +910,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
904 struct xfrmu_spdinfo spc; 910 struct xfrmu_spdinfo spc;
905 struct xfrmu_spdhinfo sph; 911 struct xfrmu_spdhinfo sph;
906 struct nlmsghdr *nlh; 912 struct nlmsghdr *nlh;
913 int err;
907 u32 *f; 914 u32 *f;
908 915
909 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 916 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
@@ -922,15 +929,15 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
922 sph.spdhcnt = si.spdhcnt; 929 sph.spdhcnt = si.spdhcnt;
923 sph.spdhmcnt = si.spdhmcnt; 930 sph.spdhmcnt = si.spdhmcnt;
924 931
925 if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) || 932 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
926 nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph)) 933 if (!err)
927 goto nla_put_failure; 934 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
935 if (err) {
936 nlmsg_cancel(skb, nlh);
937 return err;
938 }
928 939
929 return nlmsg_end(skb, nlh); 940 return nlmsg_end(skb, nlh);
930
931nla_put_failure:
932 nlmsg_cancel(skb, nlh);
933 return -EMSGSIZE;
934} 941}
935 942
936static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 943static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -965,6 +972,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
965 struct xfrmk_sadinfo si; 972 struct xfrmk_sadinfo si;
966 struct xfrmu_sadhinfo sh; 973 struct xfrmu_sadhinfo sh;
967 struct nlmsghdr *nlh; 974 struct nlmsghdr *nlh;
975 int err;
968 u32 *f; 976 u32 *f;
969 977
970 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); 978 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
@@ -978,15 +986,15 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
978 sh.sadhmcnt = si.sadhmcnt; 986 sh.sadhmcnt = si.sadhmcnt;
979 sh.sadhcnt = si.sadhcnt; 987 sh.sadhcnt = si.sadhcnt;
980 988
981 if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) || 989 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
982 nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh)) 990 if (!err)
983 goto nla_put_failure; 991 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
992 if (err) {
993 nlmsg_cancel(skb, nlh);
994 return err;
995 }
984 996
985 return nlmsg_end(skb, nlh); 997 return nlmsg_end(skb, nlh);
986
987nla_put_failure:
988 nlmsg_cancel(skb, nlh);
989 return -EMSGSIZE;
990} 998}
991 999
992static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1000static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1439,9 +1447,8 @@ static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buf
1439 1447
1440static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 1448static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1441{ 1449{
1442 if (xp->security) { 1450 if (xp->security)
1443 return copy_sec_ctx(xp->security, skb); 1451 return copy_sec_ctx(xp->security, skb);
1444 }
1445 return 0; 1452 return 0;
1446} 1453}
1447static inline size_t userpolicy_type_attrsize(void) 1454static inline size_t userpolicy_type_attrsize(void)
@@ -1477,6 +1484,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1477 struct sk_buff *in_skb = sp->in_skb; 1484 struct sk_buff *in_skb = sp->in_skb;
1478 struct sk_buff *skb = sp->out_skb; 1485 struct sk_buff *skb = sp->out_skb;
1479 struct nlmsghdr *nlh; 1486 struct nlmsghdr *nlh;
1487 int err;
1480 1488
1481 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1489 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1482 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1490 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
@@ -1485,22 +1493,19 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1485 1493
1486 p = nlmsg_data(nlh); 1494 p = nlmsg_data(nlh);
1487 copy_to_user_policy(xp, p, dir); 1495 copy_to_user_policy(xp, p, dir);
1488 if (copy_to_user_tmpl(xp, skb) < 0) 1496 err = copy_to_user_tmpl(xp, skb);
1489 goto nlmsg_failure; 1497 if (!err)
1490 if (copy_to_user_sec_ctx(xp, skb)) 1498 err = copy_to_user_sec_ctx(xp, skb);
1491 goto nlmsg_failure; 1499 if (!err)
1492 if (copy_to_user_policy_type(xp->type, skb) < 0) 1500 err = copy_to_user_policy_type(xp->type, skb);
1493 goto nlmsg_failure; 1501 if (!err)
1494 if (xfrm_mark_put(skb, &xp->mark)) 1502 err = xfrm_mark_put(skb, &xp->mark);
1495 goto nla_put_failure; 1503 if (err) {
1496 1504 nlmsg_cancel(skb, nlh);
1505 return err;
1506 }
1497 nlmsg_end(skb, nlh); 1507 nlmsg_end(skb, nlh);
1498 return 0; 1508 return 0;
1499
1500nla_put_failure:
1501nlmsg_failure:
1502 nlmsg_cancel(skb, nlh);
1503 return -EMSGSIZE;
1504} 1509}
1505 1510
1506static int xfrm_dump_policy_done(struct netlink_callback *cb) 1511static int xfrm_dump_policy_done(struct netlink_callback *cb)
@@ -1688,6 +1693,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1688{ 1693{
1689 struct xfrm_aevent_id *id; 1694 struct xfrm_aevent_id *id;
1690 struct nlmsghdr *nlh; 1695 struct nlmsghdr *nlh;
1696 int err;
1691 1697
1692 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 1698 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1693 if (nlh == NULL) 1699 if (nlh == NULL)
@@ -1703,35 +1709,39 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1703 id->flags = c->data.aevent; 1709 id->flags = c->data.aevent;
1704 1710
1705 if (x->replay_esn) { 1711 if (x->replay_esn) {
1706 if (nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1712 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1707 xfrm_replay_state_esn_len(x->replay_esn), 1713 xfrm_replay_state_esn_len(x->replay_esn),
1708 x->replay_esn)) 1714 x->replay_esn);
1709 goto nla_put_failure;
1710 } else { 1715 } else {
1711 if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1716 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1712 &x->replay)) 1717 &x->replay);
1713 goto nla_put_failure;
1714 } 1718 }
1715 if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft)) 1719 if (err)
1716 goto nla_put_failure; 1720 goto out_cancel;
1717 1721 err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1718 if ((id->flags & XFRM_AE_RTHR) && 1722 if (err)
1719 nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff)) 1723 goto out_cancel;
1720 goto nla_put_failure;
1721
1722 if ((id->flags & XFRM_AE_ETHR) &&
1723 nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1724 x->replay_maxage * 10 / HZ))
1725 goto nla_put_failure;
1726 1724
1727 if (xfrm_mark_put(skb, &x->mark)) 1725 if (id->flags & XFRM_AE_RTHR) {
1728 goto nla_put_failure; 1726 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1727 if (err)
1728 goto out_cancel;
1729 }
1730 if (id->flags & XFRM_AE_ETHR) {
1731 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1732 x->replay_maxage * 10 / HZ);
1733 if (err)
1734 goto out_cancel;
1735 }
1736 err = xfrm_mark_put(skb, &x->mark);
1737 if (err)
1738 goto out_cancel;
1729 1739
1730 return nlmsg_end(skb, nlh); 1740 return nlmsg_end(skb, nlh);
1731 1741
1732nla_put_failure: 1742out_cancel:
1733 nlmsg_cancel(skb, nlh); 1743 nlmsg_cancel(skb, nlh);
1734 return -EMSGSIZE; 1744 return err;
1735} 1745}
1736 1746
1737static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1747static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2155,7 +2165,7 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2155 const struct xfrm_migrate *mp; 2165 const struct xfrm_migrate *mp;
2156 struct xfrm_userpolicy_id *pol_id; 2166 struct xfrm_userpolicy_id *pol_id;
2157 struct nlmsghdr *nlh; 2167 struct nlmsghdr *nlh;
2158 int i; 2168 int i, err;
2159 2169
2160 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 2170 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2161 if (nlh == NULL) 2171 if (nlh == NULL)
@@ -2167,21 +2177,25 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2167 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 2177 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2168 pol_id->dir = dir; 2178 pol_id->dir = dir;
2169 2179
2170 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0)) 2180 if (k != NULL) {
2171 goto nlmsg_failure; 2181 err = copy_to_user_kmaddress(k, skb);
2172 2182 if (err)
2173 if (copy_to_user_policy_type(type, skb) < 0) 2183 goto out_cancel;
2174 goto nlmsg_failure; 2184 }
2175 2185 err = copy_to_user_policy_type(type, skb);
2186 if (err)
2187 goto out_cancel;
2176 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 2188 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2177 if (copy_to_user_migrate(mp, skb) < 0) 2189 err = copy_to_user_migrate(mp, skb);
2178 goto nlmsg_failure; 2190 if (err)
2191 goto out_cancel;
2179 } 2192 }
2180 2193
2181 return nlmsg_end(skb, nlh); 2194 return nlmsg_end(skb, nlh);
2182nlmsg_failure: 2195
2196out_cancel:
2183 nlmsg_cancel(skb, nlh); 2197 nlmsg_cancel(skb, nlh);
2184 return -EMSGSIZE; 2198 return err;
2185} 2199}
2186 2200
2187static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2201static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2354,6 +2368,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2354{ 2368{
2355 struct xfrm_user_expire *ue; 2369 struct xfrm_user_expire *ue;
2356 struct nlmsghdr *nlh; 2370 struct nlmsghdr *nlh;
2371 int err;
2357 2372
2358 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); 2373 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2359 if (nlh == NULL) 2374 if (nlh == NULL)
@@ -2363,13 +2378,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2363 copy_to_user_state(x, &ue->state); 2378 copy_to_user_state(x, &ue->state);
2364 ue->hard = (c->data.hard != 0) ? 1 : 0; 2379 ue->hard = (c->data.hard != 0) ? 1 : 0;
2365 2380
2366 if (xfrm_mark_put(skb, &x->mark)) 2381 err = xfrm_mark_put(skb, &x->mark);
2367 goto nla_put_failure; 2382 if (err)
2383 return err;
2368 2384
2369 return nlmsg_end(skb, nlh); 2385 return nlmsg_end(skb, nlh);
2370
2371nla_put_failure:
2372 return -EMSGSIZE;
2373} 2386}
2374 2387
2375static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) 2388static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2470,7 +2483,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2470 struct nlmsghdr *nlh; 2483 struct nlmsghdr *nlh;
2471 struct sk_buff *skb; 2484 struct sk_buff *skb;
2472 int len = xfrm_sa_len(x); 2485 int len = xfrm_sa_len(x);
2473 int headlen; 2486 int headlen, err;
2474 2487
2475 headlen = sizeof(*p); 2488 headlen = sizeof(*p);
2476 if (c->event == XFRM_MSG_DELSA) { 2489 if (c->event == XFRM_MSG_DELSA) {
@@ -2485,8 +2498,9 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2485 return -ENOMEM; 2498 return -ENOMEM;
2486 2499
2487 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2500 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2501 err = -EMSGSIZE;
2488 if (nlh == NULL) 2502 if (nlh == NULL)
2489 goto nla_put_failure; 2503 goto out_free_skb;
2490 2504
2491 p = nlmsg_data(nlh); 2505 p = nlmsg_data(nlh);
2492 if (c->event == XFRM_MSG_DELSA) { 2506 if (c->event == XFRM_MSG_DELSA) {
@@ -2499,24 +2513,23 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2499 id->proto = x->id.proto; 2513 id->proto = x->id.proto;
2500 2514
2501 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 2515 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2516 err = -EMSGSIZE;
2502 if (attr == NULL) 2517 if (attr == NULL)
2503 goto nla_put_failure; 2518 goto out_free_skb;
2504 2519
2505 p = nla_data(attr); 2520 p = nla_data(attr);
2506 } 2521 }
2507 2522 err = copy_to_user_state_extra(x, p, skb);
2508 if (copy_to_user_state_extra(x, p, skb)) 2523 if (err)
2509 goto nla_put_failure; 2524 goto out_free_skb;
2510 2525
2511 nlmsg_end(skb, nlh); 2526 nlmsg_end(skb, nlh);
2512 2527
2513 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2528 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2514 2529
2515nla_put_failure: 2530out_free_skb:
2516 /* Somebody screwed up with xfrm_sa_len! */
2517 WARN_ON(1);
2518 kfree_skb(skb); 2531 kfree_skb(skb);
2519 return -1; 2532 return err;
2520} 2533}
2521 2534
2522static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) 2535static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2557,9 +2570,10 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2557 struct xfrm_tmpl *xt, struct xfrm_policy *xp, 2570 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2558 int dir) 2571 int dir)
2559{ 2572{
2573 __u32 seq = xfrm_get_acqseq();
2560 struct xfrm_user_acquire *ua; 2574 struct xfrm_user_acquire *ua;
2561 struct nlmsghdr *nlh; 2575 struct nlmsghdr *nlh;
2562 __u32 seq = xfrm_get_acqseq(); 2576 int err;
2563 2577
2564 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 2578 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2565 if (nlh == NULL) 2579 if (nlh == NULL)
@@ -2575,21 +2589,19 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2575 ua->calgos = xt->calgos; 2589 ua->calgos = xt->calgos;
2576 ua->seq = x->km.seq = seq; 2590 ua->seq = x->km.seq = seq;
2577 2591
2578 if (copy_to_user_tmpl(xp, skb) < 0) 2592 err = copy_to_user_tmpl(xp, skb);
2579 goto nlmsg_failure; 2593 if (!err)
2580 if (copy_to_user_state_sec_ctx(x, skb)) 2594 err = copy_to_user_state_sec_ctx(x, skb);
2581 goto nlmsg_failure; 2595 if (!err)
2582 if (copy_to_user_policy_type(xp->type, skb) < 0) 2596 err = copy_to_user_policy_type(xp->type, skb);
2583 goto nlmsg_failure; 2597 if (!err)
2584 if (xfrm_mark_put(skb, &xp->mark)) 2598 err = xfrm_mark_put(skb, &xp->mark);
2585 goto nla_put_failure; 2599 if (err) {
2600 nlmsg_cancel(skb, nlh);
2601 return err;
2602 }
2586 2603
2587 return nlmsg_end(skb, nlh); 2604 return nlmsg_end(skb, nlh);
2588
2589nla_put_failure:
2590nlmsg_failure:
2591 nlmsg_cancel(skb, nlh);
2592 return -EMSGSIZE;
2593} 2605}
2594 2606
2595static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 2607static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2681,8 +2693,9 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2681 int dir, const struct km_event *c) 2693 int dir, const struct km_event *c)
2682{ 2694{
2683 struct xfrm_user_polexpire *upe; 2695 struct xfrm_user_polexpire *upe;
2684 struct nlmsghdr *nlh;
2685 int hard = c->data.hard; 2696 int hard = c->data.hard;
2697 struct nlmsghdr *nlh;
2698 int err;
2686 2699
2687 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 2700 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2688 if (nlh == NULL) 2701 if (nlh == NULL)
@@ -2690,22 +2703,20 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2690 2703
2691 upe = nlmsg_data(nlh); 2704 upe = nlmsg_data(nlh);
2692 copy_to_user_policy(xp, &upe->pol, dir); 2705 copy_to_user_policy(xp, &upe->pol, dir);
2693 if (copy_to_user_tmpl(xp, skb) < 0) 2706 err = copy_to_user_tmpl(xp, skb);
2694 goto nlmsg_failure; 2707 if (!err)
2695 if (copy_to_user_sec_ctx(xp, skb)) 2708 err = copy_to_user_sec_ctx(xp, skb);
2696 goto nlmsg_failure; 2709 if (!err)
2697 if (copy_to_user_policy_type(xp->type, skb) < 0) 2710 err = copy_to_user_policy_type(xp->type, skb);
2698 goto nlmsg_failure; 2711 if (!err)
2699 if (xfrm_mark_put(skb, &xp->mark)) 2712 err = xfrm_mark_put(skb, &xp->mark);
2700 goto nla_put_failure; 2713 if (err) {
2714 nlmsg_cancel(skb, nlh);
2715 return err;
2716 }
2701 upe->hard = !!hard; 2717 upe->hard = !!hard;
2702 2718
2703 return nlmsg_end(skb, nlh); 2719 return nlmsg_end(skb, nlh);
2704
2705nla_put_failure:
2706nlmsg_failure:
2707 nlmsg_cancel(skb, nlh);
2708 return -EMSGSIZE;
2709} 2720}
2710 2721
2711static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2722static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2725,13 +2736,13 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
2725 2736
2726static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) 2737static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2727{ 2738{
2739 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2728 struct net *net = xp_net(xp); 2740 struct net *net = xp_net(xp);
2729 struct xfrm_userpolicy_info *p; 2741 struct xfrm_userpolicy_info *p;
2730 struct xfrm_userpolicy_id *id; 2742 struct xfrm_userpolicy_id *id;
2731 struct nlmsghdr *nlh; 2743 struct nlmsghdr *nlh;
2732 struct sk_buff *skb; 2744 struct sk_buff *skb;
2733 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2745 int headlen, err;
2734 int headlen;
2735 2746
2736 headlen = sizeof(*p); 2747 headlen = sizeof(*p);
2737 if (c->event == XFRM_MSG_DELPOLICY) { 2748 if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2747,8 +2758,9 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
2747 return -ENOMEM; 2758 return -ENOMEM;
2748 2759
2749 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2760 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2761 err = -EMSGSIZE;
2750 if (nlh == NULL) 2762 if (nlh == NULL)
2751 goto nlmsg_failure; 2763 goto out_free_skb;
2752 2764
2753 p = nlmsg_data(nlh); 2765 p = nlmsg_data(nlh);
2754 if (c->event == XFRM_MSG_DELPOLICY) { 2766 if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2763,29 +2775,29 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
2763 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 2775 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2764 2776
2765 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 2777 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2778 err = -EMSGSIZE;
2766 if (attr == NULL) 2779 if (attr == NULL)
2767 goto nlmsg_failure; 2780 goto out_free_skb;
2768 2781
2769 p = nla_data(attr); 2782 p = nla_data(attr);
2770 } 2783 }
2771 2784
2772 copy_to_user_policy(xp, p, dir); 2785 copy_to_user_policy(xp, p, dir);
2773 if (copy_to_user_tmpl(xp, skb) < 0) 2786 err = copy_to_user_tmpl(xp, skb);
2774 goto nlmsg_failure; 2787 if (!err)
2775 if (copy_to_user_policy_type(xp->type, skb) < 0) 2788 err = copy_to_user_policy_type(xp->type, skb);
2776 goto nlmsg_failure; 2789 if (!err)
2777 2790 err = xfrm_mark_put(skb, &xp->mark);
2778 if (xfrm_mark_put(skb, &xp->mark)) 2791 if (err)
2779 goto nla_put_failure; 2792 goto out_free_skb;
2780 2793
2781 nlmsg_end(skb, nlh); 2794 nlmsg_end(skb, nlh);
2782 2795
2783 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2796 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2784 2797
2785nla_put_failure: 2798out_free_skb:
2786nlmsg_failure:
2787 kfree_skb(skb); 2799 kfree_skb(skb);
2788 return -1; 2800 return err;
2789} 2801}
2790 2802
2791static int xfrm_notify_policy_flush(const struct km_event *c) 2803static int xfrm_notify_policy_flush(const struct km_event *c)
@@ -2793,24 +2805,27 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
2793 struct net *net = c->net; 2805 struct net *net = c->net;
2794 struct nlmsghdr *nlh; 2806 struct nlmsghdr *nlh;
2795 struct sk_buff *skb; 2807 struct sk_buff *skb;
2808 int err;
2796 2809
2797 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); 2810 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2798 if (skb == NULL) 2811 if (skb == NULL)
2799 return -ENOMEM; 2812 return -ENOMEM;
2800 2813
2801 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 2814 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2815 err = -EMSGSIZE;
2802 if (nlh == NULL) 2816 if (nlh == NULL)
2803 goto nlmsg_failure; 2817 goto out_free_skb;
2804 if (copy_to_user_policy_type(c->data.type, skb) < 0) 2818 err = copy_to_user_policy_type(c->data.type, skb);
2805 goto nlmsg_failure; 2819 if (err)
2820 goto out_free_skb;
2806 2821
2807 nlmsg_end(skb, nlh); 2822 nlmsg_end(skb, nlh);
2808 2823
2809 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2824 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2810 2825
2811nlmsg_failure: 2826out_free_skb:
2812 kfree_skb(skb); 2827 kfree_skb(skb);
2813 return -1; 2828 return err;
2814} 2829}
2815 2830
2816static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2831static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2853,15 +2868,14 @@ static int build_report(struct sk_buff *skb, u8 proto,
2853 ur->proto = proto; 2868 ur->proto = proto;
2854 memcpy(&ur->sel, sel, sizeof(ur->sel)); 2869 memcpy(&ur->sel, sel, sizeof(ur->sel));
2855 2870
2856 if (addr && 2871 if (addr) {
2857 nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr)) 2872 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
2858 goto nla_put_failure; 2873 if (err) {
2859 2874 nlmsg_cancel(skb, nlh);
2875 return err;
2876 }
2877 }
2860 return nlmsg_end(skb, nlh); 2878 return nlmsg_end(skb, nlh);
2861
2862nla_put_failure:
2863 nlmsg_cancel(skb, nlh);
2864 return -EMSGSIZE;
2865} 2879}
2866 2880
2867static int xfrm_send_report(struct net *net, u8 proto, 2881static int xfrm_send_report(struct net *net, u8 proto,
@@ -2945,9 +2959,12 @@ static struct xfrm_mgr netlink_mgr = {
2945static int __net_init xfrm_user_net_init(struct net *net) 2959static int __net_init xfrm_user_net_init(struct net *net)
2946{ 2960{
2947 struct sock *nlsk; 2961 struct sock *nlsk;
2962 struct netlink_kernel_cfg cfg = {
2963 .groups = XFRMNLGRP_MAX,
2964 .input = xfrm_netlink_rcv,
2965 };
2948 2966
2949 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX, 2967 nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
2950 xfrm_netlink_rcv, NULL, THIS_MODULE);
2951 if (nlsk == NULL) 2968 if (nlsk == NULL)
2952 return -ENOMEM; 2969 return -ENOMEM;
2953 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 2970 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
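
[Editor's note] The final hunk switches xfrm_user_net_init() to the configuration-struct form of netlink_kernel_create(): the group count and receive callback move into a struct netlink_kernel_cfg filled with designated initializers, so adding a new option no longer widens the function signature for every caller. A userspace model of that config-struct pattern; the names below are stand-ins, not the netlink API:

#include <assert.h>
#include <stddef.h>

struct sock { int unit; };
typedef void (*input_fn)(void);

/* Stand-in for struct netlink_kernel_cfg: options bundled in one struct. */
struct netlink_cfg {
        unsigned int groups;
        input_fn input;
};

static struct sock the_sock;

static struct sock *create_socket(int unit, const struct netlink_cfg *cfg)
{
        if (!cfg || cfg->groups == 0)
                return NULL;              /* at least one multicast group required */
        the_sock.unit = unit;
        return &the_sock;
}

int main(void)
{
        /* designated initializers: unset fields (.input here) default to NULL */
        struct netlink_cfg cfg = { .groups = 32 };
        struct sock *nlsk = create_socket(16, &cfg);

        assert(nlsk != NULL && nlsk->unit == 16);
        return 0;
}
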